 bson/bson-inl.h | 100
 bson/bson.h | 26
 bson/bson_db.h | 20
 bson/bsondemo/bsondemo.cpp (100755 => 100644) | 21
 bson/bsonelement.h | 85
 bson/bsonmisc.h | 70
 bson/bsonobj.h | 120
 bson/bsonobjbuilder.h | 188
 bson/bsonobjiterator.h | 58
 bson/bsontypes.h | 112
 bson/inline_decls.h | 2
 bson/oid.cpp | 26
 bson/oid.h | 26
 bson/ordering.h | 10
 bson/stringdata.h | 12
 bson/util/atomic_int.h | 32
 bson/util/builder.h | 55
 bson/util/misc.h | 4
 client/clientOnly.cpp | 14
 client/connpool.cpp | 104
 client/connpool.h | 66
 client/constants.h | 20
 client/dbclient.cpp | 277
 client/dbclient.h | 254
 client/dbclient_rs.cpp | 184
 client/dbclient_rs.h | 70
 client/dbclientcursor.cpp | 63
 client/dbclientcursor.h | 104
 client/dbclientmockcursor.h | 2
 client/distlock.cpp | 118
 client/distlock.h | 18
 client/distlock_test.cpp | 26
 client/examples/authTest.cpp | 7
 client/examples/clientTest.cpp | 28
 client/examples/first.cpp | 11
 client/examples/httpClientTest.cpp | 6
 client/examples/rs.cpp | 16
 client/examples/second.cpp | 2
 client/examples/tail.cpp | 40
 client/examples/tutorial.cpp | 64
 client/examples/whereExample.cpp | 8
 client/gridfs.cpp | 60
 client/gridfs.h | 22
 client/model.cpp | 44
 client/model.h | 8
 client/mongo_client_lib.cpp (100755 => 100644) | 10
 client/parallel.cpp | 241
 client/parallel.h | 84
 client/simple_client_demo.cpp (100755 => 100644) | 22
 client/syncclusterconnection.cpp | 158
 client/syncclusterconnection.h | 36
 db/background.h | 12
 db/btree.cpp | 388
 db/btree.h | 107
 db/btreecursor.cpp | 96
 db/cap.cpp | 59
 db/client.cpp | 194
 db/client.h | 64
 db/clientcursor.cpp | 182
 db/clientcursor.h | 97
 db/cloner.cpp | 152
 db/cmdline.cpp | 132
 db/cmdline.h | 49
 db/commands.cpp | 24
 db/commands.h | 18
 db/commands/distinct.cpp | 48
 db/commands/group.cpp | 80
 db/commands/isself.cpp | 86
 db/commands/mr.cpp | 468
 db/commands/mr.h | 102
 db/common.cpp | 2
 db/compact.cpp | 44
 db/concurrency.h | 51
 db/curop-inl.h | 4
 db/curop.h | 106
 db/cursor.cpp | 18
 db/cursor.h | 28
 db/database.cpp | 88
 db/database.h | 32
 db/db.cpp | 367
 db/db.h | 70
 db/dbcommands.cpp | 459
 db/dbcommands_admin.cpp | 124
 db/dbcommands_generic.cpp | 64
 db/dbeval.cpp | 14
 db/dbhelpers.cpp | 113
 db/dbhelpers.h | 20
 db/dbmessage.h | 26
 db/dbwebserver.cpp | 232
 db/dbwebserver.h | 20
 db/diskloc.h | 16
 db/driverHelpers.cpp | 12
 db/dur.cpp | 168
 db/dur.h | 54
 db/dur_commitjob.cpp | 21
 db/dur_commitjob.h | 52
 db/dur_journal.cpp | 107
 db/dur_journal.h | 12
 db/dur_journalformat.h | 18
 db/dur_journalimpl.h | 4
 db/dur_preplogbuffer.cpp | 28
 db/dur_recover.cpp | 166
 db/dur_recover.h | 6
 db/dur_stats.h | 90
 db/dur_writetodatafiles.cpp | 32
 db/durop.cpp | 36
 db/durop.h | 18
 db/extsort.cpp | 138
 db/extsort.h | 46
 db/filever.h | 8
 db/geo/2d.cpp | 710
 db/geo/core.h | 144
 db/geo/haystack.cpp | 142
 db/helpers/dblogger.h | 4
 db/index.cpp | 77
 db/index.h | 32
 db/indexkey.cpp | 103
 db/indexkey.h | 46
 db/instance.cpp | 253
 db/instance.h | 32
 db/introspect.cpp | 3
 db/jsobj.cpp | 304
 db/jsobj.h | 2
 db/jsobjmanipulator.h | 24
 db/json.cpp | 64
 db/lasterror.cpp | 87
 db/lasterror.h | 31
 db/matcher.cpp | 463
 db/matcher.h | 60
 db/matcher_covered.cpp | 36
 db/minilex.h | 184
 db/module.cpp | 16
 db/module.h | 10
 db/modules/mms.cpp | 88
 db/mongommf.cpp | 49
 db/mongommf.h | 22
 db/mongomutex.h | 72
 db/namespace-inl.h | 22
 db/namespace.cpp | 252
 db/namespace.h | 118
 db/nonce.cpp | 38
 db/nonce.h | 22
 db/oplog.cpp | 126
 db/oplog.h | 111
 db/oplogreader.h | 40
 db/pdfile.cpp | 341
 db/pdfile.h | 46
 db/projection.cpp | 140
 db/projection.h | 32
 db/query.cpp | 286
 db/query.h | 90
 db/queryoptimizer.cpp | 311
 db/queryoptimizer.h | 106
 db/queryutil.cpp | 477
 db/queryutil.h | 76
 db/repl.cpp | 393
 db/repl.h | 57
 db/repl/connections.h | 30
 db/repl/consensus.cpp | 122
 db/repl/health.cpp | 120
 db/repl/health.h | 8
 db/repl/heartbeat.cpp | 44
 db/repl/manager.cpp | 67
 db/repl/replset_commands.cpp | 65
 db/repl/rs.cpp | 167
 db/repl/rs.h | 87
 db/repl/rs_config.cpp | 113
 db/repl/rs_config.h | 12
 db/repl/rs_exception.h (100755 => 100644) | 18
 db/repl/rs_initialsync.cpp | 78
 db/repl/rs_initiate.cpp | 48
 db/repl/rs_member.h | 14
 db/repl/rs_optime.h | 6
 db/repl/rs_rollback.cpp | 654
 db/repl/rs_sync.cpp | 105
 db/repl_block.cpp | 68
 db/repl_block.h | 4
 db/replpair.h | 25
 db/resource.h (100755 => 100644) | 32
 db/restapi.cpp | 44
 db/restapi.h | 2
 db/scanandorder.h | 17
 db/security.cpp | 14
 db/security.h | 24
 db/security_commands.cpp | 58
 db/security_key.cpp | 16
 db/stats/counters.cpp | 84
 db/stats/counters.h | 54
 db/stats/fine_clock.h | 13
 db/stats/service_stats.cpp | 6
 db/stats/snapshots.cpp | 111
 db/stats/snapshots.h | 18
 db/stats/top.cpp | 56
 db/stats/top.h | 43
 db/taskqueue.h | 28
 db/update.cpp | 423
 db/update.h | 224
 dbtests/background_job_test.cpp | 20
 dbtests/balancer_policy_tests.cpp | 30
 dbtests/basictests.cpp | 196
 dbtests/btreetests.cpp | 308
 dbtests/clienttests.cpp | 60
 dbtests/commandtests.cpp | 18
 dbtests/cursortests.cpp | 33
 dbtests/d_chunk_manager_tests.cpp | 140
 dbtests/directclienttests.cpp | 8
 dbtests/framework.cpp | 126
 dbtests/framework.h | 52
 dbtests/histogram_test.cpp | 20
 dbtests/jsobjtests.cpp | 300
 dbtests/jsontests.cpp | 74
 dbtests/jstests.cpp | 314
 dbtests/matchertests.cpp | 48
 dbtests/mmaptests.cpp (100755 => 100644) | 48
 dbtests/mockdbclient.h | 4
 dbtests/namespacetests.cpp | 46
 dbtests/pairingtests.cpp | 24
 dbtests/pdfiletests.cpp | 40
 dbtests/perf/btreeperf.cpp | 65
 dbtests/perf/perftest.cpp | 86
 dbtests/perftests.cpp | 68
 dbtests/queryoptimizertests.cpp | 328
 dbtests/querytests.cpp | 207
 dbtests/repltests.cpp | 335
 dbtests/sharding.cpp | 12
 dbtests/socktests.cpp | 10
 dbtests/spin_lock_test.cpp | 68
 dbtests/threadedtests.cpp | 72
 dbtests/updatetests.cpp | 166
 pch.cpp | 2
 pch.h | 12
 s/balance.cpp | 98
 s/balance.h | 18
 s/balancer_policy.cpp | 74
 s/balancer_policy.h | 14
 s/chunk.cpp | 455
 s/chunk.h | 78
 s/client.cpp | 128
 s/client.h | 36
 s/commands_admin.cpp | 293
 s/commands_public.cpp | 392
 s/config.cpp | 286
 s/config.h | 76
 s/config_migrate.cpp | 76
 s/cursors.cpp | 118
 s/cursors.h | 26
 s/d_background_splitter.cpp | 10
 s/d_background_splitter.h | 6
 s/d_chunk_manager.cpp | 60
 s/d_chunk_manager.h | 14
 s/d_logic.cpp | 28
 s/d_logic.h | 60
 s/d_migrate.cpp | 568
 s/d_split.cpp | 211
 s/d_state.cpp | 259
 s/d_writeback.cpp | 42
 s/d_writeback.h | 2
 s/grid.cpp | 153
 s/grid.h | 12
 s/request.cpp | 64
 s/request.h | 24
 s/s_only.cpp | 37
 s/server.cpp | 130
 s/server.h | 4
 s/shard.cpp | 96
 s/shard.h | 58
 s/shard_version.cpp | 56
 s/shard_version.h | 2
 s/shardconnection.cpp | 104
 s/shardkey.cpp | 121
 s/shardkey.h | 30
 s/stats.cpp | 2
 s/stats.h | 2
 s/strategy.cpp | 30
 s/strategy.h | 10
 s/strategy_shard.cpp | 171
 s/strategy_single.cpp | 112
 s/util.h | 62
 s/writeback_listener.cpp | 66
 s/writeback_listener.h | 6
 scripting/bench.cpp | 50
 scripting/engine.cpp | 214
 scripting/engine.h | 68
 scripting/engine_java.cpp | 77
 scripting/engine_java.h | 16
 scripting/engine_none.cpp | 2
 scripting/engine_spidermonkey.cpp | 529
 scripting/engine_spidermonkey.h | 16
 scripting/engine_v8.cpp | 205
 scripting/engine_v8.h | 38
 scripting/sm_db.cpp | 586
 scripting/utils.cpp | 14
 scripting/v8_db.cpp | 315
 scripting/v8_db.h | 36
 scripting/v8_utils.cpp | 118
 scripting/v8_utils.h | 4
 scripting/v8_wrapper.cpp | 275
 scripting/v8_wrapper.h | 4
 shell/dbshell.cpp | 198
 shell/mongo_vstudio.cpp | 6756
 shell/shell_utils.cpp | 313
 shell/utils.h | 6
 tools/bridge.cpp | 23
 tools/bsondump.cpp | 50
 tools/dump.cpp | 44
 tools/export.cpp | 43
 tools/files.cpp | 50
 tools/import.cpp | 176
 tools/restore.cpp | 60
 tools/sniffer.cpp | 194
 tools/stat.cpp | 336
 tools/tool.cpp | 167
 tools/tool.h | 48
 util/admin_access.h | 4
 util/alignedbuilder.cpp | 4
 util/alignedbuilder.h | 12
 util/allocator.h | 8
 util/array.h | 52
 util/assert_util.cpp | 30
 util/assert_util.h | 78
 util/background.cpp | 28
 util/background.h | 24
 util/base64.cpp | 40
 util/base64.h | 25
 util/bufreader.h | 20
 util/concurrency/list.h | 96
 util/concurrency/msg.h | 6
 util/concurrency/mutex.h | 58
 util/concurrency/mvar.h | 28
 util/concurrency/rwlock.h | 86
 util/concurrency/spin_lock.cpp | 12
 util/concurrency/spin_lock.h | 2
 util/concurrency/synchronization.cpp | 8
 util/concurrency/synchronization.h | 6
 util/concurrency/task.cpp | 51
 util/concurrency/task.h | 10
 util/concurrency/thread_pool.cpp | 45
 util/concurrency/thread_pool.h | 108
 util/concurrency/value.h | 24
 util/concurrency/vars.cpp | 22
 util/debug_util.cpp | 9
 util/debug_util.h | 16
 util/embedded_builder.h | 12
 util/file.h | 220
 util/file_allocator.cpp | 76
 util/file_allocator.h | 24
 util/goodies.h | 149
 util/hashtab.h | 24
 util/heapcheck.h | 4
 util/hex.h | 10
 util/histogram.cpp | 40
 util/histogram.h | 8
 util/hostandport.h | 40
 util/httpclient.cpp | 52
 util/httpclient.h | 22
 util/log.cpp | 41
 util/log.h | 101
 util/logfile.cpp | 50
 util/logfile.h | 8
 util/lruishmap.h | 4
 util/md5.h | 16
 util/md5main.cpp | 104
 util/message.cpp | 176
 util/message.h | 156
 util/message_server.h | 10
 util/message_server_asio.cpp | 112
 util/message_server_port.cpp | 46
 util/miniwebserver.cpp | 32
 util/miniwebserver.h | 2
 util/mmap.cpp | 48
 util/mmap.h | 34
 util/mmap_mm.cpp | 4
 util/mmap_posix.cpp | 67
 util/mmap_win.cpp | 51
 util/mongoutils/checksum.h | 2
 util/mongoutils/hash.h | 2
 util/mongoutils/html.h | 44
 util/mongoutils/str.h | 75
 util/mongoutils/test.cpp (100755 => 100644) | 90
 util/moveablebuffer.h | 8
 util/ntservice.cpp | 359
 util/ntservice.h | 16
 util/optime.h | 26
 util/password.h | 4
 util/paths.h | 22
 util/processinfo.cpp | 18
 util/processinfo.h | 10
 util/processinfo_darwin.cpp | 48
 util/processinfo_linux2.cpp | 192
 util/processinfo_none.cpp | 24
 util/processinfo_win32.cpp | 28
 util/queue.h | 36
 util/ramlog.h | 24
 util/signal_handlers.cpp | 116
 util/signal_handlers.h | 2
 util/sock.cpp | 36
 util/sock.h | 84
 util/stringutils.cpp | 8
 util/stringutils.h | 10
 util/text.cpp | 92
 util/text.h | 45
 util/time_support.h | 12
 util/timer.h | 2
 util/unittest.h | 2
 util/util.cpp | 38
 util/version.cpp | 27
 util/version.h | 2
 407 files changed, 20706 insertions(+), 20294 deletions(-)
diff --git a/bson/bson-inl.h b/bson/bson-inl.h
index 5ed7f0d6d03..cf52747b886 100644
--- a/bson/bson-inl.h
+++ b/bson/bson-inl.h
@@ -24,7 +24,7 @@
namespace mongo {
- inline BSONObjIterator BSONObj::begin() {
+ inline BSONObjIterator BSONObj::begin() {
return BSONObjIterator(*this);
}
@@ -48,7 +48,7 @@ namespace mongo {
return BSONObj( value() + 4 + 4 + strSizeWNull );
}
- inline NOINLINE_DECL void BSONObj::_assertInvalid() const {
+ inline NOINLINE_DECL void BSONObj::_assertInvalid() const {
StringBuilder ss;
int os = objsize();
ss << "Invalid BSONObj size: " << os << " (0x" << toHex( &os, 4 ) << ')';
@@ -59,8 +59,8 @@ namespace mongo {
catch ( ... ) { }
massert( 10334 , ss.str() , 0 );
}
-
- /* the idea with NOINLINE_DECL here is to keep this from inlining in the
+
+ /* the idea with NOINLINE_DECL here is to keep this from inlining in the
getOwned() method. the presumption being that is better.
*/
inline NOINLINE_DECL BSONObj BSONObj::copy() const {
@@ -121,14 +121,14 @@ namespace mongo {
return *this;
}
- inline bool BSONObj::isValid(){
+ inline bool BSONObj::isValid() {
int x = objsize();
return x > 0 && x <= BSONObjMaxInternalSize;
}
- inline bool BSONObj::getObjectID(BSONElement& e) const {
+ inline bool BSONObj::getObjectID(BSONElement& e) const {
BSONElement f = getField("_id");
- if( !f.eoo() ) {
+ if( !f.eoo() ) {
e = f;
return true;
}
@@ -139,21 +139,21 @@ namespace mongo {
_fieldName = 0;
_builder = builder;
}
-
- template<class T>
- inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<( T value ) {
+
+ template<class T>
+ inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<( T value ) {
_builder->append(_fieldName, value);
_fieldName = 0;
return *_builder;
}
- inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<( const BSONElement& e ) {
+ inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<( const BSONElement& e ) {
_builder->appendAs( e , _fieldName );
_fieldName = 0;
return *_builder;
}
- inline Labeler BSONObjBuilderValueStream::operator<<( const Labeler::Label &l ) {
+ inline Labeler BSONObjBuilderValueStream::operator<<( const Labeler::Label &l ) {
return Labeler( l, this );
}
@@ -163,29 +163,29 @@ namespace mongo {
}
_subobj.reset();
_fieldName = nextFieldName;
- }
+ }
inline BSONObjBuilder *BSONObjBuilderValueStream::subobj() {
if ( !haveSubobj() )
_subobj.reset( new BSONObjBuilder() );
return _subobj.get();
}
-
+
template<class T> inline
BSONObjBuilder& Labeler::operator<<( T value ) {
s_->subobj()->append( l_.l_, value );
return *s_->_builder;
- }
+ }
inline
BSONObjBuilder& Labeler::operator<<( const BSONElement& e ) {
s_->subobj()->appendAs( e, l_.l_ );
return *s_->_builder;
- }
+ }
// {a: {b:1}} -> {a.b:1}
void nested2dotted(BSONObjBuilder& b, const BSONObj& obj, const string& base="");
- inline BSONObj nested2dotted(const BSONObj& obj){
+ inline BSONObj nested2dotted(const BSONObj& obj) {
BSONObjBuilder b;
nested2dotted(b, obj);
return b.obj();
@@ -193,7 +193,7 @@ namespace mongo {
// {a.b:1} -> {a: {b:1}}
void dotted2nested(BSONObjBuilder& b, const BSONObj& obj);
- inline BSONObj dotted2nested(const BSONObj& obj){
+ inline BSONObj dotted2nested(const BSONObj& obj) {
BSONObjBuilder b;
dotted2nested(b, obj);
return b.obj();
@@ -204,17 +204,17 @@ namespace mongo {
const char * e = _b.buf() + _b.len();
return BSONObjIterator( s , e );
}
-
+
/* WARNING: nested/dotted conversions are not 100% reversible
* nested2dotted(dotted2nested({a.b: {c:1}})) -> {a.b.c: 1}
* also, dotted2nested ignores order
*/
typedef map<string, BSONElement> BSONMap;
- inline BSONMap bson2map(const BSONObj& obj){
+ inline BSONMap bson2map(const BSONObj& obj) {
BSONMap m;
BSONObjIterator it(obj);
- while (it.more()){
+ while (it.more()) {
BSONElement e = it.next();
m[e.fieldName()] = e;
}
@@ -228,7 +228,7 @@ namespace mongo {
};
typedef set<BSONElement, BSONElementFieldNameCmp> BSONSortedElements;
- inline BSONSortedElements bson2set( const BSONObj& obj ){
+ inline BSONSortedElements bson2set( const BSONObj& obj ) {
BSONSortedElements s;
BSONObjIterator it(obj);
while ( it.more() )
@@ -243,7 +243,7 @@ namespace mongo {
return s.str();
}
inline void BSONObj::toString(StringBuilder& s, bool isArray, bool full ) const {
- if ( isEmpty() ){
+ if ( isEmpty() ) {
s << "{}";
return;
}
@@ -258,7 +258,7 @@ namespace mongo {
massert( 10329 , "Element too large", e.size() < ( 1 << 30 ) );
int offset = (int) (e.rawdata() - this->objdata());
massert( 10330 , "Element extends past end of object",
- e.size() + offset <= this->objsize() );
+ e.size() + offset <= this->objsize() );
e.validate();
bool end = ( e.size() + offset == this->objsize() );
if ( e.eoo() ) {
@@ -278,7 +278,7 @@ namespace mongo {
inline void BSONElement::validate() const {
const BSONType t = type();
-
+
switch( t ) {
case DBRef:
case Code:
@@ -290,7 +290,7 @@ namespace mongo {
return;
StringBuilder buf;
buf << "Invalid dbref/code/string/symbol size: " << x;
- if( lenOk )
+ if( lenOk )
buf << " strnlen:" << mongo::strnlen( valuestr() , x );
msgasserted( 10321 , buf.str() );
break;
@@ -368,8 +368,7 @@ namespace mongo {
massert( 10317 , "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
x = valuestrsize() + 4 + 1/*subtype*/;
break;
- case RegEx:
- {
+ case RegEx: {
const char *p = value();
size_t len1 = ( maxLen == -1 ) ? strlen( p ) : mongo::strnlen( p, remain );
//massert( 10318 , "Invalid regex string", len1 != -1 ); // ERH - 4/28/10 - don't think this does anything
@@ -413,13 +412,12 @@ namespace mongo {
case mongo::Date:
s << "new Date(" << date() << ')';
break;
- case RegEx:
- {
- s << "/" << regex() << '/';
- const char *p = regexFlags();
- if ( p ) s << p;
- }
- break;
+ case RegEx: {
+ s << "/" << regex() << '/';
+ const char *p = regexFlags();
+ if ( p ) s << p;
+ }
+ break;
case NumberDouble:
s.appendDoubleNice( number() );
break;
@@ -452,13 +450,14 @@ namespace mongo {
break;
case CodeWScope:
s << "CodeWScope( "
- << codeWScopeCode() << ", " << codeWScopeObject().toString(false, full) << ")";
+ << codeWScopeCode() << ", " << codeWScopeObject().toString(false, full) << ")";
break;
case Code:
if ( !full && valuestrsize() > 80 ) {
s.write(valuestr(), 70);
s << "...";
- } else {
+ }
+ else {
s.write(valuestr(), valuestrsize()-1);
}
break;
@@ -468,7 +467,8 @@ namespace mongo {
if ( !full && valuestrsize() > 80 ) {
s.write(valuestr(), 70);
s << "...\"";
- } else {
+ }
+ else {
s.write(valuestr(), valuestrsize()-1);
s << '"';
}
@@ -486,7 +486,7 @@ namespace mongo {
break;
case BinData:
s << "BinData";
- if (full){
+ if (full) {
int len;
const char* data = binDataClean(len);
s << '(' << binDataType() << ", " << toHex(data, len) << ')';
@@ -537,8 +537,8 @@ namespace mongo {
}
inline BSONObj::BSONObj() {
- /* little endian ordering here, but perhaps that is ok regardless as BSON is spec'd
- to be little endian external to the system. (i.e. the rest of the implementation of bson,
+ /* little endian ordering here, but perhaps that is ok regardless as BSON is spec'd
+ to be little endian external to the system. (i.e. the rest of the implementation of bson,
not this part, fails to support big endian)
*/
static char p[] = { /*size*/5, 0, 0, 0, /*eoo*/0 };
@@ -547,7 +547,7 @@ namespace mongo {
inline BSONObj BSONElement::Obj() const { return embeddedObjectUserCheck(); }
- inline BSONElement BSONElement::operator[] (const string& field) const {
+ inline BSONElement BSONElement::operator[] (const string& field) const {
BSONObj o = Obj();
return o[field];
}
@@ -558,14 +558,14 @@ namespace mongo {
v.push_back(i.next());
}
- inline void BSONObj::elems(list<BSONElement> &v) const {
+ inline void BSONObj::elems(list<BSONElement> &v) const {
BSONObjIterator i(*this);
while( i.more() )
v.push_back(i.next());
}
template <class T>
- void BSONObj::Vals(vector<T>& v) const {
+ void BSONObj::Vals(vector<T>& v) const {
BSONObjIterator i(*this);
while( i.more() ) {
T t;
@@ -574,7 +574,7 @@ namespace mongo {
}
}
template <class T>
- void BSONObj::Vals(list<T>& v) const {
+ void BSONObj::Vals(list<T>& v) const {
BSONObjIterator i(*this);
while( i.more() ) {
T t;
@@ -584,25 +584,27 @@ namespace mongo {
}
template <class T>
- void BSONObj::vals(vector<T>& v) const {
+ void BSONObj::vals(vector<T>& v) const {
BSONObjIterator i(*this);
while( i.more() ) {
try {
T t;
i.next().Val(t);
v.push_back(t);
- } catch(...) { }
+ }
+ catch(...) { }
}
}
template <class T>
- void BSONObj::vals(list<T>& v) const {
+ void BSONObj::vals(list<T>& v) const {
BSONObjIterator i(*this);
while( i.more() ) {
try {
T t;
i.next().Val(t);
v.push_back(t);
- } catch(...) { }
+ }
+ catch(...) { }
}
}
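
The helpers at the end of this hunk convert between nested and dotted field layouts. A minimal sketch of the round trip, assuming a build that links the out-of-line nested2dotted()/dotted2nested() implementations (only their declarations live in this header, so the standalone bson headers alone will not link):

    #include <iostream>
    #include "db/jsobj.h"   // assumption: full-tree include that carries these helpers

    using namespace mongo;

    int main() {
        // {a: {b: 1}} -> {"a.b": 1}
        BSONObj nested = BSON( "a" << BSON( "b" << 1 ) );
        BSONObj dotted = nested2dotted( nested );
        std::cout << dotted.toString() << std::endl;

        // the header warns this is not 100% reversible and that order is ignored
        BSONObj back = dotted2nested( dotted );
        std::cout << back.toString() << std::endl;
        return 0;
    }
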
diff --git a/bson/bson.h b/bson/bson.h
index 98b1f145bb3..ba1b7513f5b 100644
--- a/bson/bson.h
+++ b/bson/bson.h
@@ -1,10 +1,10 @@
-/* NOTE: Standalone bson header for when not using MongoDB.
+/* NOTE: Standalone bson header for when not using MongoDB.
See also: bsondemo.
MongoDB includes ../db/jsobj.h instead. This file, however, pulls in much less code / dependencies.
*/
-/** @file bson.h
+/** @file bson.h
BSON classes
*/
@@ -47,15 +47,15 @@
#include <boost/utility.hpp>
#include "util/builder.h"
-namespace bson {
+namespace bson {
using std::string;
using std::stringstream;
- class assertion : public std::exception {
+ class assertion : public std::exception {
public:
assertion( unsigned u , const string& s )
- : id( u ) , msg( s ){
+ : id( u ) , msg( s ) {
mongo::StringBuilder ss;
ss << "BsonAssertion id: " << u << " " << s;
full = ss.str();
@@ -64,7 +64,7 @@ namespace bson {
virtual ~assertion() throw() {}
virtual const char* what() const throw() { return full.c_str(); }
-
+
unsigned id;
string msg;
string full;
@@ -72,9 +72,9 @@ namespace bson {
}
namespace mongo {
-#if !defined(assert)
+#if !defined(assert)
inline void assert(bool expr) {
- if(!expr) {
+ if(!expr) {
throw bson::assertion( 0 , "assertion failure in bson library" );
}
}
@@ -88,12 +88,12 @@ namespace mongo {
if( !expr )
uasserted( msgid , msg );
}
- inline void msgasserted(int msgid, const char *msg) {
+ inline void msgasserted(int msgid, const char *msg) {
throw bson::assertion( msgid , msg );
}
inline void msgasserted(int msgid, const std::string &msg) { msgasserted(msgid, msg.c_str()); }
- inline void massert(unsigned msgid, std::string msg, bool expr) {
- if(!expr) {
+ inline void massert(unsigned msgid, std::string msg, bool expr) {
+ if(!expr) {
std::cout << "assertion failure in bson library: " << msgid << ' ' << msg << std::endl;
throw bson::assertion( msgid , msg );
}
@@ -110,13 +110,13 @@ namespace mongo {
#include "../bson/bsonobjiterator.h"
#include "../bson/bson-inl.h"
-namespace mongo {
+namespace mongo {
inline unsigned getRandomNumber() {
#if defined(_WIN32)
return rand();
#else
- return random();
+ return random();
#endif
}
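
The hunk above wires the standalone header's assertion helpers (uassert, massert, msgasserted) to throw bson::assertion instead of pulling in the server's assert machinery. A small sketch of catching it, assuming the checked accessors reach uasserted() the way the surrounding definitions suggest:

    #include <iostream>
    #include <string>
    #include "bson/bson.h"   // standalone header, as in this tree

    int main() {
        mongo::BSONObj o = BSON( "x" << 1 );
        try {
            // String() is a checked accessor; on a non-string field the
            // standalone assertion path throws bson::assertion
            std::string s = o["x"].String();
            std::cout << s << std::endl;
        }
        catch ( const bson::assertion& e ) {
            std::cout << "caught: " << e.what() << std::endl;   // "BsonAssertion id: ..."
        }
        return 0;
    }
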
diff --git a/bson/bson_db.h b/bson/bson_db.h
index 8011a50cd42..71f92aae58c 100644
--- a/bson/bson_db.h
+++ b/bson/bson_db.h
@@ -1,10 +1,10 @@
-/** @file bson_db.h
+/** @file bson_db.h
- This file contains the implementation of BSON-related methods that are required
+ This file contains the implementation of BSON-related methods that are required
by the MongoDB database server.
- Normally, for standalone BSON usage, you do not want this file - it will tend to
- pull in some other files from the MongoDB project. Thus, bson.h (the main file
+ Normally, for standalone BSON usage, you do not want this file - it will tend to
+ pull in some other files from the MongoDB project. Thus, bson.h (the main file
one would use) does not include this file.
*/
@@ -35,10 +35,10 @@ namespace mongo {
        Append a timestamp element to the object being built.
@param time - in millis (but stored in seconds)
*/
- inline BSONObjBuilder& BSONObjBuilder::appendTimestamp( const StringData& fieldName , unsigned long long time , unsigned int inc ){
+ inline BSONObjBuilder& BSONObjBuilder::appendTimestamp( const StringData& fieldName , unsigned long long time , unsigned int inc ) {
OpTime t( (unsigned) (time / 1000) , inc );
appendTimestamp( fieldName , t.asDate() );
- return *this;
+ return *this;
}
inline OpTime BSONElement::_opTime() const {
@@ -48,7 +48,7 @@ namespace mongo {
}
inline string BSONElement::_asCode() const {
- switch( type() ){
+ switch( type() ) {
case mongo::String:
case Code:
return string(valuestr(), valuestrsize()-1);
@@ -61,19 +61,19 @@ namespace mongo {
return "";
}
- inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<(DateNowLabeler& id){
+ inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<(DateNowLabeler& id) {
_builder->appendDate(_fieldName, jsTime());
_fieldName = 0;
return *_builder;
}
- inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<(MinKeyLabeler& id){
+ inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<(MinKeyLabeler& id) {
_builder->appendMinKey(_fieldName);
_fieldName = 0;
return *_builder;
}
- inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<(MaxKeyLabeler& id){
+ inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<(MaxKeyLabeler& id) {
_builder->appendMaxKey(_fieldName);
_fieldName = 0;
return *_builder;
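
These operator<< overloads are what let the DATENOW, MINKEY and MAXKEY labelers appear inside the BSON() stream macro. A hedged usage sketch; bson_db.h is the server-side half (it needs OpTime and jsTime() from the wider tree), so this assumes a full-tree build rather than standalone bson.h:

    #include <iostream>
    #include "db/jsobj.h"   // assumption: server-side include that pulls in bson_db.h

    using namespace mongo;

    int main() {
        // DATENOW appends a Date element holding jsTime(), the current time in millis
        BSONObj doc = BSON( "created" << DATENOW );

        // MINKEY/MAXKEY give the open-ended bounds used when building key ranges
        BSONObj bounds = BSON( "low" << MINKEY << "high" << MAXKEY );

        std::cout << doc.toString() << '\n' << bounds.toString() << std::endl;
        return 0;
    }
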
diff --git a/bson/bsondemo/bsondemo.cpp b/bson/bsondemo/bsondemo.cpp
old mode 100755
new mode 100644
index 711c9bcec48..ec83f5e6867
--- a/bson/bsondemo/bsondemo.cpp
+++ b/bson/bsondemo/bsondemo.cpp
@@ -1,4 +1,4 @@
-/** @file bsondemo.cpp
+/** @file bsondemo.cpp
Example of use of BSON from C++.
@@ -29,16 +29,15 @@
using namespace std;
using namespace bson;
-void iter(bo o) {
+void iter(bo o) {
/* iterator example */
cout << "\niter()\n";
- for( bo::iterator i(o); i.more(); ) {
+ for( bo::iterator i(o); i.more(); ) {
cout << ' ' << i.next().toString() << '\n';
}
}
-int main()
-{
+int main() {
cout << "build bits: " << 8 * sizeof(char *) << '\n' << endl;
/* a bson object defaults on construction to { } */
@@ -47,7 +46,7 @@ int main()
/* make a simple { name : 'joe', age : 33.7 } object */
{
- bob b;
+ bob b;
b.append("name", "joe");
b.append("age", 33.7);
b.obj();
@@ -73,7 +72,7 @@ int main()
/* reach in and get subobj.z */
cout << "subobj.z: " << y.getFieldDotted("subobj.z").Number() << endl;
-
+
/* alternate syntax: */
cout << "subobj.z: " << y["subobj"]["z"].Number() << endl;
@@ -83,19 +82,19 @@ int main()
cout << v[0] << endl;
/* into an array */
- list<be> L;
+ list<be> L;
y.elems(L);
bo sub = y["subobj"].Obj();
- /* grab all the int's that were in subobj. if it had elements that were not ints, we throw an exception
- (capital V on Vals() means exception if wrong type found
+ /* grab all the int's that were in subobj. if it had elements that were not ints, we throw an exception
+ (capital V on Vals() means exception if wrong type found
*/
vector<int> myints;
sub.Vals(myints);
cout << "my ints: " << myints[0] << ' ' << myints[1] << endl;
- /* grab all the string values from x. if the field isn't of string type, just skip it --
+ /* grab all the string values from x. if the field isn't of string type, just skip it --
lowercase v on vals() indicates skip don't throw.
*/
vector<string> strs;
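
The demo's closing comments draw the Vals()/vals() distinction: the capitalized form throws on a type mismatch, the lowercase form silently skips it (via the catch(...) shown in bson-inl.h above). A compact sketch of that contract on a mixed array, assuming the int overload of Val() behaves like the Date_t one declared in bsonelement.h:

    #include <iostream>
    #include <vector>
    #include "bson/bson.h"

    using namespace mongo;

    int main() {
        BSONObj mixed = BSON_ARRAY( 1 << 2 << "three" << 4 );

        // lowercase vals(): the mismatched element is swallowed by catch(...)
        std::vector<int> some;
        mixed.vals( some );                       // some == {1, 2, 4}
        std::cout << some.size() << std::endl;    // 3

        // capital Vals(): the string element raises the assertion exception
        std::vector<int> all;
        try {
            mixed.Vals( all );
        }
        catch ( const std::exception& e ) {
            std::cout << "threw: " << e.what() << std::endl;
        }
        return 0;
    }
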
diff --git a/bson/bsonelement.h b/bson/bsonelement.h
index da1d8772241..52dbee3084e 100644
--- a/bson/bsonelement.h
+++ b/bson/bsonelement.h
@@ -38,7 +38,7 @@ namespace mongo {
/** BSONElement represents an "element" in a BSONObj. So for the object { a : 3, b : "abc" },
'a : 3' is the first element (key+value).
-
+
The BSONElement object points into the BSONObj's data. Thus the BSONObj must stay in scope
for the life of the BSONElement.
@@ -51,7 +51,7 @@ namespace mongo {
*/
class BSONElement {
public:
- /** These functions, which start with a capital letter, throw a UserException if the
+ /** These functions, which start with a capital letter, throw a UserException if the
element is not of the required type. Example:
string foo = obj["foo"].String(); // exception if not a string type or DNE
@@ -69,7 +69,7 @@ namespace mongo {
void Null() const { chk(isNull()); }
void OK() const { chk(ok()); }
- /** populate v with the value of the element. If type does not match, throw exception.
+ /** populate v with the value of the element. If type does not match, throw exception.
useful in templates -- see also BSONObj::Vals().
*/
void Val(Date_t& v) const { v = Date(); }
@@ -94,24 +94,24 @@ namespace mongo {
/** Returns the type of the element */
BSONType type() const { return (BSONType) *data; }
- /** retrieve a field within this element
+ /** retrieve a field within this element
throws exception if *this is not an embedded object
*/
BSONElement operator[] (const string& field) const;
-
+
        /** returns the type of the element fixed for the main type
            the main purpose is numbers.  any numeric type will return NumberDouble
            Note: if the order changes, indexes have to be re-built or there can be corruption
*/
int canonicalType() const;
- /** Indicates if it is the end-of-object element, which is present at the end of
+ /** Indicates if it is the end-of-object element, which is present at the end of
every BSON object.
*/
bool eoo() const { return type() == EOO; }
/** Size of the element.
- @param maxLen If maxLen is specified, don't scan more than maxLen bytes to calculate size.
+ @param maxLen If maxLen is specified, don't scan more than maxLen bytes to calculate size.
*/
int size( int maxLen = -1 ) const;
@@ -121,7 +121,7 @@ namespace mongo {
/** Wrap this element up as a singleton object with a new name. */
BSONObj wrap( const char* newName) const;
- /** field name of the element. e.g., for
+ /** field name of the element. e.g., for
name : "Joe"
"name" is the fieldname
*/
@@ -141,21 +141,21 @@ namespace mongo {
bool isBoolean() const { return type() == mongo::Bool; }
- /** @return value of a boolean element.
- You must assure element is a boolean before
+ /** @return value of a boolean element.
+ You must assure element is a boolean before
calling. */
bool boolean() const {
return *value() ? true : false;
}
- /** Retrieve a java style date value from the element.
+ /** Retrieve a java style date value from the element.
Ensure element is of type Date before calling.
*/
Date_t date() const {
return *reinterpret_cast< const Date_t* >( value() );
}
- /** Convert the value to boolean, regardless of its type, in a javascript-like fashion
+ /** Convert the value to boolean, regardless of its type, in a javascript-like fashion
(i.e., treat zero and null as false).
*/
bool trueValue() const;
@@ -177,16 +177,16 @@ namespace mongo {
int numberInt() const;
/** Retrieve long value for the element safely. Zero returned if not a number. */
long long numberLong() const;
- /** Retrieve the numeric value of the element. If not of a numeric type, returns 0.
+ /** Retrieve the numeric value of the element. If not of a numeric type, returns 0.
Note: casts to double, data loss may occur with large (>52 bit) NumberLong values.
*/
double numberDouble() const;
- /** Retrieve the numeric value of the element. If not of a numeric type, returns 0.
+ /** Retrieve the numeric value of the element. If not of a numeric type, returns 0.
Note: casts to double, data loss may occur with large (>52 bit) NumberLong values.
*/
double number() const { return numberDouble(); }
- /** Retrieve the object ID stored in the object.
+ /** Retrieve the object ID stored in the object.
You must ensure the element is of type jstOID first. */
const mongo::OID &__oid() const { return *reinterpret_cast< const mongo::OID* >( value() ); }
@@ -194,8 +194,8 @@ namespace mongo {
bool isNull() const {
return type() == jstNULL;
}
-
- /** Size (length) of a string element.
+
+ /** Size (length) of a string element.
You must assure of type String first. */
int valuestrsize() const {
return *reinterpret_cast< const int* >( value() );
@@ -206,7 +206,7 @@ namespace mongo {
return *reinterpret_cast< const int* >( value() );
}
- /** Get a string's value. Also gives you start of the real data for an embedded object.
+ /** Get a string's value. Also gives you start of the real data for an embedded object.
You must assure data is of an appropriate type first -- see also valuestrsafe().
*/
const char * valuestr() const {
@@ -241,24 +241,25 @@ namespace mongo {
BSONObj codeWScopeObject() const;
/** Get raw binary data. Element must be of type BinData. Doesn't handle type 2 specially */
- const char *binData(int& len) const {
+ const char *binData(int& len) const {
// BinData: <int len> <byte subtype> <byte[len] data>
assert( type() == BinData );
len = valuestrsize();
return value() + 5;
}
/** Get binary data. Element must be of type BinData. Handles type 2 */
- const char *binDataClean(int& len) const {
+ const char *binDataClean(int& len) const {
// BinData: <int len> <byte subtype> <byte[len] data>
- if (binDataType() != ByteArrayDeprecated){
+ if (binDataType() != ByteArrayDeprecated) {
return binData(len);
- } else {
+ }
+ else {
// Skip extra size
len = valuestrsize() - 4;
return value() + 5 + 4;
}
}
-
+
BinDataType binDataType() const {
// BinData: <int len> <byte subtype> <byte[len] data>
assert( type() == BinData );
@@ -298,19 +299,19 @@ namespace mongo {
int woCompare( const BSONElement &e, bool considerFieldName = true ) const;
const char * rawdata() const { return data; }
-
+
/** 0 == Equality, just not defined yet */
int getGtLtOp( int def = 0 ) const;
/** Constructs an empty element */
BSONElement();
-
+
/** Check that data is internally consistent. */
void validate() const;
/** True if this element may contain subobjects. */
bool mayEncapsulate() const {
- switch ( type() ){
+ switch ( type() ) {
case Object:
case mongo::Array:
case CodeWScope:
@@ -322,7 +323,7 @@ namespace mongo {
/** True if this element can be a BSONObj */
bool isABSONObj() const {
- switch( type() ){
+ switch( type() ) {
case Object:
case mongo::Array:
return true;
@@ -331,11 +332,11 @@ namespace mongo {
}
}
- Date_t timestampTime() const{
+ Date_t timestampTime() const {
unsigned long long t = ((unsigned int*)(value() + 4 ))[0];
return t * 1000;
}
- unsigned int timestampInc() const{
+ unsigned int timestampInc() const {
return ((unsigned int*)(value() ))[0];
}
@@ -357,7 +358,7 @@ namespace mongo {
else if ( x > 0 ) return false;
return compareElementValues(*this,other) < 0;
}
-
+
// If maxLen is specified, don't scan more than maxLen bytes.
explicit BSONElement(const char *d, int maxLen = -1) : data(d) {
fieldNameSize_ = -1;
@@ -388,15 +389,15 @@ namespace mongo {
friend class BSONObjIterator;
friend class BSONObj;
- const BSONElement& chk(int t) const {
- if ( t != type() ){
+ const BSONElement& chk(int t) const {
+ if ( t != type() ) {
StringBuilder ss;
ss << "wrong type for BSONElement (" << fieldName() << ") " << type() << " != " << t;
uasserted(13111, ss.str() );
}
return *this;
}
- const BSONElement& chk(bool expr) const {
+ const BSONElement& chk(bool expr) const {
uassert(13118, "unexpected or missing type value in BSON object", expr);
return *this;
}
@@ -405,7 +406,7 @@ namespace mongo {
inline int BSONElement::canonicalType() const {
BSONType t = type();
- switch ( t ){
+ switch ( t ) {
case MinKey:
case MaxKey:
return t;
@@ -446,7 +447,7 @@ namespace mongo {
assert(0);
return -1;
}
- }
+ }
inline bool BSONElement::trueValue() const {
switch( type() ) {
@@ -462,7 +463,7 @@ namespace mongo {
case jstNULL:
case Undefined:
return false;
-
+
default:
;
}
@@ -476,13 +477,13 @@ namespace mongo {
case NumberDouble:
case NumberInt:
return true;
- default:
+ default:
return false;
}
}
inline bool BSONElement::isSimpleType() const {
- switch( type() ){
+ switch( type() ) {
case NumberLong:
case NumberDouble:
case NumberInt:
@@ -491,7 +492,7 @@ namespace mongo {
case mongo::Date:
case jstOID:
return true;
- default:
+ default:
return false;
}
}
@@ -510,7 +511,7 @@ namespace mongo {
}
/** Retrieve int value for the element safely. Zero returned if not a number. Converted to int if another numeric type. */
- inline int BSONElement::numberInt() const {
+ inline int BSONElement::numberInt() const {
switch( type() ) {
case NumberDouble:
return (int) _numberDouble();
@@ -524,7 +525,7 @@ namespace mongo {
}
/** Retrieve long value for the element safely. Zero returned if not a number. */
- inline long long BSONElement::numberLong() const {
+ inline long long BSONElement::numberLong() const {
switch( type() ) {
case NumberDouble:
return (long long) _numberDouble();
@@ -535,7 +536,7 @@ namespace mongo {
default:
return 0;
}
- }
+ }
inline BSONElement::BSONElement() {
static char z = 0;
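
Several hunks above touch the lowercase numeric accessors, and their contract is the point: numberInt()/numberLong() never throw, returning 0 for non-numeric types and casting (possibly truncating) otherwise, while the capitalized accessors go through chk() and assert on a type mismatch. A short sketch, assuming Int() is among the checked accessors described at the top of the class:

    #include <iostream>
    #include "bson/bson.h"

    using namespace mongo;

    int main() {
        BSONObj o = BSON( "d" << 3.9 << "s" << "abc" << "big" << 1099511627776LL );

        // safe accessors: non-numeric yields 0, numerics are cast
        std::cout << o["d"].numberInt()    << std::endl;   // 3 (truncated)
        std::cout << o["s"].numberInt()    << std::endl;   // 0 -- not a number
        std::cout << o["big"].numberLong() << std::endl;   // 1099511627776

        // checked accessor: throws instead of defaulting
        try {
            o["s"].Int();
        }
        catch ( const std::exception& e ) {
            std::cout << "Int() threw: " << e.what() << std::endl;
        }
        return 0;
    }
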
diff --git a/bson/bsonmisc.h b/bson/bsonmisc.h
index d78ccc16fea..96be12a2a80 100644
--- a/bson/bsonmisc.h
+++ b/bson/bsonmisc.h
@@ -26,7 +26,7 @@ namespace mongo {
return l.woCompare( r, false ) < 0;
}
};
-
+
class BSONObjCmp {
public:
BSONObjCmp( const BSONObj &_order = BSONObj() ) : order( _order ) {}
@@ -54,26 +54,26 @@ namespace mongo {
FieldCompareResult compareDottedFieldNames( const string& l , const string& r );
-/** Use BSON macro to build a BSONObj from a stream
+ /** Use BSON macro to build a BSONObj from a stream
+
+ e.g.,
+ BSON( "name" << "joe" << "age" << 33 )
- e.g.,
- BSON( "name" << "joe" << "age" << 33 )
+ with auto-generated object id:
+ BSON( GENOID << "name" << "joe" << "age" << 33 )
- with auto-generated object id:
- BSON( GENOID << "name" << "joe" << "age" << 33 )
-
- The labels GT, GTE, LT, LTE, NE can be helpful for stream-oriented construction
- of a BSONObj, particularly when assembling a Query. For example,
- BSON( "a" << GT << 23.4 << NE << 30 << "b" << 2 ) produces the object
- { a: { \$gt: 23.4, \$ne: 30 }, b: 2 }.
-*/
+ The labels GT, GTE, LT, LTE, NE can be helpful for stream-oriented construction
+ of a BSONObj, particularly when assembling a Query. For example,
+ BSON( "a" << GT << 23.4 << NE << 30 << "b" << 2 ) produces the object
+ { a: { \$gt: 23.4, \$ne: 30 }, b: 2 }.
+ */
#define BSON(x) (( mongo::BSONObjBuilder(64) << x ).obj())
-/** Use BSON_ARRAY macro like BSON macro, but without keys
+ /** Use BSON_ARRAY macro like BSON macro, but without keys
- BSONArray arr = BSON_ARRAY( "hello" << 1 << BSON( "foo" << BSON_ARRAY( "bar" << "baz" << "qux" ) ) );
+ BSONArray arr = BSON_ARRAY( "hello" << 1 << BSON( "foo" << BSON_ARRAY( "bar" << "baz" << "qux" ) ) );
- */
+ */
#define BSON_ARRAY(x) (( mongo::BSONArrayBuilder() << x ).arr())
/* Utility class to auto assign object IDs.
@@ -83,14 +83,14 @@ namespace mongo {
extern struct GENOIDLabeler { } GENOID;
/* Utility class to add a Date element with the current time
- Example:
+ Example:
cout << BSON( "created" << DATENOW ); // { created : "2009-10-09 11:41:42" }
*/
extern struct DateNowLabeler { } DATENOW;
/* Utility class to add the minKey (minus infinity) to a given attribute
Example:
- cout << BSON( "a" << MINKEY ); // { "a" : { "$minKey" : 1 } }
+ cout << BSON( "a" << MINKEY ); // { "a" : { "$minKey" : 1 } }
*/
extern struct MinKeyLabeler { } MINKEY;
extern struct MaxKeyLabeler { } MAXKEY;
@@ -106,17 +106,17 @@ namespace mongo {
template<class T>
BSONObjBuilder& operator<<( T value );
- /* the value of the element e is appended i.e. for
+ /* the value of the element e is appended i.e. for
"age" << GT << someElement
- one gets
- { age : { $gt : someElement's value } }
+ one gets
+ { age : { $gt : someElement's value } }
*/
BSONObjBuilder& operator<<( const BSONElement& e );
private:
const Label &l_;
BSONObjBuilderValueStream *s_;
};
-
+
extern Labeler::Label GT;
extern Labeler::Label GTE;
extern Labeler::Label LT;
@@ -133,7 +133,7 @@ namespace mongo {
inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d, const BSONObj& e);
inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d, const BSONObj& e, const BSONObj& f);
// definitions in bsonobjbuilder.h b/c of incomplete types
-
+
// Utility class to implement BSON( key << val ) as described above.
class BSONObjBuilderValueStream : public boost::noncopyable {
public:
@@ -141,20 +141,20 @@ namespace mongo {
BSONObjBuilderValueStream( BSONObjBuilder * builder );
BSONObjBuilder& operator<<( const BSONElement& e );
-
- template<class T>
+
+ template<class T>
BSONObjBuilder& operator<<( T value );
BSONObjBuilder& operator<<(DateNowLabeler& id);
BSONObjBuilder& operator<<(MinKeyLabeler& id);
BSONObjBuilder& operator<<(MaxKeyLabeler& id);
-
+
Labeler operator<<( const Labeler::Label &l );
void endField( const char *nextFieldName = 0 );
bool subobjStarted() const { return _fieldName != 0; }
-
+
private:
const char * _fieldName;
BSONObjBuilder * _builder;
@@ -163,39 +163,39 @@ namespace mongo {
BSONObjBuilder *subobj();
auto_ptr< BSONObjBuilder > _subobj;
};
-
+
/**
        used in conjunction with BSONObjBuilder, allows for proper buffer size to prevent crazy memory usage
*/
class BSONSizeTracker {
public:
- BSONSizeTracker(){
+ BSONSizeTracker() {
_pos = 0;
for ( int i=0; i<SIZE; i++ )
_sizes[i] = 512; // this is the default, so just be consistent
}
-
- ~BSONSizeTracker(){
+
+ ~BSONSizeTracker() {
}
-
- void got( int size ){
+
+ void got( int size ) {
_sizes[_pos++] = size;
if ( _pos >= SIZE )
_pos = 0;
}
-
+
/**
* right now choosing largest size
*/
int getSize() const {
int x = 16; // sane min
- for ( int i=0; i<SIZE; i++ ){
+ for ( int i=0; i<SIZE; i++ ) {
if ( _sizes[i] > x )
x = _sizes[i];
}
return x;
}
-
+
private:
enum { SIZE = 10 };
int _pos;
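
The re-indented comment block above documents the BSON() and BSON_ARRAY() macros plus the GT/GTE/LT/LTE/NE query labels. A sketch of the three forms taken straight from that comment, assuming you link against the library objects that define the extern label instances:

    #include <iostream>
    #include "bson/bson.h"

    using namespace mongo;

    int main() {
        BSONObj person = BSON( "name" << "joe" << "age" << 33 );

        // the labels expand into a nested operator object:
        // { a: { $gt: 23.4, $ne: 30 }, b: 2 }
        BSONObj query = BSON( "a" << GT << 23.4 << NE << 30 << "b" << 2 );

        // BSON_ARRAY is the keyless variant built on BSONArrayBuilder
        BSONArray arr = BSON_ARRAY( "hello" << 1 << BSON( "foo" << "bar" ) );

        std::cout << person.toString() << '\n'
                  << query.toString()  << '\n'
                  << arr.toString( true ) << std::endl;   // true: array formatting
        return 0;
    }
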
diff --git a/bson/bsonobj.h b/bson/bsonobj.h
index 0e4ad0696f1..c4b7af61cb6 100644
--- a/bson/bsonobj.h
+++ b/bson/bsonobj.h
@@ -28,23 +28,23 @@ namespace mongo {
typedef set< BSONElement, BSONElementCmpWithoutField > BSONElementSet;
/**
- C++ representation of a "BSON" object -- that is, an extended JSON-style
+ C++ representation of a "BSON" object -- that is, an extended JSON-style
object in a binary representation.
See bsonspec.org.
- Note that BSONObj's have a smart pointer capability built in -- so you can
+ Note that BSONObj's have a smart pointer capability built in -- so you can
pass them around by value. The reference counts used to implement this
do not use locking, so copying and destroying BSONObj's are not thread-safe
operations.
BSON object format:
-
+
code
<unsigned totalSize> {<byte BSONType><cstring FieldName><Data>}* EOO
-
+
totalSize includes itself.
-
+
Data:
Bool: <byte>
EOO: nothing follows
@@ -67,11 +67,11 @@ namespace mongo {
*/
class BSONObj {
public:
-
- /** Construct a BSONObj from data in the proper format.
- @param ifree true if the BSONObj should free() the msgdata when
- it destructs.
- */
+
+ /** Construct a BSONObj from data in the proper format.
+ @param ifree true if the BSONObj should free() the msgdata when
+ it destructs.
+ */
explicit BSONObj(const char *msgdata, bool ifree = false) {
init(msgdata, ifree);
}
@@ -84,24 +84,24 @@ namespace mongo {
~BSONObj() { /*defensive:*/ _objdata = 0; }
/**
- A BSONObj can use a buffer it "owns" or one it does not.
-
+ A BSONObj can use a buffer it "owns" or one it does not.
+
OWNED CASE
- If the BSONObj owns the buffer, the buffer can be shared among several BSONObj's (by assignment).
+ If the BSONObj owns the buffer, the buffer can be shared among several BSONObj's (by assignment).
In this case the buffer is basically implemented as a shared_ptr.
Since BSONObj's are typically immutable, this works well.
UNOWNED CASE
A BSONObj can also point to BSON data in some other data structure it does not "own" or free later.
- For example, in a memory mapped file. In this case, it is important the original data stays in
- scope for as long as the BSONObj is in use. If you think the original data may go out of scope,
- call BSONObj::getOwned() to promote your BSONObj to having its own copy.
+ For example, in a memory mapped file. In this case, it is important the original data stays in
+ scope for as long as the BSONObj is in use. If you think the original data may go out of scope,
+ call BSONObj::getOwned() to promote your BSONObj to having its own copy.
- On a BSONObj assignment, if the source is unowned, both the source and dest will have unowned
+ On a BSONObj assignment, if the source is unowned, both the source and dest will have unowned
pointers to the original buffer after the assignment.
-
- If you are not sure about ownership but need the buffer to last as long as the BSONObj, call
- getOwned(). getOwned() is a no-op if the buffer is already owned. If not already owned, a malloc
+
+ If you are not sure about ownership but need the buffer to last as long as the BSONObj, call
+ getOwned(). getOwned() is a no-op if the buffer is already owned. If not already owned, a malloc
and memcpy will result.
Most ways to create BSONObj's create 'owned' variants. Unowned versions can be created with:
@@ -119,13 +119,13 @@ namespace mongo {
/** @return a new full (and owned) copy of the object. */
BSONObj copy() const;
- /** Readable representation of a BSON object in an extended JSON-style notation.
+ /** Readable representation of a BSON object in an extended JSON-style notation.
This is an abbreviated representation which might be used for logging.
*/
string toString( bool isArray = false, bool full=false ) const;
void toString(StringBuilder& s, bool isArray = false, bool full=false ) const;
-
- /** Properly formatted JSON string.
+
+ /** Properly formatted JSON string.
@param pretty if true we try to add some lf's and indentation
*/
string jsonString( JsonStringFormat format = Strict, int pretty = 0 ) const;
@@ -160,36 +160,36 @@ namespace mongo {
names with respect to the returned element. */
BSONElement getFieldDottedOrArray(const char *&name) const;
- /** Get the field of the specified name. eoo() is true on the returned
- element if not found.
+ /** Get the field of the specified name. eoo() is true on the returned
+ element if not found.
*/
BSONElement getField(const StringData& name) const;
- /** Get the field of the specified name. eoo() is true on the returned
- element if not found.
+ /** Get the field of the specified name. eoo() is true on the returned
+ element if not found.
*/
- BSONElement operator[] (const char *field) const {
+ BSONElement operator[] (const char *field) const {
return getField(field);
}
- BSONElement operator[] (const string& field) const {
+ BSONElement operator[] (const string& field) const {
return getField(field);
}
- BSONElement operator[] (int field) const {
+ BSONElement operator[] (int field) const {
StringBuilder ss;
ss << field;
string s = ss.str();
return getField(s.c_str());
}
- /** @return true if field exists */
+ /** @return true if field exists */
bool hasField( const char * name ) const { return ! getField( name ).eoo(); }
/** @return "" if DNE or wrong type */
const char * getStringField(const char *name) const;
- /** @return subobject of the given name */
+ /** @return subobject of the given name */
BSONObj getObjectField(const char *name) const;
/** @return INT_MIN if not present - does some type conversions */
@@ -204,18 +204,18 @@ namespace mongo {
object.
*/
BSONObj extractFieldsUnDotted(BSONObj pattern) const;
-
+
/** extract items from object which match a pattern object.
- e.g., if pattern is { x : 1, y : 1 }, builds an object with
- x and y elements of this object, if they are present.
+ e.g., if pattern is { x : 1, y : 1 }, builds an object with
+ x and y elements of this object, if they are present.
returns elements with original field names
*/
BSONObj extractFields(const BSONObj &pattern , bool fillWithNull=false) const;
-
+
BSONObj filterFieldsUndotted(const BSONObj &filter, bool inFilter) const;
BSONElement getFieldUsingIndexNames(const char *fieldName, const BSONObj &indexKey) const;
-
+
/** @return the raw data of the object */
const char *objdata() const {
return _objdata;
@@ -231,30 +231,30 @@ namespace mongo {
*/
bool okForStorage() const;
- /** @return true if object is empty -- i.e., {} */
+ /** @return true if object is empty -- i.e., {} */
bool isEmpty() const { return objsize() <= 5; }
void dump() const;
/** Alternative output format */
string hexDump() const;
-
+
/**wo='well ordered'. fields must be in same order in each object.
- Ordering is with respect to the signs of the elements
+ Ordering is with respect to the signs of the elements
and allows ascending / descending key mixing.
- @return <0 if l<r. 0 if l==r. >0 if l>r
+ @return <0 if l<r. 0 if l==r. >0 if l>r
*/
int woCompare(const BSONObj& r, const Ordering &o,
bool considerFieldName=true) const;
/**wo='well ordered'. fields must be in same order in each object.
- Ordering is with respect to the signs of the elements
+ Ordering is with respect to the signs of the elements
and allows ascending / descending key mixing.
- @return <0 if l<r. 0 if l==r. >0 if l>r
+ @return <0 if l<r. 0 if l==r. >0 if l>r
*/
int woCompare(const BSONObj& r, const BSONObj &ordering = BSONObj(),
bool considerFieldName=true) const;
-
+
bool operator<( const BSONObj& other ) const { return woCompare( other ) < 0; }
bool operator<=( const BSONObj& other ) const { return woCompare( other ) <= 0; }
@@ -277,18 +277,18 @@ namespace mongo {
return false;
}
- /** @return first field of the object */
+ /** @return first field of the object */
BSONElement firstElement() const { return BSONElement(objdata() + 4); }
- /** @return true if field exists in the object */
+ /** @return true if field exists in the object */
bool hasElement(const char *name) const;
- /** Get the _id field from the object. For good performance drivers should
- assure that _id is the first element of the object; however, correct operation
+ /** Get the _id field from the object. For good performance drivers should
+ assure that _id is the first element of the object; however, correct operation
is assured regardless.
@return true if found
- */
- bool getObjectID(BSONElement& e) const;
+ */
+ bool getObjectID(BSONElement& e) const;
/** @return A hash code for the object */
int hash() const {
@@ -304,18 +304,18 @@ namespace mongo {
// string identifier equivalents.
// TODO Support conversion of element types other than min and max.
BSONObj clientReadable() const;
-
+
/** Return new object with the field names replaced by those in the
passed object. */
BSONObj replaceFieldNames( const BSONObj &obj ) const;
-
+
/** true unless corrupt */
bool valid() const;
-
+
/** @return an md5 value for this object. */
string md5() const;
-
- bool operator==( const BSONObj& other ) const{
+
+ bool operator==( const BSONObj& other ) const {
return woCompare( other ) == 0;
}
@@ -339,14 +339,14 @@ namespace mongo {
opNEAR = 0x13,
opWITHIN = 0x14,
opMAX_DISTANCE=0x15
- };
+ };
/** add all elements of the object to the specified vector */
void elems(vector<BSONElement> &) const;
/** add all elements of the object to the specified list */
void elems(list<BSONElement> &) const;
- /** add all values of the object to the specified vector. If type mismatches, exception.
+ /** add all values of the object to the specified vector. If type mismatches, exception.
this is most useful when the BSONObj is an array, but can be used with non-arrays too in theory.
example:
@@ -371,7 +371,7 @@ namespace mongo {
typedef BSONObjIterator iterator;
/** use something like this:
- for( BSONObj::iterator i = myObj.begin(); i.more(); ) {
+ for( BSONObj::iterator i = myObj.begin(); i.more(); ) {
BSONElement e = i.next();
...
}
@@ -383,11 +383,11 @@ namespace mongo {
b.appendBuf(reinterpret_cast<const void *>( objdata() ), objsize());
}
-private:
+ private:
class Holder {
public:
Holder( const char *objdata ) :
- _objdata( objdata ) {
+ _objdata( objdata ) {
}
~Holder() {
free((void *)_objdata);
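
The rewritten OWNED/UNOWNED comment above is the heart of this file's contract: an unowned BSONObj is just a view over someone else's bytes, and getOwned() promotes it with a malloc and memcpy only when needed. A sketch with a hypothetical readRecord() helper standing in for code that hands out BSON from an external buffer:

    #include <cstring>
    #include <iostream>
    #include "bson/bson.h"

    using namespace mongo;

    static BSONObj readRecord( const char* externalBuf ) {
        // wrap BSON bytes we do not own (ifree defaults to false);
        // 'externalBuf' must outlive the view unless we copy it out
        BSONObj view( externalBuf );
        return view.getOwned();        // malloc+memcpy here; no-op if already owned
    }

    int main() {
        BSONObj src = BSON( "x" << 1 );          // builder output owns its buffer

        char buf[64];
        memcpy( buf, src.objdata(), src.objsize() );

        BSONObj safe = readRecord( buf );
        memset( buf, 0, sizeof(buf) );           // fine: 'safe' has its own copy

        std::cout << safe.toString() << std::endl;   // { x: 1 }
        return 0;
    }
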
diff --git a/bson/bsonobjbuilder.h b/bson/bsonobjbuilder.h
index 01e0e9b6b96..dda89c8e6ce 100644
--- a/bson/bsonobjbuilder.h
+++ b/bson/bsonobjbuilder.h
@@ -36,7 +36,7 @@ namespace mongo {
template<typename T>
class BSONFieldValue {
public:
- BSONFieldValue( const string& name , const T& t ){
+ BSONFieldValue( const string& name , const T& t ) {
_name = name;
_t = t;
}
@@ -52,8 +52,8 @@ namespace mongo {
template<typename T>
class BSONField {
public:
- BSONField( const string& name , const string& longName="" )
- : _name(name), _longName(longName){}
+ BSONField( const string& name , const string& longName="" )
+ : _name(name), _longName(longName) {}
const string& name() const { return _name; }
operator string() const { return _name; }
@@ -65,11 +65,11 @@ namespace mongo {
BSONFieldValue<BSONObj> lt( const T& t ) const { return query( "$lt" , t ); }
BSONFieldValue<BSONObj> query( const char * q , const T& t ) const;
-
+
BSONFieldValue<T> operator()( const T& t ) const {
return BSONFieldValue<T>( _name , t );
}
-
+
private:
string _name;
string _longName;
@@ -90,13 +90,13 @@ namespace mongo {
BSONObjBuilder( BufBuilder &baseBuilder ) : _b( baseBuilder ), _buf( 0 ), _offset( baseBuilder.len() ), _s( this ) , _tracker(0) , _doneCalled(false) {
_b.skip( 4 );
}
-
+
BSONObjBuilder( const BSONSizeTracker & tracker ) : _b(_buf) , _buf(tracker.getSize() ), _offset(0), _s( this ) , _tracker( (BSONSizeTracker*)(&tracker) ) , _doneCalled(false) {
_b.skip( 4 );
}
- ~BSONObjBuilder(){
- if ( !_doneCalled && _b.buf() && _buf.getSize() == 0 ){
+ ~BSONObjBuilder() {
+ if ( !_doneCalled && _b.buf() && _buf.getSize() == 0 ) {
_done();
}
}
@@ -129,14 +129,14 @@ namespace mongo {
}
/** add a subobject as a member */
- BSONObjBuilder& appendObject(const StringData& fieldName, const char * objdata , int size = 0 ){
+ BSONObjBuilder& appendObject(const StringData& fieldName, const char * objdata , int size = 0 ) {
assert( objdata );
- if ( size == 0 ){
+ if ( size == 0 ) {
size = *((int*)objdata);
}
-
+
assert( size > 4 && size < 100000000 );
-
+
_b.appendNum((char) Object);
_b.appendStr(fieldName);
_b.appendBuf((void*)objdata, size );
@@ -151,7 +151,7 @@ namespace mongo {
_b.appendStr(fieldName);
return _b;
}
-
+
/** add a subobject as a member with type Array. Thus arr object should have "0", "1", ...
style fields in it.
*/
@@ -161,9 +161,9 @@ namespace mongo {
_b.appendBuf((void *) subObj.objdata(), subObj.objsize());
return *this;
}
- BSONObjBuilder& append(const StringData& fieldName, BSONArray arr) {
- return appendArray(fieldName, arr);
- }
+ BSONObjBuilder& append(const StringData& fieldName, BSONArray arr) {
+ return appendArray(fieldName, arr);
+ }
/** add header for a new subarray and return bufbuilder for writing to
the subarray's body */
@@ -172,7 +172,7 @@ namespace mongo {
_b.appendStr(fieldName);
return _b;
}
-
+
/** Append a boolean element */
BSONObjBuilder& appendBool(const StringData& fieldName, int val) {
_b.appendNum((char) Bool);
@@ -185,10 +185,10 @@ namespace mongo {
BSONObjBuilder& append(const StringData& fieldName, bool val) {
_b.appendNum((char) Bool);
_b.appendStr(fieldName);
- _b.appendNum((char) (val?1:0));
+ _b.appendNum((char) (val?1:0));
return *this;
}
-
+
/** Append a 32 bit integer element */
BSONObjBuilder& append(const StringData& fieldName, int n) {
_b.appendNum((char) NumberInt);
@@ -198,20 +198,20 @@ namespace mongo {
}
/** Append a 32 bit unsigned element - cast to a signed int. */
- BSONObjBuilder& append(const StringData& fieldName, unsigned n) {
- return append(fieldName, (int) n);
+ BSONObjBuilder& append(const StringData& fieldName, unsigned n) {
+ return append(fieldName, (int) n);
}
/** Append a NumberLong */
- BSONObjBuilder& append(const StringData& fieldName, long long n) {
+ BSONObjBuilder& append(const StringData& fieldName, long long n) {
_b.appendNum((char) NumberLong);
_b.appendStr(fieldName);
_b.appendNum(n);
- return *this;
+ return *this;
}
/** appends a number. if n < max(int)/2 then uses int, otherwise long long */
- BSONObjBuilder& appendIntOrLL( const StringData& fieldName , long long n ){
+ BSONObjBuilder& appendIntOrLL( const StringData& fieldName , long long n ) {
long long x = n;
if ( x < 0 )
x = x * -1;
@@ -226,15 +226,15 @@ namespace mongo {
* appendNumber is a series of method for appending the smallest sensible type
* mostly for JS
*/
- BSONObjBuilder& appendNumber( const StringData& fieldName , int n ){
+ BSONObjBuilder& appendNumber( const StringData& fieldName , int n ) {
return append( fieldName , n );
}
- BSONObjBuilder& appendNumber( const StringData& fieldName , double d ){
+ BSONObjBuilder& appendNumber( const StringData& fieldName , double d ) {
return append( fieldName , d );
}
- BSONObjBuilder& appendNumber( const StringData& fieldName , size_t n ){
+ BSONObjBuilder& appendNumber( const StringData& fieldName , size_t n ) {
static size_t maxInt = (size_t)pow( 2.0 , 30.0 );
if ( n < maxInt )
@@ -245,7 +245,7 @@ namespace mongo {
}
- BSONObjBuilder& appendNumber( const StringData& fieldName , long long l ){
+ BSONObjBuilder& appendNumber( const StringData& fieldName , long long l ) {
static long long maxInt = (int)pow( 2.0 , 30.0 );
static long long maxDouble = (long long)pow( 2.0 , 40.0 );
@@ -257,7 +257,7 @@ namespace mongo {
append( fieldName , l );
return *this;
}
-
+
/** Append a double element */
BSONObjBuilder& append(const StringData& fieldName, double n) {
_b.appendNum((char) NumberDouble);
@@ -271,8 +271,8 @@ namespace mongo {
*/
bool appendAsNumber( const StringData& fieldName , const string& data );
- /** Append a BSON Object ID (OID type).
- @deprecated Generally, it is preferred to use the append append(name, oid)
+ /** Append a BSON Object ID (OID type).
+ @deprecated Generally, it is preferred to use the append(name, oid)
method for this.
*/
BSONObjBuilder& appendOID(const StringData& fieldName, OID *oid = 0 , bool generateIfBlank = false ) {
@@ -291,8 +291,8 @@ namespace mongo {
return *this;
}
- /**
- Append a BSON Object ID.
+ /**
+ Append a BSON Object ID.
@param fieldName Field name, e.g., "_id".
@returns the builder object
*/
@@ -321,14 +321,14 @@ namespace mongo {
_b.appendNum(static_cast<unsigned long long>(dt) * 1000);
return *this;
}
- /** Append a date.
- @param dt a Java-style 64 bit date value, that is
+ /** Append a date.
+ @param dt a Java-style 64 bit date value, that is
the number of milliseconds since January 1, 1970, 00:00:00 GMT
*/
BSONObjBuilder& appendDate(const StringData& fieldName, Date_t dt) {
/* easy to pass a time_t to this and get a bad result. thus this warning. */
#if defined(_DEBUG) && defined(MONGO_EXPOSE_MACROS)
- if( dt > 0 && dt <= 0xffffffff ) {
+ if( dt > 0 && dt <= 0xffffffff ) {
static int n;
if( n++ == 0 )
log() << "DEV WARNING appendDate() called with a tiny (but nonzero) date" << endl;
@@ -373,7 +373,7 @@ namespace mongo {
}
/** Append a string element */
BSONObjBuilder& append(const StringData& fieldName, const char *str) {
- return append(fieldName, str, (int) strlen(str)+1);
+ return append(fieldName, str, (int) strlen(str)+1);
}
/** Append a string element */
BSONObjBuilder& append(const StringData& fieldName, const string& str) {
@@ -385,40 +385,42 @@ namespace mongo {
_b.appendStr(fieldName);
_b.appendNum((int) symbol.size()+1);
_b.appendStr(symbol);
- return *this; }
+ return *this;
+ }
/** Append a Null element to the object */
BSONObjBuilder& appendNull( const StringData& fieldName ) {
_b.appendNum( (char) jstNULL );
_b.appendStr( fieldName );
- return *this; }
+ return *this;
+ }
// Append an element that is less than all other keys.
BSONObjBuilder& appendMinKey( const StringData& fieldName ) {
_b.appendNum( (char) MinKey );
_b.appendStr( fieldName );
- return *this;
+ return *this;
}
// Append an element that is greater than all other keys.
BSONObjBuilder& appendMaxKey( const StringData& fieldName ) {
_b.appendNum( (char) MaxKey );
_b.appendStr( fieldName );
- return *this;
+ return *this;
}
-
+
// Append a Timestamp field -- will be updated to next OpTime on db insert.
BSONObjBuilder& appendTimestamp( const StringData& fieldName ) {
_b.appendNum( (char) Timestamp );
_b.appendStr( fieldName );
_b.appendNum( (unsigned long long) 0 );
- return *this;
+ return *this;
}
BSONObjBuilder& appendTimestamp( const StringData& fieldName , unsigned long long val ) {
_b.appendNum( (char) Timestamp );
_b.appendStr( fieldName );
_b.appendNum( val );
- return *this;
+ return *this;
}
/**
@@ -427,10 +429,10 @@ namespace mongo {
@param time - in millis (but stored in seconds)
*/
BSONObjBuilder& appendTimestamp( const StringData& fieldName , unsigned long long time , unsigned int inc );
-
+
/*
Append an element of the deprecated DBRef type.
- @deprecated
+ @deprecated
*/
BSONObjBuilder& appendDBRef( const StringData& fieldName, const StringData& ns, const OID &oid ) {
_b.appendNum( (char) DBRef );
@@ -438,13 +440,13 @@ namespace mongo {
_b.appendNum( (int) ns.size() + 1 );
_b.appendStr( ns );
_b.appendBuf( (void *) &oid, 12 );
- return *this;
+ return *this;
}
- /** Append a binary data element
+ /** Append a binary data element
@param fieldName name of the field
@param len length of the binary data in bytes
- @param subtype subtype information for the data. @see enum BinDataType in bsontypes.h.
+ @param subtype subtype information for the data. @see enum BinDataType in bsontypes.h.
Use BinDataGeneral if you don't care about the type.
@param data the byte array
*/
@@ -454,29 +456,29 @@ namespace mongo {
_b.appendNum( len );
_b.appendNum( (char) type );
_b.appendBuf( (void *) data, len );
- return *this;
+ return *this;
}
BSONObjBuilder& appendBinData( const StringData& fieldName, int len, BinDataType type, const unsigned char *data ) {
return appendBinData(fieldName, len, type, (const char *) data);
}
-
+
/**
Subtype 2 is deprecated.
Append a BSON bindata bytearray element.
@param data a byte array
@param len the length of data
*/
- BSONObjBuilder& appendBinDataArrayDeprecated( const char * fieldName , const char * data , int len ){
+ BSONObjBuilder& appendBinDataArrayDeprecated( const char * fieldName , const char * data , int len ) {
_b.appendNum( (char) BinData );
_b.appendStr( fieldName );
_b.appendNum( len + 4 );
_b.appendNum( (char)0x2 );
_b.appendNum( len );
- _b.appendBuf( (void *) data, len );
- return *this;
+ _b.appendBuf( (void *) data, len );
+ return *this;
}
- /** Append to the BSON object a field of type CodeWScope. This is a javascript code
+ /** Append to the BSON object a field of type CodeWScope. This is a javascript code
fragment accompanied by some scope that goes with it.
*/
BSONObjBuilder& appendCodeWScope( const StringData& fieldName, const StringData& code, const BSONObj &scope ) {
@@ -493,12 +495,12 @@ namespace mongo {
_b.appendNum( (char) Undefined );
_b.appendStr( fieldName );
}
-
+
/* helper function -- see Query::where() for primary way to do this. */
- void appendWhere( const StringData& code, const BSONObj &scope ){
+ void appendWhere( const StringData& code, const BSONObj &scope ) {
appendCodeWScope( "$where" , code , scope );
}
-
+
/**
these are the min/max when comparing, not strict min/max elements for a given type
*/
@@ -512,9 +514,9 @@ namespace mongo {
template < class T >
BSONObjBuilder& append( const StringData& fieldName, const list< T >& vals );
- /**
+ /**
* destructive
- * The returned BSONObj will free the buffer when it is finished.
+ * The returned BSONObj will free the buffer when it is finished.
* @return owned BSONObj
*/
BSONObj obj() {
@@ -525,9 +527,9 @@ namespace mongo {
}
/** Fetch the object we have built.
- BSONObjBuilder still frees the object when the builder goes out of
- scope -- very important to keep in mind. Use obj() if you
- would like the BSONObj to last longer than the builder.
+ BSONObjBuilder still frees the object when the builder goes out of
+ scope -- very important to keep in mind. Use obj() if you
+ would like the BSONObj to last longer than the builder.
*/
BSONObj done() {
return BSONObj(_done(), /*ifree*/false);
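// Illustrative sketch (editor's example): obj() hands buffer ownership
// to the returned BSONObj, while done() leaves ownership with the
// builder, so a done() result must not outlive its builder.
BSONObjBuilder b1;
b1.append("n", 1);
BSONObj owned = b1.obj();   // usable after b1 is destroyed
BSONObjBuilder b2;
b2.append("n", 2);
BSONObj view = b2.done();   // valid only while b2 is alive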
@@ -600,14 +602,14 @@ namespace mongo {
BSONObjBuilderValueStream& operator<<( const BSONField<T>& f ) {
_s.endField( f.name().c_str() );
return _s;
- }
+ }
template<typename T>
BSONObjBuilder& operator<<( const BSONFieldValue<T>& v ) {
append( v.name().c_str() , v.value() );
return *this;
- }
-
+ }
+
/** @return true if we are using our own bufbuilder, and not an alternate that was given to us in our constructor */
bool owned() const { return &_b == &_buf; }
@@ -615,12 +617,12 @@ namespace mongo {
BSONObjIterator iterator() const ;
int len() const { return _b.len(); }
-
+
private:
char* _done() {
if ( _doneCalled )
return _b.buf() + _offset;
-
+
_doneCalled = true;
_s.endField();
_b.appendNum((char) EOO);
@@ -649,42 +651,42 @@ namespace mongo {
BSONArrayBuilder( int initialSize ) : _i(0), _b(initialSize) {}
template <typename T>
- BSONArrayBuilder& append(const T& x){
+ BSONArrayBuilder& append(const T& x) {
_b.append(num(), x);
return *this;
}
- BSONArrayBuilder& append(const BSONElement& e){
+ BSONArrayBuilder& append(const BSONElement& e) {
_b.appendAs(e, num());
return *this;
}
-
+
template <typename T>
- BSONArrayBuilder& operator<<(const T& x){
+ BSONArrayBuilder& operator<<(const T& x) {
return append(x);
}
-
+
void appendNull() {
_b.appendNull(num());
}
-
+
/**
* destructive - ownership moves to returned BSONArray
* @return owned BSONArray
*/
- BSONArray arr(){ return BSONArray(_b.obj()); }
-
+ BSONArray arr() { return BSONArray(_b.obj()); }
+
BSONObj done() { return _b.done(); }
-
+
void doneFast() { _b.doneFast(); }
-
+
template <typename T>
- BSONArrayBuilder& append(const StringData& name, const T& x){
+ BSONArrayBuilder& append(const StringData& name, const T& x) {
fill( name );
append( x );
return *this;
}
-
+
BufBuilder &subobjStart( const StringData& name = "0" ) {
fill( name );
return _b.subobjStart( num() );
@@ -694,17 +696,17 @@ namespace mongo {
fill( name );
return _b.subarrayStart( num() );
}
-
+
void appendArray( const StringData& name, BSONObj subObj ) {
fill( name );
_b.appendArray( num(), subObj );
}
-
+
void appendAs( const BSONElement &e, const char *name) {
fill( name );
append( e );
}
-
+
int len() const { return _b.len(); }
private:
@@ -716,19 +718,19 @@ namespace mongo {
while( _i < n )
append( nullElt() );
}
-
+
static BSONElement nullElt() {
static BSONObj n = nullObj();
return n.firstElement();
}
-
+
static BSONObj nullObj() {
BSONObjBuilder _b;
_b.appendNull( "" );
return _b.obj();
}
-
- string num(){ return _b.numStr(_i++); }
+
+ string num() { return _b.numStr(_i++); }
int _i;
BSONObjBuilder _b;
};
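// Illustrative sketch (editor's example): BSONArrayBuilder numbers its
// elements "0", "1", ... automatically, and arr() transfers ownership
// just like BSONObjBuilder::obj().
BSONArrayBuilder ab;
ab.append(1);
ab << 2.5;
ab.appendNull();
BSONArray a = ab.arr();     // { "0" : 1, "1" : 2.5, "2" : null }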
@@ -754,14 +756,14 @@ namespace mongo {
// $or helper: OR(BSON("x" << GT << 7), BSON("y" << LT << 6));
inline BSONObj OR(const BSONObj& a, const BSONObj& b)
- { return BSON( "$or" << BSON_ARRAY(a << b) ); }
+ { return BSON( "$or" << BSON_ARRAY(a << b) ); }
inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c)
- { return BSON( "$or" << BSON_ARRAY(a << b << c) ); }
+ { return BSON( "$or" << BSON_ARRAY(a << b << c) ); }
inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d)
- { return BSON( "$or" << BSON_ARRAY(a << b << c << d) ); }
+ { return BSON( "$or" << BSON_ARRAY(a << b << c << d) ); }
inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d, const BSONObj& e)
- { return BSON( "$or" << BSON_ARRAY(a << b << c << d << e) ); }
+ { return BSON( "$or" << BSON_ARRAY(a << b << c << d << e) ); }
inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d, const BSONObj& e, const BSONObj& f)
- { return BSON( "$or" << BSON_ARRAY(a << b << c << d << e << f) ); }
-
+ { return BSON( "$or" << BSON_ARRAY(a << b << c << d << e << f) ); }
+
}
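// Illustrative sketch (editor's example): the OR() helpers build a
// {$or:[...]} document; GT and LT are the query-builder stream tokens
// assumed from bsonmisc.h.
BSONObj q = OR(BSON("x" << GT << 7), BSON("y" << LT << 6));
// q is { $or : [ { x : { $gt : 7 } }, { y : { $lt : 6 } } ] }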
diff --git a/bson/bsonobjiterator.h b/bson/bsonobjiterator.h
index de5d75bf7aa..6e6a69e7dc2 100644
--- a/bson/bsonobjiterator.h
+++ b/bson/bsonobjiterator.h
@@ -31,7 +31,7 @@ namespace mongo {
*/
class BSONObjIterator {
public:
- /** Create an iterator for a BSON object.
+ /** Create an iterator for a BSON object.
*/
BSONObjIterator(const BSONObj& jso) {
int sz = jso.objsize();
@@ -43,11 +43,11 @@ namespace mongo {
_theend = jso.objdata() + sz;
}
- BSONObjIterator( const char * start , const char * end ){
+ BSONObjIterator( const char * start , const char * end ) {
_pos = start + 4;
_theend = end;
}
-
+
/** @return true if more elements exist to be enumerated. */
bool more() { return _pos < _theend && _pos[0]; }
@@ -78,18 +78,18 @@ namespace mongo {
class BSONObjIteratorSorted {
public:
BSONObjIteratorSorted( const BSONObj& o );
-
- ~BSONObjIteratorSorted(){
+
+ ~BSONObjIteratorSorted() {
assert( _fields );
delete[] _fields;
_fields = 0;
}
- bool more(){
+ bool more() {
return _cur < _nfields;
}
-
- BSONElement next(){
+
+ BSONElement next() {
assert( _fields );
if ( _cur < _nfields )
return BSONElement( _fields[_cur++] );
@@ -102,30 +102,30 @@ namespace mongo {
int _cur;
};
-/** Similar to BOOST_FOREACH
- *
- * because the iterator is defined outside of the for, you must use {} around
- * the surrounding scope. Don't do this:
- *
- * if (foo)
- * BSONForEach(e, obj)
- * doSomething(e);
- *
- * but this is OK:
- *
- * if (foo) {
- * BSONForEach(e, obj)
- * doSomething(e);
- * }
- *
- */
+ /** Similar to BOOST_FOREACH
+ *
+ * because the iterator is defined outside of the for, you must use {} around
+ * the surrounding scope. Don't do this:
+ *
+ * if (foo)
+ * BSONForEach(e, obj)
+ * doSomething(e);
+ *
+ * but this is OK:
+ *
+ * if (foo) {
+ * BSONForEach(e, obj)
+ * doSomething(e);
+ * }
+ *
+ */
#define BSONForEach(e, obj) \
BSONObjIterator BOOST_PP_CAT(it_,__LINE__)(obj); \
for ( BSONElement e; \
- (BOOST_PP_CAT(it_,__LINE__).more() ? \
- (e = BOOST_PP_CAT(it_,__LINE__).next(), true) : \
- false) ; \
- /*nothing*/ )
+ (BOOST_PP_CAT(it_,__LINE__).more() ? \
+ (e = BOOST_PP_CAT(it_,__LINE__).next(), true) : \
+ false) ; \
+ /*nothing*/ )
}
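// Illustrative sketch (editor's example): per the comment above, brace
// the surrounding scope when BSONForEach is the body of an if, since
// the macro declares its iterator before the for loop. obj is assumed
// to be a BSONObj in scope.
if (!obj.isEmpty()) {
    BSONForEach(e, obj)
        cout << e.fieldName() << endl;
}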
diff --git a/bson/bsontypes.h b/bson/bsontypes.h
index 27f2aafb76f..9d95e8e9ad4 100644
--- a/bson/bsontypes.h
+++ b/bson/bsontypes.h
@@ -39,69 +39,69 @@ namespace mongo {
extern BSONObj maxKey;
extern BSONObj minKey;
-/**
- the complete list of valid BSON types
- see also bsonspec.org
-*/
-enum BSONType {
- /** smaller than all other types */
- MinKey=-1,
- /** end of object */
- EOO=0,
- /** double precision floating point value */
- NumberDouble=1,
- /** character string, stored in utf8 */
- String=2,
- /** an embedded object */
- Object=3,
- /** an embedded array */
- Array=4,
- /** binary data */
- BinData=5,
- /** Undefined type */
- Undefined=6,
- /** ObjectId */
- jstOID=7,
- /** boolean type */
- Bool=8,
- /** date type */
- Date=9,
- /** null type */
- jstNULL=10,
- /** regular expression, a pattern with options */
- RegEx=11,
- /** deprecated / will be redesigned */
- DBRef=12,
- /** deprecated / use CodeWScope */
- Code=13,
- /** a programming language (e.g., Python) symbol */
- Symbol=14,
- /** javascript code that can execute on the database server, with SavedContext */
- CodeWScope=15,
- /** 32 bit signed integer */
- NumberInt = 16,
- /** Updated to a Date with value next OpTime on insert */
- Timestamp = 17,
- /** 64 bit integer */
- NumberLong = 18,
- /** max type that is not MaxKey */
- JSTypeMax=18,
- /** larger than all other types */
- MaxKey=127
-};
+ /**
+ the complete list of valid BSON types
+ see also bsonspec.org
+ */
+ enum BSONType {
+ /** smaller than all other types */
+ MinKey=-1,
+ /** end of object */
+ EOO=0,
+ /** double precision floating point value */
+ NumberDouble=1,
+ /** character string, stored in utf8 */
+ String=2,
+ /** an embedded object */
+ Object=3,
+ /** an embedded array */
+ Array=4,
+ /** binary data */
+ BinData=5,
+ /** Undefined type */
+ Undefined=6,
+ /** ObjectId */
+ jstOID=7,
+ /** boolean type */
+ Bool=8,
+ /** date type */
+ Date=9,
+ /** null type */
+ jstNULL=10,
+ /** regular expression, a pattern with options */
+ RegEx=11,
+ /** deprecated / will be redesigned */
+ DBRef=12,
+ /** deprecated / use CodeWScope */
+ Code=13,
+ /** a programming language (e.g., Python) symbol */
+ Symbol=14,
+ /** javascript code that can execute on the database server, with SavedContext */
+ CodeWScope=15,
+ /** 32 bit signed integer */
+ NumberInt = 16,
+ /** Updated to a Date with value next OpTime on insert */
+ Timestamp = 17,
+ /** 64 bit integer */
+ NumberLong = 18,
+ /** max type that is not MaxKey */
+ JSTypeMax=18,
+ /** larger than all other types */
+ MaxKey=127
+ };
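// Illustrative sketch (editor's example): dispatching on an element's
// BSONType; elem is assumed to be a BSONElement in scope.
switch (elem.type()) {
case NumberInt:
case NumberLong:
case NumberDouble:
    cout << elem.number() << endl;      // numeric types share handling
    break;
case String:
    cout << elem.valuestr() << endl;
    break;
default:
    break;
}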
/* subtypes of BinData.
bdtCustom and above are ones that the JS compiler understands, but are
opaque to the database.
*/
- enum BinDataType {
+ enum BinDataType {
BinDataGeneral=0,
- Function=1,
+ Function=1,
ByteArrayDeprecated=2, /* use BinGeneral instead */
- bdtUUID = 3,
- MD5Type=5,
- bdtCustom=128
+ bdtUUID = 3,
+ MD5Type=5,
+ bdtCustom=128
};
-
+
}
diff --git a/bson/inline_decls.h b/bson/inline_decls.h
index aab9810a9c3..1605611687f 100644
--- a/bson/inline_decls.h
+++ b/bson/inline_decls.h
@@ -26,7 +26,7 @@
#define NOINLINE_DECL __declspec(noinline)
-#else
+#else
#define NOINLINE_DECL
diff --git a/bson/oid.cpp b/bson/oid.cpp
index 280697832d1..4cb624e808e 100644
--- a/bson/oid.cpp
+++ b/bson/oid.cpp
@@ -27,7 +27,7 @@ namespace mongo {
// machine # before folding in the process id
OID::MachineAndPid OID::ourMachine;
- unsigned OID::ourPid() {
+ unsigned OID::ourPid() {
unsigned pid;
#if defined(_WIN32)
pid = (unsigned short) GetCurrentProcessId();
@@ -39,7 +39,7 @@ namespace mongo {
return pid;
}
- void OID::foldInPid(OID::MachineAndPid& x) {
+ void OID::foldInPid(OID::MachineAndPid& x) {
unsigned p = ourPid();
x._pid ^= (unsigned short) p;
// when the pid is greater than 16 bits, let the high bits modulate the machine id field.
@@ -47,10 +47,10 @@ namespace mongo {
rest ^= p >> 16;
}
- OID::MachineAndPid OID::genMachineAndPid() {
+ OID::MachineAndPid OID::genMachineAndPid() {
BOOST_STATIC_ASSERT( sizeof(mongo::OID::MachineAndPid) == 5 );
- // this is not called often, so the following is not expensive, and gives us some
+ // this is not called often, so the following is not expensive, and gives us some
// testing that nonce generation is working right and that our OIDs are (perhaps) ok.
{
nonce a = security.getNonce();
@@ -68,15 +68,15 @@ namespace mongo {
// after folding in the process id
OID::MachineAndPid OID::ourMachineAndPid = OID::genMachineAndPid();
- void OID::regenMachineId() {
+ void OID::regenMachineId() {
ourMachineAndPid = genMachineAndPid();
}
- inline bool OID::MachineAndPid::operator!=(const OID::MachineAndPid& rhs) const {
+ inline bool OID::MachineAndPid::operator!=(const OID::MachineAndPid& rhs) const {
return _pid != rhs._pid || _machineNumber != rhs._machineNumber;
}
- unsigned OID::getMachineId() {
+ unsigned OID::getMachineId() {
unsigned char x[4];
x[0] = ourMachineAndPid._machineNumber[0];
x[1] = ourMachineAndPid._machineNumber[1];
@@ -87,21 +87,21 @@ namespace mongo {
void OID::justForked() {
MachineAndPid x = ourMachine;
- // we let the random # for machine go into all 5 bytes of MachineAndPid, and then
+ // we let the random # for machine go into all 5 bytes of MachineAndPid, and then
// xor in the pid into _pid. this reduces the probability of collisions.
foldInPid(x);
ourMachineAndPid = genMachineAndPid();
assert( x != ourMachineAndPid );
ourMachineAndPid = x;
}
-
+
void OID::init() {
static AtomicUInt inc = (unsigned) security.getNonce();
{
unsigned t = (unsigned) time(0);
unsigned char *T = (unsigned char *) &t;
- _time[0] = T[3]; // big endian order because we use memcmp() to compare OID's
+ _time[0] = T[3]; // big endian order because we use memcmp() to compare OID's
_time[1] = T[2];
_time[2] = T[1];
_time[3] = T[0];
@@ -118,7 +118,7 @@ namespace mongo {
}
}
- void OID::init( string s ){
+ void OID::init( string s ) {
assert( s.size() == 24 );
const char *p = s.c_str();
for( int i = 0; i < 12; i++ ) {
@@ -127,7 +127,7 @@ namespace mongo {
}
}
- void OID::init(Date_t date, bool max){
+ void OID::init(Date_t date, bool max) {
int time = (int) (date / 1000);
char* T = (char *) &time;
data[0] = T[3];
@@ -141,7 +141,7 @@ namespace mongo {
*(long long*)(data + 4) = 0x0000000000000000ll;
}
- time_t OID::asTimeT(){
+ time_t OID::asTimeT() {
int time;
char* T = (char *) &time;
T[0] = data[3];
diff --git a/bson/oid.h b/bson/oid.h
index fdf974b3064..401460c268d 100644
--- a/bson/oid.h
+++ b/bson/oid.h
@@ -22,18 +22,18 @@
namespace mongo {
#pragma pack(1)
- /** Object ID type.
- BSON objects typically have an _id field for the object id. This field should be the first
- member of the object when present. class OID is a special type that is a 12 byte id which
+ /** Object ID type.
+ BSON objects typically have an _id field for the object id. This field should be the first
+ member of the object when present. class OID is a special type that is a 12 byte id which
is likely to be unique to the system. You may also use other types for _id's.
- When _id field is missing from a BSON object, on an insert the database may insert one
+ When _id field is missing from a BSON object, on an insert the database may insert one
automatically in certain circumstances.
Warning: You must call OID::justForked() after a fork().
- Typical contents of the BSON ObjectID is a 12-byte value consisting of a 4-byte timestamp (seconds since epoch),
- a 3-byte machine id, a 2-byte process id, and a 3-byte counter. Note that the timestamp and counter fields must
- be stored big endian unlike the rest of BSON. This is because they are compared byte-by-byte and we want to ensure
+ A BSON ObjectID is typically a 12-byte value consisting of a 4-byte timestamp (seconds since epoch),
+ a 3-byte machine id, a 2-byte process id, and a 3-byte counter. Note that the timestamp and counter fields must
+ be stored big endian unlike the rest of BSON. This is because they are compared byte-by-byte and we want to ensure
a mostly increasing order.
*/
class OID {
@@ -43,8 +43,8 @@ namespace mongo {
/** init from a 24 char hex string */
explicit OID(const string &s) { init(s); }
- /** initialize to 'null' */
- void clear() { a = 0; b = 0; }
+ /** initialize to 'null' */
+ void clear() { a = 0; b = 0; }
const unsigned char *getData() const { return data; }
@@ -59,7 +59,7 @@ namespace mongo {
string toString() const { return str(); }
static OID gen() { OID o; o.init(); return o; }
-
+
/** sets the contents to a new oid / randomized value */
void init();
@@ -71,9 +71,9 @@ namespace mongo {
time_t asTimeT();
Date_t asDateT() { return asTimeT() * (long long)1000; }
-
+
bool isSet() const { return a || b; }
-
+
/** call this after a fork to update the process id */
static void justForked();
@@ -81,7 +81,7 @@ namespace mongo {
static void regenMachineId(); // used by unit tests
private:
- struct MachineAndPid {
+ struct MachineAndPid {
unsigned char _machineNumber[3];
unsigned short _pid;
bool operator!=(const OID::MachineAndPid& rhs) const;
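// Illustrative sketch (editor's example): generating an OID and reading
// back the timestamp encoded big endian in its first four bytes.
OID id = OID::gen();
cout << id.str() << endl;               // 24 hex characters
time_t created = id.asTimeT();          // the 4-byte seconds prefix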
diff --git a/bson/ordering.h b/bson/ordering.h
index fbbfbece5e3..749e20dd4ea 100644
--- a/bson/ordering.h
+++ b/bson/ordering.h
@@ -23,7 +23,7 @@ namespace mongo {
The constructor is private to make conversion more explicit so we notice where we call make().
Over time we should push this up higher and higher.
*/
- class Ordering {
+ class Ordering {
const unsigned bits;
const unsigned nkeys;
Ordering(unsigned b,unsigned n) : bits(b),nkeys(n) { }
@@ -32,13 +32,13 @@ namespace mongo {
get(0) == 1
get(1) == -1
*/
- int get(int i) const {
+ int get(int i) const {
return ((1 << i) & bits) ? -1 : 1;
}
// for woCompare...
unsigned descending(unsigned mask) const { return bits & mask; }
-
+
operator string() const {
StringBuilder buf(32);
for ( unsigned i=0; i<nkeys; i++)
@@ -50,7 +50,7 @@ namespace mongo {
unsigned b = 0;
BSONObjIterator k(obj);
unsigned n = 0;
- while( 1 ) {
+ while( 1 ) {
BSONElement e = k.next();
if( e.eoo() )
break;
@@ -62,5 +62,5 @@ namespace mongo {
return Ordering(b,n);
}
};
-
+
}
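// Illustrative sketch (editor's example, assuming the make() factory
// named in the comment above takes the index key pattern): one bit per
// key records the direction, and get(i) recovers +1/-1.
Ordering ord = Ordering::make(BSON("a" << 1 << "b" << -1));
int sa = ord.get(0);                    // 1, ascending
int sb = ord.get(1);                    // -1, descending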
diff --git a/bson/stringdata.h b/bson/stringdata.h
index 0c3dae415b1..46cdb7adbe0 100644
--- a/bson/stringdata.h
+++ b/bson/stringdata.h
@@ -31,18 +31,18 @@ namespace mongo {
// go around the fact that string literals in C++ are char[N]'s.
//
// Note that the object StringData wraps around must be alive while the StringData
- // is.
+ // is.
class StringData {
public:
// Construct a StringData explicitly, for the case where the length of
// string is not known. 'c' must be a pointer to a null-terminated string.
- StringData( const char* c )
+ StringData( const char* c )
: _data(c), _size((unsigned) strlen(c)) {}
- // Construct a StringData explicitly, for the case where the length of the string
- // is already known. 'c' must be a pointer to a null-terminated string, and strlenOfc
- // must be the length that std::strlen(c) would return, a.k.a the index of the
+ // Construct a StringData explicitly, for the case where the length of the string
+ // is already known. 'c' must be a pointer to a null-terminated string, and strlenOfc
+ // must be the length that std::strlen(c) would return, a.k.a. the index of the
// terminator in c.
StringData( const char* c, size_t strlenOfc )
: _data(c), _size((unsigned) strlenOfc) {}
@@ -50,7 +50,7 @@ namespace mongo {
// Construct a StringData explicitly, for the case of a std::string.
StringData( const string& s )
: _data(s.c_str()), _size((unsigned) s.size()) {}
-
+
// Construct a StringData explicitly, for the case of a literal whose size is
// known at compile time.
struct LiteralTag {};
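// Illustrative sketch (editor's example): the three constructors above.
// The wrapped storage must stay alive for the StringData's lifetime.
string s = "hello";
StringData a("hello");                  // length computed via strlen
StringData b("hello", 5);               // length supplied by the caller
StringData c(s);                        // wraps the std::string's buffer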
diff --git a/bson/util/atomic_int.h b/bson/util/atomic_int.h
index d8169da740e..15735524aa2 100644
--- a/bson/util/atomic_int.h
+++ b/bson/util/atomic_int.h
@@ -24,7 +24,7 @@
namespace mongo {
- struct AtomicUInt{
+ struct AtomicUInt {
AtomicUInt() : x(0) {}
AtomicUInt(unsigned z) : x(z) { }
@@ -35,44 +35,44 @@ namespace mongo {
inline AtomicUInt operator++(int);// postfix++
inline AtomicUInt operator--(); // --prefix
inline AtomicUInt operator--(int); // postfix--
-
+
inline void zero() { x = 0; } // TODO: this isn't thread safe
-
+
volatile unsigned x;
};
#if defined(_WIN32)
- AtomicUInt AtomicUInt::operator++(){
+ AtomicUInt AtomicUInt::operator++() {
// InterlockedIncrement returns the new value
return InterlockedIncrement((volatile long*)&x); //long is 32bits in Win64
}
- AtomicUInt AtomicUInt::operator++(int){
+ AtomicUInt AtomicUInt::operator++(int) {
return InterlockedIncrement((volatile long*)&x)-1;
}
- AtomicUInt AtomicUInt::operator--(){
+ AtomicUInt AtomicUInt::operator--() {
return InterlockedDecrement((volatile long*)&x);
}
- AtomicUInt AtomicUInt::operator--(int){
+ AtomicUInt AtomicUInt::operator--(int) {
return InterlockedDecrement((volatile long*)&x)+1;
}
#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
// this is in GCC >= 4.1
- AtomicUInt AtomicUInt::operator++(){
+ AtomicUInt AtomicUInt::operator++() {
return __sync_add_and_fetch(&x, 1);
}
- AtomicUInt AtomicUInt::operator++(int){
+ AtomicUInt AtomicUInt::operator++(int) {
return __sync_fetch_and_add(&x, 1);
}
- AtomicUInt AtomicUInt::operator--(){
+ AtomicUInt AtomicUInt::operator--() {
return __sync_add_and_fetch(&x, -1);
}
- AtomicUInt AtomicUInt::operator--(int){
+ AtomicUInt AtomicUInt::operator--(int) {
return __sync_fetch_and_add(&x, -1);
}
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
// from boost 1.39 interprocess/detail/atomic.hpp
- inline unsigned atomic_int_helper(volatile unsigned *x, int val){
+ inline unsigned atomic_int_helper(volatile unsigned *x, int val) {
int r;
asm volatile
(
@@ -84,16 +84,16 @@ namespace mongo {
);
return r;
}
- AtomicUInt AtomicUInt::operator++(){
+ AtomicUInt AtomicUInt::operator++() {
return atomic_int_helper(&x, 1)+1;
}
- AtomicUInt AtomicUInt::operator++(int){
+ AtomicUInt AtomicUInt::operator++(int) {
return atomic_int_helper(&x, 1);
}
- AtomicUInt AtomicUInt::operator--(){
+ AtomicUInt AtomicUInt::operator--() {
return atomic_int_helper(&x, -1)-1;
}
- AtomicUInt AtomicUInt::operator--(int){
+ AtomicUInt AtomicUInt::operator--(int) {
return atomic_int_helper(&x, -1);
}
#else
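// Illustrative sketch (editor's example): AtomicUInt keeps the usual
// increment semantics -- postfix returns the prior value, prefix the
// new one -- but performs the update atomically.
AtomicUInt counter;
unsigned before = (counter++).x;        // 0; counter is now 1
unsigned after  = (++counter).x;        // 2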
diff --git a/bson/util/builder.h b/bson/util/builder.h
index 169f137fb02..6f4ff9eabd3 100644
--- a/bson/util/builder.h
+++ b/bson/util/builder.h
@@ -42,9 +42,9 @@ namespace mongo {
is slightly larger than a user object for example.
*/
const int BSONObjMaxInternalSize = BSONObjMaxUserSize + ( 16 * 1024 );
-
+
const int BufferMaxSize = 64 * 1024 * 1024;
-
+
class StringBuilder;
void msgasserted(int msgid, const char *msg);
@@ -56,7 +56,8 @@ namespace mongo {
data = (char *) malloc(size);
if( data == 0 )
msgasserted(10000, "out of memory BufBuilder");
- } else {
+ }
+ else {
data = 0;
}
l = 0;
@@ -72,16 +73,16 @@ namespace mongo {
}
}
- void reset( int maxSize = 0 ){
+ void reset( int maxSize = 0 ) {
l = 0;
- if ( maxSize && size > maxSize ){
+ if ( maxSize && size > maxSize ) {
free(data);
data = (char*)malloc(maxSize);
size = maxSize;
- }
+ }
}
- /** leave room for some stuff later
+ /** leave room for some stuff later
@return pointer to the region that was skipped. The pointer may change later (on realloc), so it is for immediate use only
*/
char* skip(int n) { return grow(n); }
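// Illustrative sketch (editor's example): the reserve-then-backfill
// pattern skip() enables; BSONObjBuilder uses it to leave four bytes
// for the object length. Re-derive the pointer after later appends,
// since grow() may realloc the buffer.
BufBuilder bb;
int lenOffset = bb.len();
bb.skip(4);                             // placeholder for a length prefix
bb.appendStr("payload");
*reinterpret_cast<int*>(bb.buf() + lenOffset) = bb.len();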
@@ -93,10 +94,10 @@ namespace mongo {
/* assume ownership of the buffer - you must then free() it */
void decouple() { data = 0; }
- void appendChar(char j){
+ void appendChar(char j) {
*((char*)grow(sizeof(char))) = j;
}
- void appendNum(char j){
+ void appendNum(char j) {
*((char*)grow(sizeof(char))) = j;
}
void appendNum(short j) {
@@ -126,7 +127,7 @@ namespace mongo {
}
template<class T>
- void appendStruct(const T& s) {
+ void appendStruct(const T& s) {
appendBuf(&s, sizeof(T));
}
@@ -151,7 +152,7 @@ namespace mongo {
private:
/* "slow" portion of 'grow()' */
- void NOINLINE_DECL grow_reallocate(){
+ void NOINLINE_DECL grow_reallocate() {
int a = size * 2;
if ( a == 0 )
a = 512;
@@ -180,45 +181,45 @@ namespace mongo {
class StringBuilder {
public:
StringBuilder( int initsize=256 )
- : _buf( initsize ){
+ : _buf( initsize ) {
}
- StringBuilder& operator<<( double x ){
+ StringBuilder& operator<<( double x ) {
return SBNUM( x , 25 , "%g" );
}
- StringBuilder& operator<<( int x ){
+ StringBuilder& operator<<( int x ) {
return SBNUM( x , 11 , "%d" );
}
- StringBuilder& operator<<( unsigned x ){
+ StringBuilder& operator<<( unsigned x ) {
return SBNUM( x , 11 , "%u" );
}
- StringBuilder& operator<<( long x ){
+ StringBuilder& operator<<( long x ) {
return SBNUM( x , 22 , "%ld" );
}
- StringBuilder& operator<<( unsigned long x ){
+ StringBuilder& operator<<( unsigned long x ) {
return SBNUM( x , 22 , "%lu" );
}
- StringBuilder& operator<<( long long x ){
+ StringBuilder& operator<<( long long x ) {
return SBNUM( x , 22 , "%lld" );
}
- StringBuilder& operator<<( unsigned long long x ){
+ StringBuilder& operator<<( unsigned long long x ) {
return SBNUM( x , 22 , "%llu" );
}
- StringBuilder& operator<<( short x ){
+ StringBuilder& operator<<( short x ) {
return SBNUM( x , 8 , "%hd" );
}
- StringBuilder& operator<<( char c ){
+ StringBuilder& operator<<( char c ) {
_buf.grow( 1 )[0] = c;
return *this;
}
- void appendDoubleNice( double x ){
+ void appendDoubleNice( double x ) {
int prev = _buf.l;
char * start = _buf.grow( 32 );
int z = sprintf( start , "%.16g" , x );
assert( z >= 0 );
_buf.l = prev + z;
- if( strchr(start, '.') == 0 && strchr(start, 'E') == 0 && strchr(start, 'N') == 0 ){
+ if( strchr(start, '.') == 0 && strchr(start, 'E') == 0 && strchr(start, 'N') == 0 ) {
write( ".0" , 2 );
}
}
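// Illustrative sketch (editor's example): appendDoubleNice() keeps a
// round double recognizable as a double in the output.
StringBuilder sb;
sb.appendDoubleNice(3.0);               // emits "3.0", not "3"
sb.appendDoubleNice(0.5);               // emits "0.5" unchanged
string out = sb.str();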
@@ -227,13 +228,13 @@ namespace mongo {
void append( const StringData& str ) { memcpy( _buf.grow( str.size() ) , str.data() , str.size() ); }
- StringBuilder& operator<<( const StringData& str ){
+ StringBuilder& operator<<( const StringData& str ) {
append( str );
return *this;
}
-
+
void reset( int maxSize = 0 ) { _buf.reset( maxSize ); }
-
+
std::string str() const { return std::string(_buf.data, _buf.l); }
private:
@@ -249,7 +250,7 @@ namespace mongo {
int z = sprintf( _buf.grow(maxSize) , macro , (val) );
assert( z >= 0 );
_buf.l = prev + z;
- return *this;
+ return *this;
}
};
diff --git a/bson/util/misc.h b/bson/util/misc.h
index cad9a284458..b31f36f49ac 100644
--- a/bson/util/misc.h
+++ b/bson/util/misc.h
@@ -34,7 +34,7 @@ namespace mongo {
buf[24] = 0; // don't want the \n
}
- inline string time_t_to_String(time_t t = time(0) ){
+ inline string time_t_to_String(time_t t = time(0) ) {
char buf[64];
#if defined(_WIN32)
ctime_s(buf, sizeof(buf), &t);
@@ -76,7 +76,7 @@ namespace mongo {
Date_t(unsigned long long m): millis(m) {}
operator unsigned long long&() { return millis; }
operator const unsigned long long&() const { return millis; }
- string toString() const {
+ string toString() const {
char buf[64];
time_t_to_String(millis/1000, buf);
return buf;
diff --git a/client/clientOnly.cpp b/client/clientOnly.cpp
index c744c862e81..0c1d7603715 100644
--- a/client/clientOnly.cpp
+++ b/client/clientOnly.cpp
@@ -37,12 +37,12 @@ namespace mongo {
out() << "exiting" << endl;
::exit( returnCode );
}
-
- bool inShutdown(){
+
+ bool inShutdown() {
return dbexitCalled;
}
- void setupSignals(){
+ void setupSignals() {
// maybe should do SIGPIPE here, not sure
}
@@ -50,20 +50,20 @@ namespace mongo {
return "in client only mode";
}
- bool haveLocalShardingInfo( const string& ns ){
+ bool haveLocalShardingInfo( const string& ns ) {
return false;
}
- DBClientBase * createDirectClient(){
+ DBClientBase * createDirectClient() {
uassert( 10256 , "no createDirectClient in clientOnly" , 0 );
return 0;
}
- void Shard::getAllShards( vector<Shard>& all ){
+ void Shard::getAllShards( vector<Shard>& all ) {
assert(0);
}
- bool Shard::isAShard( const string& ident ){
+ bool Shard::isAShard( const string& ident ) {
assert(0);
return false;
}
diff --git a/client/connpool.cpp b/client/connpool.cpp
index 25098190e85..dce6b852759 100644
--- a/client/connpool.cpp
+++ b/client/connpool.cpp
@@ -27,9 +27,9 @@
namespace mongo {
// ------ PoolForHost ------
-
- PoolForHost::~PoolForHost(){
- while ( ! _pool.empty() ){
+
+ PoolForHost::~PoolForHost() {
+ while ( ! _pool.empty() ) {
StoredConnection sc = _pool.top();
delete sc.conn;
_pool.pop();
@@ -39,64 +39,64 @@ namespace mongo {
void PoolForHost::done( DBClientBase * c ) {
_pool.push(c);
}
-
+
DBClientBase * PoolForHost::get() {
-
+
time_t now = time(0);
-
- while ( ! _pool.empty() ){
+
+ while ( ! _pool.empty() ) {
StoredConnection sc = _pool.top();
_pool.pop();
if ( sc.ok( now ) )
return sc.conn;
delete sc.conn;
}
-
+
return NULL;
}
-
+
void PoolForHost::flush() {
vector<StoredConnection> all;
- while ( ! _pool.empty() ){
+ while ( ! _pool.empty() ) {
StoredConnection c = _pool.top();
_pool.pop();
all.push_back( c );
bool res;
c.conn->isMaster( res );
}
-
- for ( vector<StoredConnection>::iterator i=all.begin(); i != all.end(); ++i ){
+
+ for ( vector<StoredConnection>::iterator i=all.begin(); i != all.end(); ++i ) {
_pool.push( *i );
}
}
- PoolForHost::StoredConnection::StoredConnection( DBClientBase * c ){
+ PoolForHost::StoredConnection::StoredConnection( DBClientBase * c ) {
conn = c;
when = time(0);
}
- bool PoolForHost::StoredConnection::ok( time_t now ){
+ bool PoolForHost::StoredConnection::ok( time_t now ) {
// if connection has been idle for an hour, kill it
return ( now - when ) < 3600;
}
- void PoolForHost::createdOne( DBClientBase * base){
+ void PoolForHost::createdOne( DBClientBase * base) {
if ( _created == 0 )
_type = base->type();
- _created++;
+ _created++;
}
// ------ DBConnectionPool ------
DBConnectionPool pool;
-
+
DBClientBase* DBConnectionPool::_get(const string& ident) {
scoped_lock L(_mutex);
PoolForHost& p = _pools[ident];
return p.get();
}
- DBClientBase* DBConnectionPool::_finishCreate( const string& host , DBClientBase* conn ){
+ DBClientBase* DBConnectionPool::_finishCreate( const string& host , DBClientBase* conn ) {
{
scoped_lock L(_mutex);
PoolForHost& p = _pools[host];
@@ -105,85 +105,85 @@ namespace mongo {
onCreate( conn );
onHandedOut( conn );
-
+
return conn;
}
DBClientBase* DBConnectionPool::get(const ConnectionString& url) {
DBClientBase * c = _get( url.toString() );
- if ( c ){
+ if ( c ) {
onHandedOut( c );
return c;
}
-
+
string errmsg;
c = url.connect( errmsg );
uassert( 13328 , _name + ": connect failed " + url.toString() + " : " + errmsg , c );
-
+
return _finishCreate( url.toString() , c );
}
-
+
DBClientBase* DBConnectionPool::get(const string& host) {
DBClientBase * c = _get( host );
- if ( c ){
+ if ( c ) {
onHandedOut( c );
return c;
}
-
+
string errmsg;
ConnectionString cs = ConnectionString::parse( host , errmsg );
uassert( 13071 , (string)"invalid hostname [" + host + "]" + errmsg , cs.isValid() );
-
+
c = cs.connect( errmsg );
uassert( 11002 , _name + ": connect failed " + host + " : " + errmsg , c );
return _finishCreate( host , c );
}
- DBConnectionPool::~DBConnectionPool(){
+ DBConnectionPool::~DBConnectionPool() {
// connection closing is handled by ~PoolForHost
}
- void DBConnectionPool::flush(){
+ void DBConnectionPool::flush() {
scoped_lock L(_mutex);
- for ( map<string,PoolForHost>::iterator i = _pools.begin(); i != _pools.end(); i++ ){
+ for ( map<string,PoolForHost>::iterator i = _pools.begin(); i != _pools.end(); i++ ) {
PoolForHost& p = i->second;
p.flush();
}
}
- void DBConnectionPool::addHook( DBConnectionHook * hook ){
+ void DBConnectionPool::addHook( DBConnectionHook * hook ) {
_hooks.push_back( hook );
}
- void DBConnectionPool::onCreate( DBClientBase * conn ){
+ void DBConnectionPool::onCreate( DBClientBase * conn ) {
if ( _hooks.size() == 0 )
return;
-
- for ( list<DBConnectionHook*>::iterator i = _hooks.begin(); i != _hooks.end(); i++ ){
+
+ for ( list<DBConnectionHook*>::iterator i = _hooks.begin(); i != _hooks.end(); i++ ) {
(*i)->onCreate( conn );
}
}
- void DBConnectionPool::onHandedOut( DBClientBase * conn ){
+ void DBConnectionPool::onHandedOut( DBClientBase * conn ) {
if ( _hooks.size() == 0 )
return;
-
- for ( list<DBConnectionHook*>::iterator i = _hooks.begin(); i != _hooks.end(); i++ ){
+
+ for ( list<DBConnectionHook*>::iterator i = _hooks.begin(); i != _hooks.end(); i++ ) {
(*i)->onHandedOut( conn );
}
}
- void DBConnectionPool::appendInfo( BSONObjBuilder& b ){
+ void DBConnectionPool::appendInfo( BSONObjBuilder& b ) {
BSONObjBuilder bb( b.subobjStart( "hosts" ) );
int avail = 0;
long long created = 0;
-
-
+
+
map<ConnectionString::ConnectionType,long long> createdByType;
{
scoped_lock lk( _mutex );
- for ( map<string,PoolForHost>::iterator i=_pools.begin(); i!=_pools.end(); ++i ){
+ for ( map<string,PoolForHost>::iterator i=_pools.begin(); i!=_pools.end(); ++i ) {
string s = i->first;
BSONObjBuilder temp( bb.subobjStart( s ) );
temp.append( "available" , i->second.numAvailable() );
@@ -198,10 +198,10 @@ namespace mongo {
}
}
bb.done();
-
+
{
BSONObjBuilder temp( bb.subobjStart( "createdByType" ) );
- for ( map<ConnectionString::ConnectionType,long long>::iterator i=createdByType.begin(); i!=createdByType.end(); ++i ){
+ for ( map<ConnectionString::ConnectionType,long long>::iterator i=createdByType.begin(); i!=createdByType.end(); ++i ) {
temp.appendNumber( ConnectionString::typeToString( i->first ) , i->second );
}
temp.done();
@@ -211,15 +211,15 @@ namespace mongo {
b.appendNumber( "totalCreated" , created );
}
- ScopedDbConnection * ScopedDbConnection::steal(){
+ ScopedDbConnection * ScopedDbConnection::steal() {
assert( _conn );
ScopedDbConnection * n = new ScopedDbConnection( _host , _conn );
_conn = 0;
return n;
}
-
+
ScopedDbConnection::~ScopedDbConnection() {
- if ( _conn ){
+ if ( _conn ) {
if ( ! _conn->isFailed() ) {
/* see done() comments above for why we log this line */
log() << "~ScopedDbConnection: _conn != null" << endl;
@@ -229,20 +229,20 @@ namespace mongo {
}
ScopedDbConnection::ScopedDbConnection(const Shard& shard )
- : _host( shard.getConnString() ) , _conn( pool.get(_host) ){
+ : _host( shard.getConnString() ) , _conn( pool.get(_host) ) {
}
-
+
ScopedDbConnection::ScopedDbConnection(const Shard* shard )
- : _host( shard->getConnString() ) , _conn( pool.get(_host) ){
+ : _host( shard->getConnString() ) , _conn( pool.get(_host) ) {
}
class PoolFlushCmd : public Command {
public:
- PoolFlushCmd() : Command( "connPoolSync" , false , "connpoolsync" ){}
+ PoolFlushCmd() : Command( "connPoolSync" , false , "connpoolsync" ) {}
virtual void help( stringstream &help ) const { help<<"internal"; }
virtual LockType locktype() const { return NONE; }
- virtual bool run(const string&, mongo::BSONObj&, std::string&, mongo::BSONObjBuilder& result, bool){
+ virtual bool run(const string&, mongo::BSONObj&, std::string&, mongo::BSONObjBuilder& result, bool) {
pool.flush();
return true;
}
@@ -254,10 +254,10 @@ namespace mongo {
class PoolStats : public Command {
public:
- PoolStats() : Command( "connPoolStats" ){}
+ PoolStats() : Command( "connPoolStats" ) {}
virtual void help( stringstream &help ) const { help<<"stats about connection pool"; }
virtual LockType locktype() const { return NONE; }
- virtual bool run(const string&, mongo::BSONObj&, std::string&, mongo::BSONObjBuilder& result, bool){
+ virtual bool run(const string&, mongo::BSONObj&, std::string&, mongo::BSONObjBuilder& result, bool) {
pool.appendInfo( result );
result.append( "numDBClientConnection" , DBClientConnection::getNumConnections() );
result.append( "numAScopedConnection" , AScopedConnection::getNumConnections() );
diff --git a/client/connpool.h b/client/connpool.h
index c2673a44154..4f0245cbee0 100644
--- a/client/connpool.h
+++ b/client/connpool.h
@@ -24,7 +24,7 @@
namespace mongo {
class Shard;
-
+
/**
* not thread safe
* thread safety is handled by DBConnectionPool
@@ -32,9 +32,9 @@ namespace mongo {
class PoolForHost {
public:
PoolForHost()
- : _created(0){}
-
- PoolForHost( const PoolForHost& other ){
+ : _created(0) {}
+
+ PoolForHost( const PoolForHost& other ) {
assert(other._pool.size() == 0);
_created = other._created;
assert( _created == 0 );
@@ -49,16 +49,16 @@ namespace mongo {
ConnectionString::ConnectionType type() const { assert(_created); return _type; }
- /**
+ /**
* gets a connection or return NULL
*/
DBClientBase * get();
-
+
void done( DBClientBase * c );
-
+
void flush();
private:
-
+
struct StoredConnection {
StoredConnection( DBClientBase * c );
@@ -72,24 +72,24 @@ namespace mongo {
long long _created;
ConnectionString::ConnectionType _type;
};
-
+
class DBConnectionHook {
public:
- virtual ~DBConnectionHook(){}
- virtual void onCreate( DBClientBase * conn ){}
- virtual void onHandedOut( DBClientBase * conn ){}
+ virtual ~DBConnectionHook() {}
+ virtual void onCreate( DBClientBase * conn ) {}
+ virtual void onHandedOut( DBClientBase * conn ) {}
};
/** Database connection pool.
Generally, use ScopedDbConnection and do not call these directly.
- This class, so far, is suitable for use with unauthenticated connections.
- Support for authenticated connections requires some adjustements: please
+ This class, so far, is suitable for use with unauthenticated connections.
+ Support for authenticated connections requires some adjustments: please
request...
Usage:
-
+
{
ScopedDbConnection c("myserver");
c.conn()...
@@ -100,15 +100,15 @@ namespace mongo {
map<string,PoolForHost> _pools; // servername -> pool
list<DBConnectionHook*> _hooks;
string _name;
-
+
DBClientBase* _get( const string& ident );
-
+
DBClientBase* _finishCreate( const string& ident , DBClientBase* conn );
- public:
+ public:
DBConnectionPool() : _mutex("DBConnectionPool") , _name( "dbconnectionpool" ) { }
~DBConnectionPool();
-
+
/** right now just controls some asserts. defaults to "dbconnectionpool" */
void setName( const string& name ) { _name = name; }
@@ -121,7 +121,7 @@ namespace mongo {
DBClientBase *get(const ConnectionString& host);
void release(const string& host, DBClientBase *c) {
- if ( c->isFailed() ){
+ if ( c->isFailed() ) {
delete c;
return;
}
@@ -131,7 +131,7 @@ namespace mongo {
void addHook( DBConnectionHook * hook );
void appendInfo( BSONObjBuilder& b );
};
-
+
extern DBConnectionPool pool;
class AScopedConnection : boost::noncopyable {
@@ -152,21 +152,21 @@ namespace mongo {
};
/** Use to get a connection from the pool. On exceptions things
- clean up nicely (i.e. the socket gets closed automatically when the
+ clean up nicely (i.e. the socket gets closed automatically when the
ScopedDbConnection goes out of scope).
*/
class ScopedDbConnection : public AScopedConnection {
public:
/** the main constructor you want to use
- throws UserException if can't connect
+ throws UserException if can't connect
*/
explicit ScopedDbConnection(const string& host) : _host(host), _conn( pool.get(host) ) {}
-
+
ScopedDbConnection() : _host( "" ) , _conn(0) {}
/* @param conn - bind to an existing connection */
ScopedDbConnection(const string& host, DBClientBase* conn ) : _host( host ) , _conn( conn ) {}
-
+
/** throws UserException if can't connect */
explicit ScopedDbConnection(const ConnectionString& url ) : _host(url.toString()), _conn( pool.get(url) ) {}
@@ -177,11 +177,11 @@ namespace mongo {
~ScopedDbConnection();
/** get the associated connection object */
- DBClientBase* operator->(){
+ DBClientBase* operator->() {
uassert( 11004 , "connection was returned to the pool already" , _conn );
- return _conn;
+ return _conn;
}
-
+
/** get the associated connection object */
DBClientBase& conn() {
uassert( 11005 , "connection was returned to the pool already" , _conn );
@@ -193,7 +193,7 @@ namespace mongo {
uassert( 13102 , "connection was returned to the pool already" , _conn );
return _conn;
}
-
+
string getHost() const { return _host; }
/** Force closure of the connection. You should call this if you leave it in
@@ -205,8 +205,8 @@ namespace mongo {
}
/** Call this when you are done with the connection.
-
- If you do not call done() before this object goes out of scope,
+
+ If you do not call done() before this object goes out of scope,
we can't be sure we fully read all expected data of a reply on the socket, so
we don't try to reuse the connection in that situation.
*/
@@ -214,7 +214,7 @@ namespace mongo {
if ( ! _conn )
return;
- /* we could do this, but instead of assume one is using autoreconnect mode on the connection
+ /* we could do this, but instead we assume one is using autoreconnect mode on the connection
if ( _conn->isFailed() )
kill();
else
@@ -222,7 +222,7 @@ namespace mongo {
pool.release(_host, _conn);
_conn = 0;
}
-
+
ScopedDbConnection * steal();
private:
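// Illustrative sketch (editor's example): the done()/kill() contract
// described above. "myserver:27017" is a placeholder host; kill() is
// the forced-close helper referenced in the comments, and insert() and
// getLastError() are the usual DBClientBase helpers.
ScopedDbConnection c("myserver:27017");
c->insert("test.coll", BSON("x" << 1));
string err = c->getLastError();
if (err.empty())
    c.done();                           // socket goes back to the pool
else
    c.kill();                           // drop a possibly dirty socket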
diff --git a/client/constants.h b/client/constants.h
index 66aa9b1516f..54f3fd216f2 100644
--- a/client/constants.h
+++ b/client/constants.h
@@ -2,22 +2,22 @@
#pragma once
-namespace mongo {
+namespace mongo {
/* query results include a 32 result flag word consisting of these bits */
enum ResultFlagType {
- /* returned, with zero results, when getMore is called but the cursor id
+ /* returned, with zero results, when getMore is called but the cursor id
is not valid at the server. */
- ResultFlag_CursorNotFound = 1,
-
+ ResultFlag_CursorNotFound = 1,
+
/* { $err : ... } is being returned */
- ResultFlag_ErrSet = 2,
-
+ ResultFlag_ErrSet = 2,
+
/* Have to update config from the server, usually $err is also set */
- ResultFlag_ShardConfigStale = 4,
-
- /* for backward compatability: this let's us know the server supports
- the QueryOption_AwaitData option. if it doesn't, a repl slave client should sleep
+ ResultFlag_ShardConfigStale = 4,
+
+ /* for backward compatibility: this lets us know the server supports
+ the QueryOption_AwaitData option. if it doesn't, a repl slave client should sleep
a little between getMore's.
*/
ResultFlag_AwaitCapable = 8
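// Illustrative sketch (editor's example): the flags form a bitmask, so
// a reply's result word is tested bit by bit; resultFlags stands in
// for the 32-bit word read from a query reply.
int resultFlags = ResultFlag_ErrSet | ResultFlag_ShardConfigStale;
if (resultFlags & ResultFlag_CursorNotFound) { /* cursor died server side */ }
if (resultFlags & ResultFlag_ErrSet)         { /* reply carries { $err : ... } */ }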
diff --git a/client/dbclient.cpp b/client/dbclient.cpp
index 8b768b94cff..e30a9bbbab7 100644
--- a/client/dbclient.cpp
+++ b/client/dbclient.cpp
@@ -32,7 +32,7 @@
namespace mongo {
DBClientBase* ConnectionString::connect( string& errmsg ) const {
- switch ( _type ){
+ switch ( _type ) {
case MASTER: {
DBClientConnection * c = new DBClientConnection(true);
log(1) << "creating new connection to:" << _servers[0] << endl;
@@ -42,11 +42,11 @@ namespace mongo {
}
return c;
}
-
- case PAIR:
+
+ case PAIR:
case SET: {
DBClientReplicaSet * set = new DBClientReplicaSet( _setName , _servers );
- if( ! set->connect() ){
+ if( ! set->connect() ) {
delete set;
errmsg = "connect failed to set ";
errmsg += toString();
@@ -54,7 +54,7 @@ namespace mongo {
}
return set;
}
-
+
case SYNC: {
// TODO , don't copy
list<HostAndPort> l;
@@ -62,41 +62,41 @@ namespace mongo {
l.push_back( _servers[i] );
return new SyncClusterConnection( l );
}
-
+
case INVALID:
throw UserException( 13421 , "trying to connect to invalid ConnectionString" );
break;
}
-
+
assert( 0 );
return 0;
}
- ConnectionString ConnectionString::parse( const string& host , string& errmsg ){
-
+ ConnectionString ConnectionString::parse( const string& host , string& errmsg ) {
+
string::size_type i = host.find( '/' );
- if ( i != string::npos && i != 0){
+ if ( i != string::npos && i != 0) {
// replica set
return ConnectionString( SET , host.substr( i + 1 ) , host.substr( 0 , i ) );
}
int numCommas = str::count( host , ',' );
-
- if( numCommas == 0 )
+
+ if( numCommas == 0 )
return ConnectionString( HostAndPort( host ) );
-
- if ( numCommas == 1 )
+
+ if ( numCommas == 1 )
return ConnectionString( PAIR , host );
if ( numCommas == 2 )
return ConnectionString( SYNC , host );
-
+
errmsg = (string)"invalid hostname [" + host + "]";
return ConnectionString(); // INVALID
}
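// Illustrative sketch (editor's example): the host-string grammar that
// parse() implements -- '/' marks a replica set name, otherwise the
// comma count picks the type. Hostnames are placeholders.
string errmsg;
ConnectionString single = ConnectionString::parse("localhost:27017", errmsg);              // MASTER
ConnectionString pair   = ConnectionString::parse("a.example,b.example", errmsg);          // PAIR
ConnectionString sync3  = ConnectionString::parse("a.example,b.example,c.example", errmsg);// SYNC
ConnectionString rs     = ConnectionString::parse("rs0/a.example,b.example", errmsg);      // SET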
- string ConnectionString::typeToString( ConnectionType type ){
- switch ( type ){
+ string ConnectionString::typeToString( ConnectionType type ) {
+ switch ( type ) {
case INVALID:
return "invalid";
case MASTER:
@@ -111,9 +111,9 @@ namespace mongo {
assert(0);
return "";
}
-
- Query& Query::where(const string &jscode, BSONObj scope) {
+
+ Query& Query::where(const string &jscode, BSONObj scope) {
/* use where() before sort() and hint() and explain(), else this will assert. */
assert( ! isComplex() );
BSONObjBuilder b;
@@ -131,44 +131,44 @@ namespace mongo {
obj = b.obj();
}
- Query& Query::sort(const BSONObj& s) {
+ Query& Query::sort(const BSONObj& s) {
appendComplex( "orderby", s );
- return *this;
+ return *this;
}
Query& Query::hint(BSONObj keyPattern) {
appendComplex( "$hint", keyPattern );
- return *this;
+ return *this;
}
Query& Query::explain() {
appendComplex( "$explain", true );
- return *this;
+ return *this;
}
-
+
Query& Query::snapshot() {
appendComplex( "$snapshot", true );
- return *this;
+ return *this;
}
-
+
Query& Query::minKey( const BSONObj &val ) {
appendComplex( "$min", val );
- return *this;
+ return *this;
}
Query& Query::maxKey( const BSONObj &val ) {
appendComplex( "$max", val );
- return *this;
+ return *this;
}
- bool Query::isComplex( bool * hasDollar ) const{
- if ( obj.hasElement( "query" ) ){
+ bool Query::isComplex( bool * hasDollar ) const {
+ if ( obj.hasElement( "query" ) ) {
if ( hasDollar )
hasDollar[0] = false;
return true;
}
- if ( obj.hasElement( "$query" ) ){
+ if ( obj.hasElement( "$query" ) ) {
if ( hasDollar )
hasDollar[0] = true;
return true;
@@ -176,12 +176,12 @@ namespace mongo {
return false;
}
-
+
BSONObj Query::getFilter() const {
bool hasDollar;
if ( ! isComplex( &hasDollar ) )
return obj;
-
+
return obj.getObjectField( hasDollar ? "$query" : "query" );
}
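// Illustrative sketch (editor's example, assuming the Query(BSONObj)
// constructor from dbclient.h): modifiers such as sort() wrap the
// predicate, and getFilter() recovers it in either form.
Query q = Query(BSON("x" << 1)).sort(BSON("y" << 1));
bool complex = q.isComplex();           // true: { query : { x : 1 }, orderby : { y : 1 } }
BSONObj filter = q.getFilter();         // { x : 1 }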
BSONObj Query::getSort() const {
@@ -200,8 +200,8 @@ namespace mongo {
bool Query::isExplain() const {
return isComplex() && obj.getBoolField( "$explain" );
}
-
- string Query::toString() const{
+
+ string Query::toString() const {
return obj.toString();
}
@@ -221,7 +221,7 @@ namespace mongo {
}
return _cachedAvailableOptions;
}
-
+
inline bool DBClientWithCommands::runCommand(const string &dbname, const BSONObj& cmd, BSONObj &info, int options) {
string ns = dbname + ".$cmd";
info = findOne(ns, cmd, 0 , options);
@@ -240,7 +240,7 @@ namespace mongo {
return runCommand(dbname, b.done(), *info);
}
- unsigned long long DBClientWithCommands::count(const string &_ns, const BSONObj& query, int options, int limit, int skip ) {
+ unsigned long long DBClientWithCommands::count(const string &_ns, const BSONObj& query, int options, int limit, int skip ) {
NamespaceString ns(_ns);
BSONObjBuilder b;
b.append( "count" , ns.coll );
@@ -258,27 +258,27 @@ namespace mongo {
BSONObj getlasterrorcmdobj = fromjson("{getlasterror:1}");
- BSONObj DBClientWithCommands::getLastErrorDetailed() {
+ BSONObj DBClientWithCommands::getLastErrorDetailed() {
BSONObj info;
runCommand("admin", getlasterrorcmdobj, info);
- return info;
+ return info;
}
- string DBClientWithCommands::getLastError() {
+ string DBClientWithCommands::getLastError() {
BSONObj info = getLastErrorDetailed();
return getLastErrorString( info );
}
-
- string DBClientWithCommands::getLastErrorString( const BSONObj& info ){
+
+ string DBClientWithCommands::getLastErrorString( const BSONObj& info ) {
BSONElement e = info["err"];
if( e.eoo() ) return "";
if( e.type() == Object ) return e.toString();
- return e.str();
+ return e.str();
}
BSONObj getpreverrorcmdobj = fromjson("{getpreverror:1}");
- BSONObj DBClientWithCommands::getPrevError() {
+ BSONObj DBClientWithCommands::getPrevError() {
BSONObj info;
runCommand("admin", getpreverrorcmdobj, info);
return info;
@@ -286,7 +286,7 @@ namespace mongo {
BSONObj getnoncecmdobj = fromjson("{getnonce:1}");
- string DBClientWithCommands::createPasswordDigest( const string & username , const string & clearTextPassword ){
+ string DBClientWithCommands::createPasswordDigest( const string & username , const string & clearTextPassword ) {
md5digest d;
{
md5_state_t st;
@@ -300,9 +300,9 @@ namespace mongo {
}
bool DBClientWithCommands::auth(const string &dbname, const string &username, const string &password_text, string& errmsg, bool digestPassword) {
- string password = password_text;
- if( digestPassword )
- password = createPasswordDigest( username , password_text );
+ string password = password_text;
+ if( digestPassword )
+ password = createPasswordDigest( username , password_text );
BSONObj info;
string nonce;
@@ -333,8 +333,8 @@ namespace mongo {
b << "key" << digestToString( d );
authCmd = b.done();
}
-
- if( runCommand(dbname, authCmd, info) )
+
+ if( runCommand(dbname, authCmd, info) )
return true;
errmsg = info.toString();
@@ -345,7 +345,7 @@ namespace mongo {
bool DBClientWithCommands::isMaster(bool& isMaster, BSONObj *info) {
BSONObj o;
- if ( info == 0 )
+ if ( info == 0 )
info = &o;
bool ok = runCommand("admin", ismastercmdobj, *info);
isMaster = info->getField("ismaster").trueValue();
@@ -354,7 +354,7 @@ namespace mongo {
bool DBClientWithCommands::createCollection(const string &ns, long long size, bool capped, int max, BSONObj *info) {
BSONObj o;
- if ( info == 0 ) info = &o;
+ if ( info == 0 ) info = &o;
BSONObjBuilder b;
string db = nsToDatabase(ns.c_str());
b.append("create", ns.c_str() + db.length() + 1);
@@ -404,7 +404,7 @@ namespace mongo {
return false;
}
- BSONObj DBClientWithCommands::mapreduce(const string &ns, const string &jsmapf, const string &jsreducef, BSONObj query, const string& outputcolname) {
+ BSONObj DBClientWithCommands::mapreduce(const string &ns, const string &jsmapf, const string &jsreducef, BSONObj query, const string& outputcolname) {
BSONObjBuilder b;
b.append("mapreduce", nsGetCollection(ns));
b.appendCode("map", jsmapf);
@@ -435,27 +435,27 @@ namespace mongo {
return eval(dbname, jscode, info, retValue);
}
- list<string> DBClientWithCommands::getDatabaseNames(){
+ list<string> DBClientWithCommands::getDatabaseNames() {
BSONObj info;
uassert( 10005 , "listdatabases failed" , runCommand( "admin" , BSON( "listDatabases" << 1 ) , info ) );
uassert( 10006 , "listDatabases.databases not array" , info["databases"].type() == Array );
-
+
list<string> names;
-
+
BSONObjIterator i( info["databases"].embeddedObjectUserCheck() );
- while ( i.more() ){
+ while ( i.more() ) {
names.push_back( i.next().embeddedObjectUserCheck()["name"].valuestr() );
}
return names;
}
- list<string> DBClientWithCommands::getCollectionNames( const string& db ){
+ list<string> DBClientWithCommands::getCollectionNames( const string& db ) {
list<string> names;
-
+
string ns = db + ".system.namespaces";
auto_ptr<DBClientCursor> c = query( ns.c_str() , BSONObj() );
- while ( c->more() ){
+ while ( c->more() ) {
string name = c->next()["name"].valuestr();
if ( name.find( "$" ) != string::npos )
continue;
@@ -464,9 +464,9 @@ namespace mongo {
return names;
}
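// Illustrative sketch (editor's example): walking every database and
// its collections through the two helpers above; localhost:27017 is a
// placeholder address.
DBClientConnection conn;
string errmsg;
if (conn.connect(HostAndPort("localhost:27017"), errmsg)) {
    list<string> dbs = conn.getDatabaseNames();
    for (list<string>::iterator d = dbs.begin(); d != dbs.end(); ++d) {
        list<string> colls = conn.getCollectionNames(*d);
    }
}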
- bool DBClientWithCommands::exists( const string& ns ){
+ bool DBClientWithCommands::exists( const string& ns ) {
list<string> names;
-
+
string db = nsGetDB( ns ) + ".system.namespaces";
BSONObj q = BSON( "name" << ns );
return count( db.c_str() , q ) != 0;
@@ -474,21 +474,21 @@ namespace mongo {
/* --- dbclientconnection --- */
- bool DBClientConnection::auth(const string &dbname, const string &username, const string &password_text, string& errmsg, bool digestPassword) {
- string password = password_text;
- if( digestPassword )
- password = createPasswordDigest( username , password_text );
+ bool DBClientConnection::auth(const string &dbname, const string &username, const string &password_text, string& errmsg, bool digestPassword) {
+ string password = password_text;
+ if( digestPassword )
+ password = createPasswordDigest( username , password_text );
- if( autoReconnect ) {
- /* note we remember the auth info before we attempt to auth -- if the connection is broken, we will
- then have it for the next autoreconnect attempt.
- */
- pair<string,string> p = pair<string,string>(username, password);
- authCache[dbname] = p;
- }
+ if( autoReconnect ) {
+ /* note we remember the auth info before we attempt to auth -- if the connection is broken, we will
+ then have it for the next autoreconnect attempt.
+ */
+ pair<string,string> p = pair<string,string>(username, password);
+ authCache[dbname] = p;
+ }
- return DBClientBase::auth(dbname, username, password.c_str(), errmsg, false);
- }
+ return DBClientBase::auth(dbname, username, password.c_str(), errmsg, false);
+ }
BSONObj DBClientInterface::findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn, int queryOptions) {
auto_ptr<DBClientCursor> c =
@@ -505,20 +505,20 @@ namespace mongo {
return c->nextSafe().copy();
}
- bool DBClientConnection::connect(const HostAndPort& server, string& errmsg){
+ bool DBClientConnection::connect(const HostAndPort& server, string& errmsg) {
_server = server;
_serverString = _server.toString();
return _connect( errmsg );
}
- bool DBClientConnection::_connect( string& errmsg ){
+ bool DBClientConnection::_connect( string& errmsg ) {
_serverString = _server.toString();
// we keep around SockAddr for connection life -- maybe MessagingPort
// requires that?
server.reset(new SockAddr(_server.host().c_str(), _server.port()));
p.reset(new MessagingPort( _so_timeout, _logLevel ));
- if (server->getAddr() == "0.0.0.0"){
+ if (server->getAddr() == "0.0.0.0") {
failed = true;
return false;
}
@@ -548,27 +548,27 @@ namespace mongo {
log(_logLevel) << "trying reconnect to " << _serverString << endl;
string errmsg;
failed = false;
- if ( ! _connect(errmsg) ) {
+ if ( ! _connect(errmsg) ) {
failed = true;
log(_logLevel) << "reconnect " << _serverString << " failed " << errmsg << endl;
throw SocketException(SocketException::CONNECT_ERROR);
- }
+ }
- log(_logLevel) << "reconnect " << _serverString << " ok" << endl;
- for( map< string, pair<string,string> >::iterator i = authCache.begin(); i != authCache.end(); i++ ) {
- const char *dbname = i->first.c_str();
- const char *username = i->second.first.c_str();
- const char *password = i->second.second.c_str();
- if( !DBClientBase::auth(dbname, username, password, errmsg, false) )
- log(_logLevel) << "reconnect: auth failed db:" << dbname << " user:" << username << ' ' << errmsg << '\n';
- }
+ log(_logLevel) << "reconnect " << _serverString << " ok" << endl;
+ for( map< string, pair<string,string> >::iterator i = authCache.begin(); i != authCache.end(); i++ ) {
+ const char *dbname = i->first.c_str();
+ const char *username = i->second.first.c_str();
+ const char *password = i->second.second.c_str();
+ if( !DBClientBase::auth(dbname, username, password, errmsg, false) )
+ log(_logLevel) << "reconnect: auth failed db:" << dbname << " user:" << username << ' ' << errmsg << '\n';
+ }
}
auto_ptr<DBClientCursor> DBClientBase::query(const string &ns, Query query, int nToReturn,
- int nToSkip, const BSONObj *fieldsToReturn, int queryOptions , int batchSize ) {
+ int nToSkip, const BSONObj *fieldsToReturn, int queryOptions , int batchSize ) {
auto_ptr<DBClientCursor> c( new DBClientCursor( this,
- ns, query.obj, nToReturn, nToSkip,
- fieldsToReturn, queryOptions , batchSize ) );
+ ns, query.obj, nToReturn, nToSkip,
+ fieldsToReturn, queryOptions , batchSize ) );
if ( c->init() )
return c;
return auto_ptr< DBClientCursor >( 0 );
@@ -589,14 +589,14 @@ namespace mongo {
}
boost::function<void(const BSONObj &)> _f;
};
-
+
unsigned long long DBClientConnection::query( boost::function<void(const BSONObj&)> f, const string& ns, Query query, const BSONObj *fieldsToReturn, int queryOptions ) {
DBClientFunConvertor fun;
fun._f = f;
boost::function<void(DBClientCursorBatchIterator &)> ptr( fun );
return DBClientConnection::query( ptr, ns, query, fieldsToReturn, queryOptions );
}
-
+
unsigned long long DBClientConnection::query( boost::function<void(DBClientCursorBatchIterator &)> f, const string& ns, Query query, const BSONObj *fieldsToReturn, int queryOptions ) {
// mask options
queryOptions &= (int)( QueryOption_NoCursorTimeout | QueryOption_SlaveOk );
@@ -604,11 +604,11 @@ namespace mongo {
bool doExhaust = ( availableOptions() & QueryOption_Exhaust );
if ( doExhaust ) {
- queryOptions |= (int)QueryOption_Exhaust;
+ queryOptions |= (int)QueryOption_Exhaust;
}
auto_ptr<DBClientCursor> c( this->query(ns, query, 0, 0, fieldsToReturn, queryOptions) );
uassert( 13386, "socket error for mapping query", c.get() );
-
+
if ( !doExhaust ) {
while( c->more() ) {
DBClientCursorBatchIterator i( *c );
@@ -618,21 +618,21 @@ namespace mongo {
return n;
}
- try {
- while( 1 ) {
- while( c->moreInCurrentBatch() ) {
+ try {
+ while( 1 ) {
+ while( c->moreInCurrentBatch() ) {
DBClientCursorBatchIterator i( *c );
f( i );
n += i.n();
}
- if( c->getCursorId() == 0 )
+ if( c->getCursorId() == 0 )
break;
c->exhaustReceiveMore();
}
}
- catch(std::exception&) {
+ catch(std::exception&) {
/* connection CANNOT be used anymore as more data may be on the way from the server.
we have to reconnect.
*/
@@ -660,16 +660,16 @@ namespace mongo {
void DBClientBase::insert( const string & ns , const vector< BSONObj > &v ) {
Message toSend;
-
+
BufBuilder b;
int opts = 0;
b.appendNum( opts );
b.appendStr( ns );
for( vector< BSONObj >::const_iterator i = v.begin(); i != v.end(); ++i )
i->appendSelfToBufBuilder( b );
-
+
toSend.setData( dbInsert, b.buf(), b.len() );
-
+
say( toSend );
}
@@ -713,63 +713,63 @@ namespace mongo {
say( toSend );
}
- auto_ptr<DBClientCursor> DBClientWithCommands::getIndexes( const string &ns ){
+ auto_ptr<DBClientCursor> DBClientWithCommands::getIndexes( const string &ns ) {
return query( Namespace( ns.c_str() ).getSisterNS( "system.indexes" ).c_str() , BSON( "ns" << ns ) );
}
-
- void DBClientWithCommands::dropIndex( const string& ns , BSONObj keys ){
+
+ void DBClientWithCommands::dropIndex( const string& ns , BSONObj keys ) {
dropIndex( ns , genIndexName( keys ) );
}
- void DBClientWithCommands::dropIndex( const string& ns , const string& indexName ){
+ void DBClientWithCommands::dropIndex( const string& ns , const string& indexName ) {
BSONObj info;
- if ( ! runCommand( nsToDatabase( ns.c_str() ) ,
- BSON( "deleteIndexes" << NamespaceString( ns ).coll << "index" << indexName ) ,
- info ) ){
+ if ( ! runCommand( nsToDatabase( ns.c_str() ) ,
+ BSON( "deleteIndexes" << NamespaceString( ns ).coll << "index" << indexName ) ,
+ info ) ) {
log(_logLevel) << "dropIndex failed: " << info << endl;
uassert( 10007 , "dropIndex failed" , 0 );
}
resetIndexCache();
}
-
- void DBClientWithCommands::dropIndexes( const string& ns ){
+
+ void DBClientWithCommands::dropIndexes( const string& ns ) {
BSONObj info;
- uassert( 10008 , "dropIndexes failed" , runCommand( nsToDatabase( ns.c_str() ) ,
- BSON( "deleteIndexes" << NamespaceString( ns ).coll << "index" << "*") ,
- info ) );
+ uassert( 10008 , "dropIndexes failed" , runCommand( nsToDatabase( ns.c_str() ) ,
+ BSON( "deleteIndexes" << NamespaceString( ns ).coll << "index" << "*") ,
+ info ) );
resetIndexCache();
}
- void DBClientWithCommands::reIndex( const string& ns ){
+ void DBClientWithCommands::reIndex( const string& ns ) {
list<BSONObj> all;
auto_ptr<DBClientCursor> i = getIndexes( ns );
- while ( i->more() ){
+ while ( i->more() ) {
all.push_back( i->next().getOwned() );
}
-
+
dropIndexes( ns );
-
- for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); i++ ){
+
+ for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); i++ ) {
BSONObj o = *i;
insert( Namespace( ns.c_str() ).getSisterNS( "system.indexes" ).c_str() , o );
}
-
+
}
-
- string DBClientWithCommands::genIndexName( const BSONObj& keys ){
+
+ string DBClientWithCommands::genIndexName( const BSONObj& keys ) {
stringstream ss;
-
+
bool first = 1;
for ( BSONObjIterator i(keys); i.more(); ) {
BSONElement f = i.next();
-
+
if ( first )
first = 0;
else
ss << "_";
-
+
ss << f.fieldName() << "_";
if( f.isNumber() )
ss << f.numberInt();
@@ -794,7 +794,7 @@ namespace mongo {
toSave.append( "name" , nn );
cacheKey += nn;
}
-
+
if ( unique )
toSave.appendBool( "unique", unique );
@@ -837,9 +837,10 @@ namespace mongo {
void DBClientConnection::say( Message &toSend ) {
checkConnection();
- try {
+ try {
port().say( toSend );
- } catch( SocketException & ) {
+ }
+ catch( SocketException & ) {
failed = true;
throw;
}
@@ -849,16 +850,16 @@ namespace mongo {
port().piggyBack( toSend );
}
- void DBClientConnection::recv( Message &m ) {
+ void DBClientConnection::recv( Message &m ) {
port().recv(m);
}
bool DBClientConnection::call( Message &toSend, Message &response, bool assertOk ) {
- /* todo: this is very ugly messagingport::call returns an error code AND can throw
- an exception. we should make it return void and just throw an exception anytime
+        /* todo: this is very ugly: messagingport::call returns an error code AND can throw
+ an exception. we should make it return void and just throw an exception anytime
it fails
*/
- try {
+ try {
if ( !port().call(toSend, response) ) {
failed = true;
if ( assertOk )
@@ -867,7 +868,7 @@ namespace mongo {
return false;
}
}
- catch( SocketException & ) {
+ catch( SocketException & ) {
failed = true;
throw;
}
@@ -888,15 +889,15 @@ namespace mongo {
}
}
- void DBClientConnection::killCursor( long long cursorId ){
+ void DBClientConnection::killCursor( long long cursorId ) {
BufBuilder b;
b.appendNum( (int)0 ); // reserved
b.appendNum( (int)1 ); // number
b.appendNum( cursorId );
-
+
Message m;
m.setData( dbKillCursors , b.buf() , b.len() );
-
+
sayPiggyBack( m );
}
@@ -912,5 +913,5 @@ namespace mongo {
return false;
return true;
}
-
+
} // namespace mongo
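
For orientation, the methods reflowed above form the driver's basic read/write path. A minimal, hypothetical sketch of a caller exercising them follows; the host, namespace, and field names are placeholders, not part of this patch.

    #include <iostream>
    #include "client/dbclient.h"
    using namespace std;
    using namespace mongo;

    int main() {
        DBClientConnection c;
        string errmsg;
        if ( !c.connect( "localhost:27017", errmsg ) ) {   // placeholder host
            cout << "connect failed: " << errmsg << endl;
            return 1;
        }
        c.insert( "test.people", BSON( "name" << "eliot" << "age" << 33 ) );
        // query() returns an auto_ptr<DBClientCursor>, as reformatted above
        auto_ptr<DBClientCursor> cur = c.query( "test.people", QUERY( "age" << GT << 21 ) );
        while ( cur->more() )
            cout << cur->next().toString() << endl;
        return 0;
    }
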
diff --git a/client/dbclient.h b/client/dbclient.h
index 87fae77012c..cc5e5b3fe3b 100644
--- a/client/dbclient.h
+++ b/client/dbclient.h
@@ -40,7 +40,7 @@ namespace mongo {
/** allow query of replica slave. normally these return an error except for namespace "local".
*/
QueryOption_SlaveOk = 1 << 2,
-
+
// findingStart mode is used to find the first operation of interest when
// we are scanning through a repl log. For efficiency in the common case,
// where the first operation of interest is closer to the tail than the head,
@@ -52,24 +52,24 @@ namespace mongo {
QueryOption_OplogReplay = 1 << 3,
        /** The server normally times out idle cursors after an inactivity period to prevent excess memory use.
- Set this option to prevent that.
+ Set this option to prevent that.
*/
QueryOption_NoCursorTimeout = 1 << 4,
- /** Use with QueryOption_CursorTailable. If we are at the end of the data, block for a while rather
+ /** Use with QueryOption_CursorTailable. If we are at the end of the data, block for a while rather
than returning no data. After a timeout period, we do return as normal.
*/
QueryOption_AwaitData = 1 << 5,
- /** Stream the data down full blast in multiple "more" packages, on the assumption that the client
- will fully read all data queried. Faster when you are pulling a lot of data and know you want to
+ /** Stream the data down full blast in multiple "more" packages, on the assumption that the client
+ will fully read all data queried. Faster when you are pulling a lot of data and know you want to
            pull it all down. Note: you must read all the returned data unless you close the connection.
- Use the query( boost::function<void(const BSONObj&)> f, ... ) version of the connection's query()
+ Use the query( boost::function<void(const BSONObj&)> f, ... ) version of the connection's query()
method, and it will take care of all the details for you.
*/
QueryOption_Exhaust = 1 << 6,
-
+
QueryOption_AllSupported = QueryOption_CursorTailable | QueryOption_SlaveOk | QueryOption_OplogReplay | QueryOption_NoCursorTimeout | QueryOption_AwaitData | QueryOption_Exhaust
};
@@ -78,7 +78,7 @@ namespace mongo {
/** Upsert - that is, insert the item if no matching item is found. */
UpdateOption_Upsert = 1 << 0,
- /** Update multiple documents (if multiple documents match query expression).
+ /** Update multiple documents (if multiple documents match query expression).
(Default is update a single document and stop.) */
UpdateOption_Multi = 1 << 1,
@@ -103,7 +103,7 @@ namespace mongo {
* server:port
* foo/server:port,server:port SET
* server,server,server SYNC
- *
+ *
     * typical use
* string errmsg,
* ConnectionString cs = ConnectionString::parse( url , errmsg );
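
A sketch of the parse idiom this comment describes; the set name and hosts below are illustrative placeholders.

    void demoParse() {
        string errmsg;
        ConnectionString cs = ConnectionString::parse( "foo/server1:27017,server2:27017", errmsg );
        if ( !cs.isValid() ) {
            cout << "bad connection string: " << errmsg << endl;
            return;
        }
        DBClientBase *conn = cs.connect( errmsg );   // returns 0 on failure
        if ( conn )
            cout << "connected to " << cs.toString() << endl;
    }
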
@@ -113,19 +113,19 @@ namespace mongo {
class ConnectionString {
public:
enum ConnectionType { INVALID , MASTER , PAIR , SET , SYNC };
-
- ConnectionString( const HostAndPort& server ){
+
+ ConnectionString( const HostAndPort& server ) {
_type = MASTER;
_servers.push_back( server );
_finishInit();
}
- ConnectionString( ConnectionType type , const string& s , const string& setName = "" ){
+ ConnectionString( ConnectionType type , const string& s , const string& setName = "" ) {
_type = type;
_setName = setName;
_fillServers( s );
-
- switch ( _type ){
+
+ switch ( _type ) {
case MASTER:
assert( _servers.size() == 1 );
break;
@@ -139,13 +139,13 @@ namespace mongo {
default:
assert( _servers.size() > 0 );
}
-
+
_finishInit();
}
- ConnectionString( const string& s , ConnectionType favoredMultipleType ){
+ ConnectionString( const string& s , ConnectionType favoredMultipleType ) {
_fillServers( s );
- if ( _servers.size() == 1 ){
+ if ( _servers.size() == 1 ) {
_type = MASTER;
}
else {
@@ -156,14 +156,14 @@ namespace mongo {
}
bool isValid() const { return _type != INVALID; }
-
+
string toString() const {
return _string;
}
-
+
DBClientBase* connect( string& errmsg ) const;
-
- string getSetName() const{
+
+ string getSetName() const {
return _setName;
}
@@ -172,29 +172,29 @@ namespace mongo {
}
static ConnectionString parse( const string& url , string& errmsg );
-
+
static string typeToString( ConnectionType type );
-
+
private:
- ConnectionString(){
+ ConnectionString() {
_type = INVALID;
}
-
- void _fillServers( string s ){
+
+ void _fillServers( string s ) {
string::size_type idx;
- while ( ( idx = s.find( ',' ) ) != string::npos ){
+ while ( ( idx = s.find( ',' ) ) != string::npos ) {
_servers.push_back( s.substr( 0 , idx ) );
s = s.substr( idx + 1 );
}
_servers.push_back( s );
}
-
- void _finishInit(){
+
+ void _finishInit() {
stringstream ss;
if ( _type == SET )
ss << _setName << "/";
- for ( unsigned i=0; i<_servers.size(); i++ ){
+ for ( unsigned i=0; i<_servers.size(); i++ ) {
if ( i > 0 )
ss << ",";
ss << _servers[i].toString();
@@ -207,7 +207,7 @@ namespace mongo {
string _string;
string _setName;
};
-
+
/**
* controls how much a clients cares about writes
* default is NORMAL
@@ -223,7 +223,7 @@ namespace mongo {
class DBClientCursor;
class DBClientCursorBatchIterator;
- /** Represents a Mongo query expression. Typically one uses the QUERY(...) macro to construct a Query object.
+ /** Represents a Mongo query expression. Typically one uses the QUERY(...) macro to construct a Query object.
Examples:
QUERY( "age" << 33 << "school" << "UCLA" ).sort("name")
QUERY( "age" << GT << 30 << LT << 50 )
@@ -233,22 +233,22 @@ namespace mongo {
BSONObj obj;
Query() : obj(BSONObj()) { }
Query(const BSONObj& b) : obj(b) { }
- Query(const string &json) :
+ Query(const string &json) :
obj(fromjson(json)) { }
- Query(const char * json) :
+ Query(const char * json) :
obj(fromjson(json)) { }
- /** Add a sort (ORDER BY) criteria to the query expression.
+ /** Add a sort (ORDER BY) criteria to the query expression.
@param sortPattern the sort order template. For example to order by name ascending, time descending:
{ name : 1, ts : -1 }
i.e.
BSON( "name" << 1 << "ts" << -1 )
- or
+ or
fromjson(" name : 1, ts : -1 ")
*/
Query& sort(const BSONObj& sortPattern);
- /** Add a sort (ORDER BY) criteria to the query expression.
+ /** Add a sort (ORDER BY) criteria to the query expression.
This version of sort() assumes you want to sort on a single field.
@param asc = 1 for ascending order
asc = -1 for descending order
@@ -277,8 +277,8 @@ namespace mongo {
*/
Query& explain();
- /** Use snapshot mode for the query. Snapshot mode assures no duplicates are returned, or objects missed, which were
- present at both the start and end of the query's execution (if an object is new during the query, or deleted during
+ /** Use snapshot mode for the query. Snapshot mode assures no duplicates are returned, or objects missed, which were
+ present at both the start and end of the query's execution (if an object is new during the query, or deleted during
the query, it may or may not be returned, even with snapshot mode).
Note that short query responses (less than 1MB) are always effectively snapshotted.
@@ -287,16 +287,16 @@ namespace mongo {
*/
Query& snapshot();
- /** Queries to the Mongo database support a $where parameter option which contains
- a javascript function that is evaluated to see whether objects being queried match
- its criteria. Use this helper to append such a function to a query object.
+ /** Queries to the Mongo database support a $where parameter option which contains
+ a javascript function that is evaluated to see whether objects being queried match
+ its criteria. Use this helper to append such a function to a query object.
Your query may also contain other traditional Mongo query terms.
- @param jscode The javascript function to evaluate against each potential object
- match. The function must return true for matched objects. Use the this
+ @param jscode The javascript function to evaluate against each potential object
+ match. The function must return true for matched objects. Use the this
variable to inspect the current object.
- @param scope SavedContext for the javascript object. List in a BSON object any
- variables you would like defined when the jscode executes. One can think
+ @param scope SavedContext for the javascript object. List in a BSON object any
+ variables you would like defined when the jscode executes. One can think
of these as "bind variables".
Examples:
@@ -310,12 +310,12 @@ namespace mongo {
* if this query has an orderby, hint, or some other field
*/
bool isComplex( bool * hasDollar = 0 ) const;
-
+
BSONObj getFilter() const;
BSONObj getSort() const;
BSONObj getHint() const;
bool isExplain() const;
-
+
string toString() const;
operator string() const { return toString(); }
private:
@@ -326,13 +326,13 @@ namespace mongo {
BSONObjBuilder b;
b.appendElements(obj);
b.append(fieldName, val);
- obj = b.obj();
+ obj = b.obj();
}
};
-
-/** Typically one uses the QUERY(...) macro to construct a Query object.
- Example: QUERY( "age" << 33 << "school" << "UCLA" )
-*/
+
+ /** Typically one uses the QUERY(...) macro to construct a Query object.
+ Example: QUERY( "age" << 33 << "school" << "UCLA" )
+ */
#define QUERY(x) mongo::Query( BSON(x) )
/**
@@ -360,9 +360,9 @@ namespace mongo {
/** don't use this - called automatically by DBClientCursor for you */
virtual auto_ptr<DBClientCursor> getMore( const string &ns, long long cursorId, int nToReturn = 0, int options = 0 ) = 0;
-
+
virtual void insert( const string &ns, BSONObj obj ) = 0;
-
+
virtual void insert( const string &ns, const vector< BSONObj >& v ) = 0;
virtual void remove( const string &ns , Query query, bool justOne = 0 ) = 0;
@@ -406,18 +406,18 @@ namespace mongo {
directly call runCommand.
@param dbname database name. Use "admin" for global administrative commands.
- @param cmd the command object to execute. For example, { ismaster : 1 }
- @param info the result object the database returns. Typically has { ok : ..., errmsg : ... } fields
- set.
+ @param cmd the command object to execute. For example, { ismaster : 1 }
+ @param info the result object the database returns. Typically has { ok : ..., errmsg : ... } fields
+ set.
@param options see enum QueryOptions - normally not needed to run a command
@return true if the command returned "ok".
*/
virtual bool runCommand(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0);
/** Authorize access to a particular database.
- Authentication is separate for each database on the server -- you may authenticate for any
+ Authentication is separate for each database on the server -- you may authenticate for any
number of databases on a single connection.
- The "admin" database is special and once authenticated provides access to all databases on the
+ The "admin" database is special and once authenticated provides access to all databases on the
server.
@param digestPassword if password is plain text, set this to true. otherwise assumed to be pre-digested
@return true if successful
@@ -459,14 +459,14 @@ namespace mongo {
*/
bool createCollection(const string &ns, long long size = 0, bool capped = false, int max = 0, BSONObj *info = 0);
- /** Get error result from the last operation on this connection.
+ /** Get error result from the last operation on this connection.
@return error message text, or empty string if no error.
*/
string getLastError();
- /** Get error result from the last operation on this connection.
- @return full error object.
- */
- virtual BSONObj getLastErrorDetailed();
+ /** Get error result from the last operation on this connection.
+ @return full error object.
+ */
+ virtual BSONObj getLastErrorDetailed();
static string getLastErrorString( const BSONObj& res );
@@ -475,23 +475,23 @@ namespace mongo {
@return { err : <error message>, nPrev : <how_many_ops_back_occurred>, ok : 1 }
result.err will be null if no error has occurred.
- */
+ */
BSONObj getPrevError();
- /** Reset the previous error state for this connection (accessed via getLastError and
- getPrevError). Useful when performing several operations at once and then checking
+ /** Reset the previous error state for this connection (accessed via getLastError and
+ getPrevError). Useful when performing several operations at once and then checking
for an error after attempting all operations.
*/
bool resetError() { return simpleCommand("admin", 0, "reseterror"); }
- /** Delete the specified collection. */
- virtual bool dropCollection( const string &ns ){
+ /** Delete the specified collection. */
+ virtual bool dropCollection( const string &ns ) {
string db = nsGetDB( ns );
string coll = nsGetCollection( ns );
uassert( 10011 , "no collection name", coll.size() );
BSONObj info;
-
+
bool res = runCommand( db.c_str() , BSON( "drop" << coll ) , info );
resetIndexCache();
return res;
@@ -503,7 +503,7 @@ namespace mongo {
bool repairDatabase(const string &dbname, BSONObj *info = 0) {
return simpleCommand(dbname, info, "repairDatabase");
}
-
+
/** Copy database from one server or name to another server or name.
Generally, you should dropDatabase() first as otherwise the copied information will MERGE
@@ -533,23 +533,23 @@ namespace mongo {
ProfileOff = 0,
ProfileSlow = 1, // log very slow (>100ms) operations
ProfileAll = 2
-
+
};
bool setDbProfilingLevel(const string &dbname, ProfilingLevel level, BSONObj *info = 0);
bool getDbProfilingLevel(const string &dbname, ProfilingLevel& level, BSONObj *info = 0);
- /** Run a map/reduce job on the server.
+ /** Run a map/reduce job on the server.
See http://www.mongodb.org/display/DOCS/MapReduce
ns namespace (db+collection name) of input data
- jsmapf javascript map function code
- jsreducef javascript reduce function code.
+ jsmapf javascript map function code
+ jsreducef javascript reduce function code.
query optional query filter for the input
- output optional permanent output collection name. if not specified server will
+ output optional permanent output collection name. if not specified server will
generate a temporary collection and return its name.
- returns a result object which contains:
+ returns a result object which contains:
{ result : <collection_name>,
numObjects : <number_of_objects_scanned>,
timeMillis : <job_time>,
@@ -557,8 +557,8 @@ namespace mongo {
[, err : <errmsg_if_error>]
}
- For example one might call:
- result.getField("ok").trueValue()
+ For example one might call:
+ result.getField("ok").trueValue()
on the result to check if ok.
*/
BSONObj mapreduce(const string &ns, const string &jsmapf, const string &jsreducef, BSONObj query = BSONObj(), const string& output = "");
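
A hedged sketch of calling mapreduce() with the signature above; the collection name and the javascript bodies are illustrative only, and c is assumed to be a connected DBClientConnection.

    void deptCounts( DBClientConnection& c ) {
        const char *mapf = "function() { emit( this.dept, 1 ); }";
        const char *reducef =
            "function(k, vals) { var n = 0; vals.forEach(function(v){ n += v; }); return n; }";
        // the optional query argument filters the input collection
        BSONObj res = c.mapreduce( "test.employees", mapf, reducef, BSON( "active" << true ) );
        if ( res.getField( "ok" ).trueValue() )
            cout << "output collection: " << res["result"].valuestr() << endl;
    }
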
@@ -569,7 +569,7 @@ namespace mongo {
jscode source code for a javascript function.
info the command object which contains any information on the invocation result including
the return value and other information. If an error occurs running the jscode, error
- information will be in info. (try "out() << info.toString()")
+ information will be in info. (try "out() << info.toString()")
retValue return value from the jscode function.
args args to pass to the jscode function. when invoked, the 'args' variable will be defined
for use by the jscode.
@@ -583,7 +583,7 @@ namespace mongo {
/** validate a collection, checking for errors and reporting back statistics.
this operation is slow and blocking.
*/
- bool validate( const string &ns , bool scandata=true ){
+ bool validate( const string &ns , bool scandata=true ) {
BSONObj cmd = BSON( "validate" << nsGetCollection( ns ) << "scandata" << scandata );
BSONObj info;
return runCommand( nsGetDB( ns ).c_str() , cmd , info );
@@ -616,7 +616,7 @@ namespace mongo {
ret = (NumType) retValue.number();
return true;
}
-
+
/**
get a list of all the current databases
uses the { listDatabases : 1 } command.
@@ -632,7 +632,7 @@ namespace mongo {
bool exists( const string& ns );
/** Create an index if it does not already exist.
- ensureIndex calls are remembered so it is safe/fast to call this function many
+ ensureIndex calls are remembered so it is safe/fast to call this function many
times in your code.
@param ns collection to be indexed
@param keys the "key pattern" for the index. e.g., { name : 1 }
@@ -642,7 +642,7 @@ namespace mongo {
@return whether or not sent message to db.
should be true on first call, false on subsequent unless resetIndexCache was called
*/
- virtual bool ensureIndex( const string &ns , BSONObj keys , bool unique = false, const string &name = "",
+ virtual bool ensureIndex( const string &ns , BSONObj keys , bool unique = false, const string &name = "",
bool cache = true );
/**
@@ -651,17 +651,17 @@ namespace mongo {
virtual void resetIndexCache();
virtual auto_ptr<DBClientCursor> getIndexes( const string &ns );
-
+
virtual void dropIndex( const string& ns , BSONObj keys );
virtual void dropIndex( const string& ns , const string& indexName );
-
+
/**
drops all indexes for the collection
*/
virtual void dropIndexes( const string& ns );
virtual void reIndex( const string& ns );
-
+
string genIndexName( const BSONObj& keys );
/** Erase / drop an entire database */
@@ -674,33 +674,33 @@ namespace mongo {
virtual string toString() = 0;
/** @return the database name portion of an ns string */
- string nsGetDB( const string &ns ){
+ string nsGetDB( const string &ns ) {
string::size_type pos = ns.find( "." );
if ( pos == string::npos )
return ns;
-
+
return ns.substr( 0 , pos );
}
-
+
/** @return the collection name portion of an ns string */
- string nsGetCollection( const string &ns ){
+ string nsGetCollection( const string &ns ) {
string::size_type pos = ns.find( "." );
if ( pos == string::npos )
return "";
- return ns.substr( pos + 1 );
+ return ns.substr( pos + 1 );
}
protected:
bool isOk(const BSONObj&);
-
+
enum QueryOptions availableOptions();
-
+
private:
enum QueryOptions _cachedAvailableOptions;
bool _haveCachedAvailableOptions;
};
-
+
/**
abstract class that implements the core db operations
*/
@@ -709,13 +709,13 @@ namespace mongo {
WriteConcern _writeConcern;
public:
- DBClientBase(){
+ DBClientBase() {
_writeConcern = W_NORMAL;
}
-
+
WriteConcern getWriteConcern() const { return _writeConcern; }
- void setWriteConcern( WriteConcern w ){ _writeConcern = w; }
-
+ void setWriteConcern( WriteConcern w ) { _writeConcern = w; }
+
/** send a query to the database.
@param ns namespace to query, format is <dbname>.<collectname>[.<collectname>]*
@param query query to perform on the collection. this is a BSONObj (binary JSON)
@@ -744,7 +744,7 @@ namespace mongo {
insert an object into the database
*/
virtual void insert( const string &ns , BSONObj obj );
-
+
/**
insert a vector of objects into the database
*/
@@ -755,14 +755,14 @@ namespace mongo {
           @param justOne if true, remove only the first matching document, then stop
*/
virtual void remove( const string &ns , Query q , bool justOne = 0 );
-
+
/**
updates objects matching query
*/
virtual void update( const string &ns , Query query , BSONObj obj , bool upsert = false , bool multi = false );
-
+
virtual bool isFailed() const = 0;
-
+
virtual void killCursor( long long cursorID ) = 0;
virtual bool callRead( Message& toSend , Message& response ) = 0;
@@ -772,16 +772,16 @@ namespace mongo {
virtual ConnectionString::ConnectionType type() const = 0;
}; // DBClientBase
-
+
class DBClientReplicaSet;
-
- class ConnectException : public UserException {
+
+ class ConnectException : public UserException {
public:
ConnectException(string msg) : UserException(9000,msg) { }
};
- /**
- A basic connection to the database.
+ /**
+ A basic connection to the database.
This is the main entry point for talking to a simple Mongo setup
*/
class DBClientConnection : public DBClientBase {
@@ -789,7 +789,7 @@ namespace mongo {
/**
@param _autoReconnect if true, automatically reconnect on a connection failure
@param cp used by DBClientReplicaSet. You do not need to specify this parameter
- @param timeout tcp timeout in seconds - this is for read/write, not connect.
+ @param timeout tcp timeout in seconds - this is for read/write, not connect.
Connect timeout is fixed, but short, at 5 seconds.
*/
DBClientConnection(bool _autoReconnect=false, DBClientReplicaSet* cp=0, double so_timeout=0) :
@@ -797,7 +797,7 @@ namespace mongo {
_numConnections++;
}
- virtual ~DBClientConnection(){
+ virtual ~DBClientConnection() {
_numConnections--;
}
@@ -812,14 +812,14 @@ namespace mongo {
@deprecated please use HostAndPort
@return false if fails to connect.
*/
- virtual bool connect(const char * hostname, string& errmsg){
+ virtual bool connect(const char * hostname, string& errmsg) {
// TODO: remove this method
HostAndPort t( hostname );
return connect( t , errmsg );
}
/** Connect to a Mongo database server.
-
+
If autoReconnect is true, you can try to use the DBClientConnection even when
false was returned -- it will try to connect again.
@@ -837,9 +837,9 @@ namespace mongo {
@param serverHostname host to connect to. can include port number ( 127.0.0.1 , 127.0.0.1:5555 )
*/
- void connect(const string& serverHostname) {
+ void connect(const string& serverHostname) {
string errmsg;
- if( !connect(HostAndPort(serverHostname), errmsg) )
+ if( !connect(HostAndPort(serverHostname), errmsg) )
throw ConnectException(string("can't connect ") + errmsg);
}
@@ -851,8 +851,8 @@ namespace mongo {
return DBClientBase::query( ns, query, nToReturn, nToSkip, fieldsToReturn, queryOptions , batchSize );
}
- /** Uses QueryOption_Exhaust
- Exhaust mode sends back all data queries as fast as possible, with no back-and-for for OP_GETMORE. If you are certain
+ /** Uses QueryOption_Exhaust
+            Exhaust mode sends back all data queries as fast as possible, with no back-and-forth for OP_GETMORE. If you are certain
you will exhaust the query, it could be useful.
Use DBClientCursorBatchIterator version if you want to do items in large blocks, perhaps to avoid granular locking and such.
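
A sketch of the callback form of query() that exhaust mode pairs with; the per-object function and namespace are placeholders.

    void printDoc( const BSONObj &o ) {
        cout << o.toString() << endl;
    }

    void dumpAll( DBClientConnection& c ) {
        // exhaust mode: the server streams all batches without per-batch getMore requests
        unsigned long long n = c.query( printDoc, "test.people", Query(), 0, QueryOption_Exhaust );
        cout << "streamed " << n << " documents" << endl;
    }
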
@@ -861,7 +861,7 @@ namespace mongo {
unsigned long long query( boost::function<void(DBClientCursorBatchIterator&)> f, const string& ns, Query query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
/**
- @return true if this connection is currently in a failed state. When autoreconnect is on,
+ @return true if this connection is currently in a failed state. When autoreconnect is on,
a connection will transition back to an ok state after reconnecting.
*/
bool isFailed() const { return failed; }
@@ -877,18 +877,18 @@ namespace mongo {
/** Returns the address of the server */
string toString() { return _serverString; }
-
+
string getServerAddress() const { return _serverString; }
-
+
virtual void killCursor( long long cursorID );
virtual bool callRead( Message& toSend , Message& response ) { return call( toSend , response ); }
virtual void say( Message &toSend );
- virtual bool call( Message &toSend, Message &response, bool assertOk = true );
- virtual ConnectionString::ConnectionType type() const { return ConnectionString::MASTER; }
+ virtual bool call( Message &toSend, Message &response, bool assertOk = true );
+ virtual ConnectionString::ConnectionType type() const { return ConnectionString::MASTER; }
virtual void checkResponse( const char *data, int nReturned );
void setSoTimeout(double to) { _so_timeout = to; }
-
- static int getNumConnections(){
+
+ static int getNumConnections() {
return _numConnections;
}
@@ -911,18 +911,18 @@ namespace mongo {
void checkConnection() { if( failed ) _checkConnection(); }
map< string, pair<string,string> > authCache;
- double _so_timeout;
+ double _so_timeout;
bool _connect( string& errmsg );
static AtomicUInt _numConnections;
};
-
+
/** pings server to check if it's up
*/
bool serverAlive( const string &uri );
DBClientBase * createDirectClient();
-
+
} // namespace mongo
#include "dbclientcursor.h"
diff --git a/client/dbclient_rs.cpp b/client/dbclient_rs.cpp
index e8dadffcefd..94ade4b4b23 100644
--- a/client/dbclient_rs.cpp
+++ b/client/dbclient_rs.cpp
@@ -26,7 +26,7 @@
#include "../util/background.h"
namespace mongo {
-
+
// --------------------------------
// ----- ReplicaSetMonitor ---------
// --------------------------------
@@ -34,34 +34,34 @@ namespace mongo {
// global background job responsible for checking every X amount of time
class ReplicaSetMonitorWatcher : public BackgroundJob {
protected:
- void run(){
+ void run() {
sleepsecs( 20 );
try {
ReplicaSetMonitor::checkAll();
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
error() << "ReplicaSetMonitorWatcher: check failed: " << e.what() << endl;
}
}
} replicaSetMonitorWatcher;
-
+
ReplicaSetMonitor::ReplicaSetMonitor( const string& name , const vector<HostAndPort>& servers )
: _lock( "ReplicaSetMonitor instance" ) , _name( name ) , _master(-1) {
string errmsg;
-
- for ( unsigned i=0; i<servers.size(); i++ ){
+
+ for ( unsigned i=0; i<servers.size(); i++ ) {
auto_ptr<DBClientConnection> conn( new DBClientConnection( true ) );
- if (!conn->connect( servers[i] , errmsg ) ){
+ if (!conn->connect( servers[i] , errmsg ) ) {
log(1) << "error connecting to seed " << servers[i] << ": " << errmsg << endl;
// skip seeds that don't work
continue;
}
-
+
_nodes.push_back( Node( servers[i] , conn.release() ) );
-
+
string maybePrimary;
if (_checkConnection( _nodes[_nodes.size()-1].conn , maybePrimary, false)) {
break;
@@ -69,14 +69,14 @@ namespace mongo {
}
}
- ReplicaSetMonitor::~ReplicaSetMonitor(){
+ ReplicaSetMonitor::~ReplicaSetMonitor() {
for ( unsigned i=0; i<_nodes.size(); i++ )
delete _nodes[i].conn;
_nodes.clear();
_master = -1;
}
-
- ReplicaSetMonitorPtr ReplicaSetMonitor::get( const string& name , const vector<HostAndPort>& servers ){
+
+ ReplicaSetMonitorPtr ReplicaSetMonitor::get( const string& name , const vector<HostAndPort>& servers ) {
scoped_lock lk( _setsLock );
ReplicaSetMonitorPtr& m = _sets[name];
if ( ! m )
@@ -84,17 +84,17 @@ namespace mongo {
if ( replicaSetMonitorWatcher.getState() == BackgroundJob::NotStarted )
replicaSetMonitorWatcher.go();
-
+
return m;
}
- void ReplicaSetMonitor::checkAll(){
+ void ReplicaSetMonitor::checkAll() {
set<string> seen;
-
- while ( true ){
+
+ while ( true ) {
ReplicaSetMonitorPtr m;
{
- for ( map<string,ReplicaSetMonitorPtr>::iterator i=_sets.begin(); i!=_sets.end(); ++i ){
+ for ( map<string,ReplicaSetMonitorPtr>::iterator i=_sets.begin(); i!=_sets.end(); ++i ) {
string name = i->first;
if ( seen.count( name ) )
continue;
@@ -110,11 +110,11 @@ namespace mongo {
m->check();
}
-
-
+
+
}
- void ReplicaSetMonitor::setConfigChangeHook( ConfigChangeHook hook ){
+ void ReplicaSetMonitor::setConfigChangeHook( ConfigChangeHook hook ) {
massert( 13610 , "ConfigChangeHook already specified" , _hook == 0 );
_hook = hook;
}
@@ -126,7 +126,7 @@ namespace mongo {
{
scoped_lock lk( _lock );
- for ( unsigned i=0; i<_nodes.size(); i++ ){
+ for ( unsigned i=0; i<_nodes.size(); i++ ) {
if ( i > 0 )
ss << ",";
ss << _nodes[i].addr.toString();
@@ -135,31 +135,31 @@ namespace mongo {
return ss.str();
}
- void ReplicaSetMonitor::notifyFailure( const HostAndPort& server ){
- if ( _master >= 0 ){
+ void ReplicaSetMonitor::notifyFailure( const HostAndPort& server ) {
+ if ( _master >= 0 ) {
scoped_lock lk( _lock );
if ( server == _nodes[_master].addr )
_master = -1;
}
}
-
- HostAndPort ReplicaSetMonitor::getMaster(){
+
+ HostAndPort ReplicaSetMonitor::getMaster() {
if ( _master < 0 )
_check();
-
+
uassert( 10009 , str::stream() << "ReplicaSetMonitor no master found for set: " << _name , _master >= 0 );
-
+
scoped_lock lk( _lock );
return _nodes[_master].addr;
}
-
- HostAndPort ReplicaSetMonitor::getSlave(){
+
+ HostAndPort ReplicaSetMonitor::getSlave() {
int x = rand() % _nodes.size();
{
scoped_lock lk( _lock );
- for ( unsigned i=0; i<_nodes.size(); i++ ){
+ for ( unsigned i=0; i<_nodes.size(); i++ ) {
int p = ( i + x ) % _nodes.size();
if ( p == _master )
continue;
@@ -174,9 +174,9 @@ namespace mongo {
/**
         * notify the monitor that a server has failed
*/
- void ReplicaSetMonitor::notifySlaveFailure( const HostAndPort& server ){
+ void ReplicaSetMonitor::notifySlaveFailure( const HostAndPort& server ) {
int x = _find( server );
- if ( x >= 0 ){
+ if ( x >= 0 ) {
scoped_lock lk( _lock );
_nodes[x].ok = false;
}
@@ -186,16 +186,16 @@ namespace mongo {
BSONObj status;
if (!conn->runCommand("admin", BSON("replSetGetStatus" << 1), status) ||
- !status.hasField("members") ||
- status["members"].type() != Array) {
+ !status.hasField("members") ||
+ status["members"].type() != Array) {
return;
}
-
+
BSONObjIterator hi(status["members"].Obj());
while (hi.more()) {
BSONObj member = hi.next().Obj();
string host = member["name"].String();
-
+
int m = -1;
if ((m = _find(host)) <= 0) {
continue;
@@ -206,7 +206,7 @@ namespace mongo {
scoped_lock lk( _lock );
_nodes[m].ok = true;
}
- else {
+ else {
scoped_lock lk( _lock );
_nodes[m].ok = false;
}
@@ -215,9 +215,9 @@ namespace mongo {
void ReplicaSetMonitor::_checkHosts( const BSONObj& hostList, bool& changed ) {
BSONObjIterator hi(hostList);
- while ( hi.more() ){
+ while ( hi.more() ) {
string toCheck = hi.next().String();
-
+
if ( _find( toCheck ) >= 0 )
continue;
@@ -233,34 +233,34 @@ namespace mongo {
changed = true;
}
}
-
+
bool ReplicaSetMonitor::_checkConnection( DBClientConnection * c , string& maybePrimary , bool verbose ) {
bool isMaster = false;
bool changed = false;
try {
BSONObj o;
c->isMaster(isMaster, &o);
-
+
log( ! verbose ) << "ReplicaSetMonitor::_checkConnection: " << c->toString() << ' ' << o << '\n';
-
+
// add other nodes
string maybePrimary;
- if ( o["hosts"].type() == Array ){
+ if ( o["hosts"].type() == Array ) {
if ( o["primary"].type() == String )
maybePrimary = o["primary"].String();
-
+
_checkHosts(o["hosts"].Obj(), changed);
}
if (o.hasField("passives") && o["passives"].type() == Array) {
_checkHosts(o["passives"].Obj(), changed);
}
-
+
_checkStatus(c);
}
catch ( std::exception& e ) {
log( ! verbose ) << "ReplicaSetMonitor::_checkConnection: caught exception " << c->toString() << ' ' << e.what() << endl;
}
-
+
if ( changed && _hook )
_hook( this );
@@ -268,13 +268,13 @@ namespace mongo {
}
void ReplicaSetMonitor::_check() {
-
+
bool triedQuickCheck = false;
-
+
LOG(1) << "_check : " << getServerAddress() << endl;
-
+
for ( int retry = 0; retry < 2; retry++ ) {
- for ( unsigned i=0; i<_nodes.size(); i++ ){
+ for ( unsigned i=0; i<_nodes.size(); i++ ) {
DBClientConnection * c;
{
scoped_lock lk( _lock );
@@ -282,14 +282,14 @@ namespace mongo {
}
string maybePrimary;
- if ( _checkConnection( c , maybePrimary , retry ) ){
+ if ( _checkConnection( c , maybePrimary , retry ) ) {
_master = i;
return;
}
- if ( ! triedQuickCheck && maybePrimary.size() ){
+ if ( ! triedQuickCheck && maybePrimary.size() ) {
int x = _find( maybePrimary );
- if ( x >= 0 ){
+ if ( x >= 0 ) {
triedQuickCheck = true;
string dummy;
DBClientConnection * testConn;
@@ -297,7 +297,7 @@ namespace mongo {
scoped_lock lk( _lock );
testConn = _nodes[x].conn;
}
- if ( _checkConnection( testConn , dummy , false ) ){
+ if ( _checkConnection( testConn , dummy , false ) ) {
_master = x;
return;
}
@@ -310,11 +310,11 @@ namespace mongo {
}
- void ReplicaSetMonitor::check(){
+ void ReplicaSetMonitor::check() {
// first see if the current master is fine
- if ( _master >= 0 ){
+ if ( _master >= 0 ) {
string temp;
- if ( _checkConnection( _nodes[_master].conn , temp , false ) ){
+ if ( _checkConnection( _nodes[_master].conn , temp , false ) ) {
// current master is fine, so we're done
return;
}
@@ -349,14 +349,14 @@ namespace mongo {
// --------------------------------
DBClientReplicaSet::DBClientReplicaSet( const string& name , const vector<HostAndPort>& servers )
- : _monitor( ReplicaSetMonitor::get( name , servers ) ){
+ : _monitor( ReplicaSetMonitor::get( name , servers ) ) {
}
-
- DBClientReplicaSet::~DBClientReplicaSet(){
+
+ DBClientReplicaSet::~DBClientReplicaSet() {
}
DBClientConnection * DBClientReplicaSet::checkMaster() {
- if ( _master ){
+ if ( _master ) {
// a master is selected. let's just make sure connection didn't die
if ( ! _master->isFailed() )
return _master.get();
@@ -364,7 +364,7 @@ namespace mongo {
}
HostAndPort h = _monitor->getMaster();
- if ( h != _masterHost ){
+ if ( h != _masterHost ) {
_masterHost = h;
_master.reset( new DBClientConnection( true ) );
_master->connect( _masterHost );
@@ -374,14 +374,14 @@ namespace mongo {
}
DBClientConnection * DBClientReplicaSet::checkSlave() {
- if ( _slave ){
+ if ( _slave ) {
if ( ! _slave->isFailed() )
return _slave.get();
_monitor->notifySlaveFailure( _slaveHost );
}
HostAndPort h = _monitor->getSlave();
- if ( h != _slaveHost ){
+ if ( h != _slaveHost ) {
_slaveHost = h;
_slave.reset( new DBClientConnection( true ) );
_slave->connect( _slaveHost );
@@ -391,8 +391,8 @@ namespace mongo {
}
- void DBClientReplicaSet::_auth( DBClientConnection * conn ){
- for ( list<AuthInfo>::iterator i=_auths.begin(); i!=_auths.end(); ++i ){
+ void DBClientReplicaSet::_auth( DBClientConnection * conn ) {
+ for ( list<AuthInfo>::iterator i=_auths.begin(); i!=_auths.end(); ++i ) {
const AuthInfo& a = *i;
string errmsg;
if ( ! conn->auth( a.dbname , a.username , a.pwd , errmsg, a.digestPassword ) )
@@ -402,15 +402,15 @@ namespace mongo {
}
- DBClientConnection& DBClientReplicaSet::masterConn(){
+ DBClientConnection& DBClientReplicaSet::masterConn() {
return *checkMaster();
}
- DBClientConnection& DBClientReplicaSet::slaveConn(){
+ DBClientConnection& DBClientReplicaSet::slaveConn() {
return *checkSlave();
}
- bool DBClientReplicaSet::connect(){
+ bool DBClientReplicaSet::connect() {
try {
checkMaster();
}
@@ -420,17 +420,17 @@ namespace mongo {
return true;
}
- bool DBClientReplicaSet::auth(const string &dbname, const string &username, const string &pwd, string& errmsg, bool digestPassword ) {
- DBClientConnection * m = checkMaster();
+ bool DBClientReplicaSet::auth(const string &dbname, const string &username, const string &pwd, string& errmsg, bool digestPassword ) {
+ DBClientConnection * m = checkMaster();
// first make sure it actually works
- if( ! m->auth(dbname, username, pwd, errmsg, digestPassword ) )
- return false;
-
+ if( ! m->auth(dbname, username, pwd, errmsg, digestPassword ) )
+ return false;
+
// now that it does, we should save so that for a new node we can auth
_auths.push_back( AuthInfo( dbname , username , pwd , digestPassword ) );
- return true;
- }
+ return true;
+ }
// ------------- simple functions -----------------
@@ -451,59 +451,59 @@ namespace mongo {
}
auto_ptr<DBClientCursor> DBClientReplicaSet::query(const string &ns, Query query, int nToReturn, int nToSkip,
- const BSONObj *fieldsToReturn, int queryOptions, int batchSize){
+ const BSONObj *fieldsToReturn, int queryOptions, int batchSize) {
- if ( queryOptions & QueryOption_SlaveOk ){
+ if ( queryOptions & QueryOption_SlaveOk ) {
// we're ok sending to a slave
// we'll try 2 slaves before just using master
// checkSlave will try a different slave automatically after a failure
- for ( int i=0; i<2; i++ ){
+ for ( int i=0; i<2; i++ ) {
try {
return checkSlave()->query(ns,query,nToReturn,nToSkip,fieldsToReturn,queryOptions,batchSize);
}
- catch ( DBException & ){
+ catch ( DBException & ) {
LOG(1) << "can't query replica set slave: " << _slaveHost << endl;
}
}
}
-
+
return checkMaster()->query(ns,query,nToReturn,nToSkip,fieldsToReturn,queryOptions,batchSize);
}
BSONObj DBClientReplicaSet::findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn, int queryOptions) {
- if ( queryOptions & QueryOption_SlaveOk ){
+ if ( queryOptions & QueryOption_SlaveOk ) {
// we're ok sending to a slave
// we'll try 2 slaves before just using master
// checkSlave will try a different slave automatically after a failure
- for ( int i=0; i<2; i++ ){
+ for ( int i=0; i<2; i++ ) {
try {
return checkSlave()->findOne(ns,query,fieldsToReturn,queryOptions);
}
- catch ( DBException & ){
+ catch ( DBException & ) {
LOG(1) << "can't query replica set slave: " << _slaveHost << endl;
}
}
}
-
+
return checkMaster()->findOne(ns,query,fieldsToReturn,queryOptions);
}
- void DBClientReplicaSet::killCursor( long long cursorID ){
+ void DBClientReplicaSet::killCursor( long long cursorID ) {
checkMaster()->killCursor( cursorID );
- }
+ }
- bool DBClientReplicaSet::call( Message &toSend, Message &response, bool assertOk ) {
- if ( toSend.operation() == dbQuery ){
+ bool DBClientReplicaSet::call( Message &toSend, Message &response, bool assertOk ) {
+ if ( toSend.operation() == dbQuery ) {
// TODO: might be possible to do this faster by changing api
DbMessage dm( toSend );
QueryMessage qm( dm );
- if ( qm.queryOptions & QueryOption_SlaveOk ){
- for ( int i=0; i<2; i++ ){
+ if ( qm.queryOptions & QueryOption_SlaveOk ) {
+ for ( int i=0; i<2; i++ ) {
try {
return checkSlave()->call( toSend , response , assertOk );
}
- catch ( DBException & ){
+ catch ( DBException & ) {
log(1) << "can't query replica set slave: " << _slaveHost << endl;
}
}
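
The retry loops above give SlaveOk reads at most two attempts on secondaries before falling back to the master. A sketch of the caller's side; the seed hosts and set name are placeholders.

    void readFromSet() {
        vector<HostAndPort> seeds;
        seeds.push_back( HostAndPort( "server1:27017" ) );   // placeholder hosts
        seeds.push_back( HostAndPort( "server2:27017" ) );
        DBClientReplicaSet rs( "foo", seeds );               // placeholder set name
        if ( !rs.connect() )
            return;
        // SlaveOk lets the retry loop above route this read to a secondary
        BSONObj doc = rs.findOne( "test.people", QUERY( "name" << "eliot" ), 0, QueryOption_SlaveOk );
        cout << doc.toString() << endl;
    }
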
diff --git a/client/dbclient_rs.h b/client/dbclient_rs.h
index bbc06a6829d..62b008edfac 100644
--- a/client/dbclient_rs.h
+++ b/client/dbclient_rs.h
@@ -34,7 +34,7 @@ namespace mongo {
*/
class ReplicaSetMonitor {
public:
-
+
typedef boost::function1<void,const ReplicaSetMonitor*> ConfigChangeHook;
/**
@@ -57,7 +57,7 @@ namespace mongo {
static void setConfigChangeHook( ConfigChangeHook hook );
~ReplicaSetMonitor();
-
+
/** @return HostAndPort or throws an exception */
HostAndPort getMaster();
@@ -72,7 +72,7 @@ namespace mongo {
/**
         * notify the monitor that a server has failed
*/
- void notifySlaveFailure( const HostAndPort& server );
+ void notifySlaveFailure( const HostAndPort& server );
/**
* checks for current master and new secondaries
@@ -82,18 +82,18 @@ namespace mongo {
string getName() const { return _name; }
string getServerAddress() const;
-
+
private:
/**
* This populates a list of hosts from the list of seeds (discarding the
- * seed list).
+ * seed list).
* @param name set name
* @param servers seeds
*/
ReplicaSetMonitor( const string& name , const vector<HostAndPort>& servers );
void _check();
-
+
/**
* Use replSetGetStatus command to make sure hosts in host list are up
* and readable. Sets Node::ok appropriately.
@@ -107,7 +107,7 @@ namespace mongo {
* @param changed if new hosts were added
*/
void _checkHosts(const BSONObj& hostList, bool& changed);
-
+
/**
* Updates host list.
* @param c the connection to check
@@ -124,14 +124,14 @@ namespace mongo {
string _name;
struct Node {
- Node( const HostAndPort& a , DBClientConnection* c ) : addr( a ) , conn(c) , ok(true){}
+ Node( const HostAndPort& a , DBClientConnection* c ) : addr( a ) , conn(c) , ok(true) {}
HostAndPort addr;
DBClientConnection* conn;
// if this node is in a failure state
// used for slave routing
// this is too simple, should make it better
- bool ok;
+ bool ok;
};
/**
@@ -141,20 +141,20 @@ namespace mongo {
int _master; // which node is the current master. -1 means no master is known
-
+
static mongo::mutex _setsLock; // protects _sets
static map<string,ReplicaSetMonitorPtr> _sets; // set name to Monitor
-
+
static ConfigChangeHook _hook;
};
/** Use this class to connect to a replica set of servers. The class will manage
        checking which server in a replica set is master, and will fail over automatically.
-
+
This can also be used to connect to replica pairs since pairs are a subset of sets
-
- On a failover situation, expect at least one operation to return an error (throw
- an exception) before the failover is complete. Operations are not retried.
+
+        In a failover situation, expect at least one operation to return an error (throw
+        an exception) before the failover is complete. Operations are not retried.
*/
class DBClientReplicaSet : public DBClientBase {
@@ -173,33 +173,33 @@ namespace mongo {
/** Authorize. Authorizes all nodes as needed
*/
virtual bool auth(const string &dbname, const string &username, const string &pwd, string& errmsg, bool digestPassword = true );
-
+
// ----------- simple functions --------------
/** throws userassertion "no master found" */
virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn = 0, int nToSkip = 0,
const BSONObj *fieldsToReturn = 0, int queryOptions = 0 , int batchSize = 0 );
-
+
/** throws userassertion "no master found" */
virtual BSONObj findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
virtual void insert( const string &ns , BSONObj obj );
- /** insert multiple objects. Note that single object insert is asynchronous, so this version
+ /** insert multiple objects. Note that single object insert is asynchronous, so this version
is only nominally faster and not worth a special effort to try to use. */
virtual void insert( const string &ns, const vector< BSONObj >& v );
virtual void remove( const string &ns , Query obj , bool justOne = 0 );
-
+
virtual void update( const string &ns , Query query , BSONObj obj , bool upsert = 0 , bool multi = 0 );
-
+
virtual void killCursor( long long cursorID );
-
+
// ---- access raw connections ----
-
+
DBClientConnection& masterConn();
DBClientConnection& slaveConn();
-
+
// ---- callback pieces -------
virtual void checkResponse( const char *data, int nReturned ) { checkMaster()->checkResponse( data , nReturned ); }
@@ -217,41 +217,41 @@ namespace mongo {
string toString() { return getServerAddress(); }
string getServerAddress() const { return _monitor->getServerAddress(); }
-
- virtual ConnectionString::ConnectionType type() const { return ConnectionString::SET; }
-
+
+ virtual ConnectionString::ConnectionType type() const { return ConnectionString::SET; }
+
// ---- low level ------
virtual bool call( Message &toSend, Message &response, bool assertOk=true );
virtual void say( Message &toSend ) { checkMaster()->say( toSend ); }
- virtual bool callRead( Message& toSend , Message& response ){ return checkMaster()->callRead( toSend , response ); }
+ virtual bool callRead( Message& toSend , Message& response ) { return checkMaster()->callRead( toSend , response ); }
- protected:
+ protected:
virtual void sayPiggyBack( Message &toSend ) { checkMaster()->say( toSend ); }
private:
DBClientConnection * checkMaster();
DBClientConnection * checkSlave();
-
+
void _auth( DBClientConnection * conn );
ReplicaSetMonitorPtr _monitor;
- HostAndPort _masterHost;
+ HostAndPort _masterHost;
scoped_ptr<DBClientConnection> _master;
HostAndPort _slaveHost;
scoped_ptr<DBClientConnection> _slave;
-
+
/**
* for storing authentication info
* fields are exactly for DBClientConnection::auth
*/
struct AuthInfo {
- AuthInfo( string d , string u , string p , bool di )
- : dbname( d ) , username( u ) , pwd( p ) , digestPassword( di ){}
+ AuthInfo( string d , string u , string p , bool di )
+ : dbname( d ) , username( u ) , pwd( p ) , digestPassword( di ) {}
string dbname;
string username;
string pwd;
@@ -262,8 +262,8 @@ namespace mongo {
// we can re-auth
// this could be a security issue, as the password is stored in memory
// not sure if/how we should handle
- list<AuthInfo> _auths;
+ list<AuthInfo> _auths;
};
-
+
}
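
Per the AuthInfo bookkeeping above, credentials given to the set client are replayed against any node it reaches later. A short sketch, continuing the rs object from the earlier example, with illustrative credentials:

    string errmsg;
    // on success the credentials are cached for future failover targets
    if ( !rs.auth( "admin", "user", "pwd", errmsg ) )
        cout << "auth failed: " << errmsg << endl;
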
diff --git a/client/dbclientcursor.cpp b/client/dbclientcursor.cpp
index bb4aa5320d3..d3928dcdcf9 100644
--- a/client/dbclientcursor.cpp
+++ b/client/dbclientcursor.cpp
@@ -26,14 +26,14 @@ namespace mongo {
void assembleRequest( const string &ns, BSONObj query, int nToReturn, int nToSkip, const BSONObj *fieldsToReturn, int queryOptions, Message &toSend );
- int DBClientCursor::nextBatchSize(){
+ int DBClientCursor::nextBatchSize() {
if ( nToReturn == 0 )
return batchSize;
if ( batchSize == 0 )
return nToReturn;
-
+
return batchSize < nToReturn ? batchSize : nToReturn;
}
@@ -41,7 +41,8 @@ namespace mongo {
Message toSend;
if ( !cursorId ) {
assembleRequest( ns, query, nextBatchSize() , nToSkip, fieldsToReturn, opts, toSend );
- } else {
+ }
+ else {
BufBuilder b;
b.appendNum( opts );
b.appendStr( ns );
@@ -60,7 +61,7 @@ namespace mongo {
void DBClientCursor::requestMore() {
assert( cursorId && pos == nReturned );
- if (haveLimit){
+ if (haveLimit) {
nToReturn -= nReturned;
assert(nToReturn > 0);
}
@@ -69,12 +70,12 @@ namespace mongo {
b.appendStr(ns);
b.appendNum(nextBatchSize());
b.appendNum(cursorId);
-
+
Message toSend;
toSend.setData(dbGetMore, b.buf(), b.len());
auto_ptr<Message> response(new Message());
-
- if ( connector ){
+
+ if ( connector ) {
connector->call( toSend, *response );
m = response;
dataReceived();
@@ -105,7 +106,7 @@ namespace mongo {
void DBClientCursor::dataReceived() {
QueryResult *qr = (QueryResult *) m->singleData();
resultFlags = qr->resultFlags();
-
+
if ( qr->resultFlags() & ResultFlag_CursorNotFound ) {
// cursor id no longer valid at the server.
assert( qr->cursorId == 0 );
@@ -113,7 +114,7 @@ namespace mongo {
if ( ! ( opts & QueryOption_CursorTailable ) )
throw UserException( 13127 , "getMore: cursor didn't exist on server, possible restart or timeout?" );
}
-
+
if ( cursorId == 0 || ! ( opts & QueryOption_CursorTailable ) ) {
// only set initially: we don't want to kill it on end of data
// if it's a tailable cursor
@@ -136,7 +137,7 @@ namespace mongo {
if ( !_putBack.empty() )
return true;
-
+
if (haveLimit && pos >= nToReturn)
return false;
@@ -171,7 +172,7 @@ namespace mongo {
int m = atMost;
/*
- for( stack<BSONObj>::iterator i = _putBack.begin(); i != _putBack.end(); i++ ) {
+ for( stack<BSONObj>::iterator i = _putBack.begin(); i != _putBack.end(); i++ ) {
if( m == 0 )
return;
v.push_back(*i);
@@ -191,7 +192,7 @@ namespace mongo {
}
}
- void DBClientCursor::attach( AScopedConnection * conn ){
+ void DBClientCursor::attach( AScopedConnection * conn ) {
assert( _scopedHost.size() == 0 );
_scopedHost = conn->getHost();
conn->done();
@@ -204,28 +205,28 @@ namespace mongo {
DESTRUCTOR_GUARD (
- if ( cursorId && _ownCursor ) {
- BufBuilder b;
- b.appendNum( (int)0 ); // reserved
- b.appendNum( (int)1 ); // number
- b.appendNum( cursorId );
-
- Message m;
- m.setData( dbKillCursors , b.buf() , b.len() );
-
- if ( connector ){
- connector->sayPiggyBack( m );
- }
- else {
- assert( _scopedHost.size() );
- ScopedDbConnection conn( _scopedHost );
- conn->sayPiggyBack( m );
- conn.done();
- }
+ if ( cursorId && _ownCursor ) {
+ BufBuilder b;
+ b.appendNum( (int)0 ); // reserved
+ b.appendNum( (int)1 ); // number
+ b.appendNum( cursorId );
+
+ Message m;
+ m.setData( dbKillCursors , b.buf() , b.len() );
+
+ if ( connector ) {
+ connector->sayPiggyBack( m );
}
+ else {
+ assert( _scopedHost.size() );
+ ScopedDbConnection conn( _scopedHost );
+ conn->sayPiggyBack( m );
+ conn.done();
+ }
+ }
);
}
-
+
} // namespace mongo
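
A sketch of the batch bookkeeping these cursor methods maintain; the namespace and the age predicate are placeholders, and c is assumed to be a connected DBClientConnection.

    void scanWithPutBack( DBClientConnection& c ) {
        auto_ptr<DBClientCursor> cur = c.query( "test.people", Query() );
        while ( cur->more() ) {
            BSONObj o = cur->next();
            if ( o["age"].numberInt() < 0 ) {    // placeholder predicate
                cur->putBack( o );               // hand the object back for a later consumer
                break;
            }
            // objsLeftInBatch() == 0 means the next more() must go back to the server
            if ( cur->objsLeftInBatch() == 0 )
                cout << "requesting another batch" << endl;
        }
    }
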
diff --git a/client/dbclientcursor.h b/client/dbclientcursor.h
index 2cdf3683db5..c86814d7e6d 100644
--- a/client/dbclientcursor.h
+++ b/client/dbclientcursor.h
@@ -1,4 +1,4 @@
-// file dbclientcursor.h
+// file dbclientcursor.h
/* Copyright 2009 10gen Inc.
*
@@ -24,9 +24,9 @@
#include <stack>
namespace mongo {
-
+
class AScopedConnection;
-
+
/** for mock purposes only -- do not create variants of DBClientCursor, nor hang code here */
class DBClientCursorInterface {
public:
@@ -41,38 +41,38 @@ namespace mongo {
DBClientCursorInterface() {}
};
- /** Queries return a cursor object */
+ /** Queries return a cursor object */
class DBClientCursor : public DBClientCursorInterface {
public:
- /** If true, safe to call next(). Requests more from server if necessary. */
+ /** If true, safe to call next(). Requests more from server if necessary. */
bool more();
- /** If true, there is more in our local buffers to be fetched via next(). Returns
- false when a getMore request back to server would be required. You can use this
- if you want to exhaust whatever data has been fetched to the client already but
+ /** If true, there is more in our local buffers to be fetched via next(). Returns
+ false when a getMore request back to server would be required. You can use this
+ if you want to exhaust whatever data has been fetched to the client already but
then perhaps stop.
*/
int objsLeftInBatch() const { _assertIfNull(); return _putBack.size() + nReturned - pos; }
bool moreInCurrentBatch() { return objsLeftInBatch() > 0; }
/** next
- @return next object in the result cursor.
+ @return next object in the result cursor.
on an error at the remote server, you will get back:
{ $err: <string> }
if you do not want to handle that yourself, call nextSafe().
*/
BSONObj next();
-
- /**
+
+ /**
restore an object previously returned by next() to the cursor
*/
void putBack( const BSONObj &o ) { _putBack.push( o.getOwned() ); }
- /** throws AssertionException if get back { $err : ... } */
+ /** throws AssertionException if get back { $err : ... } */
BSONObj nextSafe() {
BSONObj o = next();
BSONElement e = o.firstElement();
- if( strcmp(e.fieldName(), "$err") == 0 ) {
+ if( strcmp(e.fieldName(), "$err") == 0 ) {
if( logLevel >= 5 )
log() << "nextSafe() error " << o.toString() << endl;
uassert(13106, "nextSafe(): " + o.toString(), false);
@@ -81,7 +81,7 @@ namespace mongo {
}
/** peek ahead at items buffered for future next() calls.
-        never requests new data from the server, so peek is only effective
+        never requests new data from the server, so peek is only effective
with what is already buffered.
WARNING: no support for _putBack yet!
*/
@@ -90,9 +90,9 @@ namespace mongo {
/**
            iterate the rest of the cursor and return the number of items
*/
- int itcount(){
+ int itcount() {
int c = 0;
- while ( more() ){
+ while ( more() ) {
next();
c++;
}
@@ -111,48 +111,48 @@ namespace mongo {
bool tailable() const {
return (opts & QueryOption_CursorTailable) != 0;
}
-
- /** see ResultFlagType (constants.h) for flag values
- mostly these flags are for internal purposes -
+
+ /** see ResultFlagType (constants.h) for flag values
+ mostly these flags are for internal purposes -
ResultFlag_ErrSet is the possible exception to that
*/
- bool hasResultFlag( int flag ){
+ bool hasResultFlag( int flag ) {
_assertIfNull();
return (resultFlags & flag) != 0;
}
DBClientCursor( DBConnector *_connector, const string &_ns, BSONObj _query, int _nToReturn,
int _nToSkip, const BSONObj *_fieldsToReturn, int queryOptions , int bs ) :
- connector(_connector),
- ns(_ns),
- query(_query),
- nToReturn(_nToReturn),
- haveLimit( _nToReturn > 0 && !(queryOptions & QueryOption_CursorTailable)),
- nToSkip(_nToSkip),
- fieldsToReturn(_fieldsToReturn),
- opts(queryOptions),
- batchSize(bs==1?2:bs),
- m(new Message()),
- cursorId(),
- nReturned(),
- pos(),
- data(),
- _ownCursor( true ){
+ connector(_connector),
+ ns(_ns),
+ query(_query),
+ nToReturn(_nToReturn),
+ haveLimit( _nToReturn > 0 && !(queryOptions & QueryOption_CursorTailable)),
+ nToSkip(_nToSkip),
+ fieldsToReturn(_fieldsToReturn),
+ opts(queryOptions),
+ batchSize(bs==1?2:bs),
+ m(new Message()),
+ cursorId(),
+ nReturned(),
+ pos(),
+ data(),
+ _ownCursor( true ) {
}
-
+
DBClientCursor( DBConnector *_connector, const string &_ns, long long _cursorId, int _nToReturn, int options ) :
- connector(_connector),
- ns(_ns),
- nToReturn( _nToReturn ),
- haveLimit( _nToReturn > 0 && !(options & QueryOption_CursorTailable)),
- opts( options ),
- m(new Message()),
- cursorId( _cursorId ),
- nReturned(),
- pos(),
- data(),
- _ownCursor( true ){
- }
+ connector(_connector),
+ ns(_ns),
+ nToReturn( _nToReturn ),
+ haveLimit( _nToReturn > 0 && !(options & QueryOption_CursorTailable)),
+ opts( options ),
+ m(new Message()),
+ cursorId( _cursorId ),
+ nReturned(),
+ pos(),
+ data(),
+ _ownCursor( true ) {
+ }
virtual ~DBClientCursor();
@@ -162,13 +162,13 @@ namespace mongo {
message when ~DBClientCursor() is called. This function overrides that.
*/
void decouple() { _ownCursor = false; }
-
+
void attach( AScopedConnection * conn );
-
+
private:
friend class DBClientBase;
friend class DBClientConnection;
- bool init();
+ bool init();
int nextBatchSize();
DBConnector *connector;
string ns;
@@ -199,7 +199,7 @@ namespace mongo {
DBClientCursor( const DBClientCursor& );
DBClientCursor& operator=( const DBClientCursor& );
};
-
+
/** iterate over objects in current batch only - will not cause a network call
*/
class DBClientCursorBatchIterator {
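
A minimal sketch of driving the cursor API declared above (collection name and
predicate are illustrative; error handling is trimmed):

    DBClientConnection conn;
    string errmsg;
    if ( ! conn.connect( "localhost:27017" , errmsg ) ) {
        cout << "couldn't connect: " << errmsg << endl;
        return 1;
    }

    auto_ptr<DBClientCursor> c = conn.query( "test.people" , QUERY( "age" << 33 ) );
    while ( c->more() ) {
        // nextSafe() throws on { $err : ... } instead of handing it back
        BSONObj o = c->nextSafe();
        cout << o["name"].str() << endl;
    }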
diff --git a/client/dbclientmockcursor.h b/client/dbclientmockcursor.h
index 0f6dba96458..8d85ff5ad2e 100644
--- a/client/dbclientmockcursor.h
+++ b/client/dbclientmockcursor.h
@@ -20,7 +20,7 @@
#include "dbclientcursor.h"
namespace mongo {
-
+
class DBClientMockCursor : public DBClientCursorInterface {
public:
DBClientMockCursor( const BSONArray& mockCollection ) : _iter( mockCollection ) {}
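
DBClientMockCursor exists so cursor-consuming code can be unit tested without a
server: it replays a pre-built array through the same interface. A short sketch
(assuming more()/next() are backed by the BSONObjIterator member shown above):

    BSONArray data = BSON_ARRAY( BSON( "x" << 1 ) << BSON( "x" << 2 ) );
    DBClientMockCursor mock( data );
    while ( mock.more() )
        cout << mock.next() << endl;   // replays the array elements in order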
diff --git a/client/distlock.cpp b/client/distlock.cpp
index b88c89fd5f0..0db55c96cb0 100644
--- a/client/distlock.cpp
+++ b/client/distlock.cpp
@@ -42,15 +42,15 @@ namespace mongo {
/* =================== */
- string getDistLockProcess(){
+ string getDistLockProcess() {
boost::call_once( initModule, _init );
assert( _cachedProcessString );
return *_cachedProcessString;
}
- string getDistLockId(){
+ string getDistLockId() {
string s = distLockIds.get();
- if ( s.empty() ){
+ if ( s.empty() ) {
stringstream ss;
ss << getDistLockProcess() << ":" << getThreadName() << ":" << rand();
s = ss.str();
@@ -58,27 +58,27 @@ namespace mongo {
}
return s;
}
-
- void distLockPingThread( ConnectionString addr ){
+
+ void distLockPingThread( ConnectionString addr ) {
setThreadName( "LockPinger" );
static int loops = 0;
- while( ! inShutdown() ){
+ while( ! inShutdown() ) {
string process = getDistLockProcess();
log(4) << "dist_lock about to ping for: " << process << endl;
try {
ScopedDbConnection conn( addr );
-
+
// refresh the entry corresponding to this process in the lockpings collection
- conn->update( lockPingNS ,
- BSON( "_id" << process ) ,
+ conn->update( lockPingNS ,
+ BSON( "_id" << process ) ,
BSON( "$set" << BSON( "ping" << DATENOW ) ) ,
true );
string err = conn->getLastError();
- if ( ! err.empty() ){
- log( LL_WARNING ) << "dist_lock process: " << process << " pinging: " << addr << " failed: "
+ if ( ! err.empty() ) {
+ log( LL_WARNING ) << "dist_lock process: " << process << " pinging: " << addr << " failed: "
<< err << endl;
conn.done();
sleepsecs(30);
@@ -91,32 +91,32 @@ namespace mongo {
// if the lock is taken, the take-over mechanism should handle the situation
auto_ptr<DBClientCursor> c = conn->query( locksNS , BSONObj() );
vector<string> pids;
- while ( c->more() ){
+ while ( c->more() ) {
BSONObj lock = c->next();
if ( ! lock["process"].eoo() ) {
pids.push_back( lock["process"].valuestrsafe() );
}
- }
+ }
Date_t fourDays = jsTime() - ( 4 * 86400 * 1000 ); // 4 days
conn->remove( lockPingNS , BSON( "_id" << BSON( "$nin" << pids ) << "ping" << LT << fourDays ) );
err = conn->getLastError();
- if ( ! err.empty() ){
- log ( LL_WARNING ) << "dist_lock cleanup request from process: " << process << " to: " << addr
+ if ( ! err.empty() ) {
+ log ( LL_WARNING ) << "dist_lock cleanup request from process: " << process << " to: " << addr
<< " failed: " << err << endl;
conn.done();
sleepsecs(30);
continue;
}
-
+
// create index so remove is fast even with a lot of servers
- if ( loops++ == 0 ){
+ if ( loops++ == 0 ) {
conn->ensureIndex( lockPingNS , BSON( "ping" << 1 ) );
}
-
+
conn.done();
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
log( LL_WARNING ) << "dist_lock exception during ping: " << e.what() << endl;
}
@@ -124,14 +124,14 @@ namespace mongo {
sleepsecs(30);
}
}
-
+
class DistributedLockPinger {
public:
DistributedLockPinger()
- : _mutex( "DistributedLockPinger" ){
+ : _mutex( "DistributedLockPinger" ) {
}
-
- void got( const ConnectionString& conn ){
+
+ void got( const ConnectionString& conn ) {
string s = conn.toString();
scoped_lock lk( _mutex );
if ( _seen.count( s ) > 0 )
@@ -139,52 +139,53 @@ namespace mongo {
boost::thread t( boost::bind( &distLockPingThread , conn ) );
_seen.insert( s );
}
-
+
set<string> _seen;
mongo::mutex _mutex;
-
+
} distLockPinger;
-
+
DistributedLock::DistributedLock( const ConnectionString& conn , const string& name , unsigned takeoverMinutes )
- : _conn(conn),_name(name),_takeoverMinutes(takeoverMinutes){
+ : _conn(conn),_name(name),_takeoverMinutes(takeoverMinutes) {
_id = BSON( "_id" << name );
_ns = "config.locks";
distLockPinger.got( conn );
}
-
- bool DistributedLock::lock_try( string why , BSONObj * other ){
+
+ bool DistributedLock::lock_try( string why , BSONObj * other ) {
// write to dummy if 'other' is null
- BSONObj dummyOther;
+ BSONObj dummyOther;
if ( other == NULL )
other = &dummyOther;
ScopedDbConnection conn( _conn );
-
+
BSONObjBuilder queryBuilder;
queryBuilder.appendElements( _id );
- queryBuilder.append( "state" , 0 );
+ queryBuilder.append( "state" , 0 );
- { // make sure its there so we can use simple update logic below
+ {
+            // make sure it's there so we can use simple update logic below
BSONObj o = conn->findOne( _ns , _id );
- if ( o.isEmpty() ){
+ if ( o.isEmpty() ) {
try {
log(4) << "dist_lock inserting initial doc in " << _ns << " for lock " << _name << endl;
conn->insert( _ns , BSON( "_id" << _name << "state" << 0 << "who" << "" ) );
}
- catch ( UserException& e ){
+ catch ( UserException& e ) {
log() << "dist_lock could not insert initial doc: " << e << endl;
}
}
- else if ( o["state"].numberInt() > 0 ){
+ else if ( o["state"].numberInt() > 0 ) {
BSONObj lastPing = conn->findOne( lockPingNS , o["process"].wrap( "_id" ) );
- if ( lastPing.isEmpty() ){
+ if ( lastPing.isEmpty() ) {
// if a lock is taken but there's no ping for it, we're in an inconsistent situation
// if the lock holder (mongos or d) does not exist anymore, the lock could safely be removed
// but we'd require analysis of the situation before a manual intervention
log(LL_ERROR) << "config.locks: " << _name << " lock is taken by old process? "
- << "remove the following lock if the process is not active anymore: " << o << endl;
+ << "remove the following lock if the process is not active anymore: " << o << endl;
*other = o;
other->getOwned();
conn.done();
@@ -194,18 +195,18 @@ namespace mongo {
unsigned long long elapsed = jsTime() - lastPing["ping"].Date(); // in ms
elapsed = elapsed / ( 1000 * 60 ); // convert to minutes
- if ( elapsed <= _takeoverMinutes ){
+ if ( elapsed <= _takeoverMinutes ) {
log(1) << "dist_lock lock failed because taken by: " << o << " elapsed minutes: " << elapsed << endl;
*other = o;
other->getOwned();
conn.done();
return false;
}
-
+
log() << "dist_lock forcefully taking over from: " << o << " elapsed minutes: " << elapsed << endl;
conn->update( _ns , _id , BSON( "$set" << BSON( "state" << 0 ) ) );
string err = conn->getLastError();
- if ( ! err.empty() ){
+ if ( ! err.empty() ) {
log( LL_WARNING ) << "dist_lock take over from: " << o << " failed: " << err << endl;
*other = o;
other->getOwned();
@@ -214,11 +215,11 @@ namespace mongo {
}
}
- else if ( o["ts"].type() ){
+ else if ( o["ts"].type() ) {
queryBuilder.append( o["ts"] );
}
}
-
+
OID ts;
ts.init();
@@ -232,11 +233,11 @@ namespace mongo {
                log(4) << "dist_lock about to acquire lock: " << lockDetails << endl;
conn->update( _ns , queryBuilder.obj() , whatIWant );
-
+
BSONObj o = conn->getLastErrorDetailed();
now = conn->findOne( _ns , _id );
-
- if ( o["n"].numberInt() == 0 ){
+
+ if ( o["n"].numberInt() == 0 ) {
*other = now;
other->getOwned();
                    log() << "dist_lock error trying to acquire lock: " << lockDetails << " error: " << o << endl;
@@ -247,22 +248,22 @@ namespace mongo {
}
}
- catch ( UpdateNotTheSame& up ){
+ catch ( UpdateNotTheSame& up ) {
// this means our update got through on some, but not others
log(4) << "dist_lock lock did not propagate properly" << endl;
- for ( unsigned i=0; i<up.size(); i++ ){
+ for ( unsigned i=0; i<up.size(); i++ ) {
ScopedDbConnection temp( up[i].first );
BSONObj temp2 = temp->findOne( _ns , _id );
- if ( now.isEmpty() || now["ts"] < temp2["ts"] ){
+ if ( now.isEmpty() || now["ts"] < temp2["ts"] ) {
now = temp2.getOwned();
}
temp.done();
}
- if ( now["ts"].OID() == ts ){
+ if ( now["ts"].OID() == ts ) {
log(4) << "dist_lock completed lock propagation" << endl;
gotLock = true;
conn->update( _ns , _id , whatIWant );
@@ -272,15 +273,15 @@ namespace mongo {
gotLock = false;
}
}
-
+
conn.done();
-
+
log(2) << "dist_lock lock gotLock: " << gotLock << " now: " << now << endl;
return gotLock;
}
- void DistributedLock::unlock(){
+ void DistributedLock::unlock() {
const int maxAttempts = 3;
int attempted = 0;
while ( ++attempted <= maxAttempts ) {
@@ -293,17 +294,18 @@ namespace mongo {
return;
-
- } catch ( std::exception& e) {
- log( LL_WARNING ) << "dist_lock " << _name << " failed to contact config server in unlock attempt "
+
+ }
+ catch ( std::exception& e) {
+ log( LL_WARNING ) << "dist_lock " << _name << " failed to contact config server in unlock attempt "
<< attempted << ": " << e.what() << endl;
sleepsecs(1 << attempted);
}
}
-        log( LL_WARNING ) << "dist_lock couldn't consummate unlock request. " << "Lock " << _name
-                          << " will be taken over after " << _takeoverMinutes << " minutes timeout" << endl;
+        log( LL_WARNING ) << "dist_lock couldn't consummate unlock request. " << "Lock " << _name
+                          << " will be taken over after " << _takeoverMinutes << " minutes timeout" << endl;
}
}
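
The lock_try()/unlock() pair above is the raw protocol. A sketch of driving it
directly (config server address and task name are illustrative; most callers
should prefer the dist_lock_try guard declared in distlock.h):

    DistributedLock lk( ConnectionString( "cfg1,cfg2,cfg3" , ConnectionString::SYNC ) , "balancer" );
    BSONObj other;
    if ( lk.lock_try( "balance round" , &other ) ) {
        // ... perform the system-wide task ...
        lk.unlock();   // retried up to 3 times; on failure the takeover timeout applies
    }
    else {
        log() << "lock busy, current holder: " << other << endl;
    }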
diff --git a/client/distlock.h b/client/distlock.h
index 5a933a8c2a0..491de4812bf 100644
--- a/client/distlock.h
+++ b/client/distlock.h
@@ -28,16 +28,16 @@ namespace mongo {
/**
* The distributed lock is a configdb backed way of synchronizing system-wide tasks. A task must be identified by a
* unique name across the system (e.g., "balancer"). A lock is taken by writing a document in the configdb's locks
- * collection with that name.
+ * collection with that name.
*
-     * To be maintained, each taken lock needs to be revalidated ("pinged") within a pre-established amount of time. This
+     * To be maintained, each taken lock needs to be revalidated ("pinged") within a pre-established amount of time. This
     * class does this maintenance automatically once a DistributedLock object is constructed.
*/
class DistributedLock {
public:
/**
- * The constructor does not connect to the configdb yet and constructing does not mean the lock was acquired.
+ * The constructor does not connect to the configdb yet and constructing does not mean the lock was acquired.
* Construction does trigger a lock "pinging" mechanism, though.
*
* @param conn address of config(s) server(s)
@@ -49,7 +49,7 @@ namespace mongo {
/**
     * Attempts to acquire 'this' lock, checking if it could or should be stolen from the previous holder. Please
* consider using the dist_lock_try construct to acquire this lock in an exception safe way.
- *
+ *
* @param why human readable description of why the lock is being taken (used to log)
* @param other configdb's lock document that is currently holding the lock, if lock is taken
* @return true if it managed to grab the lock
@@ -65,15 +65,15 @@ namespace mongo {
ConnectionString _conn;
string _name;
unsigned _takeoverMinutes;
-
+
string _ns;
BSONObj _id;
};
-
+
class dist_lock_try {
public:
dist_lock_try( DistributedLock * lock , string why )
- : _lock(lock){
+ : _lock(lock) {
_got = _lock->lock_try( why , &_other );
}
@@ -85,11 +85,11 @@ namespace mongo {
bool got() const { return _got; }
BSONObj other() const { return _other; }
-
+
private:
DistributedLock * _lock;
bool _got;
- BSONObj _other;
+ BSONObj _other;
};
}
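
The exception-safe idiom recommended above looks roughly like this (the guard
is documented as exception safe, so release on destruction is assumed here; the
DistributedLock lk is built as in the previous sketch):

    dist_lock_try guard( &lk , "chunk migration" );
    if ( ! guard.got() ) {
        log() << "skipping: lock held by " << guard.other() << endl;
        return;
    }
    // ... protected work; the guard is expected to unlock when it goes out of scope ...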
diff --git a/client/distlock_test.cpp b/client/distlock_test.cpp
index 0879b6e2924..8cc28e37479 100644
--- a/client/distlock_test.cpp
+++ b/client/distlock_test.cpp
@@ -21,23 +21,23 @@
#include "../db/commands.h"
namespace mongo {
-
+
class TestDistLockWithSync : public Command {
public:
- TestDistLockWithSync() : Command( "_testDistLockWithSyncCluster" ){}
+ TestDistLockWithSync() : Command( "_testDistLockWithSyncCluster" ) {}
virtual void help( stringstream& help ) const {
help << "should not be calling this directly" << endl;
}
-
+
virtual bool slaveOk() const { return false; }
virtual bool adminOnly() const { return true; }
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
- static void runThread(){
- for ( int i=0; i<1000; i++ ){
- if ( current->lock_try( "test" ) ){
+ static void runThread() {
+ for ( int i=0; i<1000; i++ ) {
+ if ( current->lock_try( "test" ) ) {
gotit++;
- for ( int j=0; j<2000; j++ ){
+ for ( int j=0; j<2000; j++ ) {
count++;
}
current->unlock();
@@ -45,17 +45,17 @@ namespace mongo {
}
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
DistributedLock lk( ConnectionString( cmdObj["host"].String() , ConnectionString::SYNC ), "testdistlockwithsync" );
current = &lk;
count = 0;
gotit = 0;
-
+
vector<shared_ptr<boost::thread> > l;
- for ( int i=0; i<4; i++ ){
+ for ( int i=0; i<4; i++ ) {
l.push_back( shared_ptr<boost::thread>( new boost::thread( runThread ) ) );
}
-
+
for ( unsigned i=0; i<l.size(); i++ )
l[i]->join();
@@ -64,7 +64,7 @@ namespace mongo {
current = 0;
return count == gotit * 2000;
}
-
+
static DistributedLock * current;
static int count;
static int gotit;
diff --git a/client/examples/authTest.cpp b/client/examples/authTest.cpp
index 9243bbf9ef8..71cdd390cff 100644
--- a/client/examples/authTest.cpp
+++ b/client/examples/authTest.cpp
@@ -22,7 +22,7 @@
using namespace mongo;
int main( int argc, const char **argv ) {
-
+
const char *port = "27017";
if ( argc != 1 ) {
if ( argc != 3 )
@@ -37,12 +37,13 @@ int main( int argc, const char **argv ) {
throw -11;
}
- { // clean up old data from any previous tests
+ {
+ // clean up old data from any previous tests
conn.remove( "test.system.users" , BSONObj() );
}
conn.insert( "test.system.users" , BSON( "user" << "eliot" << "pwd" << conn.createPasswordDigest( "eliot" , "bar" ) ) );
-
+
errmsg.clear();
bool ok = conn.auth( "test" , "eliot" , "bar" , errmsg );
if ( ! ok )
diff --git a/client/examples/clientTest.cpp b/client/examples/clientTest.cpp
index 64450db3db6..bd4432eb0cf 100644
--- a/client/examples/clientTest.cpp
+++ b/client/examples/clientTest.cpp
@@ -130,12 +130,14 @@ int main( int argc, const char **argv ) {
}
- { // ensure index
+ {
+ // ensure index
assert( conn.ensureIndex( ns , BSON( "name" << 1 ) ) );
assert( ! conn.ensureIndex( ns , BSON( "name" << 1 ) ) );
}
- { // hint related tests
+ {
+ // hint related tests
assert( conn.findOne(ns, "{}")["name"].str() == "sara" );
assert( conn.findOne(ns, "{ name : 'eliot' }")["name"].str() == "eliot" );
@@ -146,7 +148,7 @@ int main( int argc, const char **argv ) {
try {
conn.findOne(ns, Query("{name:\"eliot\"}").hint("{foo:1}"));
}
- catch ( ... ){
+ catch ( ... ) {
asserted = true;
}
assert( asserted );
@@ -158,7 +160,8 @@ int main( int argc, const char **argv ) {
assert( conn.validate( ns ) );
}
- { // timestamp test
+ {
+ // timestamp test
const char * tsns = "test.tstest1";
conn.dropCollection( tsns );
@@ -190,32 +193,33 @@ int main( int argc, const char **argv ) {
( oldTime == found["ts"].timestampTime() && oldInc < found["ts"].timestampInc() ) );
}
-
- { // check that killcursors doesn't affect last error
+
+ {
+ // check that killcursors doesn't affect last error
assert( conn.getLastError().empty() );
-
+
BufBuilder b;
b.appendNum( (int)0 ); // reserved
b.appendNum( (int)-1 ); // invalid # of cursors triggers exception
b.appendNum( (int)-1 ); // bogus cursor id
-
+
Message m;
m.setData( dbKillCursors, b.buf(), b.len() );
-
+
        // say() is protected in DBClientConnection, so cast to the superclass
static_cast< DBConnector* >( &conn )->say( m );
-
+
assert( conn.getLastError().empty() );
}
{
list<string> l = conn.getDatabaseNames();
- for ( list<string>::iterator i = l.begin(); i != l.end(); i++ ){
+ for ( list<string>::iterator i = l.begin(); i != l.end(); i++ ) {
cout << "db name : " << *i << endl;
}
l = conn.getCollectionNames( "test" );
- for ( list<string>::iterator i = l.begin(); i != l.end(); i++ ){
+ for ( list<string>::iterator i = l.begin(); i != l.end(); i++ ) {
cout << "coll name : " << *i << endl;
}
}
diff --git a/client/examples/first.cpp b/client/examples/first.cpp
index f3b654fe686..ab5efb325f5 100644
--- a/client/examples/first.cpp
+++ b/client/examples/first.cpp
@@ -40,7 +40,7 @@ int main( int argc, const char **argv ) {
throw -12;
port = argv[ 2 ];
}
-
+
mongo::DBClientConnection conn;
string errmsg;
if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) {
@@ -48,14 +48,15 @@ int main( int argc, const char **argv ) {
throw -11;
}
- { // clean up old data from any previous tests
+ {
+ // clean up old data from any previous tests
mongo::BSONObjBuilder query;
conn.remove( "test.people" , query.obj() );
}
insert( conn , "eliot" , 15 );
insert( conn , "sara" , 23 );
-
+
{
mongo::BSONObjBuilder query;
auto_ptr<mongo::DBClientCursor> cursor = conn.query( "test.people" , query.obj() );
@@ -66,14 +67,14 @@ int main( int argc, const char **argv ) {
}
}
-
+
{
mongo::BSONObjBuilder query;
query.append( "name" , "eliot" );
mongo::BSONObj res = conn.findOne( "test.people" , query.obj() );
cout << res.isEmpty() << "\t" << res.jsonString() << endl;
}
-
+
{
mongo::BSONObjBuilder query;
query.append( "name" , "asd" );
diff --git a/client/examples/httpClientTest.cpp b/client/examples/httpClientTest.cpp
index 89d5bec603a..4fa5fd8069f 100644
--- a/client/examples/httpClientTest.cpp
+++ b/client/examples/httpClientTest.cpp
@@ -23,7 +23,7 @@
using namespace mongo;
int main( int argc, const char **argv ) {
-
+
int port = 27017;
if ( argc != 1 ) {
if ( argc != 3 )
@@ -31,11 +31,11 @@ int main( int argc, const char **argv ) {
port = atoi( argv[ 2 ] );
}
port += 1000;
-
+
stringstream ss;
ss << "http://localhost:" << port << "/";
string url = ss.str();
-
+
cout << "[" << url << "]" << endl;
HttpClient c;
diff --git a/client/examples/rs.cpp b/client/examples/rs.cpp
index 39cdac1cc5f..7813ec633ac 100644
--- a/client/examples/rs.cpp
+++ b/client/examples/rs.cpp
@@ -25,16 +25,16 @@
using namespace mongo;
using namespace std;
-int main( int argc , const char ** argv ){
+int main( int argc , const char ** argv ) {
string errmsg;
- ConnectionString cs = ConnectionString::parse( "foo/127.0.0.1" , errmsg );
- if ( ! cs.isValid() ){
+ ConnectionString cs = ConnectionString::parse( "foo/127.0.0.1" , errmsg );
+ if ( ! cs.isValid() ) {
cout << "error parsing url: " << errmsg << endl;
return 1;
}
-
+
DBClientReplicaSet * conn = (DBClientReplicaSet*)cs.connect( errmsg );
- if ( ! conn ){
+ if ( ! conn ) {
cout << "error connecting: " << errmsg << endl;
return 2;
}
@@ -42,17 +42,17 @@ int main( int argc , const char ** argv ){
string collName = "test.rs1";
conn->dropCollection( collName );
- while ( true ){
+ while ( true ) {
try {
conn->update( collName , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) , true );
cout << conn->findOne( collName , BSONObj() ) << endl;
cout << "\t A" << conn->slaveConn().findOne( collName , BSONObj() , 0 , QueryOption_SlaveOk ) << endl;
cout << "\t B " << conn->findOne( collName , BSONObj() , 0 , QueryOption_SlaveOk ) << endl;
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
cout << "ERROR: " << e.what() << endl;
}
sleepsecs( 1 );
}
-
+
}
diff --git a/client/examples/second.cpp b/client/examples/second.cpp
index 68eafaa91eb..6cc2111580f 100644
--- a/client/examples/second.cpp
+++ b/client/examples/second.cpp
@@ -23,7 +23,7 @@ using namespace std;
using namespace mongo;
int main( int argc, const char **argv ) {
-
+
const char *port = "27017";
if ( argc != 1 ) {
if ( argc != 3 )
diff --git a/client/examples/tail.cpp b/client/examples/tail.cpp
index 3738b4f1840..90e62d279c1 100644
--- a/client/examples/tail.cpp
+++ b/client/examples/tail.cpp
@@ -23,24 +23,24 @@
using namespace mongo;
void tail(DBClientBase& conn, const char *ns) {
- BSONElement lastId = minKey.firstElement();
- Query query = Query();
-
- auto_ptr<DBClientCursor> c =
- conn.query(ns, query, 0, 0, 0, QueryOption_CursorTailable);
-
- while( 1 ) {
- if( !c->more() ) {
- if( c->isDead() ) {
- break; // we need to requery
- }
-
- // all data (so far) exhausted, wait for more
- sleepsecs(1);
- continue;
- }
- BSONObj o = c->next();
- lastId = o["_id"];
- cout << o.toString() << endl;
- }
+ BSONElement lastId = minKey.firstElement();
+ Query query = Query();
+
+ auto_ptr<DBClientCursor> c =
+ conn.query(ns, query, 0, 0, 0, QueryOption_CursorTailable);
+
+ while( 1 ) {
+ if( !c->more() ) {
+ if( c->isDead() ) {
+ break; // we need to requery
+ }
+
+ // all data (so far) exhausted, wait for more
+ sleepsecs(1);
+ continue;
+ }
+ BSONObj o = c->next();
+ lastId = o["_id"];
+ cout << o.toString() << endl;
+ }
}
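
Worth noting: tail() records lastId but, as written, never uses it. When the
cursor dies the caller must requery, and a fuller version would restart from
{ _id : { $gt : lastId } } so nothing is skipped or repeated. A sketch of the
outer loop (capped collection name is illustrative):

    DBClientConnection conn;
    conn.connect( "localhost" );
    while ( true ) {
        tail( conn , "test.capped" );   // returns once the tailable cursor dies
        sleepsecs( 1 );                 // back off, then requery and resume
    }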
diff --git a/client/examples/tutorial.cpp b/client/examples/tutorial.cpp
index 28e1b273370..3cdf3593cd8 100644
--- a/client/examples/tutorial.cpp
+++ b/client/examples/tutorial.cpp
@@ -23,45 +23,45 @@
using namespace mongo;
void printIfAge(DBClientConnection& c, int age) {
- auto_ptr<DBClientCursor> cursor = c.query("tutorial.persons", QUERY( "age" << age ).sort("name") );
- while( cursor->more() ) {
- BSONObj p = cursor->next();
- cout << p.getStringField("name") << endl;
- }
+ auto_ptr<DBClientCursor> cursor = c.query("tutorial.persons", QUERY( "age" << age ).sort("name") );
+ while( cursor->more() ) {
+ BSONObj p = cursor->next();
+ cout << p.getStringField("name") << endl;
+ }
}
void run() {
- DBClientConnection c;
- c.connect("localhost"); //"192.168.58.1");
- cout << "connected ok" << endl;
- BSONObj p = BSON( "name" << "Joe" << "age" << 33 );
- c.insert("tutorial.persons", p);
- p = BSON( "name" << "Jane" << "age" << 40 );
- c.insert("tutorial.persons", p);
- p = BSON( "name" << "Abe" << "age" << 33 );
- c.insert("tutorial.persons", p);
- p = BSON( "name" << "Samantha" << "age" << 21 << "city" << "Los Angeles" << "state" << "CA" );
- c.insert("tutorial.persons", p);
+ DBClientConnection c;
+ c.connect("localhost"); //"192.168.58.1");
+ cout << "connected ok" << endl;
+ BSONObj p = BSON( "name" << "Joe" << "age" << 33 );
+ c.insert("tutorial.persons", p);
+ p = BSON( "name" << "Jane" << "age" << 40 );
+ c.insert("tutorial.persons", p);
+ p = BSON( "name" << "Abe" << "age" << 33 );
+ c.insert("tutorial.persons", p);
+ p = BSON( "name" << "Samantha" << "age" << 21 << "city" << "Los Angeles" << "state" << "CA" );
+ c.insert("tutorial.persons", p);
- c.ensureIndex("tutorial.persons", fromjson("{age:1}"));
+ c.ensureIndex("tutorial.persons", fromjson("{age:1}"));
- cout << "count:" << c.count("tutorial.persons") << endl;
+ cout << "count:" << c.count("tutorial.persons") << endl;
- auto_ptr<DBClientCursor> cursor = c.query("tutorial.persons", BSONObj());
- while( cursor->more() ) {
- cout << cursor->next().toString() << endl;
- }
+ auto_ptr<DBClientCursor> cursor = c.query("tutorial.persons", BSONObj());
+ while( cursor->more() ) {
+ cout << cursor->next().toString() << endl;
+ }
- cout << "\nprintifage:\n";
- printIfAge(c, 33);
+ cout << "\nprintifage:\n";
+ printIfAge(c, 33);
}
-int main() {
- try {
- run();
- }
- catch( DBException &e ) {
- cout << "caught " << e.what() << endl;
- }
- return 0;
+int main() {
+ try {
+ run();
+ }
+ catch( DBException &e ) {
+ cout << "caught " << e.what() << endl;
+ }
+ return 0;
}
diff --git a/client/examples/whereExample.cpp b/client/examples/whereExample.cpp
index 69241b5f2d4..ce4174b1c71 100644
--- a/client/examples/whereExample.cpp
+++ b/client/examples/whereExample.cpp
@@ -23,7 +23,7 @@ using namespace std;
using namespace mongo;
int main( int argc, const char **argv ) {
-
+
const char *port = "27017";
if ( argc != 1 ) {
if ( argc != 3 )
@@ -36,7 +36,7 @@ int main( int argc, const char **argv ) {
if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) {
cout << "couldn't connect : " << errmsg << endl;
throw -11;
- }
+ }
const char * ns = "test.where";
@@ -44,9 +44,9 @@ int main( int argc, const char **argv ) {
conn.insert( ns , BSON( "name" << "eliot" << "num" << 17 ) );
conn.insert( ns , BSON( "name" << "sara" << "num" << 24 ) );
-
+
auto_ptr<DBClientCursor> cursor = conn.query( ns , BSONObj() );
-
+
while ( cursor->more() ) {
BSONObj obj = cursor->next();
cout << "\t" << obj.jsonString() << endl;
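
The $where predicate this example file is named for would be attached through
the query object; a sketch, assuming the driver's Query::where() helper:

    // runs the JavaScript predicate server-side against each candidate document
    auto_ptr<DBClientCursor> whereCursor =
        conn.query( ns , Query( "{}" ).where( "this.num > 20" ) );
    while ( whereCursor->more() )
        cout << "\t" << whereCursor->next().jsonString() << endl;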
diff --git a/client/gridfs.cpp b/client/gridfs.cpp
index d740c765d7f..233724ae8e5 100644
--- a/client/gridfs.cpp
+++ b/client/gridfs.cpp
@@ -34,11 +34,11 @@ namespace mongo {
const unsigned DEFAULT_CHUNK_SIZE = 256 * 1024;
- GridFSChunk::GridFSChunk( BSONObj o ){
+ GridFSChunk::GridFSChunk( BSONObj o ) {
_data = o;
}
- GridFSChunk::GridFSChunk( BSONObj fileObject , int chunkNumber , const char * data , int len ){
+ GridFSChunk::GridFSChunk( BSONObj fileObject , int chunkNumber , const char * data , int len ) {
BSONObjBuilder b;
b.appendAs( fileObject["_id"] , "files_id" );
b.append( "n" , chunkNumber );
@@ -47,7 +47,7 @@ namespace mongo {
}
- GridFS::GridFS( DBClientBase& client , const string& dbName , const string& prefix ) : _client( client ) , _dbName( dbName ) , _prefix( prefix ){
+ GridFS::GridFS( DBClientBase& client , const string& dbName , const string& prefix ) : _client( client ) , _dbName( dbName ) , _prefix( prefix ) {
_filesNS = dbName + "." + prefix + ".files";
_chunksNS = dbName + "." + prefix + ".chunks";
_chunkSize = DEFAULT_CHUNK_SIZE;
@@ -56,7 +56,7 @@ namespace mongo {
client.ensureIndex( _chunksNS , BSON( "files_id" << 1 << "n" << 1 ) );
}
- GridFS::~GridFS(){
+ GridFS::~GridFS() {
}
@@ -65,7 +65,7 @@ namespace mongo {
_chunkSize = size;
}
- BSONObj GridFS::storeFile( const char* data , size_t length , const string& remoteName , const string& contentType){
+ BSONObj GridFS::storeFile( const char* data , size_t length , const string& remoteName , const string& contentType) {
char const * const end = data + length;
OID id;
@@ -73,7 +73,7 @@ namespace mongo {
BSONObj idObj = BSON("_id" << id);
int chunkNumber = 0;
- while (data < end){
+ while (data < end) {
int chunkLen = MIN(_chunkSize, (unsigned)(end-data));
GridFSChunk c(idObj, chunkNumber, data, chunkLen);
_client.insert( _chunksNS.c_str() , c._data );
@@ -86,7 +86,7 @@ namespace mongo {
}
- BSONObj GridFS::storeFile( const string& fileName , const string& remoteName , const string& contentType){
+ BSONObj GridFS::storeFile( const string& fileName , const string& remoteName , const string& contentType) {
uassert( 10012 , "file doesn't exist" , fileName == "-" || boost::filesystem::exists( fileName ) );
FILE* fd;
@@ -102,12 +102,12 @@ namespace mongo {
int chunkNumber = 0;
gridfs_offset length = 0;
- while (!feof(fd)){
+ while (!feof(fd)) {
//boost::scoped_array<char>buf (new char[_chunkSize+1]);
char * buf = new char[_chunkSize+1];
char* bufPos = buf;//.get();
unsigned int chunkLen = 0; // how much in the chunk now
- while(chunkLen != _chunkSize && !feof(fd)){
+ while(chunkLen != _chunkSize && !feof(fd)) {
int readLen = fread(bufPos, 1, _chunkSize - chunkLen, fd);
chunkLen += readLen;
bufPos += readLen;
@@ -125,11 +125,11 @@ namespace mongo {
if (fd != stdin)
fclose( fd );
-
+
return insertFile((remoteName.empty() ? fileName : remoteName), id, length, contentType);
}
- BSONObj GridFS::insertFile(const string& name, const OID& id, gridfs_offset length, const string& contentType){
+ BSONObj GridFS::insertFile(const string& name, const OID& id, gridfs_offset length, const string& contentType) {
BSONObj res;
if ( ! _client.runCommand( _dbName.c_str() , BSON( "filemd5" << id << "root" << _prefix ) , res ) )
@@ -143,9 +143,10 @@ namespace mongo {
<< "md5" << res["md5"]
;
- if (length < 1024*1024*1024){ // 2^30
+ if (length < 1024*1024*1024) { // 2^30
file << "length" << (int) length;
- }else{
+ }
+ else {
file << "length" << (long long) length;
}
@@ -158,9 +159,9 @@ namespace mongo {
return ret;
}
- void GridFS::removeFile( const string& fileName ){
+ void GridFS::removeFile( const string& fileName ) {
auto_ptr<DBClientCursor> files = _client.query( _filesNS , BSON( "filename" << fileName ) );
- while (files->more()){
+ while (files->more()) {
BSONObj file = files->next();
BSONElement id = file["_id"];
_client.remove( _filesNS.c_str() , BSON( "_id" << id ) );
@@ -168,38 +169,38 @@ namespace mongo {
}
}
- GridFile::GridFile( GridFS * grid , BSONObj obj ){
+ GridFile::GridFile( GridFS * grid , BSONObj obj ) {
_grid = grid;
_obj = obj;
}
- GridFile GridFS::findFile( const string& fileName ){
+ GridFile GridFS::findFile( const string& fileName ) {
return findFile( BSON( "filename" << fileName ) );
};
- GridFile GridFS::findFile( BSONObj query ){
+ GridFile GridFS::findFile( BSONObj query ) {
query = BSON("query" << query << "orderby" << BSON("uploadDate" << -1));
return GridFile( this , _client.findOne( _filesNS.c_str() , query ) );
}
- auto_ptr<DBClientCursor> GridFS::list(){
+ auto_ptr<DBClientCursor> GridFS::list() {
return _client.query( _filesNS.c_str() , BSONObj() );
}
- auto_ptr<DBClientCursor> GridFS::list( BSONObj o ){
+ auto_ptr<DBClientCursor> GridFS::list( BSONObj o ) {
return _client.query( _filesNS.c_str() , o );
}
- BSONObj GridFile::getMetadata(){
+ BSONObj GridFile::getMetadata() {
BSONElement meta_element = _obj["metadata"];
- if( meta_element.eoo() ){
+ if( meta_element.eoo() ) {
return BSONObj();
}
return meta_element.embeddedObject();
}
- GridFSChunk GridFile::getChunk( int n ){
+ GridFSChunk GridFile::getChunk( int n ) {
_exists();
BSONObjBuilder b;
b.appendAs( _obj["_id"] , "files_id" );
@@ -210,12 +211,12 @@ namespace mongo {
return GridFSChunk(o);
}
- gridfs_offset GridFile::write( ostream & out ){
+ gridfs_offset GridFile::write( ostream & out ) {
_exists();
const int num = getNumChunks();
- for ( int i=0; i<num; i++ ){
+ for ( int i=0; i<num; i++ ) {
GridFSChunk c = getChunk( i );
int len;
@@ -226,17 +227,18 @@ namespace mongo {
return getContentLength();
}
- gridfs_offset GridFile::write( const string& where ){
- if (where == "-"){
+ gridfs_offset GridFile::write( const string& where ) {
+ if (where == "-") {
return write( cout );
- } else {
+ }
+ else {
ofstream out(where.c_str() , ios::out | ios::binary );
uassert(13325, "couldn't open file: " + where, out.is_open() );
return write( out );
}
}
- void GridFile::_exists(){
+ void GridFile::_exists() {
        uassert( 10015 , "doesn't exist" , exists() );
}
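
Putting the pieces together: storeFile() chunks and inserts, findFile() fetches
the newest match by uploadDate, and write() streams the chunks back out. A
sketch (paths and database name are illustrative; "fs" is the conventional
prefix):

    DBClientConnection conn;
    conn.connect( "localhost" );
    GridFS gfs( conn , "test" , "fs" );

    gfs.storeFile( "/tmp/report.pdf" , "report.pdf" , "application/pdf" );
    GridFile f = gfs.findFile( "report.pdf" );
    if ( f.exists() )
        f.write( "/tmp/copy.pdf" );   // pass "-" to stream to stdout instead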
diff --git a/client/gridfs.h b/client/gridfs.h
index b58cb76ab94..b52cf75117a 100644
--- a/client/gridfs.h
+++ b/client/gridfs.h
@@ -32,13 +32,13 @@ namespace mongo {
GridFSChunk( BSONObj data );
GridFSChunk( BSONObj fileId , int chunkNumber , const char * data , int len );
- int len(){
+ int len() {
int len;
_data["data"].binDataClean( len );
return len;
}
- const char * data( int & len ){
+ const char * data( int & len ) {
return _data["data"].binDataClean( len );
}
@@ -140,41 +140,41 @@ namespace mongo {
* @return whether or not this file exists
     * findFile will always return a GridFile, so you need to check this
*/
- bool exists(){
+ bool exists() {
return ! _obj.isEmpty();
}
- string getFilename(){
+ string getFilename() {
return _obj["filename"].str();
}
- int getChunkSize(){
+ int getChunkSize() {
return (int)(_obj["chunkSize"].number());
}
- gridfs_offset getContentLength(){
+ gridfs_offset getContentLength() {
return (gridfs_offset)(_obj["length"].number());
}
- string getContentType(){
+ string getContentType() {
return _obj["contentType"].valuestr();
}
- Date_t getUploadDate(){
+ Date_t getUploadDate() {
return _obj["uploadDate"].date();
}
- string getMD5(){
+ string getMD5() {
return _obj["md5"].str();
}
- BSONElement getFileField( const string& name ){
+ BSONElement getFileField( const string& name ) {
return _obj[name];
}
BSONObj getMetadata();
- int getNumChunks(){
+ int getNumChunks() {
return (int) ceil( (double)getContentLength() / (double)getChunkSize() );
}
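
These accessors read straight out of the files document; for example, a quick
summary of a GridFile fetched as in the previous sketch:

    if ( f.exists() ) {
        cout << f.getFilename() << ": " << f.getContentLength() << " bytes in "
             << f.getNumChunks() << " chunk(s), md5 " << f.getMD5() << endl;
    }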
diff --git a/client/model.cpp b/client/model.cpp
index 7861b915696..bd10a3c5528 100644
--- a/client/model.cpp
+++ b/client/model.cpp
@@ -21,23 +21,23 @@
namespace mongo {
- bool Model::load(BSONObj& query){
+ bool Model::load(BSONObj& query) {
ScopedDbConnection conn( modelServer() );
BSONObj b = conn->findOne(getNS(), query);
conn.done();
-
+
if ( b.isEmpty() )
return false;
-
+
unserialize(b);
_id = b["_id"].wrap().getOwned();
return true;
}
- void Model::remove( bool safe ){
+ void Model::remove( bool safe ) {
uassert( 10016 , "_id isn't set - needed for remove()" , _id["_id"].type() );
-
+
ScopedDbConnection conn( modelServer() );
conn->remove( getNS() , _id );
@@ -46,34 +46,34 @@ namespace mongo {
errmsg = conn->getLastError();
conn.done();
-
+
if ( safe && errmsg.size() )
throw UserException( 9002 , (string)"error on Model::remove: " + errmsg );
}
- void Model::save( bool safe ){
+ void Model::save( bool safe ) {
ScopedDbConnection conn( modelServer() );
BSONObjBuilder b;
serialize( b );
-
+
BSONElement myId;
{
BSONObjIterator i = b.iterator();
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
- if ( strcmp( e.fieldName() , "_id" ) == 0 ){
+ if ( strcmp( e.fieldName() , "_id" ) == 0 ) {
myId = e;
break;
}
}
}
- if ( myId.type() ){
- if ( _id.isEmpty() ){
+ if ( myId.type() ) {
+ if ( _id.isEmpty() ) {
_id = myId.wrap();
}
- else if ( myId.woCompare( _id.firstElement() ) ){
+ else if ( myId.woCompare( _id.firstElement() ) ) {
stringstream ss;
ss << "_id from serialize and stored differ: ";
ss << '[' << myId << "] != ";
@@ -82,11 +82,11 @@ namespace mongo {
}
}
- if ( _id.isEmpty() ){
+ if ( _id.isEmpty() ) {
OID oid;
oid.init();
b.appendOID( "_id" , &oid );
-
+
BSONObj o = b.obj();
conn->insert( getNS() , o );
_id = o["_id"].wrap().getOwned();
@@ -94,25 +94,25 @@ namespace mongo {
log(4) << "inserted new model " << getNS() << " " << o << endl;
}
else {
- if ( myId.eoo() ){
+ if ( myId.eoo() ) {
myId = _id["_id"];
b.append( myId );
}
-
+
assert( ! myId.eoo() );
BSONObjBuilder qb;
qb.append( myId );
-
+
BSONObj q = qb.obj();
BSONObj o = b.obj();
log(4) << "updated model" << getNS() << " " << q << " " << o << endl;
conn->update( getNS() , q , o , true );
-
+
}
-
+
string errmsg = "";
if ( safe )
errmsg = conn->getLastError();
@@ -123,13 +123,13 @@ namespace mongo {
throw UserException( 9003 , (string)"error on Model::save: " + errmsg );
}
- BSONObj Model::toObject(){
+ BSONObj Model::toObject() {
BSONObjBuilder b;
serialize( b );
return b.obj();
}
- void Model::append( const char * name , BSONObjBuilder& b ){
+ void Model::append( const char * name , BSONObjBuilder& b ) {
BSONObjBuilder bb( b.subobjStart( name ) );
serialize( bb );
bb.done();
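
Model is the load/save half of a tiny ORM: subclasses provide serialization and
addressing, the base class does the I/O. A sketch of a concrete subclass (field
names, collection, and server address are illustrative; getNS() is assumed to
be the pure virtual that load()/save() above call):

    class Person : public Model {
    public:
        string name;
        int age;

        virtual string getNS() { return "test.people"; }
        virtual string modelServer() { return "localhost:27017"; }
        virtual void serialize( BSONObjBuilder& b ) {
            b.append( "name" , name );
            b.append( "age" , age );
        }
        virtual void unserialize( const BSONObj& from ) {
            name = from["name"].str();
            age = from["age"].numberInt();
        }
    };

    Person p;
    p.name = "eliot";
    p.age = 15;
    p.save();   // inserts, then remembers the generated _id for later updates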
diff --git a/client/model.h b/client/model.h
index 108efc06ba7..7dd31434f49 100644
--- a/client/model.h
+++ b/client/model.h
@@ -43,16 +43,16 @@ namespace mongo {
virtual void unserialize(const BSONObj& from) = 0;
virtual BSONObj toObject();
virtual void append( const char * name , BSONObjBuilder& b );
-
+
virtual string modelServer() = 0;
-
- /** Load a single object.
+
+ /** Load a single object.
@return true if successful.
*/
virtual bool load(BSONObj& query);
virtual void save( bool safe=false );
virtual void remove( bool safe=false );
-
+
protected:
BSONObj _id;
};
diff --git a/client/mongo_client_lib.cpp b/client/mongo_client_lib.cpp
index 626590dd1b6..69f801aac1d 100755..100644
--- a/client/mongo_client_lib.cpp
+++ b/client/mongo_client_lib.cpp
@@ -2,9 +2,9 @@
MongoDB C++ Driver
- Normally one includes dbclient.h, and links against libmongoclient.a, when connecting to MongoDB
- from C++. However, if you have a situation where the pre-built library does not work, you can use
- this file instead to build all the necessary symbols. To do so, include mongo_client_lib.cpp in your
+ Normally one includes dbclient.h, and links against libmongoclient.a, when connecting to MongoDB
+ from C++. However, if you have a situation where the pre-built library does not work, you can use
+ this file instead to build all the necessary symbols. To do so, include mongo_client_lib.cpp in your
project.
   For example, to build simple_client_demo.cpp with GCC and run it:
@@ -30,7 +30,7 @@
#include "../util/md5main.cpp"
-#define MONGO_EXPOSE_MACROS
+#define MONGO_EXPOSE_MACROS
#include "../pch.h"
#include "../util/assert_util.cpp"
@@ -60,7 +60,7 @@
#include "../db/nonce.cpp"
#include "../db/commands.cpp"
-extern "C" {
+extern "C" {
#include "../util/md5.c"
}
diff --git a/client/parallel.cpp b/client/parallel.cpp
index 7649821ddc4..14f0fa450d2 100644
--- a/client/parallel.cpp
+++ b/client/parallel.cpp
@@ -25,10 +25,10 @@
#include "../s/shard.h"
namespace mongo {
-
+
// -------- ClusteredCursor -----------
-
- ClusteredCursor::ClusteredCursor( QueryMessage& q ){
+
+ ClusteredCursor::ClusteredCursor( QueryMessage& q ) {
_ns = q.ns;
_query = q.query.copy();
_options = q.queryOptions;
@@ -41,7 +41,7 @@ namespace mongo {
_didInit = false;
}
- ClusteredCursor::ClusteredCursor( const string& ns , const BSONObj& q , int options , const BSONObj& fields ){
+ ClusteredCursor::ClusteredCursor( const string& ns , const BSONObj& q , int options , const BSONObj& fields ) {
_ns = ns;
_query = q.getOwned();
_options = options;
@@ -52,50 +52,50 @@ namespace mongo {
_didInit = false;
}
- ClusteredCursor::~ClusteredCursor(){
+ ClusteredCursor::~ClusteredCursor() {
_done = true; // just in case
}
- void ClusteredCursor::init(){
+ void ClusteredCursor::init() {
if ( _didInit )
return;
_didInit = true;
_init();
}
-
- auto_ptr<DBClientCursor> ClusteredCursor::query( const string& server , int num , BSONObj extra , int skipLeft ){
+
+ auto_ptr<DBClientCursor> ClusteredCursor::query( const string& server , int num , BSONObj extra , int skipLeft ) {
uassert( 10017 , "cursor already done" , ! _done );
assert( _didInit );
-
+
BSONObj q = _query;
- if ( ! extra.isEmpty() ){
+ if ( ! extra.isEmpty() ) {
q = concatQuery( q , extra );
}
ShardConnection conn( server , _ns );
-
- if ( conn.setVersion() ){
+
+ if ( conn.setVersion() ) {
conn.done();
throw StaleConfigException( _ns , "ClusteredCursor::query ShardConnection had to change" , true );
}
- if ( logLevel >= 5 ){
- log(5) << "ClusteredCursor::query (" << type() << ") server:" << server
- << " ns:" << _ns << " query:" << q << " num:" << num
+ if ( logLevel >= 5 ) {
+ log(5) << "ClusteredCursor::query (" << type() << ") server:" << server
+ << " ns:" << _ns << " query:" << q << " num:" << num
<< " _fields:" << _fields << " options: " << _options << endl;
}
-
- auto_ptr<DBClientCursor> cursor =
+
+ auto_ptr<DBClientCursor> cursor =
conn->query( _ns , q , num , 0 , ( _fields.isEmpty() ? 0 : &_fields ) , _options , _batchSize == 0 ? 0 : _batchSize + skipLeft );
assert( cursor.get() );
-
- if ( cursor->hasResultFlag( ResultFlag_ShardConfigStale ) ){
+
+ if ( cursor->hasResultFlag( ResultFlag_ShardConfigStale ) ) {
conn.done();
throw StaleConfigException( _ns , "ClusteredCursor::query" );
}
-
- if ( cursor->hasResultFlag( ResultFlag_ErrSet ) ){
+
+ if ( cursor->hasResultFlag( ResultFlag_ErrSet ) ) {
conn.done();
BSONObj o = cursor->next();
throw UserException( o["code"].numberInt() , o["$err"].String() );
@@ -108,9 +108,9 @@ namespace mongo {
return cursor;
}
- BSONObj ClusteredCursor::explain( const string& server , BSONObj extra ){
+ BSONObj ClusteredCursor::explain( const string& server , BSONObj extra ) {
BSONObj q = _query;
- if ( ! extra.isEmpty() ){
+ if ( ! extra.isEmpty() ) {
q = concatQuery( q , extra );
}
@@ -124,26 +124,26 @@ namespace mongo {
return o;
}
- BSONObj ClusteredCursor::concatQuery( const BSONObj& query , const BSONObj& extraFilter ){
+ BSONObj ClusteredCursor::concatQuery( const BSONObj& query , const BSONObj& extraFilter ) {
if ( ! query.hasField( "query" ) )
return _concatFilter( query , extraFilter );
BSONObjBuilder b;
BSONObjIterator i( query );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
- if ( strcmp( e.fieldName() , "query" ) ){
+ if ( strcmp( e.fieldName() , "query" ) ) {
b.append( e );
continue;
}
-
+
b.append( "query" , _concatFilter( e.embeddedObjectUserCheck() , extraFilter ) );
}
return b.obj();
}
-
- BSONObj ClusteredCursor::_concatFilter( const BSONObj& filter , const BSONObj& extra ){
+
+ BSONObj ClusteredCursor::_concatFilter( const BSONObj& filter , const BSONObj& extra ) {
BSONObjBuilder b;
b.appendElements( filter );
b.appendElements( extra );
@@ -151,7 +151,7 @@ namespace mongo {
            // TODO: ideally, should do some simplification here if possible
}
- BSONObj ClusteredCursor::explain(){
+ BSONObj ClusteredCursor::explain() {
// Note: by default we filter out allPlans and oldPlan in the shell's
// explain() function. If you add any recursive structures, make sure to
// edit the JS to make sure everything gets filtered.
@@ -167,25 +167,25 @@ namespace mongo {
map<string,list<BSONObj> > out;
{
_explain( out );
-
+
BSONObjBuilder x( b.subobjStart( "shards" ) );
- for ( map<string,list<BSONObj> >::iterator i=out.begin(); i!=out.end(); ++i ){
+ for ( map<string,list<BSONObj> >::iterator i=out.begin(); i!=out.end(); ++i ) {
string shard = i->first;
list<BSONObj> l = i->second;
BSONArrayBuilder y( x.subarrayStart( shard ) );
- for ( list<BSONObj>::iterator j=l.begin(); j!=l.end(); ++j ){
+ for ( list<BSONObj>::iterator j=l.begin(); j!=l.end(); ++j ) {
BSONObj temp = *j;
y.append( temp );
-
+
BSONObjIterator k( temp );
- while ( k.more() ){
+ while ( k.more() ) {
BSONElement z = k.next();
if ( z.fieldName()[0] != 'n' )
continue;
long long& c = counters[z.fieldName()];
c += z.numberLong();
}
-
+
millis += temp["millis"].numberLong();
numExplains++;
}
@@ -204,37 +204,37 @@ namespace mongo {
return b.obj();
}
-
+
// -------- FilteringClientCursor -----------
FilteringClientCursor::FilteringClientCursor( const BSONObj filter )
- : _matcher( filter ) , _done( true ){
+ : _matcher( filter ) , _done( true ) {
}
FilteringClientCursor::FilteringClientCursor( auto_ptr<DBClientCursor> cursor , const BSONObj filter )
- : _matcher( filter ) , _cursor( cursor ) , _done( cursor.get() == 0 ){
+ : _matcher( filter ) , _cursor( cursor ) , _done( cursor.get() == 0 ) {
}
-
- FilteringClientCursor::~FilteringClientCursor(){
+
+ FilteringClientCursor::~FilteringClientCursor() {
}
-
- void FilteringClientCursor::reset( auto_ptr<DBClientCursor> cursor ){
+
+ void FilteringClientCursor::reset( auto_ptr<DBClientCursor> cursor ) {
_cursor = cursor;
_next = BSONObj();
_done = _cursor.get() == 0;
}
- bool FilteringClientCursor::more(){
+ bool FilteringClientCursor::more() {
if ( ! _next.isEmpty() )
return true;
-
+
if ( _done )
return false;
-
+
_advance();
return ! _next.isEmpty();
}
-
- BSONObj FilteringClientCursor::next(){
+
+ BSONObj FilteringClientCursor::next() {
assert( ! _next.isEmpty() );
assert( ! _done );
@@ -244,20 +244,20 @@ namespace mongo {
return ret;
}
- BSONObj FilteringClientCursor::peek(){
+ BSONObj FilteringClientCursor::peek() {
if ( _next.isEmpty() )
_advance();
return _next;
}
-
- void FilteringClientCursor::_advance(){
+
+ void FilteringClientCursor::_advance() {
assert( _next.isEmpty() );
if ( ! _cursor.get() || _done )
return;
-
- while ( _cursor->more() ){
+
+ while ( _cursor->more() ) {
_next = _cursor->next();
- if ( _matcher.matches( _next ) ){
+ if ( _matcher.matches( _next ) ) {
if ( ! _cursor->moreInCurrentBatch() )
_next = _next.getOwned();
return;
@@ -266,53 +266,53 @@ namespace mongo {
}
_done = true;
}
-
+
// -------- SerialServerClusteredCursor -----------
-
- SerialServerClusteredCursor::SerialServerClusteredCursor( const set<ServerAndQuery>& servers , QueryMessage& q , int sortOrder) : ClusteredCursor( q ){
+
+ SerialServerClusteredCursor::SerialServerClusteredCursor( const set<ServerAndQuery>& servers , QueryMessage& q , int sortOrder) : ClusteredCursor( q ) {
for ( set<ServerAndQuery>::const_iterator i = servers.begin(); i!=servers.end(); i++ )
_servers.push_back( *i );
-
+
if ( sortOrder > 0 )
sort( _servers.begin() , _servers.end() );
else if ( sortOrder < 0 )
sort( _servers.rbegin() , _servers.rend() );
-
+
_serverIndex = 0;
_needToSkip = q.ntoskip;
}
-
- bool SerialServerClusteredCursor::more(){
-
+
+ bool SerialServerClusteredCursor::more() {
+
// TODO: optimize this by sending on first query and then back counting
// tricky in case where 1st server doesn't have any after
// need it to send n skipped
- while ( _needToSkip > 0 && _current.more() ){
+ while ( _needToSkip > 0 && _current.more() ) {
_current.next();
_needToSkip--;
}
-
+
if ( _current.more() )
return true;
-
- if ( _serverIndex >= _servers.size() ){
+
+ if ( _serverIndex >= _servers.size() ) {
return false;
}
-
+
ServerAndQuery& sq = _servers[_serverIndex++];
_current.reset( query( sq._server , 0 , sq._extra ) );
return more();
}
-
- BSONObj SerialServerClusteredCursor::next(){
+
+ BSONObj SerialServerClusteredCursor::next() {
uassert( 10018 , "no more items" , more() );
return _current.next();
}
- void SerialServerClusteredCursor::_explain( map< string,list<BSONObj> >& out ){
- for ( unsigned i=0; i<_servers.size(); i++ ){
+ void SerialServerClusteredCursor::_explain( map< string,list<BSONObj> >& out ) {
+ for ( unsigned i=0; i<_servers.size(); i++ ) {
ServerAndQuery& sq = _servers[i];
list<BSONObj> & l = out[sq._server];
l.push_back( explain( sq._server , sq._extra ) );
@@ -320,29 +320,29 @@ namespace mongo {
}
// -------- ParallelSortClusteredCursor -----------
-
- ParallelSortClusteredCursor::ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , QueryMessage& q ,
- const BSONObj& sortKey )
- : ClusteredCursor( q ) , _servers( servers ){
+
+ ParallelSortClusteredCursor::ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , QueryMessage& q ,
+ const BSONObj& sortKey )
+ : ClusteredCursor( q ) , _servers( servers ) {
_sortKey = sortKey.getOwned();
_needToSkip = q.ntoskip;
_finishCons();
}
- ParallelSortClusteredCursor::ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , const string& ns ,
- const Query& q ,
- int options , const BSONObj& fields )
- : ClusteredCursor( ns , q.obj , options , fields ) , _servers( servers ){
+ ParallelSortClusteredCursor::ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , const string& ns ,
+ const Query& q ,
+ int options , const BSONObj& fields )
+ : ClusteredCursor( ns , q.obj , options , fields ) , _servers( servers ) {
_sortKey = q.getSort().copy();
_needToSkip = 0;
_finishCons();
}
- void ParallelSortClusteredCursor::_finishCons(){
+ void ParallelSortClusteredCursor::_finishCons() {
_numServers = _servers.size();
_cursors = 0;
- if ( ! _sortKey.isEmpty() && ! _fields.isEmpty() ){
+ if ( ! _sortKey.isEmpty() && ! _fields.isEmpty() ) {
            // we need to make sure the sort key is in the projection
set<string> sortKeyFields;
@@ -352,7 +352,7 @@ namespace mongo {
bool isNegative = false;
{
BSONObjIterator i( _fields );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
b.append( e );
@@ -368,93 +368,94 @@ namespace mongo {
if ( ! e.trueValue() ) {
                        uassert( 13431 , "can't remove the sort key from the projection" , !found && begin == end );
- } else if (!e.isABSONObj()) {
+ }
+ else if (!e.isABSONObj()) {
isNegative = true;
}
}
- }
-
- if (isNegative){
- for (set<string>::const_iterator it(sortKeyFields.begin()), end(sortKeyFields.end()); it != end; ++it){
+ }
+
+ if (isNegative) {
+ for (set<string>::const_iterator it(sortKeyFields.begin()), end(sortKeyFields.end()); it != end; ++it) {
b.append(*it, 1);
}
}
-
+
_fields = b.obj();
}
}
-
- void ParallelSortClusteredCursor::_init(){
+
+ void ParallelSortClusteredCursor::_init() {
assert( ! _cursors );
_cursors = new FilteringClientCursor[_numServers];
-
+
        // TODO: parallelize
int num = 0;
- for ( set<ServerAndQuery>::iterator i = _servers.begin(); i!=_servers.end(); ++i ){
+ for ( set<ServerAndQuery>::iterator i = _servers.begin(); i!=_servers.end(); ++i ) {
const ServerAndQuery& sq = *i;
_cursors[num++].reset( query( sq._server , 0 , sq._extra , _needToSkip ) );
}
-
+
}
-
- ParallelSortClusteredCursor::~ParallelSortClusteredCursor(){
+
+ ParallelSortClusteredCursor::~ParallelSortClusteredCursor() {
delete [] _cursors;
_cursors = 0;
}
- bool ParallelSortClusteredCursor::more(){
+ bool ParallelSortClusteredCursor::more() {
- if ( _needToSkip > 0 ){
+ if ( _needToSkip > 0 ) {
int n = _needToSkip;
_needToSkip = 0;
- while ( n > 0 && more() ){
+ while ( n > 0 && more() ) {
BSONObj x = next();
n--;
}
_needToSkip = n;
}
-
- for ( int i=0; i<_numServers; i++ ){
+
+ for ( int i=0; i<_numServers; i++ ) {
if ( _cursors[i].more() )
return true;
}
return false;
}
-
- BSONObj ParallelSortClusteredCursor::next(){
+
+ BSONObj ParallelSortClusteredCursor::next() {
BSONObj best = BSONObj();
int bestFrom = -1;
-
- for ( int i=0; i<_numServers; i++){
+
+ for ( int i=0; i<_numServers; i++) {
if ( ! _cursors[i].more() )
continue;
-
+
BSONObj me = _cursors[i].peek();
- if ( best.isEmpty() ){
+ if ( best.isEmpty() ) {
best = me;
bestFrom = i;
continue;
}
-
+
int comp = best.woSortOrder( me , _sortKey , true );
if ( comp < 0 )
continue;
-
+
best = me;
bestFrom = i;
}
-
+
uassert( 10019 , "no more elements" , ! best.isEmpty() );
_cursors[bestFrom].next();
-
+
return best;
}
- void ParallelSortClusteredCursor::_explain( map< string,list<BSONObj> >& out ){
- for ( set<ServerAndQuery>::iterator i=_servers.begin(); i!=_servers.end(); ++i ){
+ void ParallelSortClusteredCursor::_explain( map< string,list<BSONObj> >& out ) {
+ for ( set<ServerAndQuery>::iterator i=_servers.begin(); i!=_servers.end(); ++i ) {
const ServerAndQuery& sq = *i;
list<BSONObj> & l = out[sq._server];
l.push_back( explain( sq._server , sq._extra ) );
@@ -466,39 +467,39 @@ namespace mongo {
// ---- Future -----
// -----------------
- Future::CommandResult::CommandResult( const string& server , const string& db , const BSONObj& cmd ){
+ Future::CommandResult::CommandResult( const string& server , const string& db , const BSONObj& cmd ) {
_server = server;
_db = db;
_cmd = cmd;
_done = false;
}
- bool Future::CommandResult::join(){
+ bool Future::CommandResult::join() {
_thr->join();
assert( _done );
return _ok;
}
- void Future::commandThread(shared_ptr<CommandResult> res){
+ void Future::commandThread(shared_ptr<CommandResult> res) {
setThreadName( "future" );
-
+
try {
ScopedDbConnection conn( res->_server );
res->_ok = conn->runCommand( res->_db , res->_cmd , res->_res );
conn.done();
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
error() << "Future::commandThread exception: " << e.what() << endl;
res->_ok = false;
}
res->_done = true;
}
- shared_ptr<Future::CommandResult> Future::spawnCommand( const string& server , const string& db , const BSONObj& cmd ){
+ shared_ptr<Future::CommandResult> Future::spawnCommand( const string& server , const string& db , const BSONObj& cmd ) {
shared_ptr<Future::CommandResult> res (new Future::CommandResult( server , db , cmd ));
res->_thr.reset( new boost::thread( boost::bind(Future::commandThread, res) ) );
-
+
return res;
}
-
+
}
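
Future::spawnCommand() above runs one command per background thread; join()
blocks and returns ok(). A sketch fanning the same command out to two servers
(addresses and the command are illustrative):

    shared_ptr<Future::CommandResult> a =
        Future::spawnCommand( "shard1:27017" , "admin" , BSON( "serverStatus" << 1 ) );
    shared_ptr<Future::CommandResult> b =
        Future::spawnCommand( "shard2:27017" , "admin" , BSON( "serverStatus" << 1 ) );

    bool aOk = a->join();   // blocks until the "future" thread finishes
    bool bOk = b->join();
    log() << "shard1 ok: " << aOk << " shard2 ok: " << bOk << endl;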
diff --git a/client/parallel.h b/client/parallel.h
index b131ac57f3e..e993e7cffa2 100644
--- a/client/parallel.h
+++ b/client/parallel.h
@@ -33,14 +33,14 @@ namespace mongo {
*/
class ServerAndQuery {
public:
- ServerAndQuery( const string& server , BSONObj extra = BSONObj() , BSONObj orderObject = BSONObj() ) :
- _server( server ) , _extra( extra.getOwned() ) , _orderObject( orderObject.getOwned() ){
+ ServerAndQuery( const string& server , BSONObj extra = BSONObj() , BSONObj orderObject = BSONObj() ) :
+ _server( server ) , _extra( extra.getOwned() ) , _orderObject( orderObject.getOwned() ) {
}
- bool operator<( const ServerAndQuery& other ) const{
+ bool operator<( const ServerAndQuery& other ) const {
if ( ! _orderObject.isEmpty() )
return _orderObject.woCompare( other._orderObject ) < 0;
-
+
if ( _server < other._server )
return true;
            if ( other._server < _server )
@@ -72,28 +72,28 @@ namespace mongo {
ClusteredCursor( QueryMessage& q );
ClusteredCursor( const string& ns , const BSONObj& q , int options=0 , const BSONObj& fields=BSONObj() );
virtual ~ClusteredCursor();
-
+
/** call before using */
void init();
-
+
virtual bool more() = 0;
virtual BSONObj next() = 0;
-
+
static BSONObj concatQuery( const BSONObj& query , const BSONObj& extraFilter );
-
+
virtual string type() const = 0;
virtual BSONObj explain();
protected:
-
+
virtual void _init() = 0;
auto_ptr<DBClientCursor> query( const string& server , int num = 0 , BSONObj extraFilter = BSONObj() , int skipLeft = 0 );
BSONObj explain( const string& server , BSONObj extraFilter = BSONObj() );
-
+
static BSONObj _concatFilter( const BSONObj& filter , const BSONObj& extraFilter );
-
+
virtual void _explain( map< string,list<BSONObj> >& out ) = 0;
string _ns;
@@ -113,19 +113,19 @@ namespace mongo {
FilteringClientCursor( const BSONObj filter = BSONObj() );
FilteringClientCursor( auto_ptr<DBClientCursor> cursor , const BSONObj filter = BSONObj() );
~FilteringClientCursor();
-
+
void reset( auto_ptr<DBClientCursor> cursor );
-
+
bool more();
BSONObj next();
-
+
BSONObj peek();
private:
void _advance();
-
+
Matcher _matcher;
auto_ptr<DBClientCursor> _cursor;
-
+
BSONObj _next;
bool _done;
};
@@ -133,22 +133,22 @@ namespace mongo {
class Servers {
public:
- Servers(){
+ Servers() {
}
-
- void add( const ServerAndQuery& s ){
+
+ void add( const ServerAndQuery& s ) {
add( s._server , s._extra );
}
-
- void add( const string& server , const BSONObj& filter ){
+
+ void add( const string& server , const BSONObj& filter ) {
vector<BSONObj>& mine = _filters[server];
mine.push_back( filter.getOwned() );
}
-
+
        // TODO: pick a less horrible name
class View {
- View( const Servers* s ){
- for ( map<string, vector<BSONObj> >::const_iterator i=s->_filters.begin(); i!=s->_filters.end(); ++i ){
+ View( const Servers* s ) {
+ for ( map<string, vector<BSONObj> >::const_iterator i=s->_filters.begin(); i!=s->_filters.end(); ++i ) {
_servers.push_back( i->first );
_filters.push_back( i->second );
}
@@ -165,7 +165,7 @@ namespace mongo {
vector<BSONObj> getFilter( int n ) const {
return _filters[ n ];
}
-
+
private:
vector<string> _servers;
vector< vector<BSONObj> > _filters;
@@ -176,7 +176,7 @@ namespace mongo {
View view() const {
return View( this );
}
-
+
private:
map<string, vector<BSONObj> > _filters;
@@ -199,13 +199,13 @@ namespace mongo {
protected:
virtual void _explain( map< string,list<BSONObj> >& out );
- void _init(){}
+ void _init() {}
vector<ServerAndQuery> _servers;
unsigned _serverIndex;
-
+
FilteringClientCursor _current;
-
+
int _needToSkip;
};
@@ -213,11 +213,11 @@ namespace mongo {
/**
     * runs a query in parallel across N servers
     * sorts the results
- */
+ */
class ParallelSortClusteredCursor : public ClusteredCursor {
public:
ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , QueryMessage& q , const BSONObj& sortKey );
- ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , const string& ns ,
+ ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , const string& ns ,
const Query& q , int options=0, const BSONObj& fields=BSONObj() );
virtual ~ParallelSortClusteredCursor();
virtual bool more();
@@ -232,7 +232,7 @@ namespace mongo {
int _numServers;
set<ServerAndQuery> _servers;
BSONObj _sortKey;
-
+
FilteringClientCursor * _cursors;
int _needToSkip;
};
@@ -246,11 +246,11 @@ namespace mongo {
public:
class CommandResult {
public:
-
+
string getServer() const { return _server; }
bool isDone() const { return _done; }
-
+
bool ok() const {
assert( _done );
return _ok;
@@ -266,30 +266,30 @@ namespace mongo {
returns ok()
*/
bool join();
-
+
private:
-
+
CommandResult( const string& server , const string& db , const BSONObj& cmd );
-
+
string _server;
string _db;
BSONObj _cmd;
scoped_ptr<boost::thread> _thr;
-
+
BSONObj _res;
bool _ok;
bool _done;
-
+
friend class Future;
};
-
+
static void commandThread(shared_ptr<CommandResult> res);
-
+
static shared_ptr<CommandResult> spawnCommand( const string& server , const string& db , const BSONObj& cmd );
};
-
+
}
#include "undef_macros.h"
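
As a usage note for the header above, FilteringClientCursor is driven by handing it a raw DBClientCursor and then iterating; _matcher drops non-matching documents inside _advance(), so more()/next() only ever surface matches. A minimal sketch, assuming a hypothetical local server and the stock DBClientConnection query API:

    DBClientConnection conn;
    conn.connect( "localhost" );                       // hypothetical server

    // only surface documents matching { x : { $gt : 5 } }
    FilteringClientCursor c( BSON( "x" << BSON( "$gt" << 5 ) ) );
    c.reset( conn.query( "test.foo", Query() ) );

    while ( c.more() )
        cout << c.next().toString() << endl;           // already filtered

peek() additionally lets a merging caller (such as ParallelSortClusteredCursor, which holds an array of these) compare the heads of several cursors before committing to next().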
diff --git a/client/simple_client_demo.cpp b/client/simple_client_demo.cpp
index f0c62fba46a..fa2f4a8ae11 100755..100644
--- a/client/simple_client_demo.cpp
+++ b/client/simple_client_demo.cpp
@@ -1,5 +1,5 @@
/* simple_client_demo.cpp
-
+
See also : http://www.mongodb.org/pages/viewpage.action?pageId=133415
How to build and run:
@@ -20,17 +20,17 @@ using namespace std;
using namespace mongo;
using namespace bson;
-int main() {
- cout << "connecting to localhost..." << endl;
- DBClientConnection c;
- c.connect("localhost");
- cout << "connected ok" << endl;
- unsigned long long count = c.count("test.foo");
- cout << "count of exiting documents in collection test.foo : " << count << endl;
+int main() {
+ cout << "connecting to localhost..." << endl;
+ DBClientConnection c;
+ c.connect("localhost");
+ cout << "connected ok" << endl;
+ unsigned long long count = c.count("test.foo");
+ cout << "count of exiting documents in collection test.foo : " << count << endl;
- bo o = BSON( "hello" << "world" );
- c.insert("test.foo", o);
+ bo o = BSON( "hello" << "world" );
+ c.insert("test.foo", o);
- return 0;
+ return 0;
}
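
The demo stops after a single insert; reading documents back uses the same client object. A hedged extension, using only query() and the cursor calls that appear elsewhere in this changeset:

    // continue the demo: read back the collection contents
    auto_ptr<DBClientCursor> cursor = c.query( "test.foo", Query() );
    while ( cursor->more() ) {
        bo obj = cursor->next();
        cout << obj.toString() << endl;
    }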
diff --git a/client/syncclusterconnection.cpp b/client/syncclusterconnection.cpp
index a16fa8e35eb..4277a1a0bef 100644
--- a/client/syncclusterconnection.cpp
+++ b/client/syncclusterconnection.cpp
@@ -37,11 +37,11 @@ namespace mongo {
for( list<HostAndPort>::const_iterator i = L.begin(); i != L.end(); i++ )
_connect( i->toString() );
}
-
+
SyncClusterConnection::SyncClusterConnection( string commaSeperated ) : _mutex("SyncClusterConnection") {
_address = commaSeperated;
string::size_type idx;
- while ( ( idx = commaSeperated.find( ',' ) ) != string::npos ){
+ while ( ( idx = commaSeperated.find( ',' ) ) != string::npos ) {
string h = commaSeperated.substr( 0 , idx );
commaSeperated = commaSeperated.substr( idx + 1 );
_connect( h );
@@ -50,7 +50,7 @@ namespace mongo {
uassert( 8004 , "SyncClusterConnection needs 3 servers" , _conns.size() == 3 );
}
- SyncClusterConnection::SyncClusterConnection( string a , string b , string c ) : _mutex("SyncClusterConnection") {
+ SyncClusterConnection::SyncClusterConnection( string a , string b , string c ) : _mutex("SyncClusterConnection") {
_address = a + "," + b + "," + c;
// connect to all even if not working
_connect( a );
@@ -62,30 +62,30 @@ namespace mongo {
assert(0);
}
- SyncClusterConnection::~SyncClusterConnection(){
+ SyncClusterConnection::~SyncClusterConnection() {
for ( size_t i=0; i<_conns.size(); i++ )
delete _conns[i];
_conns.clear();
}
- bool SyncClusterConnection::prepare( string& errmsg ){
+ bool SyncClusterConnection::prepare( string& errmsg ) {
_lastErrors.clear();
return fsync( errmsg );
}
-
- bool SyncClusterConnection::fsync( string& errmsg ){
+
+ bool SyncClusterConnection::fsync( string& errmsg ) {
bool ok = true;
errmsg = "";
- for ( size_t i=0; i<_conns.size(); i++ ){
+ for ( size_t i=0; i<_conns.size(); i++ ) {
BSONObj res;
try {
if ( _conns[i]->simpleCommand( "admin" , 0 , "fsync" ) )
continue;
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
errmsg += e.what();
}
- catch ( ... ){
+ catch ( ... ) {
}
ok = false;
errmsg += _conns[i]->toString() + ":" + res.toString();
@@ -93,21 +93,21 @@ namespace mongo {
return ok;
}
- void SyncClusterConnection::_checkLast(){
+ void SyncClusterConnection::_checkLast() {
_lastErrors.clear();
vector<string> errors;
- for ( size_t i=0; i<_conns.size(); i++ ){
+ for ( size_t i=0; i<_conns.size(); i++ ) {
BSONObj res;
string err;
try {
if ( ! _conns[i]->runCommand( "admin" , BSON( "getlasterror" << 1 << "fsync" << 1 ) , res ) )
err = "cmd failed: ";
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
err += e.what();
}
- catch ( ... ){
+ catch ( ... ) {
err += "unknown failure";
}
_lastErrors.push_back( res.getOwned() );
@@ -115,11 +115,11 @@ namespace mongo {
}
assert( _lastErrors.size() == errors.size() && _lastErrors.size() == _conns.size() );
-
+
stringstream err;
bool ok = true;
-
- for ( size_t i = 0; i<_conns.size(); i++ ){
+
+ for ( size_t i = 0; i<_conns.size(); i++ ) {
BSONObj res = _lastErrors[i];
if ( res["ok"].trueValue() && res["fsyncFiles"].numberInt() > 0 )
continue;
@@ -132,13 +132,13 @@ namespace mongo {
throw UserException( 8001 , (string)"SyncClusterConnection write op failed: " + err.str() );
}
- BSONObj SyncClusterConnection::getLastErrorDetailed(){
+ BSONObj SyncClusterConnection::getLastErrorDetailed() {
if ( _lastErrors.size() )
return _lastErrors[0];
return DBClientBase::getLastErrorDetailed();
}
- void SyncClusterConnection::_connect( string host ){
+ void SyncClusterConnection::_connect( string host ) {
log() << "SyncClusterConnection connecting to [" << host << "]" << endl;
DBClientConnection * c = new DBClientConnection( true );
string errmsg;
@@ -148,31 +148,31 @@ namespace mongo {
_conns.push_back( c );
}
- bool SyncClusterConnection::callRead( Message& toSend , Message& response ){
+ bool SyncClusterConnection::callRead( Message& toSend , Message& response ) {
// TODO: need to save state of which one to go back to somehow...
return _conns[0]->callRead( toSend , response );
}
BSONObj SyncClusterConnection::findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn, int queryOptions) {
-
- if ( ns.find( ".$cmd" ) != string::npos ){
+
+ if ( ns.find( ".$cmd" ) != string::npos ) {
string cmdName = query.obj.firstElement().fieldName();
int lockType = _lockType( cmdName );
- if ( lockType > 0 ){ // write $cmd
+ if ( lockType > 0 ) { // write $cmd
string errmsg;
if ( ! prepare( errmsg ) )
throw UserException( 13104 , (string)"SyncClusterConnection::findOne prepare failed: " + errmsg );
-
+
vector<BSONObj> all;
- for ( size_t i=0; i<_conns.size(); i++ ){
+ for ( size_t i=0; i<_conns.size(); i++ ) {
all.push_back( _conns[i]->findOne( ns , query , 0 , queryOptions ).getOwned() );
}
-
+
_checkLast();
-
- for ( size_t i=0; i<all.size(); i++ ){
+
+ for ( size_t i=0; i<all.size(); i++ ) {
BSONObj temp = all[i];
if ( isOk( temp ) )
continue;
@@ -181,7 +181,7 @@ namespace mongo {
ss << " " << _conns[i]->toString();
throw UserException( 13105 , ss.str() );
}
-
+
return all[0];
}
}
@@ -191,9 +191,9 @@ namespace mongo {
auto_ptr<DBClientCursor> SyncClusterConnection::query(const string &ns, Query query, int nToReturn, int nToSkip,
- const BSONObj *fieldsToReturn, int queryOptions, int batchSize ){
+ const BSONObj *fieldsToReturn, int queryOptions, int batchSize ) {
_lastErrors.clear();
- if ( ns.find( ".$cmd" ) != string::npos ){
+ if ( ns.find( ".$cmd" ) != string::npos ) {
string cmdName = query.obj.firstElement().fieldName();
int lockType = _lockType( cmdName );
uassert( 13054 , (string)"write $cmd not supported in SyncClusterConnection::query for:" + cmdName , lockType <= 0 );
@@ -202,7 +202,7 @@ namespace mongo {
return _queryOnActive( ns , query , nToReturn , nToSkip , fieldsToReturn , queryOptions , batchSize );
}
- bool SyncClusterConnection::_commandOnActive(const string &dbname, const BSONObj& cmd, BSONObj &info, int options ){
+ bool SyncClusterConnection::_commandOnActive(const string &dbname, const BSONObj& cmd, BSONObj &info, int options ) {
auto_ptr<DBClientCursor> cursor = _queryOnActive( dbname + ".$cmd" , cmd , 1 , 0 , 0 , options , 0 );
if ( cursor->more() )
info = cursor->next().copy();
@@ -210,151 +210,151 @@ namespace mongo {
info = BSONObj();
return isOk( info );
}
-
+
auto_ptr<DBClientCursor> SyncClusterConnection::_queryOnActive(const string &ns, Query query, int nToReturn, int nToSkip,
- const BSONObj *fieldsToReturn, int queryOptions, int batchSize ){
-
- for ( size_t i=0; i<_conns.size(); i++ ){
+ const BSONObj *fieldsToReturn, int queryOptions, int batchSize ) {
+
+ for ( size_t i=0; i<_conns.size(); i++ ) {
try {
- auto_ptr<DBClientCursor> cursor =
+ auto_ptr<DBClientCursor> cursor =
_conns[i]->query( ns , query , nToReturn , nToSkip , fieldsToReturn , queryOptions , batchSize );
if ( cursor.get() )
return cursor;
log() << "query failed to: " << _conns[i]->toString() << " no data" << endl;
}
- catch ( ... ){
+ catch ( ... ) {
log() << "query failed to: " << _conns[i]->toString() << " exception" << endl;
}
}
throw UserException( 8002 , "all servers down!" );
}
-
- auto_ptr<DBClientCursor> SyncClusterConnection::getMore( const string &ns, long long cursorId, int nToReturn, int options ){
- uassert( 10022 , "SyncClusterConnection::getMore not supported yet" , 0);
+
+ auto_ptr<DBClientCursor> SyncClusterConnection::getMore( const string &ns, long long cursorId, int nToReturn, int options ) {
+ uassert( 10022 , "SyncClusterConnection::getMore not supported yet" , 0);
auto_ptr<DBClientCursor> c;
return c;
}
-
- void SyncClusterConnection::insert( const string &ns, BSONObj obj ){
- uassert( 13119 , (string)"SyncClusterConnection::insert obj has to have an _id: " + obj.jsonString() ,
+ void SyncClusterConnection::insert( const string &ns, BSONObj obj ) {
+
+ uassert( 13119 , (string)"SyncClusterConnection::insert obj has to have an _id: " + obj.jsonString() ,
ns.find( ".system.indexes" ) != string::npos || obj["_id"].type() );
-
+
string errmsg;
if ( ! prepare( errmsg ) )
throw UserException( 8003 , (string)"SyncClusterConnection::insert prepare failed: " + errmsg );
- for ( size_t i=0; i<_conns.size(); i++ ){
+ for ( size_t i=0; i<_conns.size(); i++ ) {
_conns[i]->insert( ns , obj );
}
-
+
_checkLast();
}
-
- void SyncClusterConnection::insert( const string &ns, const vector< BSONObj >& v ){
- uassert( 10023 , "SyncClusterConnection bulk insert not implemented" , 0);
+
+ void SyncClusterConnection::insert( const string &ns, const vector< BSONObj >& v ) {
+ uassert( 10023 , "SyncClusterConnection bulk insert not implemented" , 0);
}
- void SyncClusterConnection::remove( const string &ns , Query query, bool justOne ){
+ void SyncClusterConnection::remove( const string &ns , Query query, bool justOne ) {
string errmsg;
if ( ! prepare( errmsg ) )
throw UserException( 8020 , (string)"SyncClusterConnection::remove prepare failed: " + errmsg );
-
- for ( size_t i=0; i<_conns.size(); i++ ){
+
+ for ( size_t i=0; i<_conns.size(); i++ ) {
_conns[i]->remove( ns , query , justOne );
}
-
+
_checkLast();
}
- void SyncClusterConnection::update( const string &ns , Query query , BSONObj obj , bool upsert , bool multi ){
+ void SyncClusterConnection::update( const string &ns , Query query , BSONObj obj , bool upsert , bool multi ) {
- if ( upsert ){
+ if ( upsert ) {
uassert( 13120 , "SyncClusterConnection::update upsert query needs _id" , query.obj["_id"].type() );
}
- if ( _writeConcern ){
+ if ( _writeConcern ) {
string errmsg;
if ( ! prepare( errmsg ) )
                throw UserException( 8005 , (string)"SyncClusterConnection::update prepare failed: " + errmsg );
}
- for ( size_t i=0; i<_conns.size(); i++ ){
+ for ( size_t i=0; i<_conns.size(); i++ ) {
try {
_conns[i]->update( ns , query , obj , upsert , multi );
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
if ( _writeConcern )
throw e;
}
}
-
- if ( _writeConcern ){
+
+ if ( _writeConcern ) {
_checkLast();
assert( _lastErrors.size() > 1 );
-
+
int a = _lastErrors[0]["n"].numberInt();
- for ( unsigned i=1; i<_lastErrors.size(); i++ ){
+ for ( unsigned i=1; i<_lastErrors.size(); i++ ) {
int b = _lastErrors[i]["n"].numberInt();
if ( a == b )
continue;
-
+
throw UpdateNotTheSame( 8017 , "update not consistent" , _connAddresses , _lastErrors );
}
}
}
- string SyncClusterConnection::_toString() const {
+ string SyncClusterConnection::_toString() const {
stringstream ss;
ss << "SyncClusterConnection [" << _address << "]";
return ss.str();
}
- bool SyncClusterConnection::call( Message &toSend, Message &response, bool assertOk ){
- uassert( 8006 , "SyncClusterConnection::call can only be used directly for dbQuery" ,
+ bool SyncClusterConnection::call( Message &toSend, Message &response, bool assertOk ) {
+ uassert( 8006 , "SyncClusterConnection::call can only be used directly for dbQuery" ,
toSend.operation() == dbQuery );
-
+
DbMessage d( toSend );
uassert( 8007 , "SyncClusterConnection::call can't handle $cmd" , strstr( d.getns(), "$cmd" ) == 0 );
- for ( size_t i=0; i<_conns.size(); i++ ){
+ for ( size_t i=0; i<_conns.size(); i++ ) {
try {
bool ok = _conns[i]->call( toSend , response , assertOk );
if ( ok )
return ok;
log() << "call failed to: " << _conns[i]->toString() << " no data" << endl;
}
- catch ( ... ){
+ catch ( ... ) {
log() << "call failed to: " << _conns[i]->toString() << " exception" << endl;
}
}
throw UserException( 8008 , "all servers down!" );
}
-
- void SyncClusterConnection::say( Message &toSend ){
+
+ void SyncClusterConnection::say( Message &toSend ) {
string errmsg;
if ( ! prepare( errmsg ) )
throw UserException( 13397 , (string)"SyncClusterConnection::say prepare failed: " + errmsg );
- for ( size_t i=0; i<_conns.size(); i++ ){
+ for ( size_t i=0; i<_conns.size(); i++ ) {
_conns[i]->say( toSend );
}
-
+
_checkLast();
}
-
- void SyncClusterConnection::sayPiggyBack( Message &toSend ){
+
+ void SyncClusterConnection::sayPiggyBack( Message &toSend ) {
assert(0);
}
- int SyncClusterConnection::_lockType( const string& name ){
+ int SyncClusterConnection::_lockType( const string& name ) {
{
scoped_lock lk(_mutex);
map<string,int>::iterator i = _lockTypes.find( name );
if ( i != _lockTypes.end() )
return i->second;
}
-
+
BSONObj info;
uassert( 13053 , "help failed" , _commandOnActive( "admin" , BSON( name << "1" << "help" << 1 ) , info ) );
@@ -365,7 +365,7 @@ namespace mongo {
return lockType;
}
- void SyncClusterConnection::killCursor( long long cursorID ){
+ void SyncClusterConnection::killCursor( long long cursorID ) {
// should never need to do this
assert(0);
}
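
Putting the pieces above together, a write through SyncClusterConnection is: prepare() fsyncs all nodes, the operation is sent on every connection, and _checkLast() issues getlasterror with fsync everywhere and throws if any node reports a problem. A minimal usage sketch with three hypothetical config-server addresses; note the _id requirement that insert() uasserts on (13119):

    SyncClusterConnection conn( "cfg1:27019", "cfg2:27019", "cfg3:27019" );

    // insert() requires an explicit _id so the identical document
    // lands on all three nodes
    OID id;
    id.init();
    BSONObj doc = BSON( "_id" << id << "setting" << "value" );
    conn.insert( "config.settings", doc );     // two-phase: fsync, write, check

    // reads take the normal query path (a single node answers)
    BSONObj back = conn.findOne( "config.settings", QUERY( "_id" << id ) );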
diff --git a/client/syncclusterconnection.h b/client/syncclusterconnection.h
index 1a8171716df..7d2c2abfe36 100644
--- a/client/syncclusterconnection.h
+++ b/client/syncclusterconnection.h
@@ -27,15 +27,15 @@ namespace mongo {
/**
* This is a connection to a cluster of servers that operate as one
      * for super-high durability.
- *
+ *
* Write operations are two-phase. First, all nodes are asked to fsync. If successful
- * everywhere, the write is sent everywhere and then followed by an fsync. There is no
- * rollback if a problem occurs during the second phase. Naturally, with all these fsyncs,
+ * everywhere, the write is sent everywhere and then followed by an fsync. There is no
+ * rollback if a problem occurs during the second phase. Naturally, with all these fsyncs,
* these operations will be quite slow -- use sparingly.
- *
+ *
* Read operations are sent to a single random node.
- *
- * The class checks if a command is read or write style, and sends to a single
+ *
+ * The class checks if a command is read or write style, and sends to a single
* node if a read lock command and to all in two phases with a write style command.
*/
class SyncClusterConnection : public DBClientBase {
@@ -47,7 +47,7 @@ namespace mongo {
SyncClusterConnection( string commaSeparated );
SyncClusterConnection( string a , string b , string c );
~SyncClusterConnection();
-
+
/**
* @return true if all servers are up and ready for writes
*/
@@ -66,9 +66,9 @@ namespace mongo {
const BSONObj *fieldsToReturn, int queryOptions, int batchSize );
virtual auto_ptr<DBClientCursor> getMore( const string &ns, long long cursorId, int nToReturn, int options );
-
+
virtual void insert( const string &ns, BSONObj obj );
-
+
virtual void insert( const string &ns, const vector< BSONObj >& v );
virtual void remove( const string &ns , Query query, bool justOne );
@@ -80,20 +80,20 @@ namespace mongo {
virtual void sayPiggyBack( Message &toSend );
virtual void killCursor( long long cursorID );
-
+
virtual string getServerAddress() const { return _address; }
virtual bool isFailed() const { return false; }
virtual string toString() { return _toString(); }
- virtual BSONObj getLastErrorDetailed();
+ virtual BSONObj getLastErrorDetailed();
virtual bool callRead( Message& toSend , Message& response );
- virtual ConnectionString::ConnectionType type() const { return ConnectionString::SYNC; }
+ virtual ConnectionString::ConnectionType type() const { return ConnectionString::SYNC; }
private:
SyncClusterConnection( SyncClusterConnection& prev );
- string _toString() const;
+ string _toString() const;
bool _commandOnActive(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0);
auto_ptr<DBClientCursor> _queryOnActive(const string &ns, Query query, int nToReturn, int nToSkip,
const BSONObj *fieldsToReturn, int queryOptions, int batchSize );
@@ -106,17 +106,17 @@ namespace mongo {
vector<DBClientConnection*> _conns;
map<string,int> _lockTypes;
mongo::mutex _mutex;
-
+
vector<BSONObj> _lastErrors;
};
-
+
class UpdateNotTheSame : public UserException {
public:
UpdateNotTheSame( int code , const string& msg , const vector<string>& addrs , const vector<BSONObj>& lastErrors )
- : UserException( code , msg ) , _addrs( addrs ) , _lastErrors( lastErrors ){
+ : UserException( code , msg ) , _addrs( addrs ) , _lastErrors( lastErrors ) {
assert( _addrs.size() == _lastErrors.size() );
}
-
+
virtual ~UpdateNotTheSame() throw() {
}
@@ -133,7 +133,7 @@ namespace mongo {
vector<string> _addrs;
vector<BSONObj> _lastErrors;
};
-
+
};
#include "undef_macros.h"
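
UpdateNotTheSame, declared above, is thrown from update() when write concern is on and the per-node getlasterror "n" counts diverge. A hedged handling sketch, reusing the connection from the previous example; only the class and its UserException base come from this header, and what() is assumed from the usual exception interface:

    try {
        conn.update( "config.settings",
                     QUERY( "_id" << id ),
                     BSON( "$set" << BSON( "setting" << "newvalue" ) ),
                     false /*upsert*/, false /*multi*/ );
    }
    catch ( UpdateNotTheSame& e ) {
        // the nodes applied the update to differing numbers of documents;
        // they may now disagree and need manual reconciliation
        log() << "inconsistent update across cluster: " << e.what() << endl;
    }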
diff --git a/db/background.h b/db/background.h
index 24ea1cbc12d..ea424c97107 100644
--- a/db/background.h
+++ b/db/background.h
@@ -21,16 +21,16 @@
#pragma once
-namespace mongo {
+namespace mongo {
- /* these are administrative operations / jobs
- for a namespace running in the background, and that only one
+    /* these are administrative operations / jobs
+       that run in the background for a namespace; only one
       at a time per namespace is permitted, and while one is in progress
       you aren't allowed to do other NamespaceDetails major manipulations
-       (such as dropping ns or db) even in the foreground and must
-       instead uassert.
+       (such as dropping ns or db) even in the foreground and must
+       instead uassert.
- It's assumed this is not for super-high RPS things, so we don't do
+ It's assumed this is not for super-high RPS things, so we don't do
anything special in the implementation here to be fast.
*/
class BackgroundOperation : public boost::noncopyable {
diff --git a/db/btree.cpp b/db/btree.cpp
index da7bdce9c7a..ceb1d8562cf 100644
--- a/db/btree.cpp
+++ b/db/btree.cpp
@@ -33,7 +33,7 @@ namespace mongo {
#define VERIFYTHISLOC dassert( thisLoc.btree() == this || testIntent );
/**
- * give us a writable version of the btree bucket (declares write intent).
+ * give us a writable version of the btree bucket (declares write intent).
* note it is likely more efficient to declare write intent on something smaller when you can.
*/
BtreeBucket* DiskLoc::btreemod() const {
@@ -42,25 +42,25 @@ namespace mongo {
return static_cast< BtreeBucket* >( getDur().writingPtr( b, BucketSize ) );
}
- _KeyNode& _KeyNode::writing() const {
+ _KeyNode& _KeyNode::writing() const {
return *getDur().writing( const_cast< _KeyNode* >( this ) );
}
KeyNode::KeyNode(const BucketBasics& bb, const _KeyNode &k) :
- prevChildBucket(k.prevChildBucket),
- recordLoc(k.recordLoc), key(bb.data+k.keyDataOfs())
+ prevChildBucket(k.prevChildBucket),
+ recordLoc(k.recordLoc), key(bb.data+k.keyDataOfs())
{ }
// largest key size we allow. note we very much need to support bigger keys (somehow) in the future.
static const int KeyMax = BucketSize / 10;
-
+
// We define this value as the maximum number of bytes such that, if we have
// fewer than this many bytes, we must be able to either merge with or receive
// keys from any neighboring node. If our utilization goes below this value we
// know we can bring up the utilization with a simple operation. Ignoring the
// 90/10 split policy which is sometimes employed and our 'unused' nodes, this
// is a lower bound on bucket utilization for non root buckets.
- //
+ //
// Note that the exact value here depends on the implementation of
// rebalancedSeparatorPos(). The conditions for lowWaterMark - 1 are as
// follows: We know we cannot merge with the neighbor, so the total data size
@@ -78,18 +78,18 @@ namespace mongo {
extern int otherTraceLevel;
/**
- * this error is ok/benign when doing a background indexing -- that logic in pdfile checks explicitly
+ * this error is ok/benign when doing a background indexing -- that logic in pdfile checks explicitly
* for the 10287 error code.
*/
- static void alreadyInIndex() {
+ static void alreadyInIndex() {
// we don't use massert() here as that does logging and this is 'benign' - see catches in _indexRecord()
throw MsgAssertionException(10287, "btree: key+recloc already in index");
}
/* BucketBasics --------------------------------------------------- */
- void BucketBasics::assertWritable() {
- if( cmdLine.dur )
+ void BucketBasics::assertWritable() {
+ if( cmdLine.dur )
dur::assertAlreadyDeclared(this, sizeof(*this));
}
@@ -103,7 +103,7 @@ namespace mongo {
ss << " emptySize: " << emptySize << " topSize: " << topSize << endl;
return ss.str();
}
-
+
int BucketBasics::Size() const {
assert( _wasSize == BucketSize );
return BucketSize;
@@ -151,7 +151,8 @@ namespace mongo {
if ( kn.isUsed() ) {
kc++;
- } else {
+ }
+ else {
if ( unusedCount ) {
++( *unusedCount );
}
@@ -160,8 +161,9 @@ namespace mongo {
DiskLoc left = kn.prevChildBucket;
const BtreeBucket *b = left.btree();
if ( strict ) {
- assert( b->parent == thisLoc );
- } else {
+ assert( b->parent == thisLoc );
+ }
+ else {
wassert( b->parent == thisLoc );
}
kc += b->fullValidate(kn.prevChildBucket, order, unusedCount, strict);
@@ -170,8 +172,9 @@ namespace mongo {
if ( !nextChild.isNull() ) {
const BtreeBucket *b = nextChild.btree();
if ( strict ) {
- assert( b->parent == thisLoc );
- } else {
+ assert( b->parent == thisLoc );
+ }
+ else {
wassert( b->parent == thisLoc );
}
kc += b->fullValidate(nextChild, order, unusedCount, strict);
@@ -192,7 +195,7 @@ namespace mongo {
// this is very slow so don't do often
{
static int _k;
- if( ++_k % 128 )
+ if( ++_k % 128 )
return;
}
@@ -293,10 +296,10 @@ namespace mongo {
}
/**
- * pull rightmost key from the bucket. this version requires its right child to be null so it
- * does not bother returning that value.
+ * pull rightmost key from the bucket. this version requires its right child to be null so it
+ * does not bother returning that value.
*/
- void BucketBasics::popBack(DiskLoc& recLoc, BSONObj& key) {
+ void BucketBasics::popBack(DiskLoc& recLoc, BSONObj& key) {
massert( 10282 , "n==0 in btree popBack()", n > 0 );
assert( k(n-1).isUsed() ); // no unused skipping in this function at this point - btreebuilder doesn't require that
KeyNode kn = keyNode(n-1);
@@ -304,10 +307,10 @@ namespace mongo {
key = kn.key;
int keysize = kn.key.objsize();
- massert( 10283 , "rchild not null in btree popBack()", nextChild.isNull());
+ massert( 10283 , "rchild not null in btree popBack()", nextChild.isNull());
- // weirdly, we also put the rightmost down pointer in nextchild, even when bucket isn't full.
- nextChild = kn.prevChildBucket;
+ // weirdly, we also put the rightmost down pointer in nextchild, even when bucket isn't full.
+ nextChild = kn.prevChildBucket;
n--;
emptySize += sizeof(_KeyNode);
@@ -332,11 +335,11 @@ namespace mongo {
}
/* durability note
- we do separate intent declarations herein. arguably one could just declare
- the whole bucket given we do group commits. this is something we could investigate
+ we do separate intent declarations herein. arguably one could just declare
+ the whole bucket given we do group commits. this is something we could investigate
later as to what is faster under what situations.
*/
- /** insert a key in a bucket with no complexity -- no splits required
+ /** insert a key in a bucket with no complexity -- no splits required
@return false if a split is required.
*/
bool BucketBasics::basicInsert(const DiskLoc thisLoc, int &keypos, const DiskLoc recordLoc, const BSONObj& key, const Ordering &order) const {
@@ -353,7 +356,7 @@ namespace mongo {
const char *p = (const char *) &k(keypos);
const char *q = (const char *) &k(n+1);
// declare that we will write to [k(keypos),k(n)]
- // todo: this writes a medium amount to the journal. we may want to add a verb "shift" to the redo log so
+ // todo: this writes a medium amount to the journal. we may want to add a verb "shift" to the redo log so
// we can log a very small amount.
b = (BucketBasics*) getDur().writingAtOffset((void *) this, p-(char*)this, q-p);
@@ -383,7 +386,7 @@ namespace mongo {
bool BucketBasics::mayDropKey( int index, int refPos ) const {
return index > 0 && ( index != refPos ) && k( index ).isUnused() && k( index ).prevChildBucket.isNull();
}
-
+
int BucketBasics::packedDataSize( int refPos ) const {
if ( flags & Packed ) {
return BucketSize - emptySize - headerSize();
@@ -397,7 +400,7 @@ namespace mongo {
}
return size;
}
-
+
/**
* when we delete things we just leave empty space until the node is
* full and then we repack it.
@@ -408,9 +411,9 @@ namespace mongo {
VERIFYTHISLOC
- /** TODO perhaps this can be optimized. for example if packing does no write, we can skip intent decl.
- an empirical approach is probably best than just adding new code : perhaps the bucket would need
- declaration anyway within the group commit interval, in which case we would just be adding
+ /** TODO perhaps this can be optimized. for example if packing does no write, we can skip intent decl.
+           an empirical approach is probably better than just adding new code: perhaps the bucket would need
+ declaration anyway within the group commit interval, in which case we would just be adding
code and complexity without benefit.
*/
thisLoc.btreemod()->_packReadyForMod(order, refPos);
@@ -460,7 +463,7 @@ namespace mongo {
assert( emptySize >= 0 );
setPacked();
-
+
assertValid( order );
}
@@ -494,7 +497,7 @@ namespace mongo {
assert( n > 2 );
int split = 0;
int rightSize = 0;
- // when splitting a btree node, if the new key is greater than all the other keys, we should not do an even split, but a 90/10 split.
+ // when splitting a btree node, if the new key is greater than all the other keys, we should not do an even split, but a 90/10 split.
// see SERVER-983
int rightSizeLimit = ( topSize + sizeof( _KeyNode ) * n ) / ( keypos == n ? 10 : 2 );
for( int i = n - 1; i > -1; --i ) {
@@ -507,22 +510,23 @@ namespace mongo {
// safeguards - we must not create an empty bucket
if ( split < 1 ) {
split = 1;
- } else if ( split > n - 2 ) {
+ }
+ else if ( split > n - 2 ) {
split = n - 2;
}
-
+
return split;
}
-
+
void BucketBasics::reserveKeysFront( int nAdd ) {
assert( emptySize >= int( sizeof( _KeyNode ) * nAdd ) );
emptySize -= sizeof( _KeyNode ) * nAdd;
for( int i = n - 1; i > -1; --i ) {
k( i + nAdd ) = k( i );
}
- n += nAdd;
+ n += nAdd;
}
-
+
void BucketBasics::setKey( int i, const DiskLoc recordLoc, const BSONObj &key, const DiskLoc prevChildBucket ) {
_KeyNode &kn = k( i );
kn.recordLoc = recordLoc;
@@ -532,16 +536,16 @@ namespace mongo {
char *p = dataAt( ofs );
memcpy( p, key.objdata(), key.objsize() );
}
-
+
void BucketBasics::dropFront( int nDrop, const Ordering &order, int &refpos ) {
for( int i = nDrop; i < n; ++i ) {
k( i - nDrop ) = k( i );
}
n -= nDrop;
setNotPacked();
- _packReadyForMod( order, refpos );
+ _packReadyForMod( order, refpos );
}
-
+
/* - BtreeBucket --------------------------------------------------- */
/** @return largest key in the subtree. */
@@ -561,7 +565,7 @@ namespace mongo {
break;
}
}
-
+
/**
* NOTE Currently the Ordering implementation assumes a compound index will
* not have more keys than an unsigned variable has bits. The same
@@ -579,7 +583,7 @@ namespace mongo {
BSONElement rrr = rr.next();
++rr2;
++inc;
-
+
int x = lll.woCompare( rrr, false );
if ( o.descending( mask ) )
x = -x;
@@ -606,7 +610,7 @@ namespace mongo {
return 0;
}
- bool BtreeBucket::exists(const IndexDetails& idx, const DiskLoc &thisLoc, const BSONObj& key, const Ordering& order) const {
+ bool BtreeBucket::exists(const IndexDetails& idx, const DiskLoc &thisLoc, const BSONObj& key, const Ordering& order) const {
int pos;
bool found;
DiskLoc b = locate(idx, thisLoc, key, order, pos, found, minDiskLoc);
@@ -629,10 +633,9 @@ namespace mongo {
* @return true = there is a duplicate.
*/
bool BtreeBucket::wouldCreateDup(
- const IndexDetails& idx, const DiskLoc &thisLoc,
+ const IndexDetails& idx, const DiskLoc &thisLoc,
const BSONObj& key, const Ordering& order,
- const DiskLoc &self) const
- {
+ const DiskLoc &self) const {
int pos;
bool found;
DiskLoc b = locate(idx, thisLoc, key, order, pos, found, minDiskLoc);
@@ -652,7 +655,7 @@ namespace mongo {
return false;
}
- string BtreeBucket::dupKeyError( const IndexDetails& idx , const BSONObj& key ){
+ string BtreeBucket::dupKeyError( const IndexDetails& idx , const BSONObj& key ) {
stringstream ss;
ss << "E11000 duplicate key error ";
ss << "index: " << idx.indexNamespace() << " ";
@@ -663,7 +666,7 @@ namespace mongo {
/**
     * Find a key within this btree bucket.
*
- * When duplicate keys are allowed, we use the DiskLoc of the record as if it were part of the
+ * When duplicate keys are allowed, we use the DiskLoc of the record as if it were part of the
* key. That assures that even when there are many duplicates (e.g., 1 million) for a key,
* our performance is still good.
*
@@ -674,23 +677,23 @@ namespace mongo {
* returns n if it goes after the last existing key.
* note result might be an Unused location!
*/
- char foo;
+ char foo;
bool BtreeBucket::find(const IndexDetails& idx, const BSONObj& key, const DiskLoc &recordLoc, const Ordering &order, int& pos, bool assertIfDup) const {
#if defined(_EXPERIMENT1)
- {
- char *z = (char *) this;
- int i = 0;
- while( 1 ) {
- i += 4096;
- if( i >= BucketSize )
- break;
- foo += z[i];
- }
- }
+ {
+ char *z = (char *) this;
+ int i = 0;
+ while( 1 ) {
+ i += 4096;
+ if( i >= BucketSize )
+ break;
+ foo += z[i];
+ }
+ }
#endif
-
+
globalIndexCounters.btree( (char*)this );
-
+
// binary search for this key
bool dupsChecked = false;
int l=0;
@@ -699,13 +702,13 @@ namespace mongo {
int m = (l+h)/2;
KeyNode M = keyNode(m);
int x = key.woCompare(M.key, order);
- if ( x == 0 ) {
+ if ( x == 0 ) {
if( assertIfDup ) {
- if( k(m).isUnused() ) {
- // ok that key is there if unused. but we need to check that there aren't other
- // entries for the key then. as it is very rare that we get here, we don't put any
+ if( k(m).isUnused() ) {
+ // ok that key is there if unused. but we need to check that there aren't other
+ // entries for the key then. as it is very rare that we get here, we don't put any
// coding effort in here to make this particularly fast
- if( !dupsChecked ) {
+ if( !dupsChecked ) {
dupsChecked = true;
if( idx.head.btree()->exists(idx, idx.head, key, order) ) {
if( idx.head.btree()->wouldCreateDup(idx, idx.head, key, order, recordLoc) )
@@ -716,7 +719,7 @@ namespace mongo {
}
}
else {
- if( M.recordLoc == recordLoc )
+ if( M.recordLoc == recordLoc )
alreadyInIndex();
uasserted( ASSERT_ID_DUPKEY , dupKeyError( idx , key ) );
}
@@ -759,7 +762,7 @@ namespace mongo {
p->childForPos( parentIdx ).writing().Null();
deallocBucket( thisLoc, id );
}
-
+
void BtreeBucket::deallocBucket(const DiskLoc thisLoc, const IndexDetails &id) {
#if 0
// as a temporary defensive measure, we zap the whole bucket, AND don't truly delete
@@ -784,7 +787,8 @@ namespace mongo {
_delKeyAtPos(p);
if ( isHead() ) {
// we don't delete the top bucket ever
- } else {
+ }
+ else {
if ( !mayBalanceWithNeighbors( thisLoc, id, order ) ) {
// An empty bucket is only allowed as a transient state. If
// there are no neighbors to balance with, we delete ourself.
@@ -801,11 +805,12 @@ namespace mongo {
if ( left.isNull() ) {
_delKeyAtPos(p);
mayBalanceWithNeighbors( thisLoc, id, order );
- } else {
+ }
+ else {
deleteInternalKey( thisLoc, p, id, order );
}
}
-
+
/**
* This function replaces the specified key (k) by either the prev or next
* key in the btree (k'). We require that k have either a left or right
@@ -837,15 +842,15 @@ namespace mongo {
DiskLoc advanceLoc = advance( thisLoc, advanceKeyOfs, advanceDirection, __FUNCTION__ );
if ( !advanceLoc.btree()->childForPos( advanceKeyOfs ).isNull() ||
- !advanceLoc.btree()->childForPos( advanceKeyOfs + 1 ).isNull() ) {
+ !advanceLoc.btree()->childForPos( advanceKeyOfs + 1 ).isNull() ) {
// only expected with legacy btrees, see note above
markUnused( keypos );
return;
}
-
+
KeyNode kn = advanceLoc.btree()->keyNode( advanceKeyOfs );
setInternalKey( thisLoc, keypos, kn.recordLoc, kn.key, order, childForPos( keypos ), childForPos( keypos + 1 ), id );
- advanceLoc.btreemod()->delKeyAtPos( advanceLoc, id, advanceKeyOfs, order );
+ advanceLoc.btreemod()->delKeyAtPos( advanceLoc, id, advanceKeyOfs, order );
}
void BtreeBucket::replaceWithNextChild( const DiskLoc thisLoc, IndexDetails &id ) {
@@ -853,14 +858,15 @@ namespace mongo {
if ( parent.isNull() ) {
assert( id.head == thisLoc );
id.head.writing() = nextChild;
- } else {
+ }
+ else {
parent.btree()->childForPos( indexInParent( thisLoc ) ).writing() = nextChild;
}
nextChild.btree()->parent.writing() = parent;
ClientCursor::informAboutToDeleteBucket( thisLoc );
deallocBucket( thisLoc, id );
}
-
+
bool BtreeBucket::canMergeChildren( const DiskLoc &thisLoc, int leftIndex ) const {
assert( leftIndex >= 0 && leftIndex < n );
DiskLoc leftNodeLoc = childForPos( leftIndex );
@@ -879,7 +885,7 @@ namespace mongo {
}
return true;
}
-
+
/**
* This implementation must respect the meaning and value of lowWaterMark.
* Also see comments in splitPos().
@@ -920,13 +926,14 @@ namespace mongo {
// safeguards - we must not create an empty bucket
if ( split < 1 ) {
split = 1;
- } else if ( split > l->n + 1 + r->n - 2 ) {
+ }
+ else if ( split > l->n + 1 + r->n - 2 ) {
split = l->n + 1 + r->n - 2;
}
-
- return split;
+
+ return split;
}
-
+
void BtreeBucket::doMergeChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order ) {
DiskLoc leftNodeLoc = childForPos( leftIndex );
DiskLoc rightNodeLoc = childForPos( leftIndex + 1 );
@@ -935,7 +942,7 @@ namespace mongo {
int pos = 0;
l->_packReadyForMod( order, pos );
r->_packReadyForMod( order, pos ); // pack r in case there are droppable keys
-
+
int oldLNum = l->n;
{
KeyNode kn = keyNode( leftIndex );
@@ -956,18 +963,20 @@ namespace mongo {
// TODO To ensure all leaves are of equal height, we should ensure
// this is only called on the root.
replaceWithNextChild( thisLoc, id );
- } else {
+ }
+ else {
// balance recursively - maybe we should do this even when n == 0?
mayBalanceWithNeighbors( thisLoc, id, order );
}
}
-
+
int BtreeBucket::indexInParent( const DiskLoc &thisLoc ) const {
assert( !parent.isNull() );
const BtreeBucket *p = parent.btree();
if ( p->nextChild == thisLoc ) {
return p->n;
- } else {
+ }
+ else {
for( int i = 0; i < p->n; ++i ) {
if ( p->k( i ).prevChildBucket == thisLoc ) {
return i;
@@ -982,7 +991,7 @@ namespace mongo {
assert(false);
return -1; // just to compile
}
-
+
bool BtreeBucket::tryBalanceChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order ) const {
// If we can merge, then we must merge rather than balance to preserve
// bucket utilization constraints.
@@ -992,11 +1001,11 @@ namespace mongo {
thisLoc.btreemod()->doBalanceChildren( thisLoc, leftIndex, id, order );
return true;
}
-
+
void BtreeBucket::doBalanceLeftToRight( const DiskLoc thisLoc, int leftIndex, int split,
- BtreeBucket *l, const DiskLoc lchild,
- BtreeBucket *r, const DiskLoc rchild,
- IndexDetails &id, const Ordering &order ) {
+ BtreeBucket *l, const DiskLoc lchild,
+ BtreeBucket *r, const DiskLoc rchild,
+ IndexDetails &id, const Ordering &order ) {
// TODO maybe do some audits the same way pushBack() does?
int rAdd = l->n - split;
r->reserveKeysFront( rAdd );
@@ -1012,16 +1021,16 @@ namespace mongo {
{
KeyNode kn = l->keyNode( split );
l->nextChild = kn.prevChildBucket;
- setInternalKey( thisLoc, leftIndex, kn.recordLoc, kn.key, order, lchild, rchild, id );
- }
+ setInternalKey( thisLoc, leftIndex, kn.recordLoc, kn.key, order, lchild, rchild, id );
+ }
int zeropos = 0;
l->truncateTo( split, order, zeropos );
}
void BtreeBucket::doBalanceRightToLeft( const DiskLoc thisLoc, int leftIndex, int split,
- BtreeBucket *l, const DiskLoc lchild,
- BtreeBucket *r, const DiskLoc rchild,
- IndexDetails &id, const Ordering &order ) {
+ BtreeBucket *l, const DiskLoc lchild,
+ BtreeBucket *r, const DiskLoc rchild,
+ IndexDetails &id, const Ordering &order ) {
int lN = l->n;
{
KeyNode kn = keyNode( leftIndex );
@@ -1040,7 +1049,7 @@ namespace mongo {
int zeropos = 0;
r->dropFront( split - lN, order, zeropos );
}
-
+
void BtreeBucket::doBalanceChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order ) {
DiskLoc lchild = childForPos( leftIndex );
DiskLoc rchild = childForPos( leftIndex + 1 );
@@ -1050,42 +1059,43 @@ namespace mongo {
BtreeBucket *r = rchild.btreemod();
r->_packReadyForMod( order, zeropos );
int split = rebalancedSeparatorPos( thisLoc, leftIndex );
-
+
// By definition, if we are below the low water mark and cannot merge
// then we must actively balance.
assert( split != l->n );
if ( split < l->n ) {
doBalanceLeftToRight( thisLoc, leftIndex, split, l, lchild, r, rchild, id, order );
- } else {
- doBalanceRightToLeft( thisLoc, leftIndex, split, l, lchild, r, rchild, id, order );
+ }
+ else {
+ doBalanceRightToLeft( thisLoc, leftIndex, split, l, lchild, r, rchild, id, order );
}
}
-
+
bool BtreeBucket::mayBalanceWithNeighbors( const DiskLoc thisLoc, IndexDetails &id, const Ordering &order ) const {
if ( parent.isNull() ) { // we are root, there are no neighbors
return false;
}
-
+
if ( packedDataSize( 0 ) >= lowWaterMark ) {
return false;
}
-
+
const BtreeBucket *p = parent.btree();
int parentIdx = indexInParent( thisLoc );
-
+
// TODO will missing neighbor case be possible long term? Should we try to merge/balance somehow in that case if so?
bool mayBalanceRight = ( ( parentIdx < p->n ) && !p->childForPos( parentIdx + 1 ).isNull() );
bool mayBalanceLeft = ( ( parentIdx > 0 ) && !p->childForPos( parentIdx - 1 ).isNull() );
-
+
// Balance if possible on one side - we merge only if absolutely necessary
// to preserve btree bucket utilization constraints since that's a more
// heavy duty operation (especially if we must re-split later).
- if ( mayBalanceRight &&
- p->tryBalanceChildren( parent, parentIdx, id, order ) ) {
+ if ( mayBalanceRight &&
+ p->tryBalanceChildren( parent, parentIdx, id, order ) ) {
return true;
}
- if ( mayBalanceLeft &&
- p->tryBalanceChildren( parent, parentIdx - 1, id, order ) ) {
+ if ( mayBalanceLeft &&
+ p->tryBalanceChildren( parent, parentIdx - 1, id, order ) ) {
return true;
}
@@ -1093,14 +1103,15 @@ namespace mongo {
if ( mayBalanceRight ) {
pm->doMergeChildren( parent, parentIdx, id, order );
return true;
- } else if ( mayBalanceLeft ) {
+ }
+ else if ( mayBalanceLeft ) {
pm->doMergeChildren( parent, parentIdx - 1, id, order );
return true;
}
-
+
return false;
}
-
+
/** remove a key from the index */
bool BtreeBucket::unindex(const DiskLoc thisLoc, IndexDetails& id, const BSONObj& key, const DiskLoc recordLoc ) const {
if ( key.objsize() > KeyMax ) {
@@ -1144,8 +1155,8 @@ namespace mongo {
}
void BtreeBucket::setInternalKey( const DiskLoc thisLoc, int keypos,
- const DiskLoc recordLoc, const BSONObj &key, const Ordering &order,
- const DiskLoc lchild, const DiskLoc rchild, IndexDetails &idx ) {
+ const DiskLoc recordLoc, const BSONObj &key, const Ordering &order,
+ const DiskLoc lchild, const DiskLoc rchild, IndexDetails &idx ) {
childForPos( keypos ).Null();
// This may leave the bucket empty (n == 0) which is ok only as a
@@ -1153,29 +1164,28 @@ namespace mongo {
// insertHere behaves correctly when n == 0 and as a side effect
// increments n.
_delKeyAtPos( keypos, true );
-
+
// Ensure we do not orphan neighbor's old child.
assert( childForPos( keypos ) == rchild );
-
+
// Just set temporarily - required to pass validation in insertHere()
childForPos( keypos ) = lchild;
-
- insertHere( thisLoc, keypos, recordLoc, key, order, lchild, rchild, idx );
+
+ insertHere( thisLoc, keypos, recordLoc, key, order, lchild, rchild, idx );
}
-
+
/**
* insert a key in this bucket, splitting if necessary.
* @keypos - where to insert the key in range 0..n. 0=make leftmost, n=make rightmost.
* NOTE this function may free some data, and as a result the value passed for keypos may
* be invalid after calling insertHere()
- */
+ */
void BtreeBucket::insertHere( const DiskLoc thisLoc, int keypos,
- const DiskLoc recordLoc, const BSONObj& key, const Ordering& order,
- const DiskLoc lchild, const DiskLoc rchild, IndexDetails& idx) const
- {
+ const DiskLoc recordLoc, const BSONObj& key, const Ordering& order,
+ const DiskLoc lchild, const DiskLoc rchild, IndexDetails& idx) const {
if ( insert_debug )
out() << " " << thisLoc.toString() << ".insertHere " << key.toString() << '/' << recordLoc.toString() << ' '
- << lchild.toString() << ' ' << rchild.toString() << " keypos:" << keypos << endl;
+ << lchild.toString() << ' ' << rchild.toString() << " keypos:" << keypos << endl;
DiskLoc oldLoc = thisLoc;
@@ -1225,8 +1235,7 @@ namespace mongo {
}
}
- void BtreeBucket::split(const DiskLoc thisLoc, int keypos, const DiskLoc recordLoc, const BSONObj& key, const Ordering& order, const DiskLoc lchild, const DiskLoc rchild, IndexDetails& idx)
- {
+ void BtreeBucket::split(const DiskLoc thisLoc, int keypos, const DiskLoc recordLoc, const BSONObj& key, const Ordering& order, const DiskLoc lchild, const DiskLoc rchild, IndexDetails& idx) {
assertWritable();
if ( split_debug )
@@ -1255,7 +1264,7 @@ namespace mongo {
if ( split_debug ) {
out() << " splitkey key:" << splitkey.key.toString() << endl;
}
-
+
// promote splitkey to a parent node
if ( parent.isNull() ) {
// make a new parent if we were the root
@@ -1288,7 +1297,8 @@ namespace mongo {
if ( split_debug )
out() << " keypos<split, insertHere() the new key" << endl;
insertHere(thisLoc, newpos, recordLoc, key, order, lchild, rchild, idx);
- } else {
+ }
+ else {
int kp = keypos-split-1;
assert(kp>=0);
rLoc.btree()->insertHere(rLoc, kp, recordLoc, key, order, lchild, rchild, idx);
@@ -1390,7 +1400,7 @@ namespace mongo {
else
return pos == n ? DiskLoc() /*theend*/ : thisLoc;
}
-
+
bool BtreeBucket::customFind( int l, int h, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, DiskLoc &thisLoc, int &keyOfs, pair< DiskLoc, int > &bestParent ) const {
while( 1 ) {
if ( l + 1 == h ) {
@@ -1400,7 +1410,8 @@ namespace mongo {
bestParent = make_pair( thisLoc, keyOfs );
thisLoc = next;
return true;
- } else {
+ }
+ else {
return false;
}
}
@@ -1408,18 +1419,21 @@ namespace mongo {
int cmp = customBSONCmp( thisLoc.btree()->keyNode( m ).key, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction );
if ( cmp < 0 ) {
l = m;
- } else if ( cmp > 0 ) {
+ }
+ else if ( cmp > 0 ) {
h = m;
- } else {
+ }
+ else {
if ( direction < 0 ) {
l = m;
- } else {
+ }
+ else {
h = m;
}
}
- }
+ }
}
-
+
/**
* find smallest/biggest value greater-equal/less-equal than specified
* starting thisLoc + keyOfs will be strictly less than/strictly greater than keyBegin/keyBeginLen/keyEnd
@@ -1432,7 +1446,8 @@ namespace mongo {
l = keyOfs;
h = n - 1;
dontGoUp = ( customBSONCmp( keyNode( h ).key, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) >= 0 );
- } else {
+ }
+ else {
l = 0;
h = keyOfs;
dontGoUp = ( customBSONCmp( keyNode( l ).key, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) <= 0 );
@@ -1443,7 +1458,8 @@ namespace mongo {
if ( !customFind( l, h, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction, thisLoc, keyOfs, bestParent ) ) {
return;
}
- } else {
+ }
+ else {
// go up parents until rightmost/leftmost node is >=/<= target or at top
while( !thisLoc.btree()->parent.isNull() ) {
thisLoc = thisLoc.btree()->parent;
@@ -1451,16 +1467,17 @@ namespace mongo {
if ( customBSONCmp( thisLoc.btree()->keyNode( thisLoc.btree()->n - 1 ).key, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) >= 0 ) {
break;
}
- } else {
+ }
+ else {
if ( customBSONCmp( thisLoc.btree()->keyNode( 0 ).key, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) <= 0 ) {
break;
- }
+ }
}
}
}
customLocate( thisLoc, keyOfs, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction, bestParent );
}
-
+
void BtreeBucket::customLocate(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, pair< DiskLoc, int > &bestParent ) const {
if ( thisLoc.btree()->n == 0 ) {
thisLoc = DiskLoc();
@@ -1474,7 +1491,8 @@ namespace mongo {
bool firstCheck;
if ( direction > 0 ) {
firstCheck = ( customBSONCmp( thisLoc.btree()->keyNode( 0 ).key, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) >= 0 );
- } else {
+ }
+ else {
firstCheck = ( customBSONCmp( thisLoc.btree()->keyNode( h ).key, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) <= 0 );
}
if ( firstCheck ) {
@@ -1482,7 +1500,8 @@ namespace mongo {
if ( direction > 0 ) {
next = thisLoc.btree()->k( 0 ).prevChildBucket;
keyOfs = 0;
- } else {
+ }
+ else {
next = thisLoc.btree()->nextChild;
keyOfs = h;
}
@@ -1490,21 +1509,24 @@ namespace mongo {
bestParent = pair< DiskLoc, int >( thisLoc, keyOfs );
thisLoc = next;
continue;
- } else {
+ }
+ else {
return;
}
}
bool secondCheck;
if ( direction > 0 ) {
secondCheck = ( customBSONCmp( thisLoc.btree()->keyNode( h ).key, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) < 0 );
- } else {
+ }
+ else {
secondCheck = ( customBSONCmp( thisLoc.btree()->keyNode( 0 ).key, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) > 0 );
}
if ( secondCheck ) {
DiskLoc next;
if ( direction > 0 ) {
next = thisLoc.btree()->nextChild;
- } else {
+ }
+ else {
next = thisLoc.btree()->k( 0 ).prevChildBucket;
}
if ( next.isNull() ) {
@@ -1512,7 +1534,8 @@ namespace mongo {
thisLoc = bestParent.first;
keyOfs = bestParent.second;
return;
- } else {
+ }
+ else {
thisLoc = next;
continue;
}
@@ -1523,7 +1546,7 @@ namespace mongo {
}
}
-
+
/** @thisLoc disk location of *this */
int BtreeBucket::_insert(const DiskLoc thisLoc, const DiskLoc recordLoc,
const BSONObj& key, const Ordering &order, bool dupsAllowed,
@@ -1538,8 +1561,8 @@ namespace mongo {
bool found = find(idx, key, recordLoc, order, pos, !dupsAllowed);
if ( insert_debug ) {
out() << " " << thisLoc.toString() << '.' << "_insert " <<
- key.toString() << '/' << recordLoc.toString() <<
- " l:" << lChild.toString() << " r:" << rChild.toString() << endl;
+ key.toString() << '/' << recordLoc.toString() <<
+ " l:" << lChild.toString() << " r:" << rChild.toString() << endl;
out() << " found:" << found << " pos:" << pos << " n:" << n << endl;
}
@@ -1553,7 +1576,7 @@ namespace mongo {
return 0;
}
- DEV {
+ DEV {
log() << "_insert(): key already exists in index (ok for background:true)\n";
log() << " " << idx.indexNamespace() << " thisLoc:" << thisLoc.toString() << '\n';
log() << " " << key.toString() << '\n';
@@ -1583,7 +1606,7 @@ namespace mongo {
out() << '\n';
KeyNode k = keyNode(i);
out() << '\t' << i << '\t' << k.key.toString() << "\tleft:" << hex <<
- k.prevChildBucket.getOfs() << "\tRecLoc:" << k.recordLoc.toString() << dec;
+ k.prevChildBucket.getOfs() << "\tRecLoc:" << k.recordLoc.toString() << dec;
if ( this->k(i).isUnused() )
out() << " UNUSED";
}
@@ -1592,9 +1615,8 @@ namespace mongo {
    /** todo: meaning of return code is unclear; clean up */
int BtreeBucket::bt_insert(const DiskLoc thisLoc, const DiskLoc recordLoc,
- const BSONObj& key, const Ordering &order, bool dupsAllowed,
- IndexDetails& idx, bool toplevel) const
- {
+ const BSONObj& key, const Ordering &order, bool dupsAllowed,
+ IndexDetails& idx, bool toplevel) const {
if ( toplevel ) {
if ( key.objsize() > KeyMax ) {
problem() << "Btree::insert: key too large to index, skipping " << idx.indexNamespace() << ' ' << key.objsize() << ' ' << key.toString() << endl;
@@ -1611,7 +1633,7 @@ namespace mongo {
void BtreeBucket::shape(stringstream& ss) const {
_shape(0, ss);
}
-
+
int BtreeBucket::getLowWaterMark() {
return lowWaterMark;
}
@@ -1619,7 +1641,7 @@ namespace mongo {
int BtreeBucket::getKeyMax() {
return KeyMax;
}
-
+
DiskLoc BtreeBucket::findSingle( const IndexDetails& indexdetails , const DiskLoc& thisLoc, const BSONObj& key ) const {
int pos;
bool found;
@@ -1630,7 +1652,7 @@ namespace mongo {
return bucket;
const BtreeBucket *b = bucket.btree();
- while ( 1 ){
+ while ( 1 ) {
const _KeyNode& knraw = b->k(pos);
if ( knraw.isUsed() )
break;
@@ -1699,26 +1721,25 @@ namespace mongo {
/* --- BtreeBuilder --- */
- BtreeBuilder::BtreeBuilder(bool _dupsAllowed, IndexDetails& _idx) :
- dupsAllowed(_dupsAllowed),
- idx(_idx),
- n(0),
- order( idx.keyPattern() ),
- ordering( Ordering::make(idx.keyPattern()) )
- {
+ BtreeBuilder::BtreeBuilder(bool _dupsAllowed, IndexDetails& _idx) :
+ dupsAllowed(_dupsAllowed),
+ idx(_idx),
+ n(0),
+ order( idx.keyPattern() ),
+ ordering( Ordering::make(idx.keyPattern()) ) {
first = cur = BtreeBucket::addBucket(idx);
b = cur.btreemod();
committed = false;
}
- void BtreeBuilder::newBucket() {
+ void BtreeBuilder::newBucket() {
DiskLoc L = BtreeBucket::addBucket(idx);
b->tempNext() = L;
cur = L;
b = cur.btreemod();
}
- void BtreeBuilder::addKey(BSONObj& key, DiskLoc loc) {
+ void BtreeBuilder::addKey(BSONObj& key, DiskLoc loc) {
if( !dupsAllowed ) {
if( n > 0 ) {
int cmp = keyLast.woCompare(key, order);
@@ -1731,12 +1752,12 @@ namespace mongo {
keyLast = key;
}
- if ( ! b->_pushBack(loc, key, ordering, DiskLoc()) ){
+ if ( ! b->_pushBack(loc, key, ordering, DiskLoc()) ) {
// no room
if ( key.objsize() > KeyMax ) {
problem() << "Btree::insert: key too large to index, skipping " << idx.indexNamespace() << ' ' << key.objsize() << ' ' << key.toString() << endl;
}
- else {
+ else {
// bucket was full
newBucket();
b->pushBack(loc, key, ordering, DiskLoc());
@@ -1745,10 +1766,10 @@ namespace mongo {
n++;
}
- void BtreeBuilder::buildNextLevel(DiskLoc loc) {
+ void BtreeBuilder::buildNextLevel(DiskLoc loc) {
int levels = 1;
- while( 1 ) {
- if( loc.btree()->tempNext().isNull() ) {
+ while( 1 ) {
+ if( loc.btree()->tempNext().isNull() ) {
// only 1 bucket at this level. we are done.
getDur().writingDiskLoc(idx.head) = loc;
break;
@@ -1760,34 +1781,35 @@ namespace mongo {
BtreeBucket *up = upLoc.btreemod();
DiskLoc xloc = loc;
- while( !xloc.isNull() ) {
+ while( !xloc.isNull() ) {
BtreeBucket *x = xloc.btreemod();
- BSONObj k;
+ BSONObj k;
DiskLoc r;
x->popBack(r,k);
bool keepX = ( x->n != 0 );
DiskLoc keepLoc = keepX ? xloc : x->nextChild;
- if ( ! up->_pushBack(r, k, ordering, keepLoc) ){
+ if ( ! up->_pushBack(r, k, ordering, keepLoc) ) {
// current bucket full
DiskLoc n = BtreeBucket::addBucket(idx);
up->tempNext() = n;
- upLoc = n;
+ upLoc = n;
up = upLoc.btreemod();
up->pushBack(r, k, ordering, keepLoc);
}
DiskLoc nextLoc = x->tempNext(); // get next in chain at current level
if ( keepX ) {
- x->parent = upLoc;
- } else {
+ x->parent = upLoc;
+ }
+ else {
if ( !x->nextChild.isNull() )
x->nextChild.btreemod()->parent = upLoc;
x->deallocBucket( xloc, idx );
}
xloc = nextLoc;
}
-
+
loc = upStart;
}
@@ -1796,16 +1818,16 @@ namespace mongo {
}
/** when all addKeys are done, we then build the higher levels of the tree */
- void BtreeBuilder::commit() {
+ void BtreeBuilder::commit() {
buildNextLevel(first);
committed = true;
}
- BtreeBuilder::~BtreeBuilder() {
- if( !committed ) {
+ BtreeBuilder::~BtreeBuilder() {
+ if( !committed ) {
log(2) << "Rolling back partially built index space" << endl;
DiskLoc x = first;
- while( !x.isNull() ) {
+ while( !x.isNull() ) {
DiskLoc next = x.btree()->tempNext();
string ns = idx.indexNamespace();
theDataFileMgr._deleteRecord(nsdetails(ns.c_str()), ns.c_str(), x.rec(), x);
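
BtreeBuilder, at the end of this file, is the bulk-load path: addKey() expects keys in sorted order (it uasserts on out-of-order input and on disallowed duplicates), appends into the current bucket and chains a fresh one via tempNext() when full, and commit() then builds the interior levels bottom-up with buildNextLevel(); the destructor rolls the partial tree back if commit() never ran. A sketch of the expected calling pattern -- idx and the sorted iterator are placeholders for what the index build's external sorter supplies:

    BtreeBuilder builder( dupsAllowed, idx );
    while ( sorted->more() ) {                 // hypothetical sorted iterator
        BSONObj key = sorted->key();           // hypothetical accessors
        DiskLoc loc = sorted->loc();
        builder.addKey( key, loc );            // must arrive in key order
    }
    builder.commit();                          // build interior levels, mark committed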
diff --git a/db/btree.h b/db/btree.h
index f62e2776fd5..8f6c6ec3fd2 100644
--- a/db/btree.h
+++ b/db/btree.h
@@ -67,7 +67,7 @@ namespace mongo {
/**
* wrapper - this is our in memory representation of the key.
* _KeyNode is the disk representation.
- *
+ *
* This object and its bson key will become invalid if the key is moved.
*/
class KeyNode {
@@ -79,7 +79,7 @@ namespace mongo {
};
#pragma pack(1)
- class BtreeData {
+ class BtreeData {
protected:
DiskLoc parent;
DiskLoc nextChild; // child bucket off and to the right of the highest key.
@@ -133,22 +133,22 @@ namespace mongo {
* if the key is moved or reassigned, or if the node is packed.
*/
const KeyNode keyNode(int i) const {
- if ( i >= n ){
+ if ( i >= n ) {
massert( 13000 , (string)"invalid keyNode: " + BSON( "i" << i << "n" << n ).jsonString() , i < n );
}
return KeyNode(*this, k(i));
}
-
+
static int headerSize() {
const BucketBasics *d = 0;
return (char*)&(d->data) - (char*)&(d->parent);
}
static int bodySize() { return BucketSize - headerSize(); }
-
+
// for testing
int nKeys() const { return n; }
const DiskLoc getNextChild() const { return nextChild; }
-
+
protected:
char * dataAt(short ofs) { return data + ofs; }
@@ -162,10 +162,10 @@ namespace mongo {
* though it is marked const.
*/
bool basicInsert(const DiskLoc thisLoc, int &keypos, const DiskLoc recordLoc, const BSONObj& key, const Ordering &order) const;
-
+
/** @return true if works, false if not enough space */
bool _pushBack(const DiskLoc recordLoc, const BSONObj& key, const Ordering &order, const DiskLoc prevChild);
- void pushBack(const DiskLoc recordLoc, const BSONObj& key, const Ordering &order, const DiskLoc prevChild){
+ void pushBack(const DiskLoc recordLoc, const BSONObj& key, const Ordering &order, const DiskLoc prevChild) {
bool ok = _pushBack( recordLoc , key , order , prevChild );
assert(ok);
}
@@ -223,7 +223,7 @@ namespace mongo {
void markUnused(int keypos);
/**
- * BtreeBuilder uses the parent var as a temp place to maintain a linked list chain.
+ * BtreeBuilder uses the parent var as a temp place to maintain a linked list chain.
* we use tempNext() when we do that to be less confusing. (one might have written a union in C)
*/
const DiskLoc& tempNext() const { return parent; }
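
The "union in C" remark is literal field reuse: while the bulk builder runs, no bucket has a real parent yet, so the parent slot can carry the next bucket in the level chain instead. As a stand-alone illustration (toy Bucket, int standing in for DiskLoc):

    struct Bucket {
        int parent;                                    // stand-in for DiskLoc parent
        const int& tempNext() const { return parent; } // same storage, clearer name
        int& tempNext() { return parent; }             // used only during bulk build
    };
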
@@ -233,17 +233,17 @@ namespace mongo {
int Size() const;
const _KeyNode& k(int i) const { return ((const _KeyNode*)data)[i]; }
_KeyNode& k(int i) { return ((_KeyNode*)data)[i]; }
-
+
/** @return the key position where a split should occur on insert */
int splitPos( int keypos ) const;
-
+
/**
* Adds new entries to beginning of key array, shifting existing
* entries to the right. After this is called, setKey() must be called
* on all the newly created entries in the key array.
*/
void reserveKeysFront( int nAdd );
-
+
/**
* Sets an existing key using the given parameters.
* @i index of key to set
@@ -291,7 +291,7 @@ namespace mongo {
void dump() const;
/**
- * @return true if key exists in index
+ * @return true if key exists in index
*
* @order - indicates order of keys in the index. this is basically the index's key pattern, e.g.:
* BSONObj order = ((IndexDetails&)idx).keyPattern();
@@ -300,20 +300,20 @@ namespace mongo {
bool exists(const IndexDetails& idx, const DiskLoc &thisLoc, const BSONObj& key, const Ordering& order) const;
bool wouldCreateDup(
- const IndexDetails& idx, const DiskLoc &thisLoc,
+ const IndexDetails& idx, const DiskLoc &thisLoc,
const BSONObj& key, const Ordering& order,
- const DiskLoc &self) const;
+ const DiskLoc &self) const;
static DiskLoc addBucket(const IndexDetails&); /* start a new index off, empty */
/** invalidates 'this' and thisLoc */
void deallocBucket(const DiskLoc thisLoc, const IndexDetails &id);
-
+
static void renameIndexNamespace(const char *oldNs, const char *newNs);
/** This function may change the btree root */
int bt_insert(const DiskLoc thisLoc, const DiskLoc recordLoc,
- const BSONObj& key, const Ordering &order, bool dupsAllowed,
- IndexDetails& idx, bool toplevel = true) const;
+ const BSONObj& key, const Ordering &order, bool dupsAllowed,
+ IndexDetails& idx, bool toplevel = true) const;
/** This function may change the btree root */
bool unindex(const DiskLoc thisLoc, IndexDetails& id, const BSONObj& key, const DiskLoc recordLoc) const;
@@ -322,12 +322,12 @@ namespace mongo {
* locate may return an "unused" key that is just a marker. so be careful.
* looks for a key:recordloc pair.
*
- * @found - returns true if exact match found. note you can get back a position
+ * @found - returns true if exact match found. note you can get back a position
* result even if found is false.
*/
- DiskLoc locate(const IndexDetails &idx , const DiskLoc& thisLoc, const BSONObj& key, const Ordering &order,
+ DiskLoc locate(const IndexDetails &idx , const DiskLoc& thisLoc, const BSONObj& key, const Ordering &order,
int& pos, bool& found, const DiskLoc &recordLoc, int direction=1) const;
-
+
/**
* find the first instance of the key
* does not handle dups
@@ -338,10 +338,10 @@ namespace mongo {
/** advance one key position in the index: */
DiskLoc advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) const;
-
+
void advanceTo(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction ) const;
void customLocate(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, pair< DiskLoc, int > &bestParent ) const;
-
+
const DiskLoc getHead(const DiskLoc& thisLoc) const;
/** get tree shape */
@@ -351,7 +351,7 @@ namespace mongo {
static int getLowWaterMark();
static int getKeyMax();
-
+
protected:
/**
* Fix parent pointers for children
@@ -371,19 +371,19 @@ namespace mongo {
* @return true iff balancing was performed.
* NOTE This function may invalidate thisLoc.
*/
- bool mayBalanceWithNeighbors(const DiskLoc thisLoc, IndexDetails &id, const Ordering &order) const;
+ bool mayBalanceWithNeighbors(const DiskLoc thisLoc, IndexDetails &id, const Ordering &order) const;
/** @return true if balance succeeded */
bool tryBalanceChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order ) const;
void doBalanceChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order );
void doBalanceLeftToRight( const DiskLoc thisLoc, int leftIndex, int split,
- BtreeBucket *l, const DiskLoc lchild,
- BtreeBucket *r, const DiskLoc rchild,
- IndexDetails &id, const Ordering &order );
+ BtreeBucket *l, const DiskLoc lchild,
+ BtreeBucket *r, const DiskLoc rchild,
+ IndexDetails &id, const Ordering &order );
void doBalanceRightToLeft( const DiskLoc thisLoc, int leftIndex, int split,
- BtreeBucket *l, const DiskLoc lchild,
- BtreeBucket *r, const DiskLoc rchild,
- IndexDetails &id, const Ordering &order );
+ BtreeBucket *l, const DiskLoc lchild,
+ BtreeBucket *r, const DiskLoc rchild,
+ IndexDetails &id, const Ordering &order );
/** may invalidate this and thisLoc */
void doMergeChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order);
@@ -393,7 +393,7 @@ namespace mongo {
/** @return true iff left and right child can be merged into one node */
bool canMergeChildren( const DiskLoc &thisLoc, int leftIndex ) const;
-
+
/**
* @return index of the rebalanced separator; the index value is
* determined as if we had an array
@@ -404,7 +404,7 @@ namespace mongo {
* comments for splitPos().
*/
int rebalancedSeparatorPos( const DiskLoc &thisLoc, int leftIndex ) const;
-
+
int indexInParent( const DiskLoc &thisLoc ) const;
BSONObj keyAt(int keyOfs) const {
return keyOfs >= n ? BSONObj() : keyNode(keyOfs).key;
@@ -412,7 +412,7 @@ namespace mongo {
static BtreeBucket* allocTemp(); /* caller must release with free() */
/** split bucket */
- void split(const DiskLoc thisLoc, int keypos,
+ void split(const DiskLoc thisLoc, int keypos,
const DiskLoc recordLoc, const BSONObj& key,
const Ordering& order, const DiskLoc lchild, const DiskLoc rchild, IndexDetails& idx);
@@ -428,14 +428,14 @@ namespace mongo {
static void findLargestKey(const DiskLoc& thisLoc, DiskLoc& largestLoc, int& largestKey);
static int customBSONCmp( const BSONObj &l, const BSONObj &rBegin, int rBeginLen, bool rSup, const vector< const BSONElement * > &rEnd, const vector< bool > &rEndInclusive, const Ordering &o, int direction );
static void fix(const DiskLoc thisLoc, const DiskLoc child);
-
+
/** Replaces an existing key with the new specified key, splitting if necessary */
void setInternalKey( const DiskLoc thisLoc, int keypos,
- const DiskLoc recordLoc, const BSONObj &key, const Ordering &order,
- const DiskLoc lchild, const DiskLoc rchild, IndexDetails &idx);
-
+ const DiskLoc recordLoc, const BSONObj &key, const Ordering &order,
+ const DiskLoc lchild, const DiskLoc rchild, IndexDetails &idx);
+
/**
- * Deletes the specified key, replacing it with the key immediately
+ * Deletes the specified key, replacing it with the key immediately
* preceding or succeeding it in the btree. Either the left or right
* child of the specified key must be non null.
*/
@@ -465,13 +465,13 @@ namespace mongo {
* @return true if the loc has not been seen
*/
virtual bool getsetdup(DiskLoc loc) {
- if( _multikey ) {
+ if( _multikey ) {
pair<set<DiskLoc>::iterator, bool> p = _dups.insert(loc);
return !p.second;
}
return false;
}
-
+
virtual bool modifiedKeys() const { return _multikey; }
virtual bool isMultiKey() const { return _multikey; }
@@ -512,22 +512,23 @@ namespace mongo {
virtual BSONObj prettyIndexBounds() const {
if ( !_independentFieldRanges ) {
return BSON( "start" << prettyKey( startKey ) << "end" << prettyKey( endKey ) );
- } else {
+ }
+ else {
return _bounds->obj();
}
}
-
+
void forgetEndKey() { endKey = BSONObj(); }
virtual CoveredIndexMatcher *matcher() const { return _matcher.get(); }
-
+
virtual void setMatcher( shared_ptr< CoveredIndexMatcher > matcher ) { _matcher = matcher; }
virtual long long nscanned() { return _nscanned; }
-
+
/** for debugging only */
const DiskLoc getBucket() const { return bucket; }
-
+
private:
/**
* Our btrees may (rarely) have "unused" keys when items are deleted.
@@ -546,15 +547,15 @@ namespace mongo {
/** if afterKey is true, we want the first key with values of the keyBegin fields greater than keyBegin */
void advanceTo( const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive );
-
+
friend class BtreeBucket;
set<DiskLoc> _dups;
NamespaceDetails * const d;
- const int idxNo;
+ const int idxNo;
BSONObj startKey;
BSONObj endKey;
- bool _endKeyInclusive;
+ bool _endKeyInclusive;
bool _multikey; // this must be updated every getmore batch in case someone added a multikey
const IndexDetails& indexDetails;
const BSONObj _order;
@@ -573,10 +574,10 @@ namespace mongo {
};
- inline bool IndexDetails::hasKey(const BSONObj& key) {
+ inline bool IndexDetails::hasKey(const BSONObj& key) {
return head.btree()->exists(*this, head, key, Ordering::make(keyPattern()));
}
- inline bool IndexDetails::wouldCreateDup(const BSONObj& key, DiskLoc self) {
+ inline bool IndexDetails::wouldCreateDup(const BSONObj& key, DiskLoc self) {
return head.btree()->wouldCreateDup(*this, head, key, Ordering::make(keyPattern()), self);
}
@@ -585,7 +586,7 @@ namespace mongo {
* _ TODO dropDups
*/
class BtreeBuilder {
- bool dupsAllowed;
+ bool dupsAllowed;
IndexDetails& idx;
unsigned long long n;
BSONObj keyLast;
@@ -608,7 +609,7 @@ namespace mongo {
void addKey(BSONObj& key, DiskLoc loc);
/**
- * commit work. if not called, destructor will clean up partially completed work
+ * commit work. if not called, destructor will clean up partially completed work
* (in case exception has happened).
*/
void commit();
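
commit() plus the rolling-back destructor form a commit-flag idiom: the built tree survives only if commit() runs before the builder leaves scope, so an exception anywhere in the build cleans itself up. A reduced sketch (this Builder is illustrative, not the class above):

    #include <iostream>

    class Builder {
        bool committed;
    public:
        Builder() : committed(false) {}
        void addKey(int /*key*/) { /* append to the level-0 chain */ }
        void commit() { /* buildNextLevel(first); */ committed = true; }
        ~Builder() {
            if (!committed)                  // exception or early return
                std::cout << "Rolling back partially built index space\n";
        }
    };

    int main() {
        Builder b;
        b.addKey(1);
        // no b.commit(): the destructor discards the partial work
    }
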
diff --git a/db/btreecursor.cpp b/db/btreecursor.cpp
index e70abbbd133..9cab95f83c6 100644
--- a/db/btreecursor.cpp
+++ b/db/btreecursor.cpp
@@ -26,21 +26,20 @@ namespace mongo {
extern int otherTraceLevel;
- BtreeCursor::BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails &_id,
+ BtreeCursor::BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails &_id,
const BSONObj &_startKey, const BSONObj &_endKey, bool endKeyInclusive, int _direction ) :
- d(_d), idxNo(_idxNo),
- startKey( _startKey ),
- endKey( _endKey ),
- _endKeyInclusive( endKeyInclusive ),
- _multikey( d->isMultikey( idxNo ) ),
- indexDetails( _id ),
- _order( _id.keyPattern() ),
- _ordering( Ordering::make( _order ) ),
- _direction( _direction ),
- _spec( _id.getSpec() ),
- _independentFieldRanges( false ),
- _nscanned( 0 )
- {
+ d(_d), idxNo(_idxNo),
+ startKey( _startKey ),
+ endKey( _endKey ),
+ _endKeyInclusive( endKeyInclusive ),
+ _multikey( d->isMultikey( idxNo ) ),
+ indexDetails( _id ),
+ _order( _id.keyPattern() ),
+ _ordering( Ordering::make( _order ) ),
+ _direction( _direction ),
+ _spec( _id.getSpec() ),
+ _independentFieldRanges( false ),
+ _nscanned( 0 ) {
audit();
init();
dassert( _dups.size() == 0 );
@@ -48,19 +47,18 @@ namespace mongo {
BtreeCursor::BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const shared_ptr< FieldRangeVector > &_bounds, int _direction )
:
- d(_d), idxNo(_idxNo),
- _endKeyInclusive( true ),
- _multikey( d->isMultikey( idxNo ) ),
- indexDetails( _id ),
- _order( _id.keyPattern() ),
- _ordering( Ordering::make( _order ) ),
- _direction( _direction ),
- _bounds( ( assert( _bounds.get() ), _bounds ) ),
- _boundsIterator( new FieldRangeVector::Iterator( *_bounds ) ),
- _spec( _id.getSpec() ),
- _independentFieldRanges( true ),
- _nscanned( 0 )
- {
+ d(_d), idxNo(_idxNo),
+ _endKeyInclusive( true ),
+ _multikey( d->isMultikey( idxNo ) ),
+ indexDetails( _id ),
+ _order( _id.keyPattern() ),
+ _ordering( Ordering::make( _order ) ),
+ _direction( _direction ),
+ _bounds( ( assert( _bounds.get() ), _bounds ) ),
+ _boundsIterator( new FieldRangeVector::Iterator( *_bounds ) ),
+ _spec( _id.getSpec() ),
+ _independentFieldRanges( true ),
+ _nscanned( 0 ) {
massert( 13384, "BtreeCursor FieldRangeVector constructor doesn't accept special indexes", !_spec.getType() );
audit();
startKey = _bounds->startKey();
@@ -90,20 +88,20 @@ namespace mongo {
}
void BtreeCursor::init() {
- if ( _spec.getType() ){
+ if ( _spec.getType() ) {
startKey = _spec.getType()->fixKey( startKey );
endKey = _spec.getType()->fixKey( endKey );
}
bool found;
bucket = indexDetails.head.btree()->
- locate(indexDetails, indexDetails.head, startKey, _ordering, keyOfs, found, _direction > 0 ? minDiskLoc : maxDiskLoc, _direction);
+ locate(indexDetails, indexDetails.head, startKey, _ordering, keyOfs, found, _direction > 0 ? minDiskLoc : maxDiskLoc, _direction);
if ( ok() ) {
_nscanned = 1;
- }
+ }
skipUnusedKeys( false );
checkEnd();
}
-
+
void BtreeCursor::skipAndCheck() {
skipUnusedKeys( true );
while( 1 ) {
@@ -116,7 +114,7 @@ namespace mongo {
}
}
}
-
+
bool BtreeCursor::skipOutOfRangeKeysAndCheckEnd() {
if ( !ok() ) {
return false;
@@ -125,7 +123,8 @@ namespace mongo {
if ( ret == -2 ) {
bucket = DiskLoc();
return false;
- } else if ( ret == -1 ) {
+ }
+ else if ( ret == -1 ) {
++_nscanned;
return false;
}
@@ -133,7 +132,7 @@ namespace mongo {
advanceTo( currKeyNode().key, ret, _boundsIterator->after(), _boundsIterator->cmp(), _boundsIterator->inc() );
return true;
}
-
+
/* skip unused keys. */
bool BtreeCursor::skipUnusedKeys( bool mayJump ) {
int u = 0;
@@ -171,29 +170,30 @@ namespace mongo {
if ( !endKey.isEmpty() ) {
int cmp = sgn( endKey.woCompare( currKey(), _order ) );
if ( ( cmp != 0 && cmp != _direction ) ||
- ( cmp == 0 && !_endKeyInclusive ) )
+ ( cmp == 0 && !_endKeyInclusive ) )
bucket = DiskLoc();
}
}
-
+
void BtreeCursor::advanceTo( const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive) {
bucket.btree()->advanceTo( bucket, keyOfs, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, _ordering, _direction );
}
-
+
bool BtreeCursor::advance() {
killCurrentOp.checkForInterrupt();
if ( bucket.isNull() )
return false;
bucket = bucket.btree()->advance(bucket, keyOfs, _direction, "BtreeCursor::advance");
-
+
if ( !_independentFieldRanges ) {
skipUnusedKeys( false );
checkEnd();
if ( ok() ) {
++_nscanned;
}
- } else {
+ }
+ else {
skipAndCheck();
}
return ok();
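
checkEnd() terminates the simple single-interval path: the current key is compared to the end key with the scan direction and inclusivity folded into one test. The same comparison over plain ints, assuming woCompare-like sgn semantics (inRange is a made-up name):

    #include <cstdio>
    #include <vector>

    int sgn(int x) { return (x > 0) - (x < 0); }

    // Mirrors: cmp = sgn(endKey.woCompare(currKey, order));
    // stop once past the end key, or on it if not inclusive.
    bool inRange(int curr, int endKey, int direction, bool endInclusive) {
        int cmp = sgn(endKey - curr);
        if ((cmp != 0 && cmp != direction) || (cmp == 0 && !endInclusive))
            return false;                  // bucket = DiskLoc(): cursor exhausted
        return true;
    }

    int main() {
        std::vector<int> keys = {1, 3, 5, 7, 9};
        for (int k : keys) {
            if (!inRange(k, 5, /*direction*/1, /*inclusive*/true)) break;
            printf("%d ", k);              // prints: 1 3 5
        }
    }
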
@@ -232,17 +232,17 @@ namespace mongo {
int x = 0;
while( 1 ) {
if ( b->keyAt(keyOfs).woEqual(keyAtKeyOfs) &&
- b->k(keyOfs).recordLoc == locAtKeyOfs ) {
- if ( !b->k(keyOfs).isUsed() ) {
- /* we were deleted but still exist as an unused
- marker key. advance.
- */
- skipUnusedKeys( false );
- }
- return;
+ b->k(keyOfs).recordLoc == locAtKeyOfs ) {
+ if ( !b->k(keyOfs).isUsed() ) {
+ /* we were deleted but still exist as an unused
+ marker key. advance.
+ */
+ skipUnusedKeys( false );
+ }
+ return;
}
- /* we check one key earlier too, in case a key was just deleted. this is
+ /* we check one key earlier too, in case a key was just deleted. this is
important so that multi updates are reasonably fast.
*/
if( keyOfs == 0 || x++ )
diff --git a/db/cap.cpp b/db/cap.cpp
index 1f4e9b137d1..2403494e5f2 100644
--- a/db/cap.cpp
+++ b/db/cap.cpp
@@ -1,4 +1,4 @@
-// @file cap.cpp capped collection related
+// @file cap.cpp capped collection related
// the "old" version (<= v1.6)
/**
@@ -50,7 +50,7 @@
namespace mongo {
/* combine adjacent deleted records *for the current extent* of the capped collection
-
+
this is O(n^2) but we call it for capped tables where typically n==1 or 2!
(or 3...there will be a little unused sliver at the end of the extent.)
*/
@@ -188,15 +188,15 @@ namespace mongo {
return ret;
}
- DiskLoc NamespaceDetails::cappedAlloc(const char *ns, int len) {
+ DiskLoc NamespaceDetails::cappedAlloc(const char *ns, int len) {
// signal done allocating new extents.
if ( !cappedLastDelRecLastExtent().isValid() )
getDur().writingDiskLoc( cappedLastDelRecLastExtent() ) = DiskLoc();
-
+
assert( len < 400000000 );
int passes = 0;
int maxPasses = ( len / 30 ) + 2; // 30 is about the smallest entry that could go in the oplog
- if ( maxPasses < 5000 ){
+ if ( maxPasses < 5000 ) {
            // this is for backwards safety since 5000 was the old value
maxPasses = 5000;
}
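
The pass bound in isolation: roughly one pass per smallest plausible record plus slack, floored at the historical constant (maxPassesFor is an illustrative wrapper):

    #include <algorithm>
    #include <cstdio>

    int maxPassesFor(int len) {
        int maxPasses = (len / 30) + 2;    // 30 ~ smallest oplog entry
        return std::max(maxPasses, 5000);  // never below the old fixed value
    }

    int main() {
        printf("%d\n", maxPassesFor(100));     // 5000: the floor applies
        printf("%d\n", maxPassesFor(600000));  // 20002
    }
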
@@ -272,11 +272,11 @@ namespace mongo {
}
}
- void NamespaceDetails::cappedDumpDelInfo() {
+ void NamespaceDetails::cappedDumpDelInfo() {
cout << "dl[0]: " << deletedList[0].toString() << endl;
- for( DiskLoc z = deletedList[0]; !z.isNull(); z = z.drec()->nextDeleted ) {
- cout << " drec:" << z.toString() << " dreclen:" << hex << z.drec()->lengthWithHeaders <<
- " ext:" << z.drec()->myExtent(z)->myLoc.toString() << endl;
+ for( DiskLoc z = deletedList[0]; !z.isNull(); z = z.drec()->nextDeleted ) {
+ cout << " drec:" << z.toString() << " dreclen:" << hex << z.drec()->lengthWithHeaders <<
+ " ext:" << z.drec()->myExtent(z)->myLoc.toString() << endl;
}
cout << "dl[1]: " << deletedList[1].toString() << endl;
}
@@ -287,29 +287,30 @@ namespace mongo {
// is no deleted record in a previous extent, so nullify
// cappedLastDelRecLastExtent().
cappedLastDelRecLastExtent().writing() = DiskLoc();
- } else {
+ }
+ else {
// Scan through all deleted records in the collection
// until the last deleted record for the extent prior
// to the new capExtent is found. Then set
// cappedLastDelRecLastExtent() to that deleted record.
DiskLoc i = cappedListOfAllDeletedRecords();
for( ;
- !i.drec()->nextDeleted.isNull() &&
- !inCapExtent( i.drec()->nextDeleted );
- i = i.drec()->nextDeleted );
+ !i.drec()->nextDeleted.isNull() &&
+ !inCapExtent( i.drec()->nextDeleted );
+ i = i.drec()->nextDeleted );
// In our capped storage model, every extent must have at least one
// deleted record. Here we check that 'i' is not the last deleted
// record. (We expect that there will be deleted records in the new
// capExtent as well.)
assert( !i.drec()->nextDeleted.isNull() );
cappedLastDelRecLastExtent().writing() = i;
- }
+ }
}
-
+
void NamespaceDetails::cappedTruncateAfter(const char *ns, DiskLoc end, bool inclusive) {
DEV assert( this == nsdetails(ns) );
assert( cappedLastDelRecLastExtent().isValid() );
-
+
// We iteratively remove the newest document until the newest document
// is 'end', then we remove 'end' if requested.
bool foundLast = false;
@@ -325,28 +326,29 @@ namespace mongo {
if ( inclusive ) {
// 'end' has been found, so break next iteration.
foundLast = true;
- } else {
+ }
+ else {
// 'end' has been found, so break.
break;
}
}
-
+
// TODO The algorithm used in this function cannot generate an
// empty collection, but we could call emptyCappedCollection() in
// this case instead of asserting.
uassert( 13415, "emptying the collection is not allowed", stats.nrecords > 1 );
-
+
// Delete the newest record, and coalesce the new deleted
// record with existing deleted records.
theDataFileMgr.deleteRecord(ns, curr.rec(), curr, true);
compact();
-
+
// This is the case where we have not yet had to remove any
// documents to make room for other documents, and we are allocating
// documents from free space in fresh extents instead of reusing
// space from familiar extents.
if ( !capLooped() ) {
-
+
// We just removed the last record from the 'capExtent', and
// the 'capExtent' can't be empty, so we set 'capExtent' to
// capExtent's prev extent.
@@ -356,7 +358,7 @@ namespace mongo {
// capLooped() is false, capExtent is not the first extent
// so xprev will be nonnull.
capExtent.writing() = theCapExtent()->xprev;
- theCapExtent()->assertOk();
+ theCapExtent()->assertOk();
// update cappedLastDelRecLastExtent()
cappedTruncateLastDelUpdate();
@@ -371,7 +373,7 @@ namespace mongo {
// may point to invalid data, but we can still compare the
// references themselves.
if ( curr == capFirstNewRecord ) {
-
+
// Set 'capExtent' to the first nonempty extent prior to the
// initial capExtent. There must be such an extent because we
// have not deleted the last document in the collection. It is
@@ -384,9 +386,10 @@ namespace mongo {
// Find the previous extent, looping if necessary.
newCapExtent = ( newCapExtent == firstExtent ) ? lastExtent : newCapExtent.ext()->xprev;
newCapExtent.ext()->assertOk();
- } while ( newCapExtent.ext()->firstRecord.isNull() );
+ }
+ while ( newCapExtent.ext()->firstRecord.isNull() );
capExtent.writing() = newCapExtent;
-
+
// Place all documents in the new capExtent on the fresh side
// of the capExtent by setting capFirstNewRecord to the first
// document in the new capExtent.
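
The do-while above is a backwards walk over a circular extent list. On a toy array-backed list, with indices standing in for DiskLocs (prevNonEmpty is a made-up name), the loop shape is:

    #include <vector>

    // Step to the previous extent, wrapping from the first back to the last,
    // until a non-empty one is found. Terminates because the collection is
    // known to still contain documents.
    int prevNonEmpty(const std::vector<bool>& isEmpty, int cap, int first, int last) {
        int e = cap;
        do {
            e = (e == first) ? last : e - 1;  // xprev, looping if necessary
        } while (isEmpty[e]);
        return e;                             // becomes the new capExtent
    }
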
@@ -397,7 +400,7 @@ namespace mongo {
}
}
}
-
+
void NamespaceDetails::emptyCappedCollection( const char *ns ) {
DEV assert( this == nsdetails(ns) );
massert( 13424, "collection must be capped", capped );
@@ -406,7 +409,7 @@ namespace mongo {
// Clear all references to this namespace.
ClientCursor::invalidate( ns );
- NamespaceDetailsTransient::clearForPrefix( ns );
+ NamespaceDetailsTransient::clearForPrefix( ns );
// Get a writeable reference to 'this' and reset all pertinent
// attributes.
@@ -414,7 +417,7 @@ namespace mongo {
t->cappedLastDelRecLastExtent() = DiskLoc();
t->cappedListOfAllDeletedRecords() = DiskLoc();
-
+
// preserve firstExtent/lastExtent
t->capExtent = firstExtent;
t->stats.datasize = stats.nrecords = 0;
diff --git a/db/client.cpp b/db/client.cpp
index 2965a8d64df..153232caa50 100644
--- a/db/client.cpp
+++ b/db/client.cpp
@@ -16,7 +16,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-/* Client represents a connection to the database (the server-side) and corresponds
+/* Client represents a connection to the database (the server-side) and corresponds
to an open socket (or logical connection if pooling on sockets) from a client.
*/
@@ -40,8 +40,8 @@ namespace mongo {
set<Client*> Client::clients; // always be in clientsMutex when manipulating this
boost::thread_specific_ptr<Client> currentClient;
- /* each thread which does db operations has a Client object in TLS.
- call this when your thread starts.
+ /* each thread which does db operations has a Client object in TLS.
+ call this when your thread starts.
*/
Client& Client::initThread(const char *desc, MessagingPort *mp) {
assert( currentClient.get() == 0 );
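
Each thread doing db operations carries its Client in thread-local storage, created exactly once by initThread(). The same pattern in standard C++ (thread_local in place of boost::thread_specific_ptr; TClient is a toy stand-in):

    #include <cassert>
    #include <string>

    struct TClient { std::string desc; };
    thread_local TClient* tlsClient = nullptr;   // one slot per thread

    TClient& initThread(const char* desc) {
        assert(tlsClient == nullptr);            // call once per thread
        tlsClient = new TClient{desc};
        return *tlsClient;
    }

    TClient& cc() {                              // any later db op finds it here
        assert(tlsClient);
        return *tlsClient;
    }
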
@@ -51,21 +51,20 @@ namespace mongo {
return *c;
}
- Client::Client(const char *desc, MessagingPort *p) :
- _context(0),
- _shutdown(false),
- _desc(desc),
- _god(0),
- _lastOp(0),
- _mp(p)
- {
+ Client::Client(const char *desc, MessagingPort *p) :
+ _context(0),
+ _shutdown(false),
+ _desc(desc),
+ _god(0),
+ _lastOp(0),
+ _mp(p) {
_connectionId = setThreadName(desc);
_curOp = new CurOp( this );
scoped_lock bl(clientsMutex);
clients.insert(this);
}
- Client::~Client() {
+ Client::~Client() {
_god = 0;
if ( _context )
@@ -74,14 +73,14 @@ namespace mongo {
if ( ! _shutdown ) {
error() << "Client::shutdown not called: " << _desc << endl;
}
-
+
scoped_lock bl(clientsMutex);
if ( ! _shutdown )
clients.erase(this);
delete _curOp;
}
-
- bool Client::shutdown(){
+
+ bool Client::shutdown() {
_shutdown = true;
if ( inShutdown() )
return false;
@@ -98,9 +97,9 @@ namespace mongo {
BSONObj CachedBSONObj::_tooBig = fromjson("{\"$msg\":\"query not recording (too large)\"}");
AtomicUInt CurOp::_nextOpNum;
-
+
Client::Context::Context( string ns , Database * db, bool doauth )
- : _client( currentClient.get() ) , _oldContext( _client->_context ) ,
+ : _client( currentClient.get() ) , _oldContext( _client->_context ) ,
_path( dbpath ) , _lock(0) , _justCreated(false) {
assert( db && db->isOk() );
_ns = ns;
@@ -109,37 +108,37 @@ namespace mongo {
if ( doauth )
_auth();
}
-
- Client::Context::Context(const string& ns, string path , mongolock * lock , bool doauth )
- : _client( currentClient.get() ) , _oldContext( _client->_context ) ,
- _path( path ) , _lock( lock ) ,
- _ns( ns ), _db(0){
+
+ Client::Context::Context(const string& ns, string path , mongolock * lock , bool doauth )
+ : _client( currentClient.get() ) , _oldContext( _client->_context ) ,
+ _path( path ) , _lock( lock ) ,
+ _ns( ns ), _db(0) {
_finishInit( doauth );
}
-
+
/* this version saves the context but doesn't yet set the new one: */
-
- Client::Context::Context()
- : _client( currentClient.get() ) , _oldContext( _client->_context ),
- _path( dbpath ) , _lock(0) , _justCreated(false), _db(0){
+
+ Client::Context::Context()
+ : _client( currentClient.get() ) , _oldContext( _client->_context ),
+ _path( dbpath ) , _lock(0) , _justCreated(false), _db(0) {
_client->_context = this;
clear();
}
- void Client::Context::_finishInit( bool doauth ){
+ void Client::Context::_finishInit( bool doauth ) {
int lockState = dbMutex.getState();
assert( lockState );
-
+
_db = dbHolder.get( _ns , _path );
- if ( _db ){
+ if ( _db ) {
_justCreated = false;
}
- else if ( dbMutex.getState() > 0 ){
+ else if ( dbMutex.getState() > 0 ) {
// already in a write lock
_db = dbHolder.getOrCreate( _ns , _path , _justCreated );
assert( _db );
}
- else if ( dbMutex.getState() < -1 ){
+ else if ( dbMutex.getState() < -1 ) {
// nested read lock :(
assert( _lock );
_lock->releaseAndWriteLock();
@@ -152,37 +151,37 @@ namespace mongo {
// to do that, we're going to unlock, then get a write lock
                // this is so that if this is the first query and it's long, it doesn't block the db
// we just have to check that the db wasn't closed in the interim where we unlock
- for ( int x=0; x<2; x++ ){
- {
+ for ( int x=0; x<2; x++ ) {
+ {
dbtemprelease unlock;
writelock lk( _ns );
dbHolder.getOrCreate( _ns , _path , _justCreated );
}
-
+
_db = dbHolder.get( _ns , _path );
-
+
if ( _db )
break;
-
+
log() << "db was closed on us right after we opened it: " << _ns << endl;
}
-
+
uassert( 13005 , "can't create db, keeps getting closed" , _db );
}
-
+
_client->_context = this;
_client->_curOp->enter( this );
if ( doauth )
_auth( lockState );
- switch ( _client->_curOp->getOp() ){
+ switch ( _client->_curOp->getOp() ) {
case dbGetMore: // getMore's are special and should be handled else where
case dbUpdate: // update & delete check shard version in instance.cpp, so don't check here as well
- case dbDelete:
+ case dbDelete:
break;
default: {
string errmsg;
- if ( ! shardVersionOk( _ns , lockState > 0 , errmsg ) ){
+ if ( ! shardVersionOk( _ns , lockState > 0 , errmsg ) ) {
ostringstream os;
os << "[" << _ns << "] shard version not ok in Client::Context: " << errmsg;
msgassertedNoTrace( StaleConfigInContextCode , os.str().c_str() );
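
The nested-read-lock branch earlier in _finishInit() must drop its lock to create the database, which opens a window where another thread can close that database again. Hence the two-attempt create/re-check loop. Reduced to a map-backed holder (all names below are stand-ins):

    #include <map>
    #include <string>

    struct Db {};
    struct Holder {
        std::map<std::string, Db*> dbs;
        Db* get(const std::string& ns) {
            std::map<std::string, Db*>::iterator i = dbs.find(ns);
            return i == dbs.end() ? 0 : i->second;
        }
        void getOrCreate(const std::string& ns) { if (!dbs[ns]) dbs[ns] = new Db(); }
    };

    Db* openWithRetry(Holder& holder, const std::string& ns) {
        Db* db = 0;
        for (int x = 0; x < 2; x++) {
            { /* dbtemprelease unlock; writelock lk(ns); */ holder.getOrCreate(ns); }
            db = holder.get(ns);   // re-check once our own lock is back
            if (db) break;         // else: "db was closed on us right after we opened it"
        }
        return db;                 // real code: uassert(13005, ...) if still null
    }
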
@@ -190,14 +189,14 @@ namespace mongo {
}
}
}
-
- void Client::Context::_auth( int lockState ){
+
+ void Client::Context::_auth( int lockState ) {
if ( _client->_ai.isAuthorizedForLock( _db->name , lockState ) )
return;
// before we assert, do a little cleanup
_client->_context = _oldContext; // note: _oldContext may be null
-
+
stringstream ss;
ss << "unauthorized db:" << _db->name << " lock type:" << lockState << " client:" << _client->clientAddress();
uasserted( 10057 , ss.str() );
@@ -212,19 +211,19 @@ namespace mongo {
bool Client::Context::inDB( const string& db , const string& path ) const {
if ( _path != path )
return false;
-
+
if ( db == _ns )
return true;
-
+
string::size_type idx = _ns.find( db );
if ( idx != 0 )
return false;
-
+
return _ns[db.size()] == '.';
}
-
+
void Client::appendLastOp( BSONObjBuilder& b ) const {
- if( theReplSet ) {
+ if( theReplSet ) {
b.append("lastOp" , (long long) _lastOp);
}
else {
@@ -232,8 +231,8 @@ namespace mongo {
if ( ! lo.isNull() )
b.appendTimestamp( "lastOp" , lo.asDate() );
}
- }
-
+ }
+
string Client::clientAddress(bool includePort) const {
if( _curOp )
@@ -248,26 +247,26 @@ namespace mongo {
return ss.str();
}
- string sayClientState(){
+ string sayClientState() {
Client* c = currentClient.get();
if ( !c )
return "no client";
return c->toString();
}
-
- Client* curopWaitingForLock( int type ){
+
+ Client* curopWaitingForLock( int type ) {
Client * c = currentClient.get();
assert( c );
CurOp * co = c->curop();
- if ( co ){
+ if ( co ) {
co->waitingForLock( type );
}
return c;
}
- void curopGotLock(Client *c){
+ void curopGotLock(Client *c) {
assert(c);
CurOp * co = c->curop();
- if ( co )
+ if ( co )
co->gotLock();
}
@@ -276,7 +275,8 @@ namespace mongo {
return;
if ( !op ) {
globalScriptEngine->interruptAll();
- } else {
+ }
+ else {
globalScriptEngine->interrupt( *op );
}
}
@@ -298,7 +298,7 @@ namespace mongo {
l->kill();
}
found = true;
- }
+ }
}
}
}
@@ -306,9 +306,9 @@ namespace mongo {
interruptJs( &i );
}
}
-
- CurOp::~CurOp(){
- if ( _wrapped ){
+
+ CurOp::~CurOp() {
+ if ( _wrapped ) {
scoped_lock bl(Client::clientsMutex);
_client->_curOp = _wrapped;
}
@@ -323,15 +323,15 @@ namespace mongo {
if ( _lockType )
b.append("lockType" , _lockType > 0 ? "write" : "read" );
b.append("waitingForLock" , _waitingForLock );
-
- if( a ){
+
+ if( a ) {
b.append("secs_running", elapsedSeconds() );
}
-
+
b.append( "op" , opToString( _op ) );
-
+
b.append("ns", _ns);
-
+
_query.append( b , "query" );
// b.append("inLock", ??
@@ -341,9 +341,9 @@ namespace mongo {
if ( _client )
b.append( "desc" , _client->desc() );
-
- if ( ! _message.empty() ){
- if ( _progressMeter.isActive() ){
+
+ if ( ! _message.empty() ) {
+ if ( _progressMeter.isActive() ) {
StringBuilder buf(128);
buf << _message.toString() << " " << _progressMeter.toString();
b.append( "msg" , buf.str() );
@@ -356,7 +356,7 @@ namespace mongo {
return b.obj();
}
- void Client::gotHandshake( const BSONObj& o ){
+ void Client::gotHandshake( const BSONObj& o ) {
BSONObjIterator i(o);
{
@@ -364,7 +364,7 @@ namespace mongo {
assert( id.type() );
_remoteId = id.wrap( "_id" );
}
-
+
BSONObjBuilder b;
while ( i.more() )
b.append( i.next() );
@@ -374,31 +374,31 @@ namespace mongo {
class HandshakeCmd : public Command {
public:
void help(stringstream& h) const { h << "internal"; }
- HandshakeCmd() : Command( "handshake" ){}
- virtual LockType locktype() const { return NONE; }
+ HandshakeCmd() : Command( "handshake" ) {}
+ virtual LockType locktype() const { return NONE; }
virtual bool slaveOk() const { return true; }
virtual bool adminOnly() const { return false; }
virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
Client& c = cc();
c.gotHandshake( cmdObj );
return 1;
- }
+ }
} handshakeCmd;
class ClientListPlugin : public WebStatusPlugin {
public:
- ClientListPlugin() : WebStatusPlugin( "clients" , 20 ){}
- virtual void init(){}
-
- virtual void run( stringstream& ss ){
+ ClientListPlugin() : WebStatusPlugin( "clients" , 20 ) {}
+ virtual void init() {}
+
+ virtual void run( stringstream& ss ) {
using namespace mongoutils::html;
ss << "\n<table border=1 cellpadding=2 cellspacing=0>";
ss << "<tr align='left'>"
<< th( a("", "Connections to the database, both internal and external.", "Client") )
<< th( a("http://www.mongodb.org/display/DOCS/Viewing+and+Terminating+Current+Operation", "", "OpId") )
- << "<th>Active</th>"
+ << "<th>Active</th>"
<< "<th>LockType</th>"
<< "<th>Waiting</th>"
<< "<th>SecsRunning</th>"
@@ -412,11 +412,11 @@ namespace mongo {
<< "</tr>\n";
{
scoped_lock bl(Client::clientsMutex);
- for( set<Client*>::iterator i = Client::clients.begin(); i != Client::clients.end(); i++ ) {
+ for( set<Client*>::iterator i = Client::clients.begin(); i != Client::clients.end(); i++ ) {
Client *c = *i;
CurOp& co = *(c->curop());
ss << "<tr><td>" << c->desc() << "</td>";
-
+
tablecell( ss , co.opNum() );
tablecell( ss , co.active() );
{
@@ -433,7 +433,7 @@ namespace mongo {
tablecell( ss , "" );
tablecell( ss , co.getOp() );
tablecell( ss , co.getNS() );
- if ( co.haveQuery() ){
+ if ( co.haveQuery() ) {
tablecell( ss , co.query() );
}
else
@@ -450,18 +450,18 @@ namespace mongo {
ss << "</table>\n";
}
-
+
} clientListPlugin;
- int Client::recommendedYieldMicros( int * writers , int * readers ){
+ int Client::recommendedYieldMicros( int * writers , int * readers ) {
int num = 0;
int w = 0;
int r = 0;
{
scoped_lock bl(clientsMutex);
- for ( set<Client*>::iterator i=clients.begin(); i!=clients.end(); ++i ){
+ for ( set<Client*>::iterator i=clients.begin(); i!=clients.end(); ++i ) {
Client* c = *i;
- if ( c->curop()->isWaitingForLock() ){
+ if ( c->curop()->isWaitingForLock() ) {
num++;
if ( c->curop()->getLockType() > 0 )
w++;
@@ -470,41 +470,41 @@ namespace mongo {
}
}
}
-
+
if ( writers )
*writers = w;
if ( readers )
*readers = r;
-
+
int time = r * 100;
time += r * 500;
-
+
time = min( time , 1000000 );
// there has been a kill request for this op - we should yield to allow the op to stop
if ( killCurrentOp.checkForInterruptNoAssert( false ) ) {
return 100;
}
-
+
return time;
}
- int Client::getActiveClientCount( int& writers, int& readers ){
+ int Client::getActiveClientCount( int& writers, int& readers ) {
writers = 0;
readers = 0;
-
+
scoped_lock bl(clientsMutex);
- for ( set<Client*>::iterator i=clients.begin(); i!=clients.end(); ++i ){
+ for ( set<Client*>::iterator i=clients.begin(); i!=clients.end(); ++i ) {
Client* c = *i;
if ( ! c->curop()->active() )
continue;
-
+
int l = c->curop()->getLockType();
if ( l > 0 )
writers++;
else if ( l < 0 )
readers++;
-
+
}
return writers + readers;
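
These two functions drive the yield throttle: waiting operations are counted under clientsMutex, a sleep time in microseconds is derived and capped at one second, and a pending kill forces a short fixed yield. The arithmetic by itself (note both terms use the reader count, exactly as written above):

    #include <algorithm>

    int yieldMicros(int waitingReaders, bool killRequested) {
        int time = waitingReaders * 100;
        time += waitingReaders * 500;        // both terms use r in the source
        time = std::min(time, 1000000);      // cap at 1s
        if (killRequested)
            return 100;                      // yield just long enough to let the op stop
        return time;
    }
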
diff --git a/db/client.h b/db/client.h
index ccfceccf307..4e8589edcc8 100644
--- a/db/client.h
+++ b/db/client.h
@@ -16,7 +16,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-/* Client represents a connection to the database (the server-side) and corresponds
+/* Client represents a connection to the database (the server-side) and corresponds
to an open socket (or logical connection if pooling on sockets) from a client.
todo: switch to asio...this will fit nicely with that.
@@ -30,7 +30,7 @@
#include "lasterror.h"
#include "stats/top.h"
-namespace mongo {
+namespace mongo {
extern class ReplSet *theReplSet;
class AuthenticationInfo;
@@ -44,7 +44,7 @@ namespace mongo {
typedef long long ConnectionId;
- class Client : boost::noncopyable {
+ class Client : boost::noncopyable {
public:
class Context;
@@ -56,12 +56,12 @@ namespace mongo {
static Client *syncThread;
- /* each thread which does db operations has a Client object in TLS.
- call this when your thread starts.
+ /* each thread which does db operations has a Client object in TLS.
+ call this when your thread starts.
*/
static Client& initThread(const char *desc, MessagingPort *mp = 0);
- /*
+ /*
this has to be called as the client goes away, but before thread termination
@return true if anything was done
*/
@@ -70,9 +70,9 @@ namespace mongo {
~Client();
- void iAmSyncThread() {
+ void iAmSyncThread() {
wassert( syncThread == 0 );
- syncThread = this;
+ syncThread = this;
}
bool isSyncThread() const { return this == syncThread; } // true if this client is the replication secondary pull thread
@@ -80,7 +80,7 @@ namespace mongo {
string clientAddress(bool includePort=false) const;
AuthenticationInfo * getAuthenticationInfo() { return &_ai; }
bool isAdmin() { return _ai.isAuthorized( "admin" ); }
- CurOp* curop() const { return _curOp; }
+ CurOp* curop() const { return _curOp; }
Context* getContext() const { return _context; }
Database* database() const { return _context ? _context->db() : 0; }
const char *ns() const { return _context->ns(); }
@@ -96,7 +96,7 @@ namespace mongo {
void gotHandshake( const BSONObj& o );
BSONObj getRemoteID() const { return _remoteId; }
BSONObj getHandshake() const { return _handshake; }
-
+
MessagingPort * port() const { return _mp; }
ConnectionId getConnectionId() const { return _connectionId; }
@@ -128,54 +128,54 @@ namespace mongo {
~GodScope();
};
-
+
/* Set database we want to use, then, restores when we finish (are out of scope)
           Note this is also helpful if an exception happens as the state is fixed up.
*/
- class Context : boost::noncopyable{
+ class Context : boost::noncopyable {
public:
- /**
+ /**
* this is the main constructor
* use this unless there is a good reason not to
*/
Context(const string& ns, string path=dbpath, mongolock * lock = 0 , bool doauth=true );
-
+
/* this version saves the context but doesn't yet set the new one: */
Context();
-
+
/**
* if you are doing this after allowing a write there could be a race condition
* if someone closes that db. this checks that the DB is still valid
*/
Context( string ns , Database * db, bool doauth=true );
-
+
~Context();
- Client* getClient() const { return _client; }
+ Client* getClient() const { return _client; }
Database* db() const { return _db; }
- const char * ns() const { return _ns.c_str(); }
-
+ const char * ns() const { return _ns.c_str(); }
+
/** @return if the db was created by this Context */
bool justCreated() const { return _justCreated; }
bool equals( const string& ns , const string& path=dbpath ) const { return _ns == ns && _path == path; }
-
+
/**
* @return true iff the current Context is using db/path
*/
bool inDB( const string& db , const string& path=dbpath ) const;
- void clear(){ _ns = ""; _db = 0; }
+ void clear() { _ns = ""; _db = 0; }
/**
* call before unlocking, so clear any non-thread safe state
*/
- void unlocked(){ _db = 0; }
+ void unlocked() { _db = 0; }
/**
* call after going back into the lock, will re-establish non-thread safe stuff
*/
- void relocked(){ _finishInit(); }
+ void relocked() { _finishInit(); }
friend class CurOp;
@@ -187,12 +187,12 @@ namespace mongo {
* will also set _client->_context to this
*/
void _finishInit( bool doauth=true);
-
+
void _auth( int lockState = dbMutex.getState() );
Client * _client;
Context * _oldContext;
-
+
string _path;
mongolock * _lock;
bool _justCreated;
@@ -201,26 +201,26 @@ namespace mongo {
Database * _db;
}; // class Client::Context
-
+
};
-
+
/** get the Client object for this thread. */
- inline Client& cc() {
+ inline Client& cc() {
Client * c = currentClient.get();
assert( c );
return *c;
}
- inline Client::GodScope::GodScope(){
+ inline Client::GodScope::GodScope() {
_prev = cc()._god;
cc()._god = true;
}
inline Client::GodScope::~GodScope() { cc()._god = _prev; }
- /* this unlocks, does NOT upgrade. that works for our current usage */
- inline void mongolock::releaseAndWriteLock() {
+ /* this unlocks, does NOT upgrade. that works for our current usage */
+ inline void mongolock::releaseAndWriteLock() {
if( !_writelock ) {
#if BOOST_VERSION >= 103500
@@ -241,6 +241,6 @@ namespace mongo {
}
string sayClientState();
-
+
inline bool haveClient() { return currentClient.get() > 0; }
};
diff --git a/db/clientcursor.cpp b/db/clientcursor.cpp
index f3919988ba2..42275af212a 100644
--- a/db/clientcursor.cpp
+++ b/db/clientcursor.cpp
@@ -38,9 +38,9 @@ namespace mongo {
void aboutToDeleteForSharding( const Database* db , const DiskLoc& dl ); // from s/d_logic.h
- /*static*/ void ClientCursor::assertNoCursors() {
+ /*static*/ void ClientCursor::assertNoCursors() {
recursive_scoped_lock lock(ccmutex);
- if( clientCursorsById.size() ) {
+ if( clientCursorsById.size() ) {
log() << "ERROR clientcursors exist but should not at this point" << endl;
ClientCursor *cc = clientCursorsById.begin()->second;
log() << "first one: " << cc->_cursorid << ' ' << cc->_ns << endl;
@@ -75,8 +75,8 @@ namespace mongo {
/* todo: this implementation is incomplete. we use it as a prefix for dropDatabase, which
works fine as the prefix will end with '.'. however, when used with drop and
- dropIndexes, this could take out cursors that belong to something else -- if you
- drop "foo", currently, this will kill cursors for "foobar".
+ dropIndexes, this could take out cursors that belong to something else -- if you
+ drop "foo", currently, this will kill cursors for "foobar".
*/
void ClientCursor::invalidate(const char *nsPrefix) {
vector<ClientCursor*> toDelete;
@@ -94,7 +94,7 @@ namespace mongo {
for( CCById::iterator i = clientCursorsById.begin(); i != clientCursorsById.end(); ++i ) {
ClientCursor *cc = i->second;
- if( cc->_db != db )
+ if( cc->_db != db )
continue;
if ( strncmp(nsPrefix, cc->_ns.c_str(), len) == 0 ) {
toDelete.push_back(i->second);
@@ -103,9 +103,9 @@ namespace mongo {
/*
note : we can't iterate byloc because clientcursors may exist with a loc of null in which case
- they are not in the map. perhaps they should not exist though in the future? something to
+ they are not in the map. perhaps they should not exist though in the future? something to
change???
-
+
CCByLoc& bl = db->ccByLoc;
for ( CCByLoc::iterator i = bl.begin(); i != bl.end(); ++i ) {
ClientCursor *cc = i->second;
@@ -119,14 +119,14 @@ namespace mongo {
delete (*i);
/*cout << "TEMP after invalidate " << endl;
- for( auto i = clientCursorsById.begin(); i != clientCursorsById.end(); ++i ) {
+ for( auto i = clientCursorsById.begin(); i != clientCursorsById.end(); ++i ) {
cout << " " << i->second->ns << endl;
}
cout << "TEMP after invalidate done" << endl;*/
}
}
- bool ClientCursor::shouldTimeout( unsigned millis ){
+ bool ClientCursor::shouldTimeout( unsigned millis ) {
_idleAgeMillis += millis;
return _idleAgeMillis > 600000 && _pinValue == 0;
}
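
shouldTimeout() is pure bookkeeping: each sweep adds the elapsed interval to the idle age, and only an unpinned cursor idle past ten minutes is eligible. Stand-alone:

    struct CursorAge {
        unsigned idleAgeMillis;
        unsigned pinValue;                   // nonzero = pinned, never times out
        CursorAge() : idleAgeMillis(0), pinValue(0) {}
        bool shouldTimeout(unsigned millis) {
            idleAgeMillis += millis;
            return idleAgeMillis > 600000 && pinValue == 0;  // 10 minutes idle
        }
    };
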
@@ -138,9 +138,9 @@ namespace mongo {
for ( CCById::iterator i = clientCursorsById.begin(); i != clientCursorsById.end(); ) {
CCById::iterator j = i;
i++;
- if( j->second->shouldTimeout( millis ) ){
+ if( j->second->shouldTimeout( millis ) ) {
numberTimedOut++;
- log(1) << "killing old cursor " << j->second->_cursorid << ' ' << j->second->_ns
+ log(1) << "killing old cursor " << j->second->_cursorid << ' ' << j->second->_ns
<< " idle:" << j->second->idleTime() << "ms\n";
delete j->second;
}
@@ -161,7 +161,7 @@ namespace mongo {
i->second->_c->aboutToDeleteBucket(b);
}
void aboutToDeleteBucket(const DiskLoc& b) {
- ClientCursor::informAboutToDeleteBucket(b);
+ ClientCursor::informAboutToDeleteBucket(b);
}
/* must call this on a delete so we clean up the cursors. */
@@ -189,39 +189,39 @@ namespace mongo {
break;
}
- if( toAdvance.size() >= 3000 ) {
- log() << "perf warning MPW101: " << toAdvance.size() << " cursors for one diskloc "
- << dl.toString()
- << ' ' << toAdvance[1000]->_ns
- << ' ' << toAdvance[2000]->_ns
- << ' ' << toAdvance[1000]->_pinValue
- << ' ' << toAdvance[2000]->_pinValue
- << ' ' << toAdvance[1000]->_pos
- << ' ' << toAdvance[2000]->_pos
- << ' ' << toAdvance[1000]->_idleAgeMillis
- << ' ' << toAdvance[2000]->_idleAgeMillis
- << ' ' << toAdvance[1000]->_doingDeletes
- << ' ' << toAdvance[2000]->_doingDeletes
- << endl;
+ if( toAdvance.size() >= 3000 ) {
+ log() << "perf warning MPW101: " << toAdvance.size() << " cursors for one diskloc "
+ << dl.toString()
+ << ' ' << toAdvance[1000]->_ns
+ << ' ' << toAdvance[2000]->_ns
+ << ' ' << toAdvance[1000]->_pinValue
+ << ' ' << toAdvance[2000]->_pinValue
+ << ' ' << toAdvance[1000]->_pos
+ << ' ' << toAdvance[2000]->_pos
+ << ' ' << toAdvance[1000]->_idleAgeMillis
+ << ' ' << toAdvance[2000]->_idleAgeMillis
+ << ' ' << toAdvance[1000]->_doingDeletes
+ << ' ' << toAdvance[2000]->_doingDeletes
+ << endl;
//wassert( toAdvance.size() < 5000 );
}
-
- for ( vector<ClientCursor*>::iterator i = toAdvance.begin(); i != toAdvance.end(); ++i ){
+
+ for ( vector<ClientCursor*>::iterator i = toAdvance.begin(); i != toAdvance.end(); ++i ) {
ClientCursor* cc = *i;
wassert(cc->_db == db);
-
+
if ( cc->_doingDeletes ) continue;
Cursor *c = cc->_c.get();
- if ( c->capped() ){
- /* note we cannot advance here. if this condition occurs, writes to the oplog
- have "caught" the reader. skipping ahead, the reader would miss postentially
+ if ( c->capped() ) {
+ /* note we cannot advance here. if this condition occurs, writes to the oplog
+                   have "caught" the reader. skipping ahead, the reader would miss potentially
important data.
*/
delete cc;
continue;
}
-
+
c->checkLocation();
DiskLoc tmp1 = c->refLoc();
if ( tmp1 != dl ) {
@@ -245,11 +245,10 @@ namespace mongo {
ClientCursor::ClientCursor(int queryOptions, const shared_ptr<Cursor>& c, const string& ns, BSONObj query ) :
_ns(ns), _db( cc().database() ),
- _c(c), _pos(0),
- _query(query), _queryOptions(queryOptions),
- _idleAgeMillis(0), _pinValue(0),
- _doingDeletes(false), _yieldSometimesTracker(128,10)
- {
+ _c(c), _pos(0),
+ _query(query), _queryOptions(queryOptions),
+ _idleAgeMillis(0), _pinValue(0),
+ _doingDeletes(false), _yieldSometimesTracker(128,10) {
assert( _db );
assert( str::startsWith(_ns, _db->name) );
if( queryOptions & QueryOption_NoCursorTimeout )
@@ -257,16 +256,16 @@ namespace mongo {
recursive_scoped_lock lock(ccmutex);
_cursorid = allocCursorId_inlock();
clientCursorsById.insert( make_pair(_cursorid, this) );
-
- if ( ! _c->modifiedKeys() ) {
- // store index information so we can decide if we can
+
+ if ( ! _c->modifiedKeys() ) {
+ // store index information so we can decide if we can
// get something out of the index key rather than full object
-
+
int x = 0;
BSONObjIterator i( _c->indexKeyPattern() );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
- if ( e.isNumber() ){
+ if ( e.isNumber() ) {
// only want basic index fields, not "2d" etc
_indexedFields[e.fieldName()] = x;
}
@@ -275,7 +274,7 @@ namespace mongo {
}
}
-
+
ClientCursor::~ClientCursor() {
assert( _pos != -2 );
@@ -292,15 +291,15 @@ namespace mongo {
}
bool ClientCursor::getFieldsDotted( const string& name, BSONElementSet &ret ) {
-
+
map<string,int>::const_iterator i = _indexedFields.find( name );
- if ( i == _indexedFields.end() ){
+ if ( i == _indexedFields.end() ) {
current().getFieldsDotted( name , ret );
return false;
}
int x = i->second;
-
+
BSONObjIterator it( currKey() );
while ( x && it.more() )
it.next();
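
getFieldsDotted() is the covered-index fast path: when the field was recorded in _indexedFields, its value is read out of the current index key at the stored offset, skipping the document fetch. A toy version with int key parts (getIndexedValue is a made-up name):

    #include <map>
    #include <string>
    #include <vector>

    typedef std::map<std::string, int> IndexedFields;  // field -> offset in key

    bool getIndexedValue(const IndexedFields& idx, const std::vector<int>& currKey,
                         const std::string& name, int& out) {
        IndexedFields::const_iterator i = idx.find(name);
        if (i == idx.end())
            return false;            // not covered: fall back to the document
        out = currKey[i->second];    // like skipping x elements of currKey()
        return true;
    }
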
@@ -319,29 +318,30 @@ namespace mongo {
DiskLoc cl = _c->refLoc();
if ( lastLoc() == cl ) {
//log() << "info: lastloc==curloc " << ns << '\n';
- } else {
+ }
+ else {
recursive_scoped_lock lock(ccmutex);
setLastLoc_inlock(cl);
}
// may be necessary for MultiCursor even when cl hasn't changed
_c->noteLocation();
}
-
+
int ClientCursor::yieldSuggest() {
int writers = 0;
int readers = 0;
-
+
int micros = Client::recommendedYieldMicros( &writers , &readers );
-
- if ( micros > 0 && writers == 0 && dbMutex.getState() <= 0 ){
+
+ if ( micros > 0 && writers == 0 && dbMutex.getState() <= 0 ) {
// we have a read lock, and only reads are coming on, so why bother unlocking
micros = 0;
}
-
+
return micros;
}
-
- bool ClientCursor::yieldSometimes(){
+
+ bool ClientCursor::yieldSometimes() {
if ( ! _yieldSometimesTracker.ping() )
return true;
@@ -353,71 +353,71 @@ namespace mongo {
killCurrentOp.checkForInterrupt( false );
{
dbtempreleasecond unlock;
- if ( unlock.unlocked() ){
+ if ( unlock.unlocked() ) {
if ( micros == -1 )
micros = Client::recommendedYieldMicros();
if ( micros > 0 )
- sleepmicros( micros );
+ sleepmicros( micros );
}
else {
warning() << "ClientCursor::yield can't unlock b/c of recursive lock ns: " << ns << endl;
}
- }
+ }
}
-
+
bool ClientCursor::prepareToYield( YieldData &data ) {
if ( ! _c->supportYields() )
return false;
// need to store in case 'this' gets deleted
data._id = _cursorid;
-
+
data._doingDeletes = _doingDeletes;
_doingDeletes = false;
-
+
updateLocation();
-
+
{
- /* a quick test that our temprelease is safe.
- todo: make a YieldingCursor class
+ /* a quick test that our temprelease is safe.
+ todo: make a YieldingCursor class
and then make the following code part of a unit test.
*/
const int test = 0;
static bool inEmpty = false;
- if( test && !inEmpty ) {
+ if( test && !inEmpty ) {
inEmpty = true;
log() << "TEST: manipulate collection during cc:yield" << endl;
- if( test == 1 )
+ if( test == 1 )
Helpers::emptyCollection(_ns.c_str());
else if( test == 2 ) {
BSONObjBuilder b; string m;
dropCollection(_ns.c_str(), m, b);
}
- else {
+ else {
dropDatabase(_ns.c_str());
}
}
- }
+ }
return true;
}
-
+
bool ClientCursor::recoverFromYield( const YieldData &data ) {
ClientCursor *cc = ClientCursor::find( data._id , false );
- if ( cc == 0 ){
+ if ( cc == 0 ) {
// id was deleted
return false;
}
-
+
cc->_doingDeletes = data._doingDeletes;
cc->_c->checkLocation();
- return true;
+ return true;
}
-
+
bool ClientCursor::yield( int micros ) {
if ( ! _c->supportYields() )
return true;
- YieldData data;
+ YieldData data;
prepareToYield( data );
-
+
staticYield( micros , _ns );
return ClientCursor::recoverFromYield( data );
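
The yield protocol deliberately saves only values (cursor id and flags) before unlocking, because the ClientCursor itself may be deleted while no lock is held; recovery looks the id up again. A compact model (yieldCursor is illustrative):

    #include <map>

    struct Cur { bool doingDeletes; };
    std::map<long long, Cur*> byId;              // stand-in for clientCursorsById

    bool yieldCursor(Cur* c, long long id) {
        bool saved = c->doingDeletes;            // prepareToYield: stash by value
        c->doingDeletes = false;
        // ... unlock, sleepmicros(...), relock: c may dangle from here on ...
        std::map<long long, Cur*>::iterator i = byId.find(id);
        if (i == byId.end())
            return false;                        // id was deleted: abandon the op
        i->second->doingDeletes = saved;         // recoverFromYield
        return true;
    }
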
@@ -425,7 +425,7 @@ namespace mongo {
int ctmLast = 0; // so we don't have to do find() which is a little slow very often.
long long ClientCursor::allocCursorId_inlock() {
- if( 0 ) {
+ if( 0 ) {
static long long z;
++z;
cout << "TEMP alloccursorid " << z << endl;
@@ -445,32 +445,32 @@ namespace mongo {
return x;
}
- void ClientCursor::storeOpForSlave( DiskLoc last ){
+ void ClientCursor::storeOpForSlave( DiskLoc last ) {
if ( ! ( _queryOptions & QueryOption_OplogReplay ))
return;
if ( last.isNull() )
return;
-
+
BSONElement e = last.obj()["ts"];
if ( e.type() == Date || e.type() == Timestamp )
_slaveReadTill = e._opTime();
}
-
- void ClientCursor::updateSlaveLocation( CurOp& curop ){
+
+ void ClientCursor::updateSlaveLocation( CurOp& curop ) {
if ( _slaveReadTill.isNull() )
return;
mongo::updateSlaveLocation( curop , _ns.c_str() , _slaveReadTill );
}
- void ClientCursor::appendStats( BSONObjBuilder& result ){
+ void ClientCursor::appendStats( BSONObjBuilder& result ) {
recursive_scoped_lock lock(ccmutex);
result.appendNumber("totalOpen", clientCursorsById.size() );
result.appendNumber("clientCursors_size", (int) numCursors());
result.appendNumber("timedOut" , numberTimedOut);
}
-
+
// QUESTION: Restrict to the namespace from which this command was issued?
// Alternatively, make this command admin-only?
class CmdCursorInfo : public Command {
@@ -481,19 +481,19 @@ namespace mongo {
help << " example: { cursorInfo : 1 }";
}
virtual LockType locktype() const { return NONE; }
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
ClientCursor::appendStats( result );
return true;
}
} cmdCursorInfo;
-
- void ClientCursorMonitor::run(){
+
+ void ClientCursorMonitor::run() {
Client::initThread("clientcursormon");
Client& client = cc();
-
+
unsigned old = curTimeMillis();
- while ( ! inShutdown() ){
+ while ( ! inShutdown() ) {
unsigned now = curTimeMillis();
ClientCursor::idleTimeReport( now - old );
old = now;
@@ -503,10 +503,10 @@ namespace mongo {
client.shutdown();
}
- void ClientCursor::find( const string& ns , set<CursorId>& all ){
+ void ClientCursor::find( const string& ns , set<CursorId>& all ) {
recursive_scoped_lock lock(ccmutex);
-
- for ( CCById::iterator i=clientCursorsById.begin(); i!=clientCursorsById.end(); ++i ){
+
+ for ( CCById::iterator i=clientCursorsById.begin(); i!=clientCursorsById.end(); ++i ) {
if ( i->second->_ns == ns )
all.insert( i->first );
}
diff --git a/db/clientcursor.h b/db/clientcursor.h
index af0b3cd652d..ef50f500517 100644
--- a/db/clientcursor.h
+++ b/db/clientcursor.h
@@ -43,11 +43,11 @@ namespace mongo {
class ParsedQuery;
struct ByLocKey {
-
- ByLocKey( const DiskLoc & l , const CursorId& i ) : loc(l), id(i){}
-
- static ByLocKey min( const DiskLoc& l ){ return ByLocKey( l , numeric_limits<long long>::min() ); }
- static ByLocKey max( const DiskLoc& l ){ return ByLocKey( l , numeric_limits<long long>::max() ); }
+
+ ByLocKey( const DiskLoc & l , const CursorId& i ) : loc(l), id(i) {}
+
+ static ByLocKey min( const DiskLoc& l ) { return ByLocKey( l , numeric_limits<long long>::min() ); }
+ static ByLocKey max( const DiskLoc& l ) { return ByLocKey( l , numeric_limits<long long>::max() ); }
bool operator<( const ByLocKey &other ) const {
int x = loc.compare( other.loc );
@@ -80,7 +80,7 @@ namespace mongo {
at the same time - which might be bad. That should never happen, but if a client driver
had a bug, it could (or perhaps some sort of attack situation).
*/
- class Pointer : boost::noncopyable {
+ class Pointer : boost::noncopyable {
ClientCursor *_c;
public:
ClientCursor * c() { return _c; }
@@ -103,8 +103,8 @@ namespace mongo {
_c->_pinValue += 100;
}
}
- };
-
+ };
+
// This object assures safe and reliable cleanup of the ClientCursor.
// The implementation assumes that there will be no duplicate ids among cursors
// (which is assured if cursors must last longer than 1 second).
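
_pinValue mixes a flag and a refcount: noTimeout() adds 1 as a permanent exemption from the idle sweep, each Pointer adds 100 for exclusive use, and find() masserts that the pin is nonzero before handing a cursor out. The accounting alone:

    struct Pin {
        unsigned pinValue;
        Pin() : pinValue(0) {}
        void noTimeout() { pinValue += 1; }   // exempt from idle timeout forever
        void acquire()   { pinValue += 100; } // Pointer ctor
        void release()   { pinValue -= 100; } // Pointer dtor/release
        bool usable() const { return pinValue != 0; }  // find()'s massert check
    };
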
@@ -117,11 +117,12 @@ namespace mongo {
if ( _c ) {
// be careful in case cursor was deleted by someone else
ClientCursor::erase( _id );
- }
+ }
if ( c ) {
_c = c;
_id = c->_cursorid;
- } else {
+ }
+ else {
_c = 0;
_id = -1;
}
@@ -141,13 +142,13 @@ namespace mongo {
~ClientCursor();
// *************** basic accessors *******************
-
+
CursorId cursorid() const { return _cursorid; }
string ns() const { return _ns; }
Database * db() const { return _db; }
const BSONObj& query() const { return _query; }
int queryOptions() const { return _queryOptions; }
-
+
DiskLoc lastLoc() const { return _lastLoc; }
/* Get rid of cursors for namespaces that begin with nsprefix.
@@ -156,14 +157,14 @@ namespace mongo {
static void invalidate(const char *nsPrefix);
/**
- * @param microsToSleep -1 : ask client
+ * @param microsToSleep -1 : ask client
* >=0 : sleep for that amount
- * do a dbtemprelease
- * note: caller should check matcher.docMatcher().atomic() first and not yield if atomic -
+ * do a dbtemprelease
+ * note: caller should check matcher.docMatcher().atomic() first and not yield if atomic -
         * we don't do so herein as this->matcher (above) is only initialized for true queries/getmore.
* (ie not set for remote/update)
- * @return if the cursor is still valid.
- * if false is returned, then this ClientCursor should be considered deleted -
+ * @return if the cursor is still valid.
+ * if false is returned, then this ClientCursor should be considered deleted -
* in fact, the whole database could be gone.
*/
bool yield( int microsToSleep = -1 );
@@ -172,10 +173,10 @@ namespace mongo {
* @return same as yield()
*/
bool yieldSometimes();
-
+
static int yieldSuggest();
static void staticYield( int micros , const StringData& ns );
-
+
struct YieldData { CursorId _id; bool _doingDeletes; };
bool prepareToYield( YieldData &data );
static bool recoverFromYield( const YieldData &data );
@@ -183,50 +184,50 @@ namespace mongo {
struct YieldLock : boost::noncopyable {
explicit YieldLock( ptr<ClientCursor> cc )
: _canYield(cc->_c->supportYields()) {
- if ( _canYield ){
+ if ( _canYield ) {
cc->prepareToYield( _data );
_unlock.reset(new dbtempreleasecond());
}
}
- ~YieldLock(){
- if ( _unlock ){
+ ~YieldLock() {
+ if ( _unlock ) {
log( LL_WARNING ) << "ClientCursor::YieldLock not closed properly" << endl;
relock();
}
}
- bool stillOk(){
+ bool stillOk() {
if ( ! _canYield )
return true;
relock();
return ClientCursor::recoverFromYield( _data );
}
- void relock(){
+ void relock() {
_unlock.reset();
}
private:
const bool _canYield;
- YieldData _data;
+ YieldData _data;
scoped_ptr<dbtempreleasecond> _unlock;
};
// --- some pass through helpers for Cursor ---
-
+
Cursor* c() const { return _c.get(); }
int pos() const { return _pos; }
-
+
void incPos( int n ) { _pos += n; } // TODO: this is bad
void setPos( int n ) { _pos = n; } // TODO : this is bad too
-
+
BSONObj indexKeyPattern() { return _c->indexKeyPattern(); }
bool modifiedKeys() const { return _c->modifiedKeys(); }
bool isMultiKey() const { return _c->isMultiKey(); }
bool ok() { return _c->ok(); }
- bool advance(){ return _c->advance(); }
+ bool advance() { return _c->advance(); }
BSONObj current() { return _c->current(); }
DiskLoc currLoc() { return _c->currLoc(); }
BSONObj currKey() const { return _c->currKey(); }
-
+
/**
* same as BSONObj::getFieldsDotted
@@ -236,8 +237,8 @@ namespace mongo {
bool getFieldsDotted( const string& name, BSONElementSet &ret );
bool currentIsDup() { return _c->getsetdup( _c->currLoc() ); }
-
- bool currentMatches(){
+
+ bool currentMatches() {
if ( ! _c->matcher() )
return true;
return _c->matcher()->matchesCurrent( _c.get() );
@@ -256,12 +257,12 @@ namespace mongo {
return it->second;
}
public:
- static ClientCursor* find(CursorId id, bool warn = true) {
+ static ClientCursor* find(CursorId id, bool warn = true) {
recursive_scoped_lock lock(ccmutex);
ClientCursor *c = find_inlock(id, warn);
- // if this asserts, your code was not thread safe - you either need to set no timeout
- // for the cursor or keep a ClientCursor::Pointer in scope for it.
- massert( 12521, "internal error: use of an unlocked ClientCursor", c == 0 || c->_pinValue );
+ // if this asserts, your code was not thread safe - you either need to set no timeout
+ // for the cursor or keep a ClientCursor::Pointer in scope for it.
+ massert( 12521, "internal error: use of an unlocked ClientCursor", c == 0 || c->_pinValue );
return c;
}
@@ -302,17 +303,17 @@ namespace mongo {
void storeOpForSlave( DiskLoc last );
void updateSlaveLocation( CurOp& curop );
-
+
unsigned idleTime() const { return _idleAgeMillis; }
void setDoingDeletes( bool doingDeletes ) {_doingDeletes = doingDeletes; }
void slaveReadTill( const OpTime& t ) { _slaveReadTill = t; }
-
+
public: // static methods
static void idleTimeReport(unsigned millis);
-
+
static void appendStats( BSONObjBuilder& result );
static unsigned numCursors() { return clientCursorsById.size(); }
static void informAboutToDeleteBucket(const DiskLoc& b);
@@ -321,27 +322,27 @@ namespace mongo {
private: // methods
-
+
        // cursors normally time out after an inactivity period to prevent excess memory use
// setting this prevents timeout of the cursor in question.
void noTimeout() { _pinValue++; }
-
+
CCByLoc& byLoc() { return _db->ccByLoc; }
private:
CursorId _cursorid;
-
+
const string _ns;
Database * _db;
const shared_ptr<Cursor> _c;
map<string,int> _indexedFields; // map from indexed field to offset in key object
- int _pos; // # objects into the cursor so far
-
+ int _pos; // # objects into the cursor so far
+
const BSONObj _query; // used for logging diags only; optional in constructor
int _queryOptions; // see enum QueryOptions dbclient.h
-
+
OpTime _slaveReadTill;
DiskLoc _lastLoc; // use getter and setter not this (important)
@@ -367,8 +368,8 @@ namespace mongo {
static CCById clientCursorsById;
static long long numberTimedOut;
- static boost::recursive_mutex ccmutex; // must use this for all statics above!
- static CursorId allocCursorId_inlock();
+ static boost::recursive_mutex ccmutex; // must use this for all statics above!
+ static CursorId allocCursorId_inlock();
};
@@ -379,5 +380,5 @@ namespace mongo {
};
extern ClientCursorMonitor clientCursorMonitor;
-
+
} // namespace mongo
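
A note on the yield machinery above: prepareToYield() snapshots just enough state (cursor id, delete mode) for recoverFromYield() to decide whether the cursor survived the window when the lock was released, and the YieldLock guard makes the prepare/unlock/relock/validate sequence exception-safe. Below is a minimal self-contained sketch of the same RAII idea with toy types (the std::mutex is assumed to be held by the caller at construction); it is not the mongo API, just the pattern:

    #include <iostream>
    #include <mutex>

    struct ToyCursor {
        long id;
        bool alive;              // may be invalidated while the lock is released
    };

    class ToyYieldGuard {
    public:
        ToyYieldGuard( std::mutex& m , ToyCursor& c )
            : _m(m), _c(c), _savedId(c.id), _locked(false) {
            _m.unlock();         // the "yield": let other threads run
        }
        bool stillOk() {         // analogous to recoverFromYield()
            relock();
            return _c.alive && _c.id == _savedId;
        }
        void relock() {
            if ( !_locked ) { _m.lock(); _locked = true; }
        }
        ~ToyYieldGuard() {       // mirror of ~YieldLock(): never leak the unlock
            if ( !_locked ) {
                std::cerr << "warning: yield guard not closed properly" << std::endl;
                relock();
            }
        }
    private:
        std::mutex& _m;
        ToyCursor& _c;
        const long _savedId;     // plays the role of YieldData
        bool _locked;
    };

The caller's contract matches the header: construct the guard, do the slow work, and refuse to touch the cursor unless stillOk() returns true.
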
diff --git a/db/cloner.cpp b/db/cloner.cpp
index 2e4afab1200..ec02b7abfd8 100644
--- a/db/cloner.cpp
+++ b/db/cloner.cpp
@@ -40,7 +40,7 @@ namespace mongo {
struct Fun;
public:
Cloner() { }
-
+
/* slaveOk - if true it is ok if the source of the data is !ismaster.
useReplAuth - use the credentials we normally use as a replication slave for the cloning
snapshot - use $snapshot mode for copying collections. note this should not be used when it isn't required, as it will be slower.
@@ -92,14 +92,14 @@ namespace mongo {
if ( context ) {
context->relocked();
}
-
+
while( i.moreInCurrentBatch() ) {
if ( n % 128 == 127 /*yield some*/ ) {
dbtemprelease t;
}
-
+
BSONObj tmp = i.nextSafe();
-
+
                 /* ensure the object is valid. note this will slow us down a little. */
if ( !tmp.valid() ) {
stringstream ss;
@@ -109,15 +109,15 @@ namespace mongo {
e.validate();
ss << " firstElement: " << e;
}
- catch( ... ){
+ catch( ... ) {
ss << " firstElement corrupt";
}
out() << ss.str() << endl;
continue;
}
-
+
++n;
-
+
BSONObj js = tmp;
if ( isindex ) {
assert( strstr(from_collection, "system.indexes") );
@@ -125,16 +125,16 @@ namespace mongo {
storedForLater->push_back( js.getOwned() );
continue;
}
-
- try {
+
+ try {
theDataFileMgr.insertWithObjMod(to_collection, js);
if ( logForRepl )
logOp("i", to_collection, js);
}
- catch( UserException& e ) {
+ catch( UserException& e ) {
log() << "warning: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
}
-
+
RARELY if ( time( 0 ) - saveLast > 60 ) {
log() << n << " objects cloned so far from collection " << from_collection << endl;
saveLast = time( 0 );
@@ -146,17 +146,17 @@ namespace mongo {
const char *from_collection;
const char *to_collection;
time_t saveLast;
- list<BSONObj> *storedForLater;
+ list<BSONObj> *storedForLater;
bool logForRepl;
Client::Context *context;
};
-
+
/* copy the specified collection
isindex - if true, this is system.indexes collection, in which we do some transformation when copying.
*/
void Cloner::copy(const char *from_collection, const char *to_collection, bool isindex, bool logForRepl, bool masterSameProcess, bool slaveOk, Query query) {
list<BSONObj> storedForLater;
-
+
Fun f;
f.n = 0;
f.isindex = isindex;
@@ -165,7 +165,7 @@ namespace mongo {
f.saveLast = time( 0 );
f.storedForLater = &storedForLater;
f.logForRepl = logForRepl;
-
+
int options = QueryOption_NoCursorTimeout | ( slaveOk ? QueryOption_SlaveOk : 0 );
{
dbtemprelease r;
@@ -173,7 +173,8 @@ namespace mongo {
DBClientConnection *remote = dynamic_cast< DBClientConnection* >( conn.get() );
if ( remote ) {
remote->query( boost::function<void(DBClientCursorBatchIterator &)>( f ), from_collection, query, 0, options );
- } else {
+ }
+ else {
// there is no exhaust mode for direct client, so we have this hack
auto_ptr<DBClientCursor> c = conn->query( from_collection, query, 0, 0, 0, options );
assert( c.get() );
@@ -183,16 +184,16 @@ namespace mongo {
}
}
}
-
- if ( storedForLater.size() ){
- for ( list<BSONObj>::iterator i = storedForLater.begin(); i!=storedForLater.end(); i++ ){
+
+ if ( storedForLater.size() ) {
+ for ( list<BSONObj>::iterator i = storedForLater.begin(); i!=storedForLater.end(); i++ ) {
BSONObj js = *i;
- try {
+ try {
theDataFileMgr.insertWithObjMod(to_collection, js);
if ( logForRepl )
logOp("i", to_collection, js);
}
- catch( UserException& e ) {
+ catch( UserException& e ) {
log() << "warning: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
}
}
@@ -211,40 +212,43 @@ namespace mongo {
return false;
conn.reset( myconn.release() );
-
+
writelock lk(ns); // TODO: make this lower down
Client::Context ctx(ns);
- { // config
+ {
+ // config
string temp = ctx.db()->name + ".system.namespaces";
BSONObj config = conn->findOne( temp , BSON( "name" << ns ) );
if ( config["options"].isABSONObj() )
if ( ! userCreateNS( ns.c_str() , config["options"].Obj() , errmsg, true , 0 ) )
return false;
}
-
- { // main data
+
+ {
+ // main data
copy( ns.c_str() , ns.c_str() , /*isindex*/false , logForRepl , false , true , Query(query).snapshot() );
}
-
+
/* TODO : copyIndexes bool does not seem to be implemented! */
- if( !copyIndexes ) {
+ if( !copyIndexes ) {
log() << "ERROR copy collection copyIndexes not implemented? " << ns << endl;
}
- { // indexes
+ {
+ // indexes
string temp = ctx.db()->name + ".system.indexes";
copy( temp.c_str() , temp.c_str() , /*isindex*/true , logForRepl , false , true , BSON( "ns" << ns ) );
}
return true;
}
-
+
extern bool inDBRepair;
void ensureIdIndexForNewNs(const char *ns);
bool Cloner::go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk, bool useReplAuth, bool snapshot) {
- massert( 10289 , "useReplAuth is not written to replication log", !useReplAuth || !logForRepl );
+ massert( 10289 , "useReplAuth is not written to replication log", !useReplAuth || !logForRepl );
string todb = cc().database()->name;
stringstream a,b;
@@ -264,24 +268,26 @@ namespace mongo {
*/
string ns = fromdb + ".system.namespaces";
list<BSONObj> toClone;
- {
+ {
dbtemprelease r;
-
+
// just using exhaust for collection copying right now
auto_ptr<DBClientCursor> c;
{
if ( conn.get() ) {
// nothing to do
- } else if ( !masterSameProcess ) {
+ }
+ else if ( !masterSameProcess ) {
ConnectionString cs = ConnectionString::parse( masterHost, errmsg );
auto_ptr<DBClientBase> con( cs.connect( errmsg ));
if ( !con.get() )
return false;
if( !replAuthenticate(con.get()) )
return false;
-
+
conn = con;
- } else {
+ }
+ else {
conn.reset( new DBDirectClient() );
}
c = conn->query( ns.c_str(), BSONObj(), 0, 0, 0, slaveOk ? QueryOption_SlaveOk : 0 );
@@ -291,8 +297,8 @@ namespace mongo {
errmsg = "query failed " + ns;
return false;
}
-
- while ( c->more() ){
+
+ while ( c->more() ) {
BSONObj collection = c->next();
log(2) << "\t cloner got " << collection << endl;
@@ -306,23 +312,23 @@ namespace mongo {
assert( e.type() == String );
const char *from_name = e.valuestr();
- if( strstr(from_name, ".system.") ) {
+ if( strstr(from_name, ".system.") ) {
                     /* system.users and s.js are cloned -- but nothing else from system.
                      * system.indexes is handled specially at the end */
- if( legalClientSystemNS( from_name , true ) == 0 ){
+ if( legalClientSystemNS( from_name , true ) == 0 ) {
log(2) << "\t\t not cloning because system collection" << endl;
continue;
}
}
- if( ! isANormalNSName( from_name ) ){
+ if( ! isANormalNSName( from_name ) ) {
log(2) << "\t\t not cloning because has $ " << endl;
continue;
- }
+ }
toClone.push_back( collection.getOwned() );
}
}
- for ( list<BSONObj>::iterator i=toClone.begin(); i != toClone.end(); i++ ){
+ for ( list<BSONObj>::iterator i=toClone.begin(); i != toClone.end(); i++ ) {
{
dbtemprelease r;
}
@@ -330,7 +336,7 @@ namespace mongo {
log(2) << " really will clone: " << collection << endl;
const char * from_name = collection["name"].valuestr();
BSONObj options = collection.getObjectField("options");
-
+
/* change name "<fromdb>.collection" -> <todb>.collection */
const char *p = strchr(from_name, '.');
assert(p);
@@ -340,17 +346,17 @@ namespace mongo {
{
string err;
const char *toname = to_name.c_str();
- /* we defer building id index for performance - building it in batch is much faster */
+ /* we defer building id index for performance - building it in batch is much faster */
userCreateNS(toname, options, err, logForRepl, &wantIdIndex);
}
log(1) << "\t\t cloning " << from_name << " -> " << to_name << endl;
Query q;
- if( snapshot )
+ if( snapshot )
q.snapshot();
copy(from_name, to_name.c_str(), false, logForRepl, masterSameProcess, slaveOk, q);
if( wantIdIndex ) {
- /* we need dropDups to be true as we didn't do a true snapshot and this is before applying oplog operations
+ /* we need dropDups to be true as we didn't do a true snapshot and this is before applying oplog operations
that occur during the initial sync. inDBRepair makes dropDups be true.
*/
bool old = inDBRepair;
@@ -359,7 +365,7 @@ namespace mongo {
ensureIdIndexForNewNs(to_name.c_str());
inDBRepair = old;
}
- catch(...) {
+ catch(...) {
inDBRepair = old;
throw;
}
@@ -370,27 +376,26 @@ namespace mongo {
string system_indexes_from = fromdb + ".system.indexes";
string system_indexes_to = todb + ".system.indexes";
- /* [dm]: is the ID index sometimes not called "_id_"? There is other code in the system that looks for a "_id" prefix
- rather than this exact value. we should standardize. OR, remove names - which is in the bugdb. Anyway, this
+ /* [dm]: is the ID index sometimes not called "_id_"? There is other code in the system that looks for a "_id" prefix
+ rather than this exact value. we should standardize. OR, remove names - which is in the bugdb. Anyway, this
is dubious here at the moment.
*/
copy(system_indexes_from.c_str(), system_indexes_to.c_str(), true, logForRepl, masterSameProcess, slaveOk, BSON( "name" << NE << "_id_" ) );
return true;
}
-
+
/* slaveOk - if true it is ok if the source of the data is !ismaster.
useReplAuth - use the credentials we normally use as a replication slave for the cloning
snapshot - use $snapshot mode for copying collections. note this should not be used when it isn't required, as it will be slower.
for example repairDatabase need not use it.
*/
- bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication,
- bool slaveOk, bool useReplAuth, bool snapshot)
- {
+ bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication,
+ bool slaveOk, bool useReplAuth, bool snapshot) {
Cloner c;
return c.go(masterHost, errmsg, fromdb, logForReplication, slaveOk, useReplAuth, snapshot);
}
-
+
/* Usage:
mydb.$cmd.findOne( { clone: "fromhost" } );
*/
@@ -412,11 +417,11 @@ namespace mongo {
/* replication note: we must logOp() not the command, but the cloned data -- if the slave
were to clone it would get a different point-in-time and not match.
*/
- return cloneFrom(from.c_str(), errmsg, dbname,
+ return cloneFrom(from.c_str(), errmsg, dbname,
/*logForReplication=*/!fromRepl, /*slaveok*/false, /*usereplauth*/false, /*snapshot*/true);
}
} cmdclone;
-
+
class CmdCloneCollection : public Command {
public:
virtual bool slaveOk() const {
@@ -426,10 +431,10 @@ namespace mongo {
CmdCloneCollection() : Command("cloneCollection") { }
virtual void help( stringstream &help ) const {
help << "{ cloneCollection: <namespace>, from: <host> [,query: <query_filter>] [,copyIndexes:<bool>] }"
- "\nCopies a collection from one server to another. Do not use on a single server as the destination "
- "is placed at the same db.collection (namespace) as the source.\n"
- "Warning: the local copy of 'ns' is emptied before the copying begins. Any existing data will be lost there."
- ;
+ "\nCopies a collection from one server to another. Do not use on a single server as the destination "
+ "is placed at the same db.collection (namespace) as the source.\n"
+ "Warning: the local copy of 'ns' is emptied before the copying begins. Any existing data will be lost there."
+ ;
}
virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
string fromhost = cmdObj.getStringField("from");
@@ -439,7 +444,7 @@ namespace mongo {
}
{
HostAndPort h(fromhost);
- if( h.isSelf() ) {
+ if( h.isSelf() ) {
errmsg = "can't cloneCollection from self";
return false;
}
@@ -452,13 +457,13 @@ namespace mongo {
BSONObj query = cmdObj.getObjectField("query");
if ( query.isEmpty() )
query = BSONObj();
-
+
BSONElement copyIndexesSpec = cmdObj.getField("copyindexes");
bool copyIndexes = copyIndexesSpec.isBoolean() ? copyIndexesSpec.boolean() : true;
-
- log() << "cloneCollection. db:" << dbname << " collection:" << collection << " from: " << fromhost
+
+ log() << "cloneCollection. db:" << dbname << " collection:" << collection << " from: " << fromhost
<< " query: " << query << " " << ( copyIndexes ? "" : ", not copying indexes" ) << endl;
-
+
Cloner c;
return c.copyCollection( fromhost , collection , query, errmsg , copyIndexes );
}
@@ -559,7 +564,7 @@ namespace mongo {
return res;
}
} cmdcopydb;
-
+
class CmdRenameCollection : public Command {
public:
CmdRenameCollection() : Command( "renameCollection" ) {}
@@ -583,7 +588,7 @@ namespace mongo {
errmsg = "invalid command syntax";
return false;
}
-
+
bool capped = false;
long long size = 0;
{
@@ -595,10 +600,10 @@ namespace mongo {
for( DiskLoc i = nsd->firstExtent; !i.isNull(); i = i.ext()->xnext )
size += i.ext()->length;
}
-
+
Client::Context ctx( target );
-
- if ( nsdetails( target.c_str() ) ){
+
+ if ( nsdetails( target.c_str() ) ) {
uassert( 10027 , "target namespace exists", cmdObj["dropTarget"].trueValue() );
BSONObjBuilder bb( result.subobjStart( "dropTarget" ) );
dropCollection( target , errmsg , bb );
@@ -625,7 +630,7 @@ namespace mongo {
}
if ( !userCreateNS( target.c_str(), spec.done(), errmsg, false ) )
return false;
-
+
auto_ptr< DBClientCursor > c;
DBDirectClient bridge;
@@ -640,7 +645,7 @@ namespace mongo {
BSONObj o = c->next();
theDataFileMgr.insertWithObjMod( target.c_str(), o );
}
-
+
char cl[256];
nsToDatabase( source.c_str(), cl );
string sourceIndexes = string( cl ) + ".system.indexes";
@@ -663,7 +668,8 @@ namespace mongo {
break;
if ( strcmp( e.fieldName(), "ns" ) == 0 ) {
b.append( "ns", target );
- } else {
+ }
+ else {
b.append( e );
}
}
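
Worth noting in Cloner::copy() above: documents stream in batches, every 128th document triggers a temporary lock release, and anything destined for system.indexes is parked in storedForLater so indexes are built in one batch after the data copy, which is much faster than maintaining them row by row. A rough sketch of that defer-the-indexes loop over standard containers (Doc, isIndexDoc and insertDoc are made-up stand-ins, not the mongo types):

    #include <iostream>
    #include <list>
    #include <string>
    #include <vector>

    struct Doc { std::string ns; std::string body; };

    static bool isIndexDoc( const Doc& d ) {
        return d.ns.find( ".system.indexes" ) != std::string::npos;
    }
    static void insertDoc( const Doc& d ) {
        std::cout << "insert into " << d.ns << ": " << d.body << "\n";
    }

    static void copyAll( const std::vector<Doc>& source ) {
        std::list<Doc> storedForLater;        // index specs wait until the end
        long n = 0;
        for ( std::size_t i = 0; i < source.size(); i++ ) {
            if ( n % 128 == 127 ) {
                // yield point: the real code constructs a dbtemprelease here
            }
            if ( isIndexDoc( source[i] ) ) {
                storedForLater.push_back( source[i] );
                continue;
            }
            insertDoc( source[i] );
            ++n;
        }
        for ( std::list<Doc>::const_iterator i = storedForLater.begin();
              i != storedForLater.end(); ++i )
            insertDoc( *i );                  // build all indexes in one batch
    }

    int main() {
        std::vector<Doc> src;
        Doc d1 = { "test.coll", "{x:1}" };
        Doc d2 = { "test.system.indexes", "{key:{x:1}}" };
        src.push_back( d1 );
        src.push_back( d2 );
        copyAll( src );
        return 0;
    }
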
diff --git a/db/cmdline.cpp b/db/cmdline.cpp
index d65a54fcf0c..c508da3fe7b 100644
--- a/db/cmdline.cpp
+++ b/db/cmdline.cpp
@@ -30,65 +30,66 @@ namespace mongo {
string getHostNameCached();
BSONArray argvArray;
- void CmdLine::addGlobalOptions( boost::program_options::options_description& general ,
- boost::program_options::options_description& hidden ){
+ void CmdLine::addGlobalOptions( boost::program_options::options_description& general ,
+ boost::program_options::options_description& hidden ) {
/* support for -vv -vvvv etc. */
for (string s = "vv"; s.length() <= 12; s.append("v")) {
hidden.add_options()(s.c_str(), "verbose");
}
-
+
general.add_options()
- ("help,h", "show this usage information")
- ("version", "show version information")
- ("config,f", po::value<string>(), "configuration file specifying additional options")
- ("verbose,v", "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
- ("quiet", "quieter output")
- ("port", po::value<int>(&cmdLine.port), "specify port number")
- ("bind_ip", po::value<string>(&cmdLine.bind_ip), "comma separated list of ip addresses to listen on - all local ips by default")
- ("logpath", po::value<string>() , "log file to send write to instead of stdout - has to be a file, not directory" )
- ("logappend" , "append to logpath instead of over-writing" )
- ("pidfilepath", po::value<string>(), "full path to pidfile (if not set, no pidfile is created)")
- ("keyFile", po::value<string>(), "private key for cluster authentication (only for replica sets)")
+ ("help,h", "show this usage information")
+ ("version", "show version information")
+ ("config,f", po::value<string>(), "configuration file specifying additional options")
+ ("verbose,v", "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
+ ("quiet", "quieter output")
+ ("port", po::value<int>(&cmdLine.port), "specify port number")
+ ("bind_ip", po::value<string>(&cmdLine.bind_ip), "comma separated list of ip addresses to listen on - all local ips by default")
+            ("logpath", po::value<string>() , "log file to send writes to instead of stdout - has to be a file, not a directory" )
+ ("logappend" , "append to logpath instead of over-writing" )
+ ("pidfilepath", po::value<string>(), "full path to pidfile (if not set, no pidfile is created)")
+ ("keyFile", po::value<string>(), "private key for cluster authentication (only for replica sets)")
#ifndef _WIN32
- ("fork" , "fork server process" )
+ ("fork" , "fork server process" )
#endif
- ;
-
+ ;
+
}
#if defined(_WIN32)
- void CmdLine::addWindowsOptions( boost::program_options::options_description& windows ,
- boost::program_options::options_description& hidden ){
+ void CmdLine::addWindowsOptions( boost::program_options::options_description& windows ,
+ boost::program_options::options_description& hidden ) {
windows.add_options()
- ("install", "install mongodb service")
- ("remove", "remove mongodb service")
- ("reinstall", "reinstall mongodb service (equivilant of mongod --remove followed by mongod --install)")
- ("serviceName", po::value<string>(), "windows service name")
- ("serviceDisplayName", po::value<string>(), "windows service display name")
- ("serviceDescription", po::value<string>(), "windows service description")
- ("serviceUser", po::value<string>(), "user name service executes as")
- ("servicePassword", po::value<string>(), "password used to authenticate serviceUser")
- ;
+ ("install", "install mongodb service")
+ ("remove", "remove mongodb service")
+            ("reinstall", "reinstall mongodb service (equivalent of mongod --remove followed by mongod --install)")
+ ("serviceName", po::value<string>(), "windows service name")
+ ("serviceDisplayName", po::value<string>(), "windows service display name")
+ ("serviceDescription", po::value<string>(), "windows service description")
+ ("serviceUser", po::value<string>(), "user name service executes as")
+ ("servicePassword", po::value<string>(), "password used to authenticate serviceUser")
+ ;
hidden.add_options()("service", "start mongodb service");
}
#endif
- bool CmdLine::store( int argc , char ** argv ,
+ bool CmdLine::store( int argc , char ** argv ,
boost::program_options::options_description& visible,
boost::program_options::options_description& hidden,
boost::program_options::positional_options_description& positional,
- boost::program_options::variables_map &params ){
-
-
- { // setup binary name
+ boost::program_options::variables_map &params ) {
+
+
+ {
+ // setup binary name
cmdLine.binaryName = argv[0];
size_t i = cmdLine.binaryName.rfind( '/' );
if ( i != string::npos )
cmdLine.binaryName = cmdLine.binaryName.substr( i + 1 );
}
-
+
/* don't allow guessing - creates ambiguities when some options are
* prefixes of others. allow long disguises and don't allow guessing
* to get away with our vvvvvvv trick. */
@@ -97,7 +98,7 @@ namespace mongo {
po::command_line_style::allow_long_disguise) ^
po::command_line_style::allow_sticky);
-
+
try {
po::options_description all;
@@ -108,23 +109,23 @@ namespace mongo {
.options( all )
.positional( positional )
.style( style )
- .run(),
+ .run(),
params );
- if ( params.count("config") ){
+ if ( params.count("config") ) {
ifstream f( params["config"].as<string>().c_str() );
- if ( ! f.is_open() ){
+ if ( ! f.is_open() ) {
cout << "ERROR: could not read from config file" << endl << endl;
cout << visible << endl;
return false;
}
-
+
po::store( po::parse_config_file( f , all ) , params );
f.close();
}
-
+
po::notify(params);
- }
+ }
catch (po::error &e) {
cout << "error command line: " << e.what() << endl;
cout << "use --help for help" << endl;
@@ -150,43 +151,44 @@ namespace mongo {
#ifndef _WIN32
if (params.count("fork")) {
- if ( ! params.count( "logpath" ) ){
+ if ( ! params.count( "logpath" ) ) {
cout << "--fork has to be used with --logpath" << endl;
::exit(-1);
}
-
- { // test logpath
+
+ {
+ // test logpath
logpath = params["logpath"].as<string>();
assert( logpath.size() );
- if ( logpath[0] != '/' ){
+ if ( logpath[0] != '/' ) {
char temp[256];
assert( getcwd( temp , 256 ) );
logpath = (string)temp + "/" + logpath;
}
FILE * test = fopen( logpath.c_str() , "a" );
- if ( ! test ){
+ if ( ! test ) {
cout << "can't open [" << logpath << "] for log file: " << errnoWithDescription() << endl;
::exit(-1);
}
fclose( test );
}
-
+
cout.flush();
cerr.flush();
pid_t c = fork();
- if ( c ){
+ if ( c ) {
_exit(0);
}
- if ( chdir("/") < 0 ){
+ if ( chdir("/") < 0 ) {
cout << "Cant chdir() while forking server process: " << strerror(errno) << endl;
::exit(-1);
}
setsid();
-
+
pid_t c2 = fork();
- if ( c2 ){
+ if ( c2 ) {
cout << "forked process: " << c2 << endl;
_exit(0);
}
@@ -199,13 +201,13 @@ namespace mongo {
fclose(stdin);
FILE* f = freopen("/dev/null", "w", stderr);
- if ( f == NULL ){
+ if ( f == NULL ) {
cout << "Cant reassign stderr while forking server process: " << strerror(errno) << endl;
::exit(-1);
}
f = freopen("/dev/null", "r", stdin);
- if ( f == NULL ){
+ if ( f == NULL ) {
cout << "Cant reassign stdin while forking server process: " << strerror(errno) << endl;
::exit(-1);
}
@@ -225,14 +227,14 @@ namespace mongo {
writePidFile( params["pidfilepath"].as<string>() );
}
- if (params.count("keyFile")){
+ if (params.count("keyFile")) {
const string f = params["keyFile"].as<string>();
if (!setUpSecurityKey(f)) {
// error message printed in setUpPrivateKey
dbexit(EXIT_BADOPTIONS);
}
-
+
noauth = false;
}
@@ -247,45 +249,45 @@ namespace mongo {
return true;
}
- void ignoreSignal( int sig ){}
-
- void setupCoreSignals(){
+ void ignoreSignal( int sig ) {}
+
+ void setupCoreSignals() {
#if !defined(_WIN32)
assert( signal(SIGUSR1 , rotateLogs ) != SIG_ERR );
assert( signal(SIGHUP , ignoreSignal ) != SIG_ERR );
#endif
}
- class CmdGetCmdLineOpts : Command{
- public:
+ class CmdGetCmdLineOpts : Command {
+ public:
CmdGetCmdLineOpts(): Command("getCmdLineOpts") {}
void help(stringstream& h) const { h << "get argv"; }
virtual LockType locktype() const { return NONE; }
virtual bool adminOnly() const { return true; }
virtual bool slaveOk() const { return true; }
- virtual bool run(const string&, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl){
+ virtual bool run(const string&, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
result.append("argv", argvArray);
return true;
}
} cmdGetCmdLineOpts;
- string prettyHostName() {
+ string prettyHostName() {
StringBuilder s(128);
s << getHostNameCached();
- if( cmdLine.port != CmdLine::DefaultDBPort )
+ if( cmdLine.port != CmdLine::DefaultDBPort )
s << ':' << mongo::cmdLine.port;
return s.str();
}
- ParameterValidator::ParameterValidator( const string& name ) : _name( name ){
+ ParameterValidator::ParameterValidator( const string& name ) : _name( name ) {
if ( ! _all )
_all = new map<string,ParameterValidator*>();
(*_all)[_name] = this;
}
- ParameterValidator * ParameterValidator::get( const string& name ){
+ ParameterValidator * ParameterValidator::get( const string& name ) {
map<string,ParameterValidator*>::iterator i = _all->find( name );
if ( i == _all->end() )
return NULL;
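
The --fork branch in CmdLine::store() above is the classic double-fork daemonization: fork and let the parent exit, setsid() to drop the controlling terminal, fork again so the process can never reacquire one, chdir to /, then point the standard streams at /dev/null (or the log file). A stripped-down POSIX sketch of that sequence, with error handling abbreviated:

    #include <cstdio>
    #include <cstdlib>
    #include <unistd.h>

    static void daemonize() {
        if ( fork() > 0 ) _exit(0);   // parent returns to the shell at once
        setsid();                     // new session: no controlling terminal
        if ( fork() > 0 ) _exit(0);   // session leader exits; child can't get a tty
        if ( chdir("/") < 0 ) ::exit(-1);
        // detach stdio so the daemon doesn't hold the terminal open;
        // a real server would point stdout at its logpath instead
        freopen( "/dev/null", "r", stdin );
        freopen( "/dev/null", "w", stdout );
        freopen( "/dev/null", "w", stderr );
    }

    int main() {
        daemonize();
        // ... server main loop would run here ...
        return 0;
    }
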
diff --git a/db/cmdline.h b/db/cmdline.h
index cda39e3f9ea..fbb6bde00bb 100644
--- a/db/cmdline.h
+++ b/db/cmdline.h
@@ -20,29 +20,28 @@
#include "jsobj.h"
namespace mongo {
-
- /* command line options
+
+ /* command line options
*/
/* concurrency: OK/READ */
- struct CmdLine {
+ struct CmdLine {
- CmdLine() :
+ CmdLine() :
port(DefaultDBPort), rest(false), jsonp(false), quiet(false), noTableScan(false), prealloc(true), smallfiles(false),
- quota(false), quotaFiles(8), cpu(false), durOptions(0), oplogSize(0), defaultProfile(0), slowMS(100), pretouch(0), moveParanoia( true ),
- syncdelay(60)
- {
+ quota(false), quotaFiles(8), cpu(false), durOptions(0), oplogSize(0), defaultProfile(0), slowMS(100), pretouch(0), moveParanoia( true ),
+ syncdelay(60) {
// default may change for this later.
#if defined(_DURABLEDEFAULTON)
dur = true;
#else
dur = false;
#endif
- }
-
+ }
+
string binaryName; // mongod or mongos
int port; // --port
- enum {
+ enum {
DefaultDBPort = 27017,
ConfigServerPort = 27019,
ShardServerPort = 27018
@@ -54,7 +53,7 @@ namespace mongo {
bool jsonp; // --jsonp
string _replSet; // --replSet[/<seedlist>]
- string ourSetName() const {
+ string ourSetName() const {
string setname;
size_t sl = _replSet.find('/');
if( sl == string::npos )
@@ -66,19 +65,19 @@ namespace mongo {
// for master/slave replication
string source; // --source
string only; // --only
-
+
bool quiet; // --quiet
bool noTableScan; // --notablescan no table scans allowed
bool prealloc; // --noprealloc no preallocation of data files
bool smallfiles; // --smallfiles allocate smaller data files
-
+
bool quota; // --quota
int quotaFiles; // --quotaFiles
bool cpu; // --cpu show cpu time periodically
bool dur; // --dur durability
- /** --durOptions 7 dump journal and terminate without doing anything further
+ /** --durOptions 7 dump journal and terminate without doing anything further
--durOptions 4 recover and terminate without listening
*/
enum { // bits to be ORed
@@ -95,26 +94,26 @@ namespace mongo {
int slowMS; // --time in ms that is "slow"
int pretouch; // --pretouch for replication application (experimental)
- bool moveParanoia; // for move chunk paranoia
+ bool moveParanoia; // for move chunk paranoia
double syncdelay; // seconds between fsyncs
- static void addGlobalOptions( boost::program_options::options_description& general ,
+ static void addGlobalOptions( boost::program_options::options_description& general ,
boost::program_options::options_description& hidden );
- static void addWindowsOptions( boost::program_options::options_description& windows ,
- boost::program_options::options_description& hidden );
+ static void addWindowsOptions( boost::program_options::options_description& windows ,
+ boost::program_options::options_description& hidden );
-
+
/**
* @return true if should run program, false if should exit
*/
- static bool store( int argc , char ** argv ,
+ static bool store( int argc , char ** argv ,
boost::program_options::options_description& visible,
boost::program_options::options_description& hidden,
boost::program_options::positional_options_description& positional,
boost::program_options::variables_map &output );
};
-
+
extern CmdLine cmdLine;
void setupCoreSignals();
@@ -132,7 +131,7 @@ namespace mongo {
class ParameterValidator {
public:
ParameterValidator( const string& name );
- virtual ~ParameterValidator(){}
+ virtual ~ParameterValidator() {}
virtual bool isValid( BSONElement e , string& errmsg ) = 0;
@@ -140,9 +139,9 @@ namespace mongo {
private:
string _name;
-
+
// don't need to lock since this is all done in static init
- static map<string,ParameterValidator*> * _all;
+ static map<string,ParameterValidator*> * _all;
};
-
+
}
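
ParameterValidator is the self-registering registry idiom: each instance inserts itself into a lazily heap-allocated static map from its constructor, so a translation unit only has to define a static validator object to become discoverable through get(). Heap-allocating the map on first use sidesteps static initialization order, and, as the comment notes, no locking is needed because registration happens during single-threaded static init. A generic sketch of the idiom (Validator is a made-up name):

    #include <map>
    #include <string>

    class Validator {
    public:
        explicit Validator( const std::string& name ) : _name( name ) {
            if ( ! _all )   // first registrant allocates the map, dodging init-order issues
                _all = new std::map<std::string,Validator*>();
            (*_all)[_name] = this;
        }
        virtual ~Validator() {}
        virtual bool isValid( const std::string& value , std::string& errmsg ) = 0;

        static Validator* get( const std::string& name ) {
            if ( ! _all ) return 0;
            std::map<std::string,Validator*>::iterator i = _all->find( name );
            return i == _all->end() ? 0 : i->second;
        }
    private:
        std::string _name;
        static std::map<std::string,Validator*>* _all;
    };

    std::map<std::string,Validator*>* Validator::_all = 0;

A concrete validator is then a subclass with a single static instance defined at namespace scope, exactly as the command classes elsewhere in this change do.
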
diff --git a/db/commands.cpp b/db/commands.cpp
index de5738d9884..770d035326b 100644
--- a/db/commands.cpp
+++ b/db/commands.cpp
@@ -55,7 +55,7 @@ namespace mongo {
ss << "<td>";
if( helpStr != "no help defined" ) {
const char *p = helpStr.c_str();
- while( *p ) {
+ while( *p ) {
if( *p == '<' ) {
ss << "&lt;";
p++; continue;
@@ -67,7 +67,7 @@ namespace mongo {
p++;
continue;
}
- if( strncmp(p, "http:", 5) == 0 ) {
+ if( strncmp(p, "http:", 5) == 0 ) {
ss << "<a href=\"";
const char *q = p;
while( *q && *q != ' ' && *q != '\n' )
@@ -79,7 +79,7 @@ namespace mongo {
while( *q && *q != ' ' && *q != '\n' ) {
ss << (*q == '+' ? ' ' : *q);
q++;
- if( *q == '#' )
+ if( *q == '#' )
while( *q && *q != ' ' && *q != '\n' ) q++;
}
ss << "</a>";
@@ -120,7 +120,7 @@ namespace mongo {
void Command::help( stringstream& help ) const {
help << "no help defined";
}
-
+
bool Command::runAgainstRegistered(const char *ns, BSONObj& jsobj, BSONObjBuilder& anObjBuilder) {
const char *p = strchr(ns, '.');
if ( !p ) return false;
@@ -145,7 +145,7 @@ namespace mongo {
ok = false;
errmsg = "access denied - use admin db";
}
- else if ( jsobj.getBoolField( "help" ) ){
+ else if ( jsobj.getBoolField( "help" ) ) {
stringstream help;
help << "help for: " << e.fieldName() << " ";
c->help( help );
@@ -161,18 +161,18 @@ namespace mongo {
if (!have_ok)
anObjBuilder.append( "ok" , ok ? 1.0 : 0.0 );
-
+
if ( !ok && !have_errmsg) {
anObjBuilder.append("errmsg", errmsg);
uassert_nothrow(errmsg.c_str());
}
return true;
}
-
+
return false;
}
- Command* Command::findCommand( const string& name ){
+ Command* Command::findCommand( const string& name ) {
map<string,Command*>::iterator i = _commands->find( name );
if ( i == _commands->end() )
return 0;
@@ -180,7 +180,7 @@ namespace mongo {
}
- Command::LockType Command::locktype( const string& name ){
+ Command::LockType Command::locktype( const string& name ) {
Command * c = findCommand( name );
if ( ! c )
return WRITE;
@@ -189,10 +189,10 @@ namespace mongo {
void Command::logIfSlow( const Timer& timer, const string& msg ) {
int ms = timer.millis();
- if ( ms > cmdLine.slowMS ){
+ if ( ms > cmdLine.slowMS ) {
out() << msg << " took " << ms << " ms." << endl;
}
}
-
-
+
+
} // namespace mongo
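
The hand-rolled loop in Command::htmlHelp() above escapes '<' for HTML and turns http: runs into anchor tags as it copies the help string. A compact sketch of just the escaping step, trimmed to a few common characters (the real code handles links and other cases inline):

    #include <iostream>
    #include <string>

    static std::string htmlEscape( const std::string& in ) {
        std::string out;
        for ( std::string::size_type i = 0; i < in.size(); i++ ) {
            switch ( in[i] ) {
            case '<': out += "&lt;";  break;
            case '>': out += "&gt;";  break;
            case '&': out += "&amp;"; break;
            default:  out += in[i];
            }
        }
        return out;
    }

    int main() {
        std::cout << htmlEscape( "{ distinct : <collection name> }" ) << std::endl;
        return 0;
    }
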
diff --git a/db/commands.h b/db/commands.h
index 070c981789b..42e46a0125a 100644
--- a/db/commands.h
+++ b/db/commands.h
@@ -34,7 +34,7 @@ namespace mongo {
*/
class Command {
public:
-
+
enum LockType { READ = -1 , NONE = 0 , WRITE = 1 };
const string name;
@@ -49,11 +49,11 @@ namespace mongo {
*/
virtual bool run(const string& db, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) = 0;
- /*
- note: logTheTop() MUST be false if READ
+ /*
+ note: logTheTop() MUST be false if READ
if NONE, can't use Client::Context setup
use with caution
- */
+ */
virtual LockType locktype() const = 0;
/* Return true if only the admin ns has privileges to run this command. */
@@ -63,7 +63,7 @@ namespace mongo {
void htmlHelp(stringstream&) const;
- /* Like adminOnly, but even stricter: we must either be authenticated for admin db,
+ /* Like adminOnly, but even stricter: we must either be authenticated for admin db,
or, if running without auth, on the local interface.
When localHostOnlyIfNoAuth() is true, adminOnly() must also be true.
@@ -74,7 +74,7 @@ namespace mongo {
(the command directly from a client -- if fromRepl, always allowed).
*/
virtual bool slaveOk() const = 0;
-
+
/* Return true if the client force a command to be run on a slave by
turning on the 'slaveok' option in the command query.
*/
@@ -91,12 +91,12 @@ namespace mongo {
virtual void help( stringstream& help ) const;
- /* Return true if authentication and security applies to the commands. Some commands
+ /* Return true if authentication and security applies to the commands. Some commands
(e.g., getnonce, authenticate) can be done by anyone even unauthorized.
*/
virtual bool requiresAuth() { return true; }
- /** @param webUI expose the command in the web ui as localhost:28017/<name>
+ /** @param webUI expose the command in the web ui as localhost:28017/<name>
@param oldName an optional old, deprecated name for the command
*/
Command(const char *_name, bool webUI = false, const char *oldName = 0);
@@ -104,7 +104,7 @@ namespace mongo {
virtual ~Command() {}
protected:
- BSONObj getQuery( const BSONObj& cmdObj ){
+ BSONObj getQuery( const BSONObj& cmdObj ) {
if ( cmdObj["query"].type() == Object )
return cmdObj["query"].embeddedObject();
if ( cmdObj["q"].type() == Object )
diff --git a/db/commands/distinct.cpp b/db/commands/distinct.cpp
index 162b4cbfa85..b1909687bb9 100644
--- a/db/commands/distinct.cpp
+++ b/db/commands/distinct.cpp
@@ -25,36 +25,36 @@ namespace mongo {
class DistinctCommand : public Command {
public:
- DistinctCommand() : Command("distinct"){}
+ DistinctCommand() : Command("distinct") {}
virtual bool slaveOk() const { return true; }
- virtual LockType locktype() const { return READ; }
+ virtual LockType locktype() const { return READ; }
virtual void help( stringstream &help ) const {
help << "{ distinct : 'collection name' , key : 'a.b' , query : {} }";
}
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
string ns = dbname + '.' + cmdObj.firstElement().valuestr();
string key = cmdObj["key"].valuestrsafe();
BSONObj keyPattern = BSON( key << 1 );
BSONObj query = getQuery( cmdObj );
-
+
int bufSize = BSONObjMaxUserSize - 4096;
BufBuilder bb( bufSize );
char * start = bb.buf();
-
+
BSONArrayBuilder arr( bb );
BSONElementSet values;
-
+
long long nscanned = 0; // locations looked at
long long nscannedObjects = 0; // full objects looked at
long long n = 0; // matches
MatchDetails md;
-
+
NamespaceDetails * d = nsdetails( ns.c_str() );
- if ( ! d ){
+ if ( ! d ) {
result.appendArray( "values" , BSONObj() );
result.append( "stats" , BSON( "n" << 0 << "nscanned" << 0 << "nscannedObjects" << 0 ) );
return true;
@@ -69,7 +69,7 @@ namespace mongo {
// query is empty, so lets see if we can find an index
// with the key so we don't have to hit the raw data
NamespaceDetails::IndexIterator ii = d->ii();
- while ( ii.more() ){
+ while ( ii.more() ) {
IndexDetails& idx = ii.next();
if ( d->isMultikey( ii.pos() - 1 ) )
@@ -79,33 +79,33 @@ namespace mongo {
cursor = bestGuessCursor( ns.c_str() , BSONObj() , idx.keyPattern() );
break;
}
-
+
}
-
+
if ( ! cursor.get() )
cursor = bestGuessCursor(ns.c_str() , query , BSONObj() );
-
+
}
-
+
scoped_ptr<ClientCursor> cc (new ClientCursor(QueryOption_NoCursorTimeout, cursor, ns));
-
- while ( cursor->ok() ){
+
+ while ( cursor->ok() ) {
nscanned++;
bool loadedObject = false;
-
- if ( !cursor->matcher() || cursor->matcher()->matchesCurrent( cursor.get() , &md ) ){
+
+ if ( !cursor->matcher() || cursor->matcher()->matchesCurrent( cursor.get() , &md ) ) {
n++;
BSONElementSet temp;
loadedObject = ! cc->getFieldsDotted( key , temp );
-
- for ( BSONElementSet::iterator i=temp.begin(); i!=temp.end(); ++i ){
+
+ for ( BSONElementSet::iterator i=temp.begin(); i!=temp.end(); ++i ) {
BSONElement e = *i;
if ( values.count( e ) )
continue;
-
+
int now = bb.len();
uassert(10044, "distinct too big, 4mb cap", ( now + e.size() + 1024 ) < bufSize );
@@ -117,7 +117,7 @@ namespace mongo {
}
}
- if ( loadedObject || md.loadedObject )
+ if ( loadedObject || md.loadedObject )
nscannedObjects++;
cursor->advance();
@@ -129,9 +129,9 @@ namespace mongo {
}
assert( start == bb.buf() );
-
+
result.appendArray( "values" , arr.done() );
-
+
{
BSONObjBuilder b;
b.appendNumber( "n" , n );
@@ -139,7 +139,7 @@ namespace mongo {
b.appendNumber( "nscannedObjects" , nscannedObjects );
result.append( "stats" , b.obj() );
}
-
+
return true;
}
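
The heart of distinct above is dedup with a size budget: every first-seen value goes into a BSONElementSet and into a preallocated buffer, and uassert(10044) aborts if appending would approach the 4MB response cap. The same shape over plain strings, as a toy:

    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    int main() {
        const std::size_t bufSize = 4 * 1024 * 1024;   // stand-in for the 4MB cap
        std::set<std::string> values;                  // plays the role of BSONElementSet
        std::vector<std::string> result;
        std::size_t used = 0;
        const char* scanned[] = { "a", "b", "a", "c", "b" };
        for ( int i = 0; i < 5; i++ ) {
            std::string v = scanned[i];
            if ( values.count( v ) )                   // duplicate: skip, like values.count(e)
                continue;
            if ( used + v.size() + 1024 >= bufSize ) { // mirrors the uassert(10044) check
                std::cerr << "distinct too big" << std::endl;
                return 1;
            }
            values.insert( v );
            result.push_back( v );
            used += v.size();
        }
        for ( std::size_t i = 0; i < result.size(); i++ )
            std::cout << result[i] << "\n";            // prints a, b, c
        return 0;
    }
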
diff --git a/db/commands/group.cpp b/db/commands/group.cpp
index d4fa5c00364..5db472a5a5d 100644
--- a/db/commands/group.cpp
+++ b/db/commands/group.cpp
@@ -24,16 +24,16 @@ namespace mongo {
class GroupCommand : public Command {
public:
- GroupCommand() : Command("group"){}
- virtual LockType locktype() const { return READ; }
+ GroupCommand() : Command("group") {}
+ virtual LockType locktype() const { return READ; }
virtual bool slaveOk() const { return true; }
virtual bool slaveOverrideOk() { return true; }
virtual void help( stringstream &help ) const {
help << "http://www.mongodb.org/display/DOCS/Aggregation";
}
- BSONObj getKey( const BSONObj& obj , const BSONObj& keyPattern , ScriptingFunction func , double avgSize , Scope * s ){
- if ( func ){
+ BSONObj getKey( const BSONObj& obj , const BSONObj& keyPattern , ScriptingFunction func , double avgSize , Scope * s ) {
+ if ( func ) {
BSONObjBuilder b( obj.objsize() + 32 );
b.append( "0" , obj );
int res = s->invoke( func , b.obj() );
@@ -45,10 +45,10 @@ namespace mongo {
return obj.extractFields( keyPattern , true );
}
- bool group( string realdbname , const string& ns , const BSONObj& query ,
+ bool group( string realdbname , const string& ns , const BSONObj& query ,
BSONObj keyPattern , string keyFunctionCode , string reduceCode , const char * reduceScope ,
BSONObj initial , string finalize ,
- string& errmsg , BSONObjBuilder& result ){
+ string& errmsg , BSONObjBuilder& result ) {
auto_ptr<Scope> s = globalScriptEngine->getPooledScope( realdbname );
@@ -62,19 +62,19 @@ namespace mongo {
s->exec( "$reduce = " + reduceCode , "reduce setup" , false , true , true , 100 );
s->exec( "$arr = [];" , "reduce setup 2" , false , true , true , 100 );
ScriptingFunction f = s->createFunction(
- "function(){ "
- " if ( $arr[n] == null ){ "
- " next = {}; "
- " Object.extend( next , $key ); "
- " Object.extend( next , $initial , true ); "
- " $arr[n] = next; "
- " next = null; "
- " } "
- " $reduce( obj , $arr[n] ); "
- "}" );
+ "function(){ "
+ " if ( $arr[n] == null ){ "
+ " next = {}; "
+ " Object.extend( next , $key ); "
+ " Object.extend( next , $initial , true ); "
+ " $arr[n] = next; "
+ " next = null; "
+ " } "
+ " $reduce( obj , $arr[n] ); "
+ "}" );
ScriptingFunction keyFunction = 0;
- if ( keyFunctionCode.size() ){
+ if ( keyFunctionCode.size() ) {
keyFunction = s->createFunction( keyFunctionCode.c_str() );
}
@@ -87,8 +87,8 @@ namespace mongo {
shared_ptr<Cursor> cursor = bestGuessCursor(ns.c_str() , query , BSONObj() );
- while ( cursor->ok() ){
- if ( cursor->matcher() && ! cursor->matcher()->matchesCurrent( cursor.get() ) ){
+ while ( cursor->ok() ) {
+ if ( cursor->matcher() && ! cursor->matcher()->matchesCurrent( cursor.get() ) ) {
cursor->advance();
continue;
}
@@ -101,7 +101,7 @@ namespace mongo {
keynum++;
int& n = map[key];
- if ( n == 0 ){
+ if ( n == 0 ) {
n = map.size();
s->setObject( "$key" , key , true );
@@ -110,24 +110,24 @@ namespace mongo {
s->setObject( "obj" , obj , true );
s->setNumber( "n" , n - 1 );
- if ( s->invoke( f , BSONObj() , 0 , true ) ){
+ if ( s->invoke( f , BSONObj() , 0 , true ) ) {
throw UserException( 9010 , (string)"reduce invoke failed: " + s->getError() );
}
}
- if (!finalize.empty()){
+ if (!finalize.empty()) {
s->exec( "$finalize = " + finalize , "finalize define" , false , true , true , 100 );
ScriptingFunction g = s->createFunction(
- "function(){ "
- " for(var i=0; i < $arr.length; i++){ "
- " var ret = $finalize($arr[i]); "
- " if (ret !== undefined) "
- " $arr[i] = ret; "
- " } "
- "}" );
+ "function(){ "
+ " for(var i=0; i < $arr.length; i++){ "
+ " var ret = $finalize($arr[i]); "
+ " if (ret !== undefined) "
+ " $arr[i] = ret; "
+ " } "
+ "}" );
s->invoke( g , BSONObj() , 0 , true );
}
-
+
result.appendArray( "retval" , s->getObject( "$arr" ) );
result.append( "count" , keynum - 1 );
result.append( "keys" , (int)(map.size()) );
@@ -137,7 +137,7 @@ namespace mongo {
return true;
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
/* db.$cmd.findOne( { group : <p> } ) */
const BSONObj& p = jsobj.firstElement().embeddedObjectUserCheck();
@@ -147,26 +147,26 @@ namespace mongo {
q = p["cond"].embeddedObject();
else if ( p["condition"].type() == Object )
q = p["condition"].embeddedObject();
- else
+ else
q = getQuery( p );
- if ( p["ns"].type() != String ){
+ if ( p["ns"].type() != String ) {
errmsg = "ns has to be set";
return false;
}
-
+
string ns = dbname + "." + p["ns"].String();
BSONObj key;
string keyf;
- if ( p["key"].type() == Object ){
+ if ( p["key"].type() == Object ) {
key = p["key"].embeddedObjectUserCheck();
- if ( ! p["$keyf"].eoo() ){
+ if ( ! p["$keyf"].eoo() ) {
errmsg = "can't have key and $keyf";
return false;
}
}
- else if ( p["$keyf"].type() ){
+ else if ( p["$keyf"].type() ) {
keyf = p["$keyf"]._asCode();
}
else {
@@ -174,13 +174,13 @@ namespace mongo {
}
BSONElement reduce = p["$reduce"];
- if ( reduce.eoo() ){
+ if ( reduce.eoo() ) {
errmsg = "$reduce has to be set";
return false;
}
BSONElement initial = p["initial"];
- if ( initial.type() != Object ){
+ if ( initial.type() != Object ) {
errmsg = "initial has to be an object";
return false;
}
@@ -198,5 +198,5 @@ namespace mongo {
} cmdGroup;
-
+
} // namespace mongo
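
group above keeps one accumulator per distinct key: the key comes from a key pattern or from $keyf, each matching document is folded into its key's slot by the user's $reduce, and an optional finalize pass rewrites the accumulators at the end. A toy of the same flow, with a summing reducer standing in for the JS function:

    #include <iostream>
    #include <map>
    #include <string>

    struct Row { std::string key; int value; };

    int main() {
        Row rows[] = { { "a", 1 }, { "b", 2 }, { "a", 3 } };

        std::map<std::string,int> acc;           // one accumulator per key, like $arr
        for ( int i = 0; i < 3; i++ )
            acc[rows[i].key] += rows[i].value;   // the $reduce( obj , $arr[n] ) step

        for ( std::map<std::string,int>::iterator i = acc.begin(); i != acc.end(); ++i )
            std::cout << i->first << " -> " << i->second << "\n";  // finalize/emit
        return 0;
    }
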
diff --git a/db/commands/isself.cpp b/db/commands/isself.cpp
index 626728cc0a2..b97f51e2b6f 100644
--- a/db/commands/isself.cpp
+++ b/db/commands/isself.cpp
@@ -17,17 +17,17 @@
namespace mongo {
#if !defined(_WIN32) && !defined(__sunos__)
-
- vector<string> getMyAddrs(){
+
+ vector<string> getMyAddrs() {
ifaddrs * addrs;
-
+
int status = getifaddrs(&addrs);
massert(13469, "getifaddrs failure: " + errnoWithDescription(errno), status == 0);
vector<string> out;
// based on example code from linux getifaddrs manpage
- for (ifaddrs * addr = addrs; addr != NULL; addr = addr->ifa_next){
+ for (ifaddrs * addr = addrs; addr != NULL; addr = addr->ifa_next) {
if ( addr->ifa_addr == NULL ) continue;
int family = addr->ifa_addr->sa_family;
char host[NI_MAXHOST];
@@ -36,7 +36,7 @@ namespace mongo {
status = getnameinfo(addr->ifa_addr,
(family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)),
host, NI_MAXHOST, NULL, 0, NI_NUMERICHOST);
- if ( status != 0 ){
+ if ( status != 0 ) {
freeifaddrs( addrs );
addrs = NULL;
msgasserted( 13470, string("getnameinfo() failed: ") + gai_strerror(status) );
@@ -50,9 +50,9 @@ namespace mongo {
freeifaddrs( addrs );
addrs = NULL;
- if (logLevel >= 1){
+ if (logLevel >= 1) {
log(1) << "getMyAddrs():";
- for (vector<string>::const_iterator it=out.begin(), end=out.end(); it!=end; ++it){
+ for (vector<string>::const_iterator it=out.begin(), end=out.end(); it!=end; ++it) {
log(1) << " [" << *it << ']';
}
log(1) << endl;
@@ -61,24 +61,24 @@ namespace mongo {
return out;
}
- vector<string> getAllIPs(StringData iporhost){
+ vector<string> getAllIPs(StringData iporhost) {
addrinfo* addrs = NULL;
addrinfo hints;
memset(&hints, 0, sizeof(addrinfo));
hints.ai_socktype = SOCK_STREAM;
hints.ai_family = (IPv6Enabled() ? AF_UNSPEC : AF_INET);
-
+
static string portNum = BSONObjBuilder::numStr(cmdLine.port);
vector<string> out;
int ret = getaddrinfo(iporhost.data(), portNum.c_str(), &hints, &addrs);
- if ( ret ){
+ if ( ret ) {
warning() << "getaddrinfo(\"" << iporhost.data() << "\") failed: " << gai_strerror(ret) << endl;
return out;
}
- for (addrinfo* addr = addrs; addr != NULL; addr = addr->ai_next){
+ for (addrinfo* addr = addrs; addr != NULL; addr = addr->ai_next) {
int family = addr->ai_family;
char host[NI_MAXHOST];
@@ -94,9 +94,9 @@ namespace mongo {
freeaddrinfo(addrs);
- if (logLevel >= 1){
+ if (logLevel >= 1) {
log(1) << "getallIPs(\"" << iporhost << "\"):";
- for (vector<string>::const_iterator it=out.begin(), end=out.end(); it!=end; ++it){
+ for (vector<string>::const_iterator it=out.begin(), end=out.end(); it!=end; ++it) {
log(1) << " [" << *it << ']';
}
log(1) << endl;
@@ -106,46 +106,46 @@ namespace mongo {
}
#endif
-
+
class IsSelfCommand : public Command {
public:
IsSelfCommand() : Command("_isSelf") , _cacheLock( "IsSelfCommand::_cacheLock" ) {}
virtual bool slaveOk() const { return true; }
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual void help( stringstream &help ) const {
help << "{ _isSelf : 1 } INTERNAL ONLY";
}
-
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+
+ bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
init();
result.append( "id" , _id );
return true;
}
- void init(){
+ void init() {
scoped_lock lk( _cacheLock );
if ( ! _id.isSet() )
_id.init();
}
OID _id;
-
+
mongo::mutex _cacheLock;
map<string,bool> _cache;
} isSelfCommand;
- bool HostAndPort::isSelf() const {
+ bool HostAndPort::isSelf() const {
int p = _port == -1 ? CmdLine::DefaultDBPort : _port;
- if( p != cmdLine.port ){
+ if( p != cmdLine.port ) {
// shortcut - ports have to match at the very least
return false;
}
-
+
string host = str::stream() << _host << ":" << p;
-
- {
+
+ {
// check cache for this host
// debatably something _could_ change, but I'm not sure right now (erh 10/14/2010)
scoped_lock lk( isSelfCommand._cacheLock );
@@ -153,22 +153,22 @@ namespace mongo {
if ( i != isSelfCommand._cache.end() )
return i->second;
}
-
+
#if !defined(_WIN32) && !defined(__sunos__)
// on linux and os x we can do a quick check for an ip match
const vector<string> myaddrs = getMyAddrs();
const vector<string> addrs = getAllIPs(_host);
-
- for (vector<string>::const_iterator i=myaddrs.begin(), iend=myaddrs.end(); i!=iend; ++i){
- for (vector<string>::const_iterator j=addrs.begin(), jend=addrs.end(); j!=jend; ++j){
+
+ for (vector<string>::const_iterator i=myaddrs.begin(), iend=myaddrs.end(); i!=iend; ++i) {
+ for (vector<string>::const_iterator j=addrs.begin(), jend=addrs.end(); j!=jend; ++j) {
string a = *i;
string b = *j;
-
+
if ( a == b ||
- ( str::startsWith( a , "127." ) && str::startsWith( b , "127." ) ) // 127. is all loopback
- ){
-
+ ( str::startsWith( a , "127." ) && str::startsWith( b , "127." ) ) // 127. is all loopback
+ ) {
+
// add to cache
scoped_lock lk( isSelfCommand._cacheLock );
isSelfCommand._cache[host] = true;
@@ -176,42 +176,42 @@ namespace mongo {
}
}
}
-
+
#endif
- if ( ! Listener::getTimeTracker() ){
+ if ( ! Listener::getTimeTracker() ) {
// this ensures we are actually running a server
// this may return true later, so may want to retry
return false;
}
-
+
try {
-
+
isSelfCommand.init();
-
+
DBClientConnection conn;
string errmsg;
- if ( ! conn.connect( host , errmsg ) ){
+ if ( ! conn.connect( host , errmsg ) ) {
// should this go in the cache?
return false;
}
-
+
BSONObj out;
bool ok = conn.simpleCommand( "admin" , &out , "_isSelf" );
-
+
bool me = ok && out["id"].type() == jstOID && isSelfCommand._id == out["id"].OID();
-
+
// add to cache
scoped_lock lk( isSelfCommand._cacheLock );
isSelfCommand._cache[host] = me;
return me;
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
warning() << "could't check isSelf (" << host << ") " << e.what() << endl;
}
-
+
return false;
}
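
isSelf() above short-circuits on the port, consults a small cache keyed by host:port, and on POSIX systems compares the local interface addresses from getifaddrs() against every address the target name resolves to. A self-contained POSIX sketch of the resolution half, printing numeric addresses the way getAllIPs() collects them (the host and port literals are placeholders):

    #include <cstdio>
    #include <cstring>
    #include <netdb.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    int main() {
        addrinfo hints;
        memset( &hints, 0, sizeof(hints) );
        hints.ai_socktype = SOCK_STREAM;
        hints.ai_family = AF_UNSPEC;                  // both IPv4 and IPv6
        addrinfo* addrs = NULL;
        int ret = getaddrinfo( "localhost", "27017", &hints, &addrs );
        if ( ret ) {
            fprintf( stderr, "getaddrinfo: %s\n", gai_strerror(ret) );
            return 1;
        }
        for ( addrinfo* a = addrs; a != NULL; a = a->ai_next ) {
            char host[NI_MAXHOST];
            if ( getnameinfo( a->ai_addr, a->ai_addrlen, host, NI_MAXHOST,
                              NULL, 0, NI_NUMERICHOST ) == 0 )
                printf( "%s\n", host );               // one numeric address per line
        }
        freeaddrinfo( addrs );
        return 0;
    }
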
diff --git a/db/commands/mr.cpp b/db/commands/mr.cpp
index 7579413c35b..3df37b5a23d 100644
--- a/db/commands/mr.cpp
+++ b/db/commands/mr.cpp
@@ -35,29 +35,29 @@ namespace mongo {
AtomicUInt Config::JOB_NUMBER;
- JSFunction::JSFunction( string type , const BSONElement& e ){
+ JSFunction::JSFunction( string type , const BSONElement& e ) {
_type = type;
_code = e._asCode();
-
+
if ( e.type() == CodeWScope )
_wantedScope = e.codeWScopeObject();
}
- void JSFunction::init( State * state ){
+ void JSFunction::init( State * state ) {
_scope = state->scope();
assert( _scope );
_scope->init( &_wantedScope );
-
+
_func = _scope->createFunction( _code.c_str() );
uassert( 13598 , str::stream() << "couldn't compile code for: " << _type , _func );
}
- void JSMapper::init( State * state ){
- _func.init( state );
- _params = state->config().mapParams;
+ void JSMapper::init( State * state ) {
+ _func.init( state );
+ _params = state->config().mapParams;
}
- void JSMapper::map( const BSONObj& o ){
+ void JSMapper::map( const BSONObj& o ) {
Scope * s = _func.scope();
assert( s );
s->setThis( &o );
@@ -65,25 +65,25 @@ namespace mongo {
throw UserException( 9014, str::stream() << "map invoke failed: " + s->getError() );
}
- BSONObj JSFinalizer::finalize( const BSONObj& o ){
+ BSONObj JSFinalizer::finalize( const BSONObj& o ) {
Scope * s = _func.scope();
Scope::NoDBAccess no = s->disableDBAccess( "can't access db inside finalize" );
s->invokeSafe( _func.func() , o );
-
- // don't want to use o.objsize() to size b
+
+ // don't want to use o.objsize() to size b
// since there are many cases where the point of finalize
// is converting many fields to 1
- BSONObjBuilder b;
+ BSONObjBuilder b;
b.append( o.firstElement() );
s->append( b , "value" , "return" );
return b.obj();
}
- BSONObj JSReducer::reduce( const BSONList& tuples ){
+ BSONObj JSReducer::reduce( const BSONList& tuples ) {
BSONObj key;
int endSizeEstimate = 16;
-
+
_reduce( tuples , key , endSizeEstimate );
BSONObjBuilder b(endSizeEstimate);
@@ -91,12 +91,12 @@ namespace mongo {
_func.scope()->append( b , "1" , "return" );
return b.obj();
}
-
- BSONObj JSReducer::reduce( const BSONList& tuples , Finalizer * finalizer ){
+
+ BSONObj JSReducer::reduce( const BSONList& tuples , Finalizer * finalizer ) {
BSONObj key;
int endSizeEstimate = 16;
-
+
_reduce( tuples , key , endSizeEstimate );
BSONObjBuilder b(endSizeEstimate);
@@ -104,54 +104,54 @@ namespace mongo {
_func.scope()->append( b , "value" , "return" );
BSONObj res = b.obj();
- if ( finalizer ){
+ if ( finalizer ) {
res = finalizer->finalize( res );
}
return res;
}
- void JSReducer::_reduce( const BSONList& tuples , BSONObj& key , int& endSizeEstimate ){
+ void JSReducer::_reduce( const BSONList& tuples , BSONObj& key , int& endSizeEstimate ) {
uassert( 10074 , "need values" , tuples.size() );
-
+
int sizeEstimate = ( tuples.size() * tuples.begin()->getField( "value" ).size() ) + 128;
-
+
BSONObjBuilder reduceArgs( sizeEstimate );
boost::scoped_ptr<BSONArrayBuilder> valueBuilder;
-
+
int sizeSoFar = 0;
unsigned n = 0;
- for ( ; n<tuples.size(); n++ ){
+ for ( ; n<tuples.size(); n++ ) {
BSONObjIterator j(tuples[n]);
BSONElement keyE = j.next();
- if ( n == 0 ){
+ if ( n == 0 ) {
reduceArgs.append( keyE );
key = keyE.wrap();
sizeSoFar = 5 + keyE.size();
valueBuilder.reset(new BSONArrayBuilder( reduceArgs.subarrayStart( "tuples" ) ));
}
-
+
BSONElement ee = j.next();
-
+
uassert( 13070 , "value to large to reduce" , ee.size() < ( BSONObjMaxUserSize / 2 ) );
- if ( sizeSoFar + ee.size() > BSONObjMaxUserSize ){
+ if ( sizeSoFar + ee.size() > BSONObjMaxUserSize ) {
assert( n > 1 ); // if not, inf. loop
break;
}
-
+
valueBuilder->append( ee );
sizeSoFar += ee.size();
}
assert(valueBuilder);
valueBuilder->done();
- BSONObj args = reduceArgs.obj();
+ BSONObj args = reduceArgs.obj();
Scope * s = _func.scope();
s->invokeSafe( _func.func() , args );
- if ( s->type( "return" ) == Array ){
+ if ( s->type( "return" ) == Array ) {
uasserted( 10075 , "reduce -> multiple not supported yet");
return;
}
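
A subtlety in JSReducer::_reduce() above: tuples are packed into the reduce arguments only while the running size stays under BSONObjMaxUserSize; that prefix is reduced, the partial result is pushed onto the remaining tail, and the function recurses, which is exactly why reduce functions must be associative and able to accept their own output. A toy of the fold-the-partial-back-in loop using integers:

    #include <iostream>
    #include <vector>

    // stand-in reduce: must be associative and accept its own output
    static int reduceSum( const std::vector<int>& vals ) {
        int s = 0;
        for ( std::size_t i = 0; i < vals.size(); i++ ) s += vals[i];
        return s;
    }

    static int reduceChunked( std::vector<int> tuples, std::size_t maxChunk ) {
        while ( tuples.size() > maxChunk ) {
            std::vector<int> chunk( tuples.begin(), tuples.begin() + maxChunk );
            std::vector<int> rest( tuples.begin() + maxChunk, tuples.end() );
            rest.push_back( reduceSum( chunk ) );   // partial result rejoins the tail
            tuples.swap( rest );
        }
        return reduceSum( tuples );
    }

    int main() {
        std::vector<int> v;
        for ( int i = 1; i <= 10; i++ ) v.push_back( i );
        std::cout << reduceChunked( v, 4 ) << std::endl;   // prints 55
        return 0;
    }

Here maxChunk plays the role of the BSONObjMaxUserSize check; in the real code the cutoff is by accumulated byte size, not element count.
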
@@ -160,11 +160,11 @@ namespace mongo {
if ( n == tuples.size() )
return;
-
+
// the input list was too large
-
+
BSONList x;
- for ( ; n < tuples.size(); n++ ){
+ for ( ; n < tuples.size(); n++ ) {
x.push_back( tuples[n] );
}
BSONObjBuilder temp( endSizeEstimate );
@@ -173,146 +173,150 @@ namespace mongo {
x.push_back( temp.obj() );
_reduce( x , key , endSizeEstimate );
}
-
- Config::Config( const string& _dbname , const BSONObj& cmdObj ){
-
+
+ Config::Config( const string& _dbname , const BSONObj& cmdObj ) {
+
dbname = _dbname;
ns = dbname + "." + cmdObj.firstElement().valuestr();
-
+
verbose = cmdObj["verbose"].trueValue();
uassert( 13602 , "outType is no longer a valid option" , cmdObj["outType"].eoo() );
- if ( cmdObj["out"].type() == String ){
+ if ( cmdObj["out"].type() == String ) {
finalShort = cmdObj["out"].String();
outType = REPLACE;
}
- else if ( cmdObj["out"].type() == Object ){
+ else if ( cmdObj["out"].type() == Object ) {
BSONObj o = cmdObj["out"].embeddedObject();
uassert( 13607 , "'out' has to have a single field" , o.nFields() == 1 );
BSONElement e = o.firstElement();
string t = e.fieldName();
-
- if ( t == "normal" || t == "replace" ){
+
+ if ( t == "normal" || t == "replace" ) {
outType = REPLACE;
finalShort = e.String();
}
- else if ( t == "merge" ){
+ else if ( t == "merge" ) {
outType = MERGE;
finalShort = e.String();
}
- else if ( t == "reduce" ){
+ else if ( t == "reduce" ) {
outType = REDUCE;
finalShort = e.String();
}
- else if ( t == "inline" ){
+ else if ( t == "inline" ) {
outType = INMEMORY;
}
- else{
+ else {
uasserted( 13522 , str::stream() << "unknown out specifier [" << t << "]" );
- }
+ }
}
else {
uasserted( 13606 , "'out' has to be a string or an object" );
}
-
- if ( outType != INMEMORY ){ // setup names
+
+ if ( outType != INMEMORY ) { // setup names
tempLong = str::stream() << dbname << ".tmp.mr." << cmdObj.firstElement().String() << "_" << finalShort << "_" << JOB_NUMBER++;
-
+
incLong = tempLong + "_inc";
-
+
finalLong = str::stream() << dbname << "." << finalShort;
}
- { // scope and code
-
+ {
+ // scope and code
+
if ( cmdObj["scope"].type() == Object )
scopeSetup = cmdObj["scope"].embeddedObjectUserCheck();
-
+
mapper.reset( new JSMapper( cmdObj["map"] ) );
reducer.reset( new JSReducer( cmdObj["reduce"] ) );
if ( cmdObj["finalize"].type() )
finalizer.reset( new JSFinalizer( cmdObj["finalize"] ) );
- if ( cmdObj["mapparams"].type() == Array ){
+ if ( cmdObj["mapparams"].type() == Array ) {
mapParams = cmdObj["mapparams"].embeddedObjectUserCheck();
}
-
+
}
-
- { // query options
+
+ {
+ // query options
BSONElement q = cmdObj["query"];
if ( q.type() == Object )
filter = q.embeddedObjectUserCheck();
- else
+ else
uassert( 13608 , "query has to be blank or an Object" , ! q.trueValue() );
-
-
+
+
BSONElement s = cmdObj["sort"];
if ( s.type() == Object )
sort = s.embeddedObjectUserCheck();
- else
+ else
uassert( 13609 , "sort has to be blank or an Object" , ! s.trueValue() );
-
+
if ( cmdObj["limit"].isNumber() )
limit = cmdObj["limit"].numberLong();
- else
+ else
limit = 0;
}
}
- void State::prepTempCollection(){
+ void State::prepTempCollection() {
if ( ! _onDisk )
return;
_db.dropCollection( _config.tempLong );
- { // create
+ {
+ // create
writelock lock( _config.tempLong.c_str() );
Client::Context ctx( _config.tempLong.c_str() );
string errmsg;
assert( userCreateNS( _config.tempLong.c_str() , BSONObj() , errmsg , true ) );
}
-
-
- { // copy indexes
+
+
+ {
+ // copy indexes
auto_ptr<DBClientCursor> idx = _db.getIndexes( _config.finalLong );
- while ( idx->more() ){
+ while ( idx->more() ) {
BSONObj i = idx->next();
-
+
BSONObjBuilder b( i.objsize() + 16 );
b.append( "ns" , _config.tempLong );
BSONObjIterator j( i );
- while ( j.more() ){
+ while ( j.more() ) {
BSONElement e = j.next();
- if ( str::equals( e.fieldName() , "_id" ) ||
- str::equals( e.fieldName() , "ns" ) )
+ if ( str::equals( e.fieldName() , "_id" ) ||
+ str::equals( e.fieldName() , "ns" ) )
continue;
-
+
b.append( e );
}
-
+
BSONObj indexToInsert = b.obj();
insert( Namespace( _config.tempLong.c_str() ).getSisterNS( "system.indexes" ).c_str() , indexToInsert );
}
-
+
}
}
- void State::appendResults( BSONObjBuilder& final ){
+ void State::appendResults( BSONObjBuilder& final ) {
if ( _onDisk )
return;
-
+
uassert( 13604 , "too much data for in memory map/reduce" , _size < ( BSONObjMaxUserSize / 2 ) );
BSONArrayBuilder b( (int)(_size * 1.2) ); // _size is data size, doesn't count overhead and keys
- for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); ++i ){
+ for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); ++i ) {
BSONObj key = i->first;
BSONList& all = i->second;
-
+
assert( all.size() == 1 );
BSONObjIterator vi( all[0] );
@@ -323,55 +327,55 @@ namespace mongo {
temp.appendAs( vi.next() , "value" );
temp.done();
}
-
+
BSONArray res = b.arr();
- uassert( 13605 , "too much data for in memory map/reduce" , res.objsize() < ( BSONObjMaxUserSize * 2 / 3 ) );
+ uassert( 13605 , "too much data for in memory map/reduce" , res.objsize() < ( BSONObjMaxUserSize * 2 / 3 ) );
final.append( "results" , res );
}
- long long State::renameIfNeeded(){
+ long long State::renameIfNeeded() {
if ( ! _onDisk )
return _temp->size();
-
+
dblock lock;
if ( _config.finalLong == _config.tempLong )
return _db.count( _config.finalLong );
-
- switch ( _config.outType ){
+
+ switch ( _config.outType ) {
case Config::REPLACE: {
_db.dropCollection( _config.finalLong );
BSONObj info;
- uassert( 10076 , "rename failed" ,
+ uassert( 10076 , "rename failed" ,
_db.runCommand( "admin" , BSON( "renameCollection" << _config.tempLong << "to" << _config.finalLong ) , info ) );
_db.dropCollection( _config.tempLong );
break;
}
case Config::MERGE: {
auto_ptr<DBClientCursor> cursor = _db.query( _config.tempLong , BSONObj() );
- while ( cursor->more() ){
+ while ( cursor->more() ) {
BSONObj o = cursor->next();
Helpers::upsert( _config.finalLong , o );
}
_db.dropCollection( _config.tempLong );
break;
}
- case Config::REDUCE: {
+ case Config::REDUCE: {
BSONList values;
-
+
auto_ptr<DBClientCursor> cursor = _db.query( _config.tempLong , BSONObj() );
- while ( cursor->more() ){
+ while ( cursor->more() ) {
BSONObj temp = cursor->next();
BSONObj old;
-
+
bool found;
{
Client::Context tx( _config.finalLong );
found = Helpers::findOne( _config.finalLong.c_str() , temp["_id"].wrap() , old , true );
}
-
- if ( found ){
+
+ if ( found ) {
// need to reduce
values.clear();
values.push_back( temp );
@@ -389,54 +393,54 @@ namespace mongo {
return _temp->size();
}
}
-
+
return _db.count( _config.finalLong );
}
-
- void State::insert( const string& ns , BSONObj& o ){
+
+ void State::insert( const string& ns , BSONObj& o ) {
assert( _onDisk );
writelock l( ns );
Client::Context ctx( ns );
-
+
theDataFileMgr.insertAndLog( ns.c_str() , o , false );
}
- void State::_insertToInc( BSONObj& o ){
+ void State::_insertToInc( BSONObj& o ) {
assert( _onDisk );
theDataFileMgr.insertWithObjMod( _config.incLong.c_str() , o , true );
}
- State::State( const Config& c ) : _config( c ), _size(0), _numEmits(0){
+ State::State( const Config& c ) : _config( c ), _size(0), _numEmits(0) {
_temp.reset( new InMemory() );
_onDisk = _config.outType != Config::INMEMORY;
}
- bool State::sourceExists(){
+ bool State::sourceExists() {
return _db.exists( _config.ns );
}
-
- long long State::incomingDocuments(){
+
+ long long State::incomingDocuments() {
return _db.count( _config.ns , _config.filter , 0 , (unsigned) _config.limit );
}
- State::~State(){
- if ( _onDisk ){
+ State::~State() {
+ if ( _onDisk ) {
try {
_db.dropCollection( _config.tempLong );
_db.dropCollection( _config.incLong );
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
error() << "couldn't cleanup after map reduce: " << e.what() << endl;
}
}
}
- void State::init(){
+ void State::init() {
// setup js
_scope.reset(globalScriptEngine->getPooledScope( _config.dbname ).release() );
_scope->localConnect( _config.dbname.c_str() );
-
+
if ( ! _config.scopeSetup.isEmpty() )
_scope->init( &_config.scopeSetup );
@@ -446,40 +450,40 @@ namespace mongo {
_config.finalizer->init( this );
_scope->injectNative( "emit" , fast_emit );
-
- if ( _onDisk ){
+
+ if ( _onDisk ) {
// clear temp collections
_db.dropCollection( _config.tempLong );
_db.dropCollection( _config.incLong );
-
+
writelock l( _config.incLong );
Client::Context ctx( _config.incLong );
string err;
assert( userCreateNS( _config.incLong.c_str() , BSON( "autoIndexId" << 0 ) , err , false ) );
}
-
+
}
-
- void State::finalReduce( BSONList& values ){
+
+ void State::finalReduce( BSONList& values ) {
if ( values.size() == 0 )
return;
-
+
BSONObj key = values.begin()->firstElement().wrap( "_id" );
BSONObj res = _config.reducer->reduce( values , _config.finalizer.get() );
-
+
insert( _config.tempLong , res );
}
- void State::finalReduce( CurOp * op , ProgressMeterHolder& pm ){
- if ( ! _onDisk ){
- if ( _config.finalizer ){
+ void State::finalReduce( CurOp * op , ProgressMeterHolder& pm ) {
+ if ( ! _onDisk ) {
+ if ( _config.finalizer ) {
long size = 0;
- for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); ++i ){
+ for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); ++i ) {
BSONObj key = i->first;
BSONList& all = i->second;
-
+
assert( all.size() == 1 );
-
+
BSONObj res = _config.finalizer->finalize( all[0] );
all.clear();
@@ -500,69 +504,69 @@ namespace mongo {
readlock rl( _config.incLong.c_str() );
Client::Context ctx( _config.incLong );
-
+
BSONObj prev;
BSONList all;
-
+
assert( pm == op->setMessage( "m/r: (3/3) final reduce to collection" , _db.count( _config.incLong ) ) );
-
+
shared_ptr<Cursor> temp = bestGuessCursor( _config.incLong.c_str() , BSONObj() , sortKey );
auto_ptr<ClientCursor> cursor( new ClientCursor( QueryOption_NoCursorTimeout , temp , _config.incLong.c_str() ) );
-
- while ( cursor->ok() ){
+
+ while ( cursor->ok() ) {
BSONObj o = cursor->current().getOwned();
cursor->advance();
-
+
pm.hit();
-
- if ( o.woSortOrder( prev , sortKey ) == 0 ){
+
+ if ( o.woSortOrder( prev , sortKey ) == 0 ) {
all.push_back( o );
- if ( pm->hits() % 1000 == 0 ){
- if ( ! cursor->yield() ){
+ if ( pm->hits() % 1000 == 0 ) {
+ if ( ! cursor->yield() ) {
cursor.release();
break;
- }
+ }
killCurrentOp.checkForInterrupt();
}
continue;
}
-
+
ClientCursor::YieldLock yield (cursor.get());
finalReduce( all );
-
+
all.clear();
prev = o;
all.push_back( o );
-
- if ( ! yield.stillOk() ){
+
+ if ( ! yield.stillOk() ) {
cursor.release();
break;
}
-
+
killCurrentOp.checkForInterrupt();
}
-
+
{
dbtempreleasecond tl;
if ( ! tl.unlocked() )
log( LL_WARNING ) << "map/reduce can't temp release" << endl;
finalReduce( all );
}
-
+
pm.finished();
}
-
- void State::reduceInMemory(){
+
+ void State::reduceInMemory() {
InMemory * n = new InMemory(); // for new data
long nSize = 0;
-
- for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); ++i ){
+
+ for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); ++i ) {
BSONObj key = i->first;
BSONList& all = i->second;
-
- if ( all.size() == 1 ){
- if ( _onDisk ){
+
+ if ( all.size() == 1 ) {
+ if ( _onDisk ) {
// this key has low cardinality, so just write to db
writelock l(_config.incLong);
Client::Context ctx(_config.incLong.c_str());
@@ -572,28 +576,28 @@ namespace mongo {
_add( n , all[0] , nSize );
}
}
- else if ( all.size() > 1 ){
+ else if ( all.size() > 1 ) {
BSONObj res = _config.reducer->reduce( all );
_add( n , res , nSize );
}
}
-
+
_temp.reset( n );
_size = nSize;
}
-
- void State::dumpToInc(){
+
+ void State::dumpToInc() {
if ( ! _onDisk )
return;
writelock l(_config.incLong);
Client::Context ctx(_config.incLong);
-
- for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); i++ ){
+
+ for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); i++ ) {
BSONList& all = i->second;
if ( all.size() < 1 )
continue;
-
+
for ( BSONList::iterator j=all.begin(); j!=all.end(); j++ )
_insertToInc( *j );
}
@@ -601,19 +605,19 @@ namespace mongo {
_size = 0;
}
-
- void State::emit( const BSONObj& a ){
+
+ void State::emit( const BSONObj& a ) {
_numEmits++;
_add( _temp.get() , a , _size );
}
- void State::_add( InMemory* im, const BSONObj& a , long& size ){
+ void State::_add( InMemory* im, const BSONObj& a , long& size ) {
BSONList& all = (*im)[a];
all.push_back( a );
size += a.objsize() + 16;
}
- void State::checkSize(){
+ void State::checkSize() {
if ( ! _onDisk )
return;
@@ -626,14 +630,14 @@ namespace mongo {
if ( _size < 1024 * 15 )
return;
-
+
dumpToInc();
log(1) << " mr: dumping to db" << endl;
}
boost::thread_specific_ptr<State*> _tl;
- BSONObj fast_emit( const BSONObj& args ){
+ BSONObj fast_emit( const BSONObj& args ) {
uassert( 10077 , "fast_emit takes 2 args" , args.nFields() == 2 );
uassert( 13069 , "an emit can't be more than half max bson size" , args.objsize() < ( BSONObjMaxUserSize / 2 ) );
(*_tl)->emit( args );
@@ -642,16 +646,16 @@ namespace mongo {
class MapReduceCommand : public Command {
public:
- MapReduceCommand() : Command("mapReduce", false, "mapreduce"){}
+ MapReduceCommand() : Command("mapReduce", false, "mapreduce") {}
virtual bool slaveOk() const { return true; }
-
+
virtual void help( stringstream &help ) const {
help << "Run a map/reduce operation on the server.\n";
help << "Note this is used for aggregation, not querying, in MongoDB.\n";
help << "http://www.mongodb.org/display/DOCS/MapReduce";
}
- virtual LockType locktype() const { return NONE; }
- bool run(const string& dbname , BSONObj& cmd, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ virtual LockType locktype() const { return NONE; }
+ bool run(const string& dbname , BSONObj& cmd, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
Timer t;
Client::GodScope cg;
Client& client = cc();
@@ -660,24 +664,24 @@ namespace mongo {
Config config( dbname , cmd );
log(1) << "mr ns: " << config.ns << endl;
-
+
bool shouldHaveData = false;
-
+
long long num = 0;
long long inReduce = 0;
-
+
BSONObjBuilder countsBuilder;
BSONObjBuilder timingBuilder;
State state( config );
- if ( ! state.sourceExists() ){
+ if ( ! state.sourceExists() ) {
errmsg = "ns doesn't exist";
return false;
}
-
+
try {
state.init();
-
+
{
State** s = new State*[1];
s[0] = &state;
@@ -690,38 +694,38 @@ namespace mongo {
{
readlock lock( config.ns );
Client::Context ctx( config.ns );
-
+
shared_ptr<Cursor> temp = bestGuessCursor( config.ns.c_str(), config.filter, config.sort );
auto_ptr<ClientCursor> cursor( new ClientCursor( QueryOption_NoCursorTimeout , temp , config.ns.c_str() ) );
Timer mt;
- while ( cursor->ok() ){
+ while ( cursor->ok() ) {
- if ( cursor->currentIsDup() ){
+ if ( cursor->currentIsDup() ) {
cursor->advance();
continue;
}
-
- if ( ! cursor->currentMatches() ){
+
+ if ( ! cursor->currentMatches() ) {
cursor->advance();
continue;
}
-
- BSONObj o = cursor->current();
+
+ BSONObj o = cursor->current();
cursor->advance();
-
+
if ( config.verbose ) mt.reset();
config.mapper->map( o );
if ( config.verbose ) mapTime += mt.micros();
-
+
num++;
- if ( num % 100 == 0 ){
+ if ( num % 100 == 0 ) {
ClientCursor::YieldLock yield (cursor.get());
Timer t;
state.checkSize();
inReduce += t.micros();
-
- if ( ! yield.stillOk() ){
+
+ if ( ! yield.stillOk() ) {
cursor.release();
break;
}
@@ -729,50 +733,50 @@ namespace mongo {
killCurrentOp.checkForInterrupt();
}
pm.hit();
-
+
if ( config.limit && num >= config.limit )
break;
}
}
pm.finished();
-
+
killCurrentOp.checkForInterrupt();
countsBuilder.appendNumber( "input" , num );
countsBuilder.appendNumber( "emit" , state.numEmits() );
if ( state.numEmits() )
shouldHaveData = true;
-
+
timingBuilder.append( "mapTime" , mapTime / 1000 );
timingBuilder.append( "emitLoop" , t.millis() );
-
+
// final reduce
op->setMessage( "m/r: (2/3) final reduce in memory" );
state.reduceInMemory();
state.dumpToInc();
-
+
state.prepTempCollection();
state.finalReduce( op , pm );
-
+
_tl.reset();
}
- catch ( ... ){
+ catch ( ... ) {
log() << "mr failed, removing collection" << endl;
throw;
}
-
+
long long finalCount = state.renameIfNeeded();
state.appendResults( result );
-
+
timingBuilder.append( "total" , t.millis() );
-
+
result.append( "result" , config.finalShort );
result.append( "timeMillis" , t.millis() );
countsBuilder.appendNumber( "output" , finalCount );
if ( config.verbose ) result.append( "timing" , timingBuilder.obj() );
result.append( "counts" , countsBuilder.obj() );
- if ( finalCount == 0 && shouldHaveData ){
+ if ( finalCount == 0 && shouldHaveData ) {
result.append( "cmd" , cmd );
errmsg = "there were emits but no data!";
return false;
@@ -782,105 +786,107 @@ namespace mongo {
}
} mapReduceCommand;
-
+
class MapReduceFinishCommand : public Command {
public:
- MapReduceFinishCommand() : Command( "mapreduce.shardedfinish" ){}
+ MapReduceFinishCommand() : Command( "mapreduce.shardedfinish" ) {}
virtual bool slaveOk() const { return true; }
-
- virtual LockType locktype() const { return NONE; }
- bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+
+ virtual LockType locktype() const { return NONE; }
+ bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string shardedOutputCollection = cmdObj["shardedOutputCollection"].valuestrsafe();
Config config( dbname , cmdObj.firstElement().embeddedObjectUserCheck() );
config.incLong = config.tempLong;
set<ServerAndQuery> servers;
-
+
BSONObjBuilder shardCounts;
map<string,long long> counts;
-
+
BSONObj shards = cmdObj["shards"].embeddedObjectUserCheck();
vector< auto_ptr<DBClientCursor> > shardCursors;
- { // parse per shard results
+ {
+ // parse per shard results
BSONObjIterator i( shards );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
string shard = e.fieldName();
-
+
BSONObj res = e.embeddedObjectUserCheck();
-
+
uassert( 10078 , "something bad happened" , shardedOutputCollection == res["result"].valuestrsafe() );
servers.insert( shard );
shardCounts.appendAs( res["counts"] , shard );
-
+
BSONObjIterator j( res["counts"].embeddedObjectUserCheck() );
- while ( j.more() ){
+ while ( j.more() ) {
BSONElement temp = j.next();
counts[temp.fieldName()] += temp.numberLong();
}
-
+
}
-
+
}
-
+
State state(config);
state.prepTempCollection();
- { // reduce from each stream
-
+ {
+ // reduce from each stream
+
BSONObj sortKey = BSON( "_id" << 1 );
-
+
ParallelSortClusteredCursor cursor( servers , dbname + "." + shardedOutputCollection ,
Query().sort( sortKey ) );
cursor.init();
state.init();
-
+
BSONList values;
-
+
result.append( "result" , config.finalShort );
-
- while ( cursor.more() ){
+
+ while ( cursor.more() ) {
BSONObj t = cursor.next().getOwned();
-
- if ( values.size() == 0 ){
+
+ if ( values.size() == 0 ) {
values.push_back( t );
continue;
}
-
- if ( t.woSortOrder( *(values.begin()) , sortKey ) == 0 ){
+
+ if ( t.woSortOrder( *(values.begin()) , sortKey ) == 0 ) {
values.push_back( t );
continue;
}
-
-
+
+
state.emit( config.reducer->reduce( values , config.finalizer.get() ) );
values.clear();
values.push_back( t );
}
-
+
if ( values.size() )
state.emit( config.reducer->reduce( values , config.finalizer.get() ) );
}
-
-
+
+
state.dumpToInc();
-
+
state.renameIfNeeded();
state.appendResults( result );
-
- for ( set<ServerAndQuery>::iterator i=servers.begin(); i!=servers.end(); i++ ){
+
+ for ( set<ServerAndQuery>::iterator i=servers.begin(); i!=servers.end(); i++ ) {
ScopedDbConnection conn( i->_server );
conn->dropCollection( dbname + "." + shardedOutputCollection );
conn.done();
}
-
+
result.append( "shardCounts" , shardCounts.obj() );
-
+
{
BSONObjBuilder c;
- for ( map<string,long long>::iterator i=counts.begin(); i!=counts.end(); i++ ){
+ for ( map<string,long long>::iterator i=counts.begin(); i!=counts.end(); i++ ) {
c.append( i->first , i->second );
}
result.append( "counts" , c.obj() );
diff --git a/db/commands/mr.h b/db/commands/mr.h
index ac56520f684..a9fe34ed7ed 100644
--- a/db/commands/mr.h
+++ b/db/commands/mr.h
@@ -20,7 +20,7 @@
#include "pch.h"
namespace mongo {
-
+
namespace mr {
typedef vector<BSONObj> BSONList;
@@ -31,35 +31,35 @@ namespace mongo {
class Mapper : boost::noncopyable {
public:
- virtual ~Mapper(){}
+ virtual ~Mapper() {}
virtual void init( State * state ) = 0;
virtual void map( const BSONObj& o ) = 0;
};
-
+
class Finalizer : boost::noncopyable {
public:
- virtual ~Finalizer(){}
+ virtual ~Finalizer() {}
virtual void init( State * state ) = 0;
-
+
/**
* this takes a tuple and returns a tuple
*/
virtual BSONObj finalize( const BSONObj& tuple ) = 0;
};
-
+
class Reducer : boost::noncopyable {
public:
- virtual ~Reducer(){}
+ virtual ~Reducer() {}
virtual void init( State * state ) = 0;
-
+
virtual BSONObj reduce( const BSONList& tuples ) = 0;
/** this means it's a final reduce, even if there is no finalizer */
virtual BSONObj reduce( const BSONList& tuples , Finalizer * finalizer ) = 0;
};
-
- // ------------ js function implementations -----------
-
+
+ // ------------ js function implementations -----------
+
/**
* used as a holder for Scope and ScriptingFunction
* visitor like pattern as Scope is gotten from first access
@@ -70,8 +70,8 @@ namespace mongo {
* @param type (map|reduce|finalize)
*/
JSFunction( string type , const BSONElement& e );
- virtual ~JSFunction(){}
-
+ virtual ~JSFunction() {}
+
virtual void init( State * state );
Scope * scope() const { return _scope; }
@@ -80,27 +80,27 @@ namespace mongo {
private:
string _type;
string _code; // actual javascript code
- BSONObj _wantedScope; // this is for CodeWScope
-
+ BSONObj _wantedScope; // this is for CodeWScope
+
Scope * _scope; // this is not owned by us, and might be shared
ScriptingFunction _func;
};
class JSMapper : public Mapper {
public:
- JSMapper( const BSONElement & code ) : _func( "map" , code ){}
+ JSMapper( const BSONElement & code ) : _func( "map" , code ) {}
virtual void map( const BSONObj& o );
virtual void init( State * state );
-
+
private:
JSFunction _func;
BSONObj _params;
};
-
+
class JSReducer : public Reducer {
public:
- JSReducer( const BSONElement& code ) : _func( "reduce" , code ){}
- virtual void init( State * state ){ _func.init( state ); }
+ JSReducer( const BSONElement& code ) : _func( "reduce" , code ) {}
+ virtual void init( State * state ) { _func.init( state ); }
virtual BSONObj reduce( const BSONList& tuples );
virtual BSONObj reduce( const BSONList& tuples , Finalizer * finalizer );
@@ -109,36 +109,36 @@ namespace mongo {
/**
* result in "return"
- * @param key OUT
+ * @param key OUT
* @param endSizeEstimate OUT
*/
void _reduce( const BSONList& values , BSONObj& key , int& endSizeEstimate );
-
+
JSFunction _func;
};
-
+
class JSFinalizer : public Finalizer {
public:
- JSFinalizer( const BSONElement& code ) : _func( "finalize" , code ){}
+ JSFinalizer( const BSONElement& code ) : _func( "finalize" , code ) {}
virtual BSONObj finalize( const BSONObj& o );
- virtual void init( State * state ){ _func.init( state ); }
+ virtual void init( State * state ) { _func.init( state ); }
private:
JSFunction _func;
};
// -----------------
-
+
class TupleKeyCmp {
public:
- TupleKeyCmp(){}
+ TupleKeyCmp() {}
bool operator()( const BSONObj &l, const BSONObj &r ) const {
return l.firstElement().woCompare( r.firstElement() ) < 0;
}
};
-
+
typedef map< BSONObj,BSONList,TupleKeyCmp > InMemory; // from key to list of tuples
/**
@@ -150,29 +150,29 @@ namespace mongo {
string dbname;
string ns;
-
+
// options
- bool verbose;
+ bool verbose;
// query options
-
+
BSONObj filter;
BSONObj sort;
long long limit;
// functions
-
+
scoped_ptr<Mapper> mapper;
scoped_ptr<Reducer> reducer;
scoped_ptr<Finalizer> finalizer;
-
+
BSONObj mapParams;
BSONObj scopeSetup;
-
+
// output tables
string incLong;
string tempLong;
-
+
string finalShort;
string finalLong;
@@ -180,11 +180,11 @@ namespace mongo {
MERGE , // merge keys, override dups
REDUCE , // merge keys, reduce dups
INMEMORY // only store in memory, limited in size
- } outType;
-
+ } outType;
+
static AtomicUInt JOB_NUMBER;
}; // end MRsetup
-
+
/**
* stores information about intermediate map reduce state
* controls flow of data from map->reduce->finalize->output
@@ -195,14 +195,14 @@ namespace mongo {
~State();
void init();
-
+
// ---- prep -----
bool sourceExists();
long long incomingDocuments();
- // ---- map stage ----
-
+ // ---- map stage ----
+
/**
* stages an emitted object in in-memory storage
*/
@@ -226,19 +226,19 @@ namespace mongo {
// ------ reduce stage -----------
- void prepTempCollection();
-
+ void prepTempCollection();
+
void finalReduce( BSONList& values );
-
+
void finalReduce( CurOp * op , ProgressMeterHolder& pm );
-
+
// ------- cleanup/data positioning ----------
-
+
/**
@return number of objects in collection
*/
long long renameIfNeeded();
-
+
/**
* if INMEMORY, appends the results to b
* may also append stats or anything else it likes
@@ -246,17 +246,17 @@ namespace mongo {
void appendResults( BSONObjBuilder& b );
// -------- util ------------
-
+
/**
* inserts with correct replication semantics
*/
void insert( const string& ns , BSONObj& o );
-
+
// ------ simple accessors -----
/** State maintains ownership, do not use past State lifetime */
Scope* scope() { return _scope.get(); }
-
+
const Config& config() { return _config; }
long long numEmits() const { return _numEmits; }
@@ -274,7 +274,7 @@ namespace mongo {
scoped_ptr<InMemory> _temp;
long _size; // bytes in _temp
-
+
long long _numEmits;
};
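
InMemory above is just an ordered map from emitted key to the list of tuples sharing it; State::_add appends under the key and reduceInMemory rebuilds the map, collapsing any multi-tuple list through the reducer. A rough standalone equivalent, with std::string keys and long long values standing in for BSONObj and a summing reduce assumed:

    #include <map>
    #include <string>
    #include <vector>

    typedef std::vector<long long> BSONList;            // the tuples for one key
    typedef std::map<std::string, BSONList> InMemory;   // key -> tuples

    // emit side: append under the key, tracking a rough size estimate
    static void add(InMemory& im, const std::string& k, long long v, long& size) {
        im[k].push_back(v);
        size += (long)sizeof(v) + 16;       // mirrors the "+16 overhead" guess
    }

    // reduceInMemory: rebuild the map, collapsing multi-tuple keys
    static void reduceInMemory(InMemory& im) {
        InMemory n;
        for (InMemory::iterator i = im.begin(); i != im.end(); ++i) {
            if (i->second.size() == 1) {    // low cardinality: nothing to do
                n[i->first] = i->second;
                continue;
            }
            long long sum = 0;              // assumed reducer: summation
            for (size_t j = 0; j < i->second.size(); j++)
                sum += i->second[j];
            n[i->first].push_back(sum);     // one reduced tuple per key
        }
        im.swap(n);
    }
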
diff --git a/db/common.cpp b/db/common.cpp
index bf78f8b71a1..44bc54d0ab0 100644
--- a/db/common.cpp
+++ b/db/common.cpp
@@ -26,7 +26,7 @@ namespace mongo {
/* we use new here so we don't have to worry about destructor orders at program shutdown */
MongoMutex &dbMutex( *(new MongoMutex("rw:dbMutex")) );
- MongoMutex::MongoMutex(const char *name) : _m(name) {
+ MongoMutex::MongoMutex(const char *name) : _m(name) {
_remapPrivateViewRequested = false;
}
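
The dbMutex definition above heap-allocates the mutex and never frees it, so no static destructor can tear it down while other shutdown code still wants to lock it. The idiom in isolation (any lockable type works):

    struct Mutex {                 // any lockable type
        void lock() {}
        void unlock() {}
    };

    // heap-allocate and never delete: the mutex outlives every static
    // destructor, so shutdown code may still lock it safely
    static Mutex& globalMutex = *(new Mutex());

    int main() {
        globalMutex.lock();
        globalMutex.unlock();
        return 0;
    }
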
diff --git a/db/compact.cpp b/db/compact.cpp
index 569e40458ee..6bafd91be15 100644
--- a/db/compact.cpp
+++ b/db/compact.cpp
@@ -27,7 +27,7 @@
#include "curop-inl.h"
#include "../util/concurrency/task.h"
-namespace mongo {
+namespace mongo {
class CompactJob : public task::Task {
public:
@@ -44,9 +44,9 @@ namespace mongo {
DiskLoc _firstExtent;
};
- // lock & set context first. this checks that collection still exists, and that it hasn't
+ // lock & set context first. this checks that collection still exists, and that it hasn't
// morphed into a capped collection between locks (which is possible)
- NamespaceDetails * CompactJob::beginBlock() {
+ NamespaceDetails * CompactJob::beginBlock() {
NamespaceDetails *nsd = nsdetails(_ns.c_str());
if( nsd == 0 ) throw "ns no longer present";
if( nsd->firstExtent.isNull() )
@@ -60,7 +60,7 @@ namespace mongo {
unsigned n = 0;
{
/* pre-touch records in a read lock so that paging happens in read not write lock.
- note we are only touching the records though; if indexes aren't in RAM, they will
+ note we are only touching the records though; if indexes aren't in RAM, they will
page later. So the concept is only partial.
*/
readlock lk;
@@ -69,8 +69,8 @@ namespace mongo {
NamespaceDetails *nsd = beginBlock();
if( nsd->firstExtent != _firstExtent ) {
// TEMP DEV - stop after 1st extent
- throw "change of first extent";
- }
+ throw "change of first extent";
+ }
DiskLoc loc = nsd->firstExtent.ext()->firstRecord;
while( !loc.isNull() ) {
Record *r = loc.rec();
@@ -86,12 +86,12 @@ namespace mongo {
for( unsigned i = 0; i < n; i++ ) {
if( nsd->firstExtent != _firstExtent ) {
// TEMP DEV - stop after 1st extent
- throw "change of first extent (or it is now null)";
+ throw "change of first extent (or it is now null)";
}
DiskLoc loc = nsd->firstExtent.ext()->firstRecord;
Record *rec = loc.rec();
BSONObj o = loc.obj().getOwned(); // todo: inefficient, double mem copy...
- try {
+ try {
theDataFileMgr.deleteRecord(_ns.c_str(), rec, loc, false);
}
catch(DBException&) { throw "error deleting record"; }
@@ -110,7 +110,7 @@ namespace mongo {
}
}
- void CompactJob::prep() {
+ void CompactJob::prep() {
readlock lk;
Client::Context ctx(_ns);
NamespaceDetails *nsd = beginBlock();
@@ -124,21 +124,21 @@ namespace mongo {
static mutex m("compact");
static volatile bool running;
- void CompactJob::doWork() {
+ void CompactJob::doWork() {
Client::initThread("compact");
cc().curop()->reset();
cc().curop()->setNS(_ns.c_str());
cc().curop()->markCommand();
sleepsecs(60);
- try {
+ try {
prep();
- while( _ncompacted < _nrecords )
+ while( _ncompacted < _nrecords )
doBatch();
}
- catch(const char *p) {
+ catch(const char *p) {
log() << "info: exception compact " << p << endl;
}
- catch(...) {
+ catch(...) {
log() << "info: exception compact" << endl;
}
mongo::running = false;
@@ -147,11 +147,11 @@ namespace mongo {
/* --- CompactCmd --- */
- class CompactCmd : public Command {
+ class CompactCmd : public Command {
public:
- virtual bool run(const string& db, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(const string& db, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
string coll = cmdObj.firstElement().valuestr();
- if( coll.empty() || db.empty() ) {
+ if( coll.empty() || db.empty() ) {
errmsg = "no collection name specified";
return false;
}
@@ -160,7 +160,7 @@ namespace mongo {
{
readlock lk;
Client::Context ctx(ns);
- if( nsdetails(ns.c_str()) == 0 ) {
+ if( nsdetails(ns.c_str()) == 0 ) {
errmsg = "namespace " + ns + " does not exist";
return false;
}
@@ -181,15 +181,15 @@ namespace mongo {
virtual LockType locktype() const { return NONE; }
virtual bool adminOnly() const { return false; }
- virtual bool slaveOk() const { return true; }
+ virtual bool slaveOk() const { return true; }
virtual bool logTheOp() { return false; }
- virtual void help( stringstream& help ) const {
+ virtual void help( stringstream& help ) const {
help << "compact / defragment a collection in the background, slowly, attempting to minimize disruptions to other operations\n"
- "{ compact : <collection> }";
+ "{ compact : <collection> }";
}
virtual bool requiresAuth() { return true; }
- /** @param webUI expose the command in the web ui as localhost:28017/<name>
+ /** @param webUI expose the command in the web ui as localhost:28017/<name>
@param oldName an optional old, deprecated name for the command
*/
CompactCmd() : Command("compact") { }
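
CompactJob works through records a batch at a time under short-lived locks: beginBlock() re-validates the collection every time the lock is re-taken, and const char* throws are the abort channel that doWork() catches and logs. A skeletal version of that drive loop, with stub checks in place of the real NamespaceDetails bookkeeping:

    #include <iostream>

    static unsigned ncompacted = 0;
    static unsigned nrecords = 100;

    // stand-in for beginBlock(): re-check invariants each time the lock
    // is re-taken; a const char* throw is the abort signal
    static void beginBlock() {
        // if ( collection dropped )   throw "ns no longer present";
        // if ( first extent changed ) throw "change of first extent";
    }

    static void doBatch() {
        beginBlock();
        for (unsigned i = 0; i < 10 && ncompacted < nrecords; i++)
            ncompacted++;                  // move/rewrite one record here
    }

    static void doWork() {
        try {
            while (ncompacted < nrecords)
                doBatch();                 // lock dropped between batches
        }
        catch (const char* p) {
            std::cout << "info: exception compact " << p << std::endl;
        }
    }

    int main() { doWork(); return 0; }
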
diff --git a/db/concurrency.h b/db/concurrency.h
index 97b66c6516a..39cd853e230 100644
--- a/db/concurrency.h
+++ b/db/concurrency.h
@@ -37,7 +37,7 @@ namespace mongo {
string sayClientState();
bool haveClient();
-
+
class Client;
Client* curopWaitingForLock( int type );
void curopGotLock(Client*);
@@ -84,33 +84,33 @@ namespace mongo {
struct writelock {
writelock() { dbMutex.lock(); }
writelock(const string& ns) { dbMutex.lock(); }
- ~writelock() {
+ ~writelock() {
DESTRUCTOR_GUARD(
dbunlocking_write();
dbMutex.unlock();
);
}
};
-
+
struct readlock {
readlock(const string& ns) {
dbMutex.lock_shared();
}
readlock() { dbMutex.lock_shared(); }
- ~readlock() {
+ ~readlock() {
DESTRUCTOR_GUARD(
dbunlocking_read();
dbMutex.unlock_shared();
);
}
- };
+ };
struct readlocktry {
- readlocktry( const string&ns , int tryms ){
+ readlocktry( const string&ns , int tryms ) {
_got = dbMutex.lock_shared_try( tryms );
}
~readlocktry() {
- if ( _got ){
+ if ( _got ) {
dbunlocking_read();
dbMutex.unlock_shared();
}
@@ -121,11 +121,11 @@ namespace mongo {
};
struct writelocktry {
- writelocktry( const string&ns , int tryms ){
+ writelocktry( const string&ns , int tryms ) {
_got = dbMutex.lock_try( tryms );
}
~writelocktry() {
- if ( _got ){
+ if ( _got ) {
dbunlocking_read();
dbMutex.unlock();
}
@@ -135,10 +135,10 @@ namespace mongo {
bool _got;
};
- struct readlocktryassert : public readlocktry {
- readlocktryassert(const string& ns, int tryms) :
- readlocktry(ns,tryms) {
- uassert(13142, "timeout getting readlock", got());
+ struct readlocktryassert : public readlocktry {
+ readlocktryassert(const string& ns, int tryms) :
+ readlocktry(ns,tryms) {
+ uassert(13142, "timeout getting readlock", got());
}
};
@@ -146,12 +146,12 @@ namespace mongo {
if you have a write lock, that's ok too.
*/
struct atleastreadlock {
- atleastreadlock( const string& ns ){
+ atleastreadlock( const string& ns ) {
_prev = dbMutex.getState();
if ( _prev == 0 )
dbMutex.lock_shared();
}
- ~atleastreadlock(){
+ ~atleastreadlock() {
if ( _prev == 0 )
dbMutex.unlock_shared();
}
@@ -159,7 +159,7 @@ namespace mongo {
int _prev;
};
- /* parameterized choice of read or write locking
+ /* parameterized choice of read or write locking
use readlock and writelock instead of this when statically known which you want
*/
class mongolock {
@@ -172,21 +172,22 @@ namespace mongo {
else
dbMutex.lock_shared();
}
- ~mongolock() {
+ ~mongolock() {
DESTRUCTOR_GUARD(
- if( _writelock ) {
- dbunlocking_write();
- dbMutex.unlock();
- } else {
- dbunlocking_read();
- dbMutex.unlock_shared();
- }
+ if( _writelock ) {
+ dbunlocking_write();
+ dbMutex.unlock();
+ }
+ else {
+ dbunlocking_read();
+ dbMutex.unlock_shared();
+ }
);
}
/* this unlocks, does NOT upgrade. that works for our current usage */
void releaseAndWriteLock();
};
-
+
/* deprecated - use writelock and readlock instead */
struct dblock : public writelock {
dblock() : writelock("") { }
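
These lock structs are plain RAII guards over the single global reader-writer mutex, with the try variants recording whether the lock was obtained so the destructor only unlocks on success. A self-contained sketch of the same shapes, with C++14 std::shared_timed_mutex standing in for MongoMutex (the original predates the standard primitives and adds the dbunlocking_* bookkeeping omitted here):

    #include <chrono>
    #include <shared_mutex>

    static std::shared_timed_mutex dbMutex;   // stand-in for MongoMutex

    struct writelock {                        // exclusive for object lifetime
        writelock() { dbMutex.lock(); }
        ~writelock() { dbMutex.unlock(); }
    };

    struct readlock {                         // shared for object lifetime
        readlock() { dbMutex.lock_shared(); }
        ~readlock() { dbMutex.unlock_shared(); }
    };

    struct readlocktry {                      // shared, but give up after tryms
        bool _got;
        explicit readlocktry(int tryms)
            : _got(dbMutex.try_lock_shared_for(std::chrono::milliseconds(tryms))) {}
        ~readlocktry() { if (_got) dbMutex.unlock_shared(); }
        bool got() const { return _got; }
    };
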
diff --git a/db/curop-inl.h b/db/curop-inl.h
index eb625f2292c..21d6f0a72a0 100644
--- a/db/curop-inl.h
+++ b/db/curop-inl.h
@@ -20,11 +20,11 @@
#include "curop.h"
-namespace mongo {
+namespace mongo {
// todo : move more here
- inline CurOp::CurOp( Client * client , CurOp * wrapped ) {
+ inline CurOp::CurOp( Client * client , CurOp * wrapped ) {
_client = client;
_wrapped = wrapped;
if ( _wrapped )
diff --git a/db/curop.h b/db/curop.h
index 5c6a63426d0..5011b633afa 100644
--- a/db/curop.h
+++ b/db/curop.h
@@ -27,7 +27,7 @@
#include "db.h"
#include "../scripting/engine.h"
-namespace mongo {
+namespace mongo {
/* lifespan is different than CurOp because of recursives with DBDirectClient */
class OpDebug {
@@ -35,7 +35,7 @@ namespace mongo {
StringBuilder str;
void reset() { str.reset(); }
};
-
+
/**
* stores a copy of a bson obj in a fixed size buffer
* if its too big for the buffer, says "too big"
@@ -46,67 +46,67 @@ namespace mongo {
enum { TOO_BIG_SENTINEL = 1 } ;
static BSONObj _tooBig; // { $msg : "query not recording (too large)" }
- CachedBSONObj(){
+ CachedBSONObj() {
_size = (int*)_buf;
reset();
}
-
+
void reset( int sz = 0 ) { _size[0] = sz; }
-
- void set( const BSONObj& o ){
+
+ void set( const BSONObj& o ) {
_lock.lock();
try {
int sz = o.objsize();
-
- if ( sz > (int) sizeof(_buf) ) {
+
+ if ( sz > (int) sizeof(_buf) ) {
reset(TOO_BIG_SENTINEL);
}
else {
memcpy(_buf, o.objdata(), sz );
}
-
+
_lock.unlock();
}
- catch ( ... ){
+ catch ( ... ) {
_lock.unlock();
throw;
}
-
+
}
-
+
int size() const { return *_size; }
bool have() const { return size() > 0; }
- BSONObj get(){
- _lock.lock();
+ BSONObj get() {
+ _lock.lock();
BSONObj o;
try {
o = _get();
_lock.unlock();
}
- catch ( ... ){
+ catch ( ... ) {
_lock.unlock();
throw;
- }
- return o;
+ }
+ return o;
}
- void append( BSONObjBuilder& b , const StringData& name ){
+ void append( BSONObjBuilder& b , const StringData& name ) {
_lock.lock();
try {
BSONObj temp = _get();
b.append( name , temp );
_lock.unlock();
}
- catch ( ... ){
+ catch ( ... ) {
_lock.unlock();
throw;
}
}
-
+
private:
/** you have to be locked when you call this */
- BSONObj _get(){
+ BSONObj _get() {
int sz = size();
if ( sz == 0 )
return BSONObj();
@@ -121,7 +121,7 @@ namespace mongo {
};
/* Current operation (for the current Client).
- an embedded member of Client class, and typically used from within the mutex there.
+ an embedded member of Client class, and typically used from within the mutex there.
*/
class CurOp : boost::noncopyable {
public:
@@ -129,26 +129,26 @@ namespace mongo {
~CurOp();
bool haveQuery() const { return _query.have(); }
- BSONObj query(){ return _query.get(); }
+ BSONObj query() { return _query.get(); }
- void ensureStarted(){
+ void ensureStarted() {
if ( _start == 0 )
- _start = _checkpoint = curTimeMicros64();
+ _start = _checkpoint = curTimeMicros64();
}
- void enter( Client::Context * context ){
+ void enter( Client::Context * context ) {
ensureStarted();
setNS( context->ns() );
if ( context->_db && context->_db->profile > _dbprofile )
_dbprofile = context->_db->profile;
}
- void leave( Client::Context * context ){
+ void leave( Client::Context * context ) {
unsigned long long now = curTimeMicros64();
Top::global.record( _ns , _op , _lockType , now - _checkpoint , _command );
_checkpoint = now;
}
- void reset(){
+ void reset() {
_reset();
_start = _checkpoint = 0;
_active = true;
@@ -157,16 +157,16 @@ namespace mongo {
_debug.reset();
_query.reset();
}
-
+
void reset( const SockAddr & remote, int op ) {
reset();
_remote = remote;
_op = op;
}
-
+
void markCommand() { _command = true; }
- void waitingForLock( int type ){
+ void waitingForLock( int type ) {
_waitingForLock = true;
if ( type > 0 )
_lockType = 1;
@@ -174,26 +174,26 @@ namespace mongo {
_lockType = -1;
}
void gotLock() { _waitingForLock = false; }
- OpDebug& debug() { return _debug; }
+ OpDebug& debug() { return _debug; }
int profileLevel() const { return _dbprofile; }
const char * getNS() const { return _ns; }
bool shouldDBProfile( int ms ) const {
if ( _dbprofile <= 0 )
return false;
-
+
return _dbprofile >= 2 || ms >= cmdLine.slowMS;
}
-
+
AtomicUInt opNum() const { return _opNum; }
/** if this op is running */
bool active() const { return _active; }
-
+
int getLockType() const { return _lockType; }
- bool isWaitingForLock() const { return _waitingForLock; }
+ bool isWaitingForLock() const { return _waitingForLock; }
int getOp() const { return _op; }
-
+
/** micros */
unsigned long long startTime() {
ensureStarted();
@@ -204,7 +204,7 @@ namespace mongo {
_active = false;
_end = curTimeMicros64();
}
-
+
unsigned long long totalTimeMicros() {
massert( 12601 , "CurOp not marked done yet" , ! _active );
return _end - startTime();
@@ -223,22 +223,22 @@ namespace mongo {
Client * getClient() const { return _client; }
- BSONObj info() {
- if( ! cc().getAuthenticationInfo()->isAuthorized("admin") ) {
+ BSONObj info() {
+ if( ! cc().getAuthenticationInfo()->isAuthorized("admin") ) {
BSONObjBuilder b;
b.append("err", "unauthorized");
return b.obj();
}
return infoNoauth();
}
-
+
BSONObj infoNoauth();
string getRemoteString( bool includePort = true ) { return _remote.toString(includePort); }
ProgressMeter& setMessage( const char * msg , unsigned long long progressMeterTotal = 0 , int secondsBetween = 3 ) {
- if ( progressMeterTotal ){
- if ( _progressMeter.isActive() ){
+ if ( progressMeterTotal ) {
+ if ( _progressMeter.isActive() ) {
cout << "about to assert, old _message: " << _message << " new message:" << msg << endl;
assert( ! _progressMeter.isActive() );
}
@@ -247,19 +247,19 @@ namespace mongo {
else {
_progressMeter.finished();
}
-
+
_message = msg;
-
+
return _progressMeter;
}
-
+
string getMessage() const { return _message.toString(); }
ProgressMeter& getProgressMeter() { return _progressMeter; }
CurOp *parent() const { return _wrapped; }
void kill() { _killed = true; }
bool killed() const { return _killed; }
- void setNS(const char *ns) {
- strncpy(_ns, ns, Namespace::MaxNsLen);
+ void setNS(const char *ns) {
+ strncpy(_ns, ns, Namespace::MaxNsLen);
_ns[Namespace::MaxNsLen] = 0;
}
friend class Client;
@@ -286,7 +286,7 @@ namespace mongo {
ProgressMeter _progressMeter;
volatile bool _killed;
- void _reset(){
+ void _reset() {
_command = false;
_lockType = 0;
_dbprofile = 0;
@@ -303,14 +303,14 @@ namespace mongo {
this class does not handle races between interruptJs and the checkForInterrupt functions - those must be
handled by the client of this class
*/
- extern class KillCurrentOp {
+ extern class KillCurrentOp {
public:
void killAll();
void kill(AtomicUInt i);
/** @return true if global interrupt and should terminate the operation */
bool globalInterruptCheck() const { return _globalKill; }
-
+
void checkForInterrupt( bool heedMutex = true ) {
if ( heedMutex && dbMutex.isWriteLocked() )
return;
@@ -319,7 +319,7 @@ namespace mongo {
if( cc().curop()->killed() )
uasserted(11601,"interrupted");
}
-
+
/** @return "" if not interrupted. otherwise, you should stop. */
const char *checkForInterruptNoAssert( bool heedMutex = true ) {
if ( heedMutex && dbMutex.isWriteLocked() )
@@ -330,7 +330,7 @@ namespace mongo {
return "interrupted";
return "";
}
-
+
private:
void interruptJs( AtomicUInt *op );
volatile bool _globalKill;
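
CachedBSONObj exists so other threads (currentOp, the profiler) can read a snapshot of the running query without racing the operation that owns it: set() copies into a fixed buffer under a spin lock, oversized objects become a sentinel, and get() always hands back a copy. The same copy-under-lock pattern standalone, with std::mutex and std::string in place of the spin lock and BSON; the manual lock()/unlock() pairs wrapped in catch blocks above are exactly what lock_guard replaces:

    #include <cstring>
    #include <mutex>
    #include <string>

    class CachedObj {
        enum { BUF = 512 };
        std::mutex _lock;
        int _size;                 // -1 means "too big to cache"
        char _buf[BUF];
    public:
        CachedObj() : _size(0) {}

        void set(const std::string& o) {           // writer: copy or mark too big
            std::lock_guard<std::mutex> lk(_lock);
            if ((int)o.size() > BUF) { _size = -1; return; }
            std::memcpy(_buf, o.data(), o.size());
            _size = (int)o.size();
        }

        std::string get() {                        // reader: always a safe copy
            std::lock_guard<std::mutex> lk(_lock);
            if (_size < 0) return "{ $msg: \"too large\" }";
            return std::string(_buf, _size);
        }
    };
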
diff --git a/db/cursor.cpp b/db/cursor.cpp
index 51a24f3e947..ac7afc1532b 100644
--- a/db/cursor.cpp
+++ b/db/cursor.cpp
@@ -24,11 +24,13 @@ namespace mongo {
killCurrentOp.checkForInterrupt();
if ( eof() ) {
if ( tailable_ && !last.isNull() ) {
- curr = s->next( last );
- } else {
+ curr = s->next( last );
+ }
+ else {
return false;
}
- } else {
+ }
+ else {
last = curr;
curr = s->next( curr );
}
@@ -73,7 +75,7 @@ namespace mongo {
}
ForwardCappedCursor::ForwardCappedCursor( NamespaceDetails *_nsd, const DiskLoc &startLoc ) :
- nsd( _nsd ) {
+ nsd( _nsd ) {
if ( !nsd )
return;
DiskLoc start = startLoc;
@@ -114,14 +116,15 @@ namespace mongo {
}
ReverseCappedCursor::ReverseCappedCursor( NamespaceDetails *_nsd, const DiskLoc &startLoc ) :
- nsd( _nsd ) {
+ nsd( _nsd ) {
if ( !nsd )
return;
DiskLoc start = startLoc;
if ( start.isNull() ) {
if ( !nsd->capLooped() ) {
start = nsd->lastRecord();
- } else {
+ }
+ else {
start = nsd->capExtent.ext()->lastRecord;
}
}
@@ -141,7 +144,8 @@ namespace mongo {
if ( i == nextLoop( nsd, nsd->capExtent.ext()->lastRecord ) ) {
return DiskLoc();
}
- } else {
+ }
+ else {
if ( i == nsd->capExtent.ext()->firstRecord ) {
return DiskLoc();
}
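
BasicCursor::advance above keeps `last` so a tailable cursor has a resume point: at eof it retries next(last) instead of terminating, picking up records appended since the previous call. A toy version over an append-only store, with integer indices standing in for DiskLocs (-1 as the null location):

    #include <vector>

    // toy record store: records are ints, a DiskLoc is an index (-1 == null)
    struct Store {
        std::vector<int> recs;
        int next(int loc) const {          // location of the record after 'loc'
            return (loc + 1 < (int)recs.size()) ? loc + 1 : -1;
        }
    };

    struct BasicCursor {
        const Store* s;
        int curr, last;
        bool tailable_;

        explicit BasicCursor(const Store& st)
            : s(&st), curr(st.recs.empty() ? -1 : 0), last(-1), tailable_(false) {}

        bool eof() const { return curr == -1; }

        bool advance() {
            if (eof()) {
                if (tailable_ && last != -1)
                    curr = s->next(last);  // resume from the last record seen
                else
                    return false;
            }
            else {
                last = curr;               // remember the resume point
                curr = s->next(curr);
            }
            return !eof();
        }
    };
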
diff --git a/db/cursor.h b/db/cursor.h
index 1b0d402c2d2..9797d668dbc 100644
--- a/db/cursor.h
+++ b/db/cursor.h
@@ -23,7 +23,7 @@
#include "matcher.h"
namespace mongo {
-
+
class NamespaceDetails;
class Record;
class CoveredIndexMatcher;
@@ -31,7 +31,7 @@ namespace mongo {
/* Query cursors, base class. This is for our internal cursors. "ClientCursor" is a separate
concept and is for the user's cursor.
- WARNING concurrency: the vfunctions below are called back from within a
+ WARNING concurrency: the vfunctions below are called back from within a
ClientCursor::ccmutex. Don't cause a deadlock, you've been warned.
*/
class Cursor : boost::noncopyable {
@@ -50,7 +50,7 @@ namespace mongo {
virtual DiskLoc refLoc() = 0;
/* Implement these if you want the cursor to be "tailable" */
-
+
/* Request that the cursor starts tailing after advancing past last record. */
/* The implementation may or may not honor this request. */
virtual void setTailable() {}
@@ -77,10 +77,10 @@ namespace mongo {
/* called before query getmore block is iterated */
virtual void checkLocation() { }
-
+
virtual bool supportGetMore() = 0;
virtual bool supportYields() = 0;
-
+
virtual string toString() { return "abstract?"; }
/* used for multikey index traversal to avoid sending back dups. see Matcher::matches().
@@ -88,17 +88,17 @@ namespace mongo {
if loc has already been sent, returns true.
otherwise, marks loc as sent.
@param deep - match was against an array, so we know it is multikey. this is legacy and kept
- for backwards datafile compatibility. 'deep' can be eliminated next time we
+ for backwards datafile compatibility. 'deep' can be eliminated next time we
force a data file conversion. 7Jul09
*/
virtual bool getsetdup(DiskLoc loc) = 0;
-
+
virtual bool isMultiKey() const = 0;
/**
* return true if the keys in the index have been modified from the main doc
- * if you have { a : 1 , b : [ 1 , 2 ] }
- * an index on { a : 1 } would not be modified
+ * if you have { a : 1 , b : [ 1 , 2 ] }
+ * an index on { a : 1 } would not be modified
* an index on { b : 1 } would be since the values of the array are put in the index
* not the array
*/
@@ -109,12 +109,12 @@ namespace mongo {
virtual bool capped() const { return false; }
virtual long long nscanned() = 0;
-
+
// The implementation may return different matchers depending on the
// position of the cursor. If matcher() is nonzero at the start,
// matcher() should be checked each time advance() is called.
virtual CoveredIndexMatcher *matcher() const { return 0; }
-
+
// A convenience function for setting the value of matcher() manually
// so it may accessed later. Implementations which must generate
// their own matcher() should assert here.
@@ -154,7 +154,7 @@ namespace mongo {
return j;
}
virtual DiskLoc currLoc() { return curr; }
- virtual DiskLoc refLoc() { return curr.isNull() ? last : curr; }
+ virtual DiskLoc refLoc() { return curr.isNull() ? last : curr; }
bool advance();
virtual string toString() { return "BasicCursor"; }
virtual void setTailable() {
@@ -167,9 +167,9 @@ namespace mongo {
virtual bool modifiedKeys() const { return false; }
virtual bool supportGetMore() { return true; }
virtual bool supportYields() { return true; }
- virtual CoveredIndexMatcher *matcher() const { return _matcher.get(); }
+ virtual CoveredIndexMatcher *matcher() const { return _matcher.get(); }
virtual void setMatcher( shared_ptr< CoveredIndexMatcher > matcher ) { _matcher = matcher; }
- virtual long long nscanned() { return _nscanned; }
+ virtual long long nscanned() { return _nscanned; }
protected:
DiskLoc curr, last;
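
getsetdup is the dedup hook for multikey scans: the first visit to a location records it and reports false, later visits report true and the caller skips the record. A sketch over integer record locations:

    #include <set>

    class DupTracker {
        std::set<long long> _sent;         // record locations already returned
    public:
        // true if loc was already sent; otherwise marks it sent and returns false
        bool getsetdup(long long loc) {
            return !_sent.insert(loc).second;
        }
    };

A document indexed under { b : 1 } with b : [ 1 , 2 ] appears at two index keys but one location, so the second visit hits the set and is skipped.
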
diff --git a/db/database.cpp b/db/database.cpp
index 39a8901f62e..1b2add807ec 100644
--- a/db/database.cpp
+++ b/db/database.cpp
@@ -31,17 +31,17 @@ namespace mongo {
size_t n = files.size();
for ( size_t i = 0; i < n; i++ )
delete files[i];
- if( ccByLoc.size() ) {
+ if( ccByLoc.size() ) {
log() << "\n\n\nWARNING: ccByLoc not empty on database close! " << ccByLoc.size() << ' ' << name << endl;
}
}
Database::Database(const char *nm, bool& newDb, const string& _path )
- : name(nm), path(_path), namespaceIndex( path, name ),
- profileName(name + ".system.profile")
- {
-
- { // check db name is valid
+ : name(nm), path(_path), namespaceIndex( path, name ),
+ profileName(name + ".system.profile") {
+
+ {
+ // check db name is valid
size_t L = strlen(nm);
uassert( 10028 , "db name is empty", L > 0 );
uassert( 10029 , "bad db name [1]", *nm != '.' );
@@ -49,38 +49,38 @@ namespace mongo {
uassert( 10031 , "bad char(s) in db name", strchr(nm, ' ') == 0 );
uassert( 10032 , "db name too long", L < 64 );
}
-
+
newDb = namespaceIndex.exists();
profile = 0;
{
vector<string> others;
getDatabaseNames( others , path );
-
- for ( unsigned i=0; i<others.size(); i++ ){
+
+ for ( unsigned i=0; i<others.size(); i++ ) {
if ( strcasecmp( others[i].c_str() , nm ) )
continue;
if ( strcmp( others[i].c_str() , nm ) == 0 )
continue;
-
+
stringstream ss;
ss << "db already exists with different case other: [" << others[i] << "] me [" << nm << "]";
uasserted( DatabaseDifferCaseCode , ss.str() );
}
}
-
+
// If already exists, open. Otherwise behave as if empty until
// there's a write, then open.
if ( ! newDb || cmdLine.defaultProfile ) {
namespaceIndex.init();
if( _openAllFiles )
openAllFiles();
-
+
}
-
+
magic = 781231;
}
@@ -96,9 +96,9 @@ namespace mongo {
return fullName;
}
- void Database::openAllFiles() {
+ void Database::openAllFiles() {
int n = 0;
- while( exists(n) ) {
+ while( exists(n) ) {
getFile(n);
n++;
}
@@ -112,7 +112,7 @@ namespace mongo {
MongoDataFile* Database::getFile( int n, int sizeNeeded , bool preallocateOnly) {
assert(this);
-
+
namespaceIndex.init();
if ( n < 0 || n >= DiskLoc::MaxFiles ) {
out() << "getFile(): n=" << n << endl;
@@ -151,7 +151,7 @@ namespace mongo {
}
return preallocateOnly ? 0 : p;
}
-
+
MongoDataFile* Database::addAFile( int sizeNeeded, bool preallocateNextFile ) {
int n = (int) files.size();
MongoDataFile *ret = getFile( n, sizeNeeded );
@@ -163,7 +163,7 @@ namespace mongo {
MongoDataFile* Database::suitableFile( int sizeNeeded, bool preallocate ) {
// check existing files
- for ( int i=numFiles()-1; i>=0; i-- ){
+ for ( int i=numFiles()-1; i>=0; i-- ) {
MongoDataFile* f = getFile( i );
if ( f->getHeader()->unusedLength >= sizeNeeded )
return f;
@@ -172,14 +172,14 @@ namespace mongo {
// allocate files until we either get one big enough or hit maxSize
for ( int i = 0; i < 8; i++ ) {
MongoDataFile* f = addAFile( sizeNeeded, preallocate );
-
+
if ( f->getHeader()->unusedLength >= sizeNeeded )
return f;
if ( f->getHeader()->fileLength >= MongoDataFile::maxSize() ) // this is as big as they get so might as well stop
return f;
}
-
+
return 0;
}
@@ -190,37 +190,37 @@ namespace mongo {
return getFile(n-1);
}
-
- Extent* Database::allocExtent( const char *ns, int size, bool capped ) {
+
+ Extent* Database::allocExtent( const char *ns, int size, bool capped ) {
Extent *e = DataFileMgr::allocFromFreeList( ns, size, capped );
- if( e )
+ if( e )
return e;
return suitableFile( size, !capped )->createExtent( ns, size, capped );
}
-
-
- bool Database::setProfilingLevel( int newLevel , string& errmsg ){
+
+
+ bool Database::setProfilingLevel( int newLevel , string& errmsg ) {
if ( profile == newLevel )
return true;
-
- if ( newLevel < 0 || newLevel > 2 ){
+
+ if ( newLevel < 0 || newLevel > 2 ) {
errmsg = "profiling level has to be >=0 and <= 2";
return false;
}
-
- if ( newLevel == 0 ){
+
+ if ( newLevel == 0 ) {
profile = 0;
return true;
}
-
+
assert( cc().database() == this );
- if ( ! namespaceIndex.details( profileName.c_str() ) ){
+ if ( ! namespaceIndex.details( profileName.c_str() ) ) {
log(1) << "creating profile ns: " << profileName << endl;
BSONObjBuilder spec;
spec.appendBool( "capped", true );
spec.append( "size", 131072.0 );
- if ( ! userCreateNS( profileName.c_str(), spec.done(), errmsg , true ) ){
+ if ( ! userCreateNS( profileName.c_str(), spec.done(), errmsg , true ) ) {
return false;
}
}
@@ -228,15 +228,15 @@ namespace mongo {
return true;
}
- void Database::finishInit(){
+ void Database::finishInit() {
if ( cmdLine.defaultProfile == profile )
return;
-
+
string errmsg;
massert( 12506 , errmsg , setProfilingLevel( cmdLine.defaultProfile , errmsg ) );
}
- bool Database::validDBName( const string& ns ){
+ bool Database::validDBName( const string& ns ) {
if ( ns.size() == 0 || ns.size() > 64 )
return false;
size_t good = strcspn( ns.c_str() , "/\\. \"" );
@@ -245,7 +245,7 @@ namespace mongo {
void Database::flushFiles( bool sync ) const {
dbMutex.assertAtLeastReadLocked();
- for ( unsigned i=0; i<files.size(); i++ ){
+ for ( unsigned i=0; i<files.size(); i++ ) {
files[i]->flush( sync );
}
}
@@ -257,28 +257,28 @@ namespace mongo {
return size;
}
- Database* DatabaseHolder::getOrCreate( const string& ns , const string& path , bool& justCreated ){
+ Database* DatabaseHolder::getOrCreate( const string& ns , const string& path , bool& justCreated ) {
dbMutex.assertWriteLocked();
DBs& m = _paths[path];
-
+
string dbname = _todb( ns );
-
+
Database* & db = m[dbname];
- if ( db ){
+ if ( db ) {
justCreated = false;
return db;
}
-
+
log(1) << "Accessing: " << dbname << " for the first time" << endl;
try {
db = new Database( dbname.c_str() , justCreated , path );
}
- catch ( ... ){
+ catch ( ... ) {
m.erase( dbname );
throw;
}
_size++;
return db;
}
-
+
} // namespace mongo
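
getOrCreate above leans on map operator[] inserting an empty slot on first access; if the Database constructor then throws, the slot is erased so a retry doesn't find a dangling null entry. The open-on-miss pattern in miniature (justCreated is set directly here, where the real constructor reports it via an out parameter):

    #include <map>
    #include <string>

    struct Database {
        explicit Database(const std::string&) {}   // may throw on open failure
    };

    typedef std::map<std::string, Database*> DBs;

    Database* getOrCreate(DBs& m, const std::string& name, bool& justCreated) {
        Database*& db = m[name];           // operator[] inserts a null slot on miss
        if (db) {
            justCreated = false;
            return db;
        }
        try {
            db = new Database(name);
        }
        catch (...) {
            m.erase(name);                 // don't leave a dangling null entry
            throw;
        }
        justCreated = true;
        return db;
    }
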
diff --git a/db/database.h b/db/database.h
index 1e6a1fb02af..6e72ba84645 100644
--- a/db/database.h
+++ b/db/database.h
@@ -34,7 +34,7 @@ namespace mongo {
class Database {
public:
static bool _openAllFiles;
-
+
Database(const char *nm, /*out*/ bool& newDb, const string& _path = dbpath);
private:
~Database();
@@ -52,30 +52,30 @@ namespace mongo {
* tries to make sure that this hasn't been deleted
*/
bool isOk() const { return magic == 781231; }
-
- bool isEmpty(){ return ! namespaceIndex.allocated(); }
+
+ bool isEmpty() { return ! namespaceIndex.allocated(); }
/**
* total file size of Database in bytes
*/
long long fileSize() const;
-
+
int numFiles() const { return (int)files.size(); }
- /**
- * returns file valid for file number n
+ /**
+ * returns file valid for file number n
*/
boost::filesystem::path fileName( int n ) const;
-
+
bool exists(int n) const { return boost::filesystem::exists( fileName( n ) ); }
- /**
- * return file n. if it doesn't exist, create it
+ /**
+ * return file n. if it doesn't exist, create it
*/
MongoDataFile* getFile( int n, int sizeNeeded = 0, bool preallocateOnly = false );
-
+
MongoDataFile* addAFile( int sizeNeeded, bool preallocateNextFile );
-
+
/**
* makes sure we have an extra file at the end that is empty
* safe to call this multiple times - the implementation will only preallocate one file
@@ -85,9 +85,9 @@ namespace mongo {
MongoDataFile* suitableFile( int sizeNeeded, bool preallocate );
Extent* allocExtent( const char *ns, int size, bool capped );
-
+
MongoDataFile* newestFile();
-
+
/**
* @return true if success. false if bad level or error creating profile ns
*/
@@ -95,7 +95,7 @@ namespace mongo {
void flushFiles( bool sync ) const;
-
+
/**
* @return true if ns is part of the database
* ns=foo.bar, db=foo returns true
@@ -109,7 +109,7 @@ namespace mongo {
static bool validDBName( const string& ns );
public: // this should be private later
-
+
vector<MongoDataFile*> files;
const string name; // "alleyinsider"
const string path;
@@ -117,7 +117,7 @@ namespace mongo {
int profile; // 0=off.
const string profileName; // "alleyinsider.system.profile"
CCByLoc ccByLoc;
- int magic; // used for making sure the object is still loaded in memory
+ int magic; // used for making sure the object is still loaded in memory
};
} // namespace mongo
diff --git a/db/db.cpp b/db/db.cpp
index a761331cc29..273d463038d 100644
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -60,7 +60,7 @@ namespace mongo {
extern int diagLogging;
extern unsigned lenForNewNsFiles;
extern int lockFile;
- extern bool checkNsFilesOnLoad;
+ extern bool checkNsFilesOnLoad;
extern string repairpath;
void setupSignals( bool inFork );
@@ -76,7 +76,7 @@ namespace mongo {
static bool forceRepair = 0;
Timer startupSrandTimer;
- const char *ourgetns() {
+ const char *ourgetns() {
Client *c = currentClient.get();
if ( ! c )
return "";
@@ -99,7 +99,7 @@ namespace mongo {
OurListener(const string &ip, int p) : Listener(ip, p) { }
virtual void accepted(MessagingPort *mp) {
- if ( ! connTicketHolder.tryAcquire() ){
+ if ( ! connTicketHolder.tryAcquire() ) {
log() << "connection refused because too many open connections: " << connTicketHolder.used() << " of " << connTicketHolder.outof() << endl;
// TODO: would be nice if we notified them...
mp->shutdown();
@@ -110,12 +110,12 @@ namespace mongo {
try {
boost::thread thr(boost::bind(&connThread,mp));
}
- catch ( boost::thread_resource_error& ){
+ catch ( boost::thread_resource_error& ) {
log() << "can't create new thread, closing connection" << endl;
mp->shutdown();
delete mp;
}
- catch ( ... ){
+ catch ( ... ) {
log() << "unkonwn exception starting connThread" << endl;
mp->shutdown();
delete mp;
@@ -123,14 +123,14 @@ namespace mongo {
}
};
-/* todo: make this a real test. the tests in dbtests/ seem to use dbdirectclient exclusively, which exhaust doesn't support yet. */
+    /* todo: make this a real test. the tests in dbtests/ seem to use dbdirectclient exclusively, which exhaust doesn't support yet. */
// QueryOption_Exhaust
#define TESTEXHAUST 0
#if( TESTEXHAUST )
- void testExhaust() {
+ void testExhaust() {
sleepsecs(1);
unsigned n = 0;
- auto f = [&n](const BSONObj& o) {
+ auto f = [&n](const BSONObj& o) {
assert( o.valid() );
//cout << o << endl;
n++;
@@ -142,20 +142,20 @@ namespace mongo {
db.connect("localhost");
const char *ns = "local.foo";
if( db.count(ns) < 10000 )
- for( int i = 0; i < 20000; i++ )
+ for( int i = 0; i < 20000; i++ )
db.insert(ns, BSON("aaa" << 3 << "b" << "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"));
try {
db.query(f, ns, Query() );
}
- catch(...) {
+ catch(...) {
cout << "hmmm" << endl;
}
try {
db.query(f, ns, Query() );
}
- catch(...) {
+ catch(...) {
cout << "caught" << endl;
}
@@ -200,8 +200,7 @@ namespace mongo {
app server will open a pool of threads.
todo: one day, asio...
*/
- void connThread( MessagingPort * inPort )
- {
+ void connThread( MessagingPort * inPort ) {
TicketHolderReleaser connTicketReleaser( &connTicketHolder );
/* todo: move to Client object */
@@ -222,7 +221,7 @@ namespace mongo {
if ( !dbMsgPort->recv(m) ) {
if( !cmdLine.quiet )
- log() << "end connection " << dbMsgPort->farEnd.toString() << endl;
+ log() << "end connection " << dbMsgPort->farEnd.toString() << endl;
dbMsgPort->shutdown();
break;
}
@@ -231,7 +230,7 @@ sendmore:
log() << "got request after shutdown()" << endl;
break;
}
-
+
lastError.startRequest( m , le );
DbResponse dbresponse;
@@ -251,7 +250,7 @@ sendmore:
if ( dbresponse.response ) {
dbMsgPort->reply(m, *dbresponse.response, dbresponse.responseTo);
- if( dbresponse.exhaust ) {
+ if( dbresponse.exhaust ) {
MsgData *header = dbresponse.response->header();
QueryResult *qr = (QueryResult *) header;
long long cursorid = qr->cursorId;
@@ -294,7 +293,7 @@ sendmore:
}
catch ( const ClockSkewException & ) {
exitCleanly( EXIT_CLOCK_SKEW );
- }
+ }
catch ( std::exception &e ) {
problem() << "Uncaught std::exception: " << e.what() << ", terminating" << endl;
dbexit( EXIT_UNCAUGHT );
@@ -317,7 +316,7 @@ sendmore:
SockAddr db(address, port);
MessagingPort p;
- if ( !p.connect(db) ){
+ if ( !p.connect(db) ) {
log() << "msg couldn't connect" << endl;
return;
}
@@ -346,40 +345,40 @@ sendmore:
msg(m, "127.0.0.1", CmdLine::DefaultDBPort, extras);
}
- bool doDBUpgrade( const string& dbName , string errmsg , DataFileHeader * h ){
+ bool doDBUpgrade( const string& dbName , string errmsg , DataFileHeader * h ) {
static DBDirectClient db;
-
- if ( h->version == 4 && h->versionMinor == 4 ){
+
+ if ( h->version == 4 && h->versionMinor == 4 ) {
assert( VERSION == 4 );
assert( VERSION_MINOR == 5 );
-
+
list<string> colls = db.getCollectionNames( dbName );
- for ( list<string>::iterator i=colls.begin(); i!=colls.end(); i++){
+ for ( list<string>::iterator i=colls.begin(); i!=colls.end(); i++) {
string c = *i;
log() << "\t upgrading collection:" << c << endl;
BSONObj out;
bool ok = db.runCommand( dbName , BSON( "reIndex" << c.substr( dbName.size() + 1 ) ) , out );
- if ( ! ok ){
+ if ( ! ok ) {
errmsg = "reindex failed";
log() << "\t\t reindex failed: " << out << endl;
return false;
}
}
-
+
h->versionMinor = 5;
return true;
}
-
+
// do this in the general case
return repairDatabase( dbName.c_str(), errmsg );
}
-
+
// ran at startup.
static void repairDatabasesAndCheckVersion() {
- // LastError * le = lastError.get( true );
+ // LastError * le = lastError.get( true );
Client::GodScope gs;
log(1) << "enter repairDatabases (to check pdfile version #)" << endl;
-
+
//assert(checkNsFilesOnLoad);
checkNsFilesOnLoad = false; // we are mainly just checking the header - don't scan the whole .ns file for every db here.
@@ -402,7 +401,7 @@ sendmore:
log() << "****" << endl;
log() << "need to upgrade database " << dbName << " with pdfile version " << h->version << "." << h->versionMinor << ", "
<< "new version: " << VERSION << "." << VERSION_MINOR << endl;
- if ( shouldRepairDatabases ){
+ if ( shouldRepairDatabases ) {
// QUESTION: Repair even if file format is higher version than code?
log() << "\t starting upgrade" << endl;
string errmsg;
@@ -416,14 +415,15 @@ sendmore:
shouldRepairDatabases = 1;
return;
}
- } else {
+ }
+ else {
Database::closeDatabase( dbName.c_str(), dbpath );
}
}
log(1) << "done repairDatabases" << endl;
- if ( shouldRepairDatabases ){
+ if ( shouldRepairDatabases ) {
log() << "finished checking dbs" << endl;
cc().shutdown();
dbexit( EXIT_CLEAN );
@@ -438,11 +438,11 @@ sendmore:
i != boost::filesystem::directory_iterator(); ++i ) {
string fileName = boost::filesystem::path(*i).leaf();
if ( boost::filesystem::is_directory( *i ) &&
- fileName.length() && fileName[ 0 ] == '$' )
+ fileName.length() && fileName[ 0 ] == '$' )
boost::filesystem::remove_all( *i );
}
}
-
+
void clearTmpCollections() {
Client::GodScope gs;
vector< string > toDelete;
@@ -457,38 +457,38 @@ sendmore:
cli.dropCollection( *i );
}
}
-
+
void flushDiagLog();
-
+
/**
* does background async flushes of mmapped files
*/
class DataFileSync : public BackgroundJob {
public:
string name() const { return "DataFileSync"; }
- void run(){
+ void run() {
if( cmdLine.syncdelay == 0 )
log() << "warning: --syncdelay 0 is not recommended and can have strange performance" << endl;
- else if( cmdLine.syncdelay == 1 )
+ else if( cmdLine.syncdelay == 1 )
log() << "--syncdelay 1" << endl;
else if( cmdLine.syncdelay != 60 )
log(1) << "--syncdelay " << cmdLine.syncdelay << endl;
int time_flushing = 0;
- while ( ! inShutdown() ){
+ while ( ! inShutdown() ) {
flushDiagLog();
- if ( cmdLine.syncdelay == 0 ){
+ if ( cmdLine.syncdelay == 0 ) {
// in case at some point we add an option to change at runtime
sleepsecs(5);
continue;
}
sleepmillis( (long long) std::max(0.0, (cmdLine.syncdelay * 1000) - time_flushing) );
-
- if ( inShutdown() ){
+
+ if ( inShutdown() ) {
// occasional issue trying to flush during shutdown when sleep interrupted
break;
}
-
+
Date_t start = jsTime();
int numFiles = MemoryMappedFile::flushAll( true );
time_flushing = (int) (jsTime() - start);
@@ -505,11 +505,11 @@ sendmore:
// should be safe to interrupt in js code, even if we have a write lock
return killCurrentOp.checkForInterruptNoAssert( false );
}
-
+
unsigned jsGetInterruptSpecCallback() {
return cc().curop()->opNum();
}
-
+
void _initAndListen(int listenPort, const char *appserverLoc = NULL) {
bool is32bit = sizeof(int*) == 4;
@@ -542,7 +542,7 @@ sendmore:
ss << "repairpath (" << repairpath << ") does not exist";
uassert( 12590 , ss.str().c_str(), boost::filesystem::exists( repairpath ) );
}
-
+
acquirePathLock();
remove_all( dbpath + "/_tmp/" );
@@ -555,7 +555,7 @@ sendmore:
dur::startup();
- if( cmdLine.durOptions & CmdLine::DurRecoverOnly )
+ if( cmdLine.durOptions & CmdLine::DurRecoverOnly )
return;
// comes after getDur().startup() because this reads from the database
@@ -573,7 +573,7 @@ sendmore:
/* we didn't want to pre-open all files for the repair check above. for regular
operation we do for read/write lock concurrency reasons.
- */
+ */
Database::_openAllFiles = true;
if ( shouldRepairDatabases )
@@ -605,7 +605,7 @@ sendmore:
log() << "exception in initAndListen std::exception: " << e.what() << ", terminating" << endl;
dbexit( EXIT_UNCAUGHT );
}
- catch ( int& n ){
+ catch ( int& n ) {
log() << "exception in initAndListen int: " << n << ", terminating" << endl;
dbexit( EXIT_UNCAUGHT );
}
@@ -615,13 +615,13 @@ sendmore:
}
}
- #if defined(_WIN32)
+#if defined(_WIN32)
bool initService() {
ServiceController::reportStatus( SERVICE_RUNNING );
initAndListen( cmdLine.port, appsrvPath );
return true;
}
- #endif
+#endif
} // namespace mongo
@@ -655,15 +655,14 @@ string arg_error_check(int argc, char* argv[]) {
return "";
}
-int main(int argc, char* argv[])
-{
+int main(int argc, char* argv[]) {
static StaticObserver staticObserver;
getcurns = ourgetns;
po::options_description general_options("General options");
- #if defined(_WIN32)
- po::options_description windows_scm_options("Windows Service Control Manager options");
- #endif
+#if defined(_WIN32)
+ po::options_description windows_scm_options("Windows Service Control Manager options");
+#endif
po::options_description replication_options("Replication options");
po::options_description ms_options("Master/slave options");
po::options_description rs_options("Replica set options");
@@ -676,87 +675,87 @@ int main(int argc, char* argv[])
CmdLine::addGlobalOptions( general_options , hidden_options );
general_options.add_options()
- ("dbpath", po::value<string>() , "directory for datafiles")
- ("directoryperdb", "each database will be stored in a separate directory")
- ("repairpath", po::value<string>() , "root directory for repair files - defaults to dbpath" )
- ("cpu", "periodically show cpu and iowait utilization")
- ("noauth", "run without security")
- ("auth", "run with security")
- ("objcheck", "inspect client data for validity on receipt")
- ("quota", "limits each database to a certain number of files (8 default)")
- ("quotaFiles", po::value<int>(), "number of files allower per db, requires --quota")
- ("appsrvpath", po::value<string>(), "root directory for the babble app server")
- ("nocursors", "diagnostic/debugging option")
- ("nohints", "ignore query hints")
- ("nohttpinterface", "disable http interface")
- ("rest","turn on simple rest api")
- ("jsonp","allow JSONP access via http (has security implications)")
- ("noscripting", "disable scripting engine")
- ("noprealloc", "disable data file preallocation - will often hurt performance")
- ("smallfiles", "use a smaller default file size")
- ("nssize", po::value<int>()->default_value(16), ".ns file size (in MB) for new databases")
- ("diaglog", po::value<int>(), "0=off 1=W 2=R 3=both 7=W+some reads")
- ("sysinfo", "print some diagnostic system information")
- ("upgrade", "upgrade db if needed")
- ("repair", "run repair on all dbs")
- ("notablescan", "do not allow table scans")
- ("syncdelay",po::value<double>(&cmdLine.syncdelay)->default_value(60), "seconds between disk syncs (0=never, but not recommended)")
- ("profile",po::value<int>(), "0=off 1=slow, 2=all")
- ("slowms",po::value<int>(&cmdLine.slowMS)->default_value(100), "value of slow for profile and console log" )
- ("maxConns",po::value<int>(), "max number of simultaneous connections")
- #if !defined(_WIN32)
- ("nounixsocket", "disable listening on unix sockets")
- #endif
- ("ipv6", "enable IPv6 support (disabled by default)")
- ;
+ ("dbpath", po::value<string>() , "directory for datafiles")
+ ("directoryperdb", "each database will be stored in a separate directory")
+ ("repairpath", po::value<string>() , "root directory for repair files - defaults to dbpath" )
+ ("cpu", "periodically show cpu and iowait utilization")
+ ("noauth", "run without security")
+ ("auth", "run with security")
+ ("objcheck", "inspect client data for validity on receipt")
+ ("quota", "limits each database to a certain number of files (8 default)")
+ ("quotaFiles", po::value<int>(), "number of files allower per db, requires --quota")
+ ("appsrvpath", po::value<string>(), "root directory for the babble app server")
+ ("nocursors", "diagnostic/debugging option")
+ ("nohints", "ignore query hints")
+ ("nohttpinterface", "disable http interface")
+ ("rest","turn on simple rest api")
+ ("jsonp","allow JSONP access via http (has security implications)")
+ ("noscripting", "disable scripting engine")
+ ("noprealloc", "disable data file preallocation - will often hurt performance")
+ ("smallfiles", "use a smaller default file size")
+ ("nssize", po::value<int>()->default_value(16), ".ns file size (in MB) for new databases")
+ ("diaglog", po::value<int>(), "0=off 1=W 2=R 3=both 7=W+some reads")
+ ("sysinfo", "print some diagnostic system information")
+ ("upgrade", "upgrade db if needed")
+ ("repair", "run repair on all dbs")
+ ("notablescan", "do not allow table scans")
+ ("syncdelay",po::value<double>(&cmdLine.syncdelay)->default_value(60), "seconds between disk syncs (0=never, but not recommended)")
+ ("profile",po::value<int>(), "0=off 1=slow, 2=all")
+ ("slowms",po::value<int>(&cmdLine.slowMS)->default_value(100), "value of slow for profile and console log" )
+ ("maxConns",po::value<int>(), "max number of simultaneous connections")
+#if !defined(_WIN32)
+ ("nounixsocket", "disable listening on unix sockets")
+#endif
+ ("ipv6", "enable IPv6 support (disabled by default)")
+ ;
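(Illustrative invocation only, with a hypothetical path and values: a server exercising a few of the options above could be started as: mongod --dbpath /data/db --rest --smallfiles --syncdelay 30.)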
#if defined(_WIN32)
CmdLine::addWindowsOptions( windows_scm_options, hidden_options );
#endif
- replication_options.add_options()
- ("fastsync", "indicate that this instance is starting from a dbpath snapshot of the repl peer")
- ("autoresync", "automatically resync if slave data is stale")
- ("oplogSize", po::value<int>(), "size limit (in MB) for op log")
- ;
-
- ms_options.add_options()
- ("master", "master mode")
- ("slave", "slave mode")
- ("source", po::value<string>(), "when slave: specify master as <server:port>")
- ("only", po::value<string>(), "when slave: specify a single database to replicate")
- ("slavedelay", po::value<int>(), "specify delay (in seconds) to be used when applying master ops to slave")
- ;
-
- rs_options.add_options()
- ("replSet", po::value<string>(), "arg is <setname>[/<optionalseedhostlist>]")
- ;
-
- sharding_options.add_options()
- ("configsvr", "declare this is a config db of a cluster; default port 27019; default dir /data/configdb")
- ("shardsvr", "declare this is a shard db of a cluster; default port 27018")
- ("noMoveParanoia" , "turn off paranoid saving of data for moveChunk. this is on by default for now, but default will switch" )
- ;
+ replication_options.add_options()
+ ("fastsync", "indicate that this instance is starting from a dbpath snapshot of the repl peer")
+ ("autoresync", "automatically resync if slave data is stale")
+ ("oplogSize", po::value<int>(), "size limit (in MB) for op log")
+ ;
+
+ ms_options.add_options()
+ ("master", "master mode")
+ ("slave", "slave mode")
+ ("source", po::value<string>(), "when slave: specify master as <server:port>")
+ ("only", po::value<string>(), "when slave: specify a single database to replicate")
+ ("slavedelay", po::value<int>(), "specify delay (in seconds) to be used when applying master ops to slave")
+ ;
+
+ rs_options.add_options()
+ ("replSet", po::value<string>(), "arg is <setname>[/<optionalseedhostlist>]")
+ ;
+
+ sharding_options.add_options()
+ ("configsvr", "declare this is a config db of a cluster; default port 27019; default dir /data/configdb")
+ ("shardsvr", "declare this is a shard db of a cluster; default port 27018")
+ ("noMoveParanoia" , "turn off paranoid saving of data for moveChunk. this is on by default for now, but default will switch" )
+ ;
hidden_options.add_options()
- ("pretouch", po::value<int>(), "n pretouch threads for applying replicationed operations")
- ("command", po::value< vector<string> >(), "command")
- ("cacheSize", po::value<long>(), "cache size (in MB) for rec store")
- // these move to unhidden later:
- ("dur", "enable journaling")
- ("durOptions", po::value<int>(), "durability diagnostic options")
- ("opIdMem", po::value<long>(), "size limit (in bytes) for in memory storage of op ids for replica pairs DEPRECATED")
- ("pairwith", po::value<string>(), "address of server to pair with DEPRECATED")
- ("arbiter", po::value<string>(), "address of replica pair arbiter server DEPRECATED")
- ("nodur", "disable journaling (currently the default)")
- ;
+ ("pretouch", po::value<int>(), "n pretouch threads for applying replicationed operations")
+ ("command", po::value< vector<string> >(), "command")
+ ("cacheSize", po::value<long>(), "cache size (in MB) for rec store")
+ // these move to unhidden later:
+ ("dur", "enable journaling")
+ ("durOptions", po::value<int>(), "durability diagnostic options")
+ ("opIdMem", po::value<long>(), "size limit (in bytes) for in memory storage of op ids for replica pairs DEPRECATED")
+ ("pairwith", po::value<string>(), "address of server to pair with DEPRECATED")
+ ("arbiter", po::value<string>(), "address of replica pair arbiter server DEPRECATED")
+ ("nodur", "disable journaling (currently the default)")
+ ;
positional_options.add("command", 3);
visible_options.add(general_options);
- #if defined(_WIN32)
- visible_options.add(windows_scm_options);
- #endif
+#if defined(_WIN32)
+ visible_options.add(windows_scm_options);
+#endif
visible_options.add(replication_options);
visible_options.add(ms_options);
visible_options.add(rs_options);
@@ -787,7 +786,7 @@ int main(int argc, char* argv[])
{
po::variables_map params;
-
+
string error_message = arg_error_check(argc, argv);
if (error_message != "") {
cout << error_message << endl << endl;
@@ -831,10 +830,10 @@ int main(int argc, char* argv[])
cmdLine.quota = true;
cmdLine.quotaFiles = params["quotaFiles"].as<int>() - 1;
}
- if( params.count("nodur") ) {
+ if( params.count("nodur") ) {
cmdLine.dur = false;
}
- if( params.count("dur") ) {
+ if( params.count("dur") ) {
cmdLine.dur = true;
log() << "***** WARNING --dur should not be used yet except for testing" << endl;
}
@@ -854,7 +853,8 @@ int main(int argc, char* argv[])
out() << "repairpath has to be non-zero" << endl;
dbexit( EXIT_BADOPTIONS );
}
- } else {
+ }
+ else {
repairpath = dbpath;
}
if (params.count("nocursors")) {
@@ -923,14 +923,15 @@ int main(int argc, char* argv[])
/* specifies what the source in local.sources should be */
cmdLine.source = params["source"].as<string>().c_str();
}
- if( params.count("pretouch") ) {
+ if( params.count("pretouch") ) {
cmdLine.pretouch = params["pretouch"].as<int>();
}
if (params.count("replSet")) {
if (params.count("slavedelay")) {
out() << "--slavedelay cannot be used with --replSet" << endl;
dbexit( EXIT_BADOPTIONS );
- } else if (params.count("only")) {
+ }
+ else if (params.count("only")) {
out() << "--only cannot be used with --replSet" << endl;
dbexit( EXIT_BADOPTIONS );
}
@@ -944,16 +945,18 @@ int main(int argc, char* argv[])
cout << "***********************************\n"
<< "WARNING WARNING WARNING\n"
<< " replica pairs are deprecated\n"
- << " see: http://www.mongodb.org/display/DOCS/Replica+Pairs \n"
+ << " see: http://www.mongodb.org/display/DOCS/Replica+Pairs \n"
<< "***********************************" << endl;
string paired = params["pairwith"].as<string>();
if (params.count("arbiter")) {
string arbiter = params["arbiter"].as<string>();
pairWith(paired.c_str(), arbiter.c_str());
- } else {
+ }
+ else {
pairWith(paired.c_str(), "-");
}
- } else if (params.count("arbiter")) {
+ }
+ else if (params.count("arbiter")) {
out() << "specifying --arbiter without --pairwith" << endl;
dbexit( EXIT_BADOPTIONS );
}
@@ -973,7 +976,7 @@ int main(int argc, char* argv[])
dbexit( EXIT_BADOPTIONS );
}
// note a small size such as x==1 is ok for an arbiter.
- if( x > 1000 && sizeof(void*) == 4 ) {
+ if( x > 1000 && sizeof(void*) == 4 ) {
out() << "--oplogSize of " << x << "MB is too big for 32 bit version. Use 64 bit build instead." << endl;
dbexit( EXIT_BADOPTIONS );
}
@@ -997,20 +1000,20 @@ int main(int argc, char* argv[])
}
log() << "--cacheSize option not currently supported" << endl;
}
- if (params.count("port") == 0 ) {
+ if (params.count("port") == 0 ) {
if( params.count("configsvr") ) {
cmdLine.port = CmdLine::ConfigServerPort;
}
if( params.count("shardsvr") )
cmdLine.port = CmdLine::ShardServerPort;
}
- else {
- if ( cmdLine.port <= 0 || cmdLine.port > 65535 ){
+ else {
+ if ( cmdLine.port <= 0 || cmdLine.port > 65535 ) {
out() << "bad --port number" << endl;
dbexit( EXIT_BADOPTIONS );
}
}
- if ( params.count("configsvr" ) ){
+ if ( params.count("configsvr" ) ) {
if (cmdLine.usingReplSets() || replSettings.master || replSettings.slave) {
log() << "replication should not be enabled on a config server" << endl;
::exit(-1);
@@ -1020,10 +1023,10 @@ int main(int argc, char* argv[])
if ( params.count( "dbpath" ) == 0 )
dbpath = "/data/configdb";
}
- if ( params.count( "profile" ) ){
+ if ( params.count( "profile" ) ) {
cmdLine.defaultProfile = params["profile"].as<int>();
}
- if ( params.count( "maxConns" ) ){
+ if ( params.count( "maxConns" ) ) {
int newSize = params["maxConns"].as<int>();
if ( newSize < 5 ) {
out() << "maxConns has to be at least 5" << endl;
@@ -1031,17 +1034,17 @@ int main(int argc, char* argv[])
}
else if ( newSize >= 10000000 ) {
out() << "maxConns can't be greater than 10000000" << endl;
- dbexit( EXIT_BADOPTIONS );
+ dbexit( EXIT_BADOPTIONS );
}
connTicketHolder.resize( newSize );
}
- if (params.count("nounixsocket")){
+ if (params.count("nounixsocket")) {
noUnixSocket = true;
}
- if (params.count("ipv6")){
+ if (params.count("ipv6")) {
enableIPv6();
}
- if (params.count("noMoveParanoia")){
+ if (params.count("noMoveParanoia")) {
cmdLine.moveParanoia = false;
}
@@ -1087,7 +1090,7 @@ int main(int argc, char* argv[])
}
if( cmdLine.pretouch )
- log() << "--pretouch " << cmdLine.pretouch << endl;
+ log() << "--pretouch " << cmdLine.pretouch << endl;
#if defined(_WIN32)
if (serviceParamsCheck( params, dbpath, argc, argv )) {
@@ -1112,7 +1115,7 @@ namespace mongo {
{
dblock lk;
log() << "now exiting" << endl;
- dbexit( code );
+ dbexit( code );
}
}
@@ -1169,8 +1172,8 @@ namespace mongo {
printStackTrace();
abort();
}
-
- void setupSignals_ignoreHelper( int signal ){}
+
+ void setupSignals_ignoreHelper( int signal ) {}
void setupSignals( bool inFork ) {
assert( signal(SIGSEGV, abruptQuit) != SIG_ERR );
@@ -1193,49 +1196,47 @@ namespace mongo {
sigaddset( &asyncSignals, SIGTERM );
assert( pthread_sigmask( SIG_SETMASK, &asyncSignals, 0 ) == 0 );
boost::thread it( interruptThread );
-
+
set_terminate( myterminate );
}
#else
-void ctrlCTerminate() {
- log() << "got kill or ctrl-c signal, will terminate after current cmd ends" << endl;
- Client::initThread( "ctrlCTerminate" );
- exitCleanly( EXIT_KILL );
-}
-BOOL CtrlHandler( DWORD fdwCtrlType )
-{
- switch( fdwCtrlType )
- {
- case CTRL_C_EVENT:
- rawOut("Ctrl-C signal");
- ctrlCTerminate();
- return( TRUE );
- case CTRL_CLOSE_EVENT:
- rawOut("CTRL_CLOSE_EVENT signal");
- ctrlCTerminate();
- return( TRUE );
- case CTRL_BREAK_EVENT:
- rawOut("CTRL_BREAK_EVENT signal");
- ctrlCTerminate();
- return TRUE;
- case CTRL_LOGOFF_EVENT:
- rawOut("CTRL_LOGOFF_EVENT signal (ignored)");
- return FALSE;
- case CTRL_SHUTDOWN_EVENT:
- rawOut("CTRL_SHUTDOWN_EVENT signal (ignored)");
- return FALSE;
- default:
- return FALSE;
+ void ctrlCTerminate() {
+ log() << "got kill or ctrl-c signal, will terminate after current cmd ends" << endl;
+ Client::initThread( "ctrlCTerminate" );
+ exitCleanly( EXIT_KILL );
+ }
+ BOOL CtrlHandler( DWORD fdwCtrlType ) {
+ switch( fdwCtrlType ) {
+ case CTRL_C_EVENT:
+ rawOut("Ctrl-C signal");
+ ctrlCTerminate();
+ return( TRUE );
+ case CTRL_CLOSE_EVENT:
+ rawOut("CTRL_CLOSE_EVENT signal");
+ ctrlCTerminate();
+ return( TRUE );
+ case CTRL_BREAK_EVENT:
+ rawOut("CTRL_BREAK_EVENT signal");
+ ctrlCTerminate();
+ return TRUE;
+ case CTRL_LOGOFF_EVENT:
+ rawOut("CTRL_LOGOFF_EVENT signal (ignored)");
+ return FALSE;
+ case CTRL_SHUTDOWN_EVENT:
+ rawOut("CTRL_SHUTDOWN_EVENT signal (ignored)");
+ return FALSE;
+ default:
+ return FALSE;
+ }
}
-}
void myPurecallHandler() {
rawOut( "pure virtual method called, printing stack:" );
printStackTrace();
- abort();
+ abort();
}
-
+
void setupSignals( bool inFork ) {
if( SetConsoleCtrlHandler( (PHANDLER_ROUTINE) CtrlHandler, TRUE ) )
;
diff --git a/db/db.h b/db/db.h
index 90f94a62596..7ef7d03af1e 100644
--- a/db/db.h
+++ b/db/db.h
@@ -43,29 +43,29 @@ namespace mongo {
if ( x == _paths.end() )
return false;
const DBs& m = x->second;
-
+
string db = _todb( ns );
DBs::const_iterator it = m.find(db);
return it != m.end();
}
-
+
Database * get( const string& ns , const string& path ) const {
dbMutex.assertAtLeastReadLocked();
Paths::const_iterator x = _paths.find( path );
if ( x == _paths.end() )
return 0;
const DBs& m = x->second;
-
+
string db = _todb( ns );
DBs::const_iterator it = m.find(db);
- if ( it != m.end() )
+ if ( it != m.end() )
return it->second;
return 0;
}
-
- void put( const string& ns , const string& path , Database * db ){
+
+ void put( const string& ns , const string& path , Database * db ) {
dbMutex.assertWriteLocked();
DBs& m = _paths[path];
Database*& d = m[_todb(ns)];
@@ -73,10 +73,10 @@ namespace mongo {
_size++;
d = db;
}
-
+
Database* getOrCreate( const string& ns , const string& path , bool& justCreated );
- void erase( const string& ns , const string& path ){
+ void erase( const string& ns , const string& path ) {
dbMutex.assertWriteLocked();
DBs& m = _paths[path];
_size -= (int)m.erase( _todb( ns ) );
@@ -85,54 +85,54 @@ namespace mongo {
/* force - force close even if something underway - use at shutdown */
bool closeAll( const string& path , BSONObjBuilder& result, bool force );
- int size(){
+ int size() {
return _size;
}
-
+
void forEach(boost::function<void(Database *)> f) const {
dbMutex.assertAtLeastReadLocked();
- for ( Paths::const_iterator i=_paths.begin(); i!=_paths.end(); i++ ){
+ for ( Paths::const_iterator i=_paths.begin(); i!=_paths.end(); i++ ) {
DBs m = i->second;
- for( DBs::const_iterator j=m.begin(); j!=m.end(); j++ ){
+ for( DBs::const_iterator j=m.begin(); j!=m.end(); j++ ) {
f(j->second);
}
}
- }
+ }
/**
* gets all unique db names, ignoring paths
*/
void getAllShortNames( set<string>& all ) const {
dbMutex.assertAtLeastReadLocked();
- for ( Paths::const_iterator i=_paths.begin(); i!=_paths.end(); i++ ){
+ for ( Paths::const_iterator i=_paths.begin(); i!=_paths.end(); i++ ) {
DBs m = i->second;
- for( DBs::const_iterator j=m.begin(); j!=m.end(); j++ ){
+ for( DBs::const_iterator j=m.begin(); j!=m.end(); j++ ) {
all.insert( j->first );
}
}
}
private:
-
+
string _todb( const string& ns ) const {
string d = __todb( ns );
- uassert( 13280 , (string)"invalid db name: " + ns , Database::validDBName( d ) );
+ uassert( 13280 , (string)"invalid db name: " + ns , Database::validDBName( d ) );
return d;
}
string __todb( const string& ns ) const {
size_t i = ns.find( '.' );
- if ( i == string::npos ){
+ if ( i == string::npos ) {
uassert( 13074 , "db name can't be empty" , ns.size() );
return ns;
}
uassert( 13075 , "db name can't be empty" , i > 0 );
return ns.substr( 0 , i );
}
-
+
Paths _paths;
int _size;
-
+
};
extern DatabaseHolder dbHolder;
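(For example, _todb("blog.posts") above yields "blog", while a bare database name such as "admin" is returned unchanged; an empty name trips uassert 13074, and a leading '.' trips 13075.)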
@@ -140,22 +140,22 @@ namespace mongo {
struct dbtemprelease {
Client::Context * _context;
int _locktype;
-
+
dbtemprelease() {
_context = cc().getContext();
_locktype = dbMutex.getState();
assert( _locktype );
-
+
if ( _locktype > 0 ) {
- massert( 10298 , "can't temprelease nested write lock", _locktype == 1);
+ massert( 10298 , "can't temprelease nested write lock", _locktype == 1);
if ( _context ) _context->unlocked();
dbMutex.unlock();
- }
+ }
else {
- massert( 10299 , "can't temprelease nested read lock", _locktype == -1);
+ massert( 10299 , "can't temprelease nested read lock", _locktype == -1);
if ( _context ) _context->unlocked();
dbMutex.unlock_shared();
- }
+ }
}
~dbtemprelease() {
@@ -163,11 +163,11 @@ namespace mongo {
dbMutex.lock();
else
dbMutex.lock_shared();
-
+
if ( _context ) _context->relocked();
}
};
-
+
/**
only does a temp release if we're not nested and have a lock
@@ -175,22 +175,22 @@ namespace mongo {
struct dbtempreleasecond {
dbtemprelease * real;
int locktype;
-
- dbtempreleasecond(){
+
+ dbtempreleasecond() {
real = 0;
locktype = dbMutex.getState();
if ( locktype == 1 || locktype == -1 )
real = new dbtemprelease();
}
-
- ~dbtempreleasecond(){
- if ( real ){
+
+ ~dbtempreleasecond() {
+ if ( real ) {
delete real;
real = 0;
}
}
-
- bool unlocked(){
+
+ bool unlocked() {
return real > 0;
}
};
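(A minimal usage sketch for the two helpers above -- illustrative, not part of the patch; sleepmillis stands in for any slow work:

    void slowWorkUnderLock() {
        // caller holds dbMutex at exactly one level, read or write
        {
            dbtempreleasecond yield;   // releases dbMutex only if such a lock is held
            sleepmillis( 100 );        // slow work runs without the lock
        }                              // destructor reacquires the same lock mode here
        // dbMutex is held again from this point on
    }
)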
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index ce9f0ef3ddf..aae59829007 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -56,7 +56,7 @@ namespace mongo {
*/
class CmdResetError : public Command {
public:
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual bool requiresAuth() { return false; }
virtual bool logTheOp() {
return false;
@@ -76,8 +76,8 @@ namespace mongo {
}
} cmdResetError;
- /* set by replica sets if specified in the configuration.
- a pointer is used to avoid any possible locking issues with lockless reading (see below locktype() is NONE
+ /* set by replica sets if specified in the configuration.
+ a pointer is used to avoid any possible locking issues with lockless reading (see below locktype() is NONE
and would like to keep that)
(for now, it simply orphans any old copy as config changes should be extremely rare).
note: once non-null, never goes to null again.
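(The swap-and-orphan pattern described above, as a self-contained sketch; the names and payload fields here are hypothetical, only the idea is from the comment:

    struct GLEDefaults { int w; int wtimeout; };        // hypothetical payload type
    static GLEDefaults* theDefaults = 0;                // starts null, never returns to null

    void publishDefaults( int w , int wtimeout ) {      // writer: rare (config change)
        GLEDefaults* d = new GLEDefaults();
        d->w = w;
        d->wtimeout = wtimeout;
        theDefaults = d;                                // swap in; old copy deliberately orphaned
    }

    int defaultW() {                                    // reader: lockless
        GLEDefaults* d = theDefaults;                   // single pointer read, no lock taken
        return d ? d->w : 1;
    }
)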
@@ -87,37 +87,37 @@ namespace mongo {
class CmdGetLastError : public Command {
public:
CmdGetLastError() : Command("getLastError", false, "getlasterror") { }
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual bool requiresAuth() { return false; }
virtual bool logTheOp() { return false; }
virtual bool slaveOk() const { return true; }
virtual void help( stringstream& help ) const {
help << "return error status of the last operation on this connection\n"
- << "options:\n"
+ << "options:\n"
<< " fsync - fsync before returning, or wait for journal commit if running with --dur\n"
<< " w - await replication to w servers (including self) before returning\n"
<< " wtimeout - timeout for w in milliseconds";
}
bool run(const string& dbname, BSONObj& _cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
LastError *le = lastError.disableForCommand();
-
+
bool err = false;
if ( le->nPrev != 1 )
err = LastError::noError.appendSelf( result , false );
else
err = le->appendSelf( result , false );
-
+
Client& c = cc();
c.appendLastOp( result );
-
+
result.appendNumber( "connectionId" , c.getConnectionId() );
BSONObj cmdObj = _cmdObj;
- {
+ {
BSONObj::iterator i(_cmdObj);
i.next();
- if( !i.more() ) {
+ if( !i.more() ) {
/* empty, use default */
BSONObj *def = getLastErrorDefault;
if( def )
@@ -132,7 +132,7 @@ namespace mongo {
log() << "fsync from getlasterror" << endl;
result.append( "fsyncFiles" , MemoryMappedFile::flushAll( true ) );
}
- else {
+ else {
// this perhaps is temp. how long we wait for the group commit to occur.
result.append( "waited", t.millis() );
}
@@ -145,33 +145,33 @@ namespace mongo {
}
BSONElement e = cmdObj["w"];
- if ( e.isNumber() ){
+ if ( e.isNumber() ) {
int timeout = cmdObj["wtimeout"].numberInt();
Timer t;
int w = e.numberInt();
-
+
long long passes = 0;
char buf[32];
- while ( 1 ){
+ while ( 1 ) {
OpTime op(c.getLastOp());
if ( opReplicatedEnough( op, w ) )
break;
-
+
// if replication isn't enabled (e.g., config servers)
- if ( ! anyReplEnabled() ){
+ if ( ! anyReplEnabled() ) {
errmsg = "replication not enabled";
result.append( "err", "norepl" );
return true;
}
-
- if ( op.isNull() ){
+
+ if ( op.isNull() ) {
result.append( "err" , "no write has been done on this connection" );
return true;
}
- if ( timeout > 0 && t.millis() >= timeout ){
+ if ( timeout > 0 && t.millis() >= timeout ) {
result.append( "wtimeout" , true );
errmsg = "timed out waiting for slaves";
result.append( "waited" , t.millis() );
@@ -186,7 +186,7 @@ namespace mongo {
}
result.appendNumber( "wtime" , t.millis() );
}
-
+
result.appendNull( "err" );
return true;
}
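(A hypothetical client-side call exercising the w/wtimeout path above -- it waits until at least 2 servers, counting this one, have the connection's last write, or gives up after 500 ms:

    DBClientConnection conn;
    conn.connect( "localhost" );                        // hypothetical target
    BSONObj res;
    conn.runCommand( "mydb" ,
                     BSON( "getlasterror" << 1 << "w" << 2 << "wtimeout" << 500 ) ,
                     res );                             // res["err"] is null on success
)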
@@ -194,7 +194,7 @@ namespace mongo {
class CmdGetPrevError : public Command {
public:
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual bool requiresAuth() { return false; }
virtual bool logTheOp() {
return false;
@@ -228,7 +228,7 @@ namespace mongo {
virtual bool slaveOk() const {
return false;
}
- virtual LockType locktype() const { return WRITE; }
+ virtual LockType locktype() const { return WRITE; }
CmdDropDatabase() : Command("dropDatabase") {}
bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
BSONElement e = cmdObj.firstElement();
@@ -253,7 +253,7 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << "repair database. also compacts. note: slow.";
}
- virtual LockType locktype() const { return WRITE; }
+ virtual LockType locktype() const { return WRITE; }
CmdRepairDatabase() : Command("repairDatabase") {}
bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
BSONElement e = cmdObj.firstElement();
@@ -268,7 +268,7 @@ namespace mongo {
return repairDatabase( dbname, errmsg, preserveClonedFilesOnFailure, backupOriginalFiles );
}
} cmdRepairDatabase;
-
+
/* set db profiling level
todo: how do we handle profiling information put in the db with replication?
sensibly or not?
@@ -285,7 +285,7 @@ namespace mongo {
help << "-1 to get current values\n";
help << "http://www.mongodb.org/display/DOCS/Database+Profiler";
}
- virtual LockType locktype() const { return WRITE; }
+ virtual LockType locktype() const { return WRITE; }
CmdProfile() : Command("profile") {}
bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
BSONElement e = cmdObj.firstElement();
@@ -294,7 +294,7 @@ namespace mongo {
int p = (int) e.number();
bool ok = false;
-
+
if ( p == -1 )
ok = true;
else if ( p >= 0 && p <= 2 ) {
@@ -304,7 +304,7 @@ namespace mongo {
BSONElement slow = cmdObj["slowms"];
if ( slow.isNumber() )
cmdLine.slowMS = slow.numberInt();
-
+
return ok;
}
} cmdProfile;
@@ -317,8 +317,8 @@ namespace mongo {
CmdServerStatus() : Command("serverStatus", true) {
started = time(0);
}
-
- virtual LockType locktype() const { return NONE; }
+
+ virtual LockType locktype() const { return NONE; }
virtual void help( stringstream& help ) const {
help << "returns lots of administrative server statistics";
@@ -329,7 +329,7 @@ namespace mongo {
BSONObjBuilder timeBuilder(128);
- bool authed = cc().getAuthenticationInfo()->isAuthorizedReads("admin");
+ bool authed = cc().getAuthenticationInfo()->isAuthorizedReads("admin");
result.append( "host" , prettyHostName() );
result.append("version", versionString);
@@ -348,7 +348,7 @@ namespace mongo {
t.append("totalTime", tt);
t.append("lockTime", tl);
t.append("ratio", (tt ? tl/tt : 0));
-
+
{
BSONObjBuilder ttt( t.subobjStart( "currentQueue" ) );
int w=0, r=0;
@@ -358,7 +358,7 @@ namespace mongo {
ttt.append( "writers" , w );
ttt.done();
}
-
+
{
BSONObjBuilder ttt( t.subobjStart( "activeClients" ) );
int w=0, r=0;
@@ -369,20 +369,20 @@ namespace mongo {
ttt.done();
}
-
+
result.append( "globalLock" , t.obj() );
}
timeBuilder.appendNumber( "after basic" , Listener::getElapsedTimeMillis() - start );
- if ( authed ){
-
+ if ( authed ) {
+
BSONObjBuilder t( result.subobjStart( "mem" ) );
-
+
t.append("bits", ( sizeof(int*) == 4 ? 32 : 64 ) );
ProcessInfo p;
- if ( p.supported() ){
+ if ( p.supported() ) {
t.appendNumber( "resident" , p.getResidentSize() );
t.appendNumber( "virtual" , p.getVirtualMemorySize() );
t.appendBool( "supported" , true );
@@ -391,14 +391,14 @@ namespace mongo {
result.append( "note" , "not all mem info support on this platform" );
t.appendBool( "supported" , false );
}
-
+
t.appendNumber( "mapped" , MemoryMappedFile::totalMappedLength() / ( 1024 * 1024 ) );
t.done();
-
+
}
timeBuilder.appendNumber( "after is authed" , Listener::getElapsedTimeMillis() - start );
-
+
{
BSONObjBuilder bb( result.subobjStart( "connections" ) );
bb.append( "current" , connTicketHolder.used() );
@@ -406,15 +406,15 @@ namespace mongo {
bb.done();
}
timeBuilder.appendNumber( "after connections" , Listener::getElapsedTimeMillis() - start );
-
- if ( authed ){
+
+ if ( authed ) {
BSONObjBuilder bb( result.subobjStart( "extra_info" ) );
bb.append("note", "fields vary by platform");
ProcessInfo p;
p.getExtraInfo(bb);
bb.done();
timeBuilder.appendNumber( "after extra info" , Listener::getElapsedTimeMillis() - start );
-
+
}
{
@@ -422,13 +422,13 @@ namespace mongo {
globalIndexCounters.append( bb );
bb.done();
}
-
+
{
BSONObjBuilder bb( result.subobjStart( "backgroundFlushing" ) );
globalFlushCounters.append( bb );
bb.done();
}
-
+
{
BSONObjBuilder bb( result.subobjStart( "cursors" ) );
ClientCursor::appendStats( bb );
@@ -441,24 +441,24 @@ namespace mongo {
bb.done();
}
-
- timeBuilder.appendNumber( "after counters" , Listener::getElapsedTimeMillis() - start );
- if ( anyReplEnabled() ){
+ timeBuilder.appendNumber( "after counters" , Listener::getElapsedTimeMillis() - start );
+
+ if ( anyReplEnabled() ) {
BSONObjBuilder bb( result.subobjStart( "repl" ) );
appendReplicationInfo( bb , authed , cmdObj["repl"].numberInt() );
bb.done();
- if ( ! _isMaster() ){
+ if ( ! _isMaster() ) {
result.append( "opcountersRepl" , replOpCounters.getObj() );
}
-
+
}
- timeBuilder.appendNumber( "after repl" , Listener::getElapsedTimeMillis() - start );
-
+ timeBuilder.appendNumber( "after repl" , Listener::getElapsedTimeMillis() - start );
+
result.append( "opcounters" , globalOpCounters.getObj() );
-
+
{
BSONObjBuilder asserts( result.subobjStart( "asserts" ) );
asserts.append( "regular" , assertionCount.regular );
@@ -469,7 +469,7 @@ namespace mongo {
asserts.done();
}
- timeBuilder.appendNumber( "after asserts" , Listener::getElapsedTimeMillis() - start );
+ timeBuilder.appendNumber( "after asserts" , Listener::getElapsedTimeMillis() - start );
result.append( "writeBacksQueued" , ! writeBackManager.queuesEmpty() );
@@ -479,8 +479,8 @@ namespace mongo {
if ( ! authed )
result.append( "note" , "run against admin for more info" );
-
- if ( Listener::getElapsedTimeMillis() - start > 1000 ){
+
+ if ( Listener::getElapsedTimeMillis() - start > 1000 ) {
BSONObj t = timeBuilder.obj();
log() << "serverStatus was very slow: " << t << endl;
result.append( "timing" , t );
@@ -497,7 +497,7 @@ namespace mongo {
return true;
}
virtual void help( stringstream& help ) const { help << "internal"; }
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
CmdGetOpTime() : Command("getoptime") { }
bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
writelock l( "" );
@@ -527,7 +527,7 @@ namespace mongo {
return true;
}
void help(stringstream& h) const { h << "http://www.mongodb.org/display/DOCS/Monitoring+and+Diagnostics#MonitoringandDiagnostics-DatabaseRecord%2FReplay"; }
- virtual LockType locktype() const { return WRITE; }
+ virtual LockType locktype() const { return WRITE; }
bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
int was = _diaglog.setLevel( cmdObj.firstElement().numberInt() );
flushDiagLog();
@@ -540,7 +540,7 @@ namespace mongo {
/* remove bit from a bit array - actually removes its slot (shifting higher bits down), not just clearing it
note: this function does not work with x == 63 -- that is ok
- but keep in mind in the future if max indexes were extended to
+ but keep in mind in the future if max indexes were extended to
exactly 64 it would be a problem
*/
unsigned long long removeBit(unsigned long long b, int x) {
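(The function body falls outside this hunk; a behavior-equivalent sketch of what the comment describes -- drop slot x and shift the higher bits down, so e.g. removeBit(0xB, 1) == 0x5:

    unsigned long long removeBitSketch( unsigned long long b , int x ) {
        unsigned long long low  = b & ( ( 1ULL << x ) - 1 );  // bits below slot x, unchanged
        unsigned long long high = ( b >> ( x + 1 ) ) << x;    // bits above slot x, shifted down one
        return low | high;  // x == 63 would shift by 64, hence the caveat above
    }
)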
@@ -583,7 +583,8 @@ namespace mongo {
for ( int i = 0; i < d->nIndexes; i++ ) {
if ( !mayDeleteIdIndex && d->idx(i).isIdIndex() ) {
idIndex = &d->idx(i);
- } else {
+ }
+ else {
d->idx(i).kill_idx();
}
}
@@ -596,9 +597,9 @@ namespace mongo {
/* assuming here that id index is not multikey: */
d->multiKeyIndexBits = 0;
assureSysIndexesEmptied(ns, idIndex);
- anObjBuilder.append("msg", mayDeleteIdIndex ?
- "indexes dropped for collection" :
- "non-_id indexes dropped for collection");
+ anObjBuilder.append("msg", mayDeleteIdIndex ?
+ "indexes dropped for collection" :
+ "non-_id indexes dropped for collection");
}
else {
// delete just one index
@@ -621,9 +622,10 @@ namespace mongo {
d->nIndexes--;
for ( int i = x; i < d->nIndexes; i++ )
d->idx(i) = d->idx(i+1);
- } else {
+ }
+ else {
int n = removeFromSysIndexes(ns, name); // just in case an orphaned listing is there - i.e. it should have been repaired but wasn't
- if( n ) {
+ if( n ) {
log() << "info: removeFromSysIndexes cleaned up " << n << " entries" << endl;
}
log() << "dropIndexes: " << name << " not found" << endl;
@@ -648,7 +650,7 @@ namespace mongo {
return false;
}
virtual void help( stringstream& help ) const { help << "drop a collection\n{drop : <collectionName>}"; }
- virtual LockType locktype() const { return WRITE; }
+ virtual LockType locktype() const { return WRITE; }
virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string nsToDrop = dbname + '.' + cmdObj.firstElement().valuestr();
NamespaceDetails *d = nsdetails(nsToDrop.c_str());
@@ -667,7 +669,7 @@ namespace mongo {
/* select count(*) */
class CmdCount : public Command {
public:
- virtual LockType locktype() const { return READ; }
+ virtual LockType locktype() const { return READ; }
CmdCount() : Command("count") { }
virtual bool logTheOp() {
return false;
@@ -689,7 +691,7 @@ namespace mongo {
long long n = runCount(ns.c_str(), cmdObj, err);
long long nn = n;
bool ok = true;
- if ( n == -1 ){
+ if ( n == -1 ) {
nn = 0;
result.appendBool( "missing" , true );
}
@@ -717,7 +719,7 @@ namespace mongo {
virtual bool adminOnly() const {
return false;
}
- virtual LockType locktype() const { return WRITE; }
+ virtual LockType locktype() const { return WRITE; }
virtual void help( stringstream& help ) const {
help << "create a collection";
}
@@ -740,7 +742,7 @@ namespace mongo {
virtual bool slaveOk() const {
return false;
}
- virtual LockType locktype() const { return WRITE; }
+ virtual LockType locktype() const { return WRITE; }
virtual void help( stringstream& help ) const {
help << "drop indexes for a collection";
}
@@ -756,9 +758,9 @@ namespace mongo {
if ( f.type() == String ) {
return dropIndexes( d, toDeleteNs.c_str(), f.valuestr(), errmsg, anObjBuilder, false );
}
- else if ( f.type() == Object ){
+ else if ( f.type() == Object ) {
int idxId = d->findIndexByKeyPattern( f.embeddedObject() );
- if ( idxId < 0 ){
+ if ( idxId < 0 ) {
errmsg = "can't find index with key:";
errmsg += f.embeddedObject().toString();
return false;
@@ -785,7 +787,7 @@ namespace mongo {
public:
virtual bool logTheOp() { return false; } // only reindexes on the one node
virtual bool slaveOk() const { return true; } // can reindex on a secondary
- virtual LockType locktype() const { return WRITE; }
+ virtual LockType locktype() const { return WRITE; }
virtual void help( stringstream& help ) const {
help << "re-index a collection";
}
@@ -799,7 +801,7 @@ namespace mongo {
tlog() << "CMD: reIndex " << toDeleteNs << endl;
BackgroundOperation::assertNoBgOpInProgForNs(toDeleteNs.c_str());
- if ( ! d ){
+ if ( ! d ) {
errmsg = "ns not found";
return false;
}
@@ -807,7 +809,7 @@ namespace mongo {
list<BSONObj> all;
auto_ptr<DBClientCursor> i = db.getIndexes( toDeleteNs );
BSONObjBuilder b;
- while ( i->more() ){
+ while ( i->more() ) {
BSONObj o = i->next().getOwned();
b.append( BSONObjBuilder::numStr( all.size() ) , o );
all.push_back( o );
@@ -815,12 +817,12 @@ namespace mongo {
bool ok = dropIndexes( d, toDeleteNs.c_str(), "*" , errmsg, result, true );
- if ( ! ok ){
+ if ( ! ok ) {
errmsg = "dropIndexes failed";
return false;
}
- for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); i++ ){
+ for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); i++ ) {
BSONObj o = *i;
theDataFileMgr.insertWithObjMod( Namespace( toDeleteNs.c_str() ).getSisterNS( "system.indexes" ).c_str() , o , true );
}
@@ -843,7 +845,7 @@ namespace mongo {
virtual bool adminOnly() const {
return true;
}
- virtual LockType locktype() const { return READ; }
+ virtual LockType locktype() const { return READ; }
virtual void help( stringstream& help ) const { help << "list databases on this server"; }
CmdListDatabases() : Command("listDatabases" , true ) {}
bool run(const string& dbname , BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
@@ -865,11 +867,11 @@ namespace mongo {
seen.insert( i->c_str() );
}
-
+
// TODO: erh 1/1/2010 I think this is broken where path != dbpath ??
set<string> allShortNames;
dbHolder.getAllShortNames( allShortNames );
- for ( set<string>::iterator i = allShortNames.begin(); i != allShortNames.end(); i++ ){
+ for ( set<string>::iterator i = allShortNames.begin(); i != allShortNames.end(); i++ ) {
string name = *i;
if ( seen.count( name ) )
@@ -889,15 +891,15 @@ namespace mongo {
}
} cmdListDatabases;
- /* note an access to a database right after this will open it back up - so this is mainly
- for diagnostic purposes.
+ /* note an access to a database right after this will open it back up - so this is mainly
+ for diagnostic purposes.
*/
class CmdCloseAllDatabases : public Command {
public:
virtual void help( stringstream& help ) const { help << "Close all database files.\nA new request will cause an immediate reopening; thus, this is mostly for testing purposes."; }
virtual bool adminOnly() const { return true; }
virtual bool slaveOk() const { return false; }
- virtual LockType locktype() const { return WRITE; }
+ virtual LockType locktype() const { return WRITE; }
CmdCloseAllDatabases() : Command( "closeAllDatabases" ) {}
bool run(const string& dbname , BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
@@ -907,15 +909,15 @@ namespace mongo {
class CmdFileMD5 : public Command {
public:
- CmdFileMD5() : Command( "filemd5" ){}
+ CmdFileMD5() : Command( "filemd5" ) {}
virtual bool slaveOk() const {
return true;
}
virtual void help( stringstream& help ) const {
help << " example: { filemd5 : ObjectId(aaaaaaa) , root : \"fs\" }";
}
- virtual LockType locktype() const { return READ; }
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ virtual LockType locktype() const { return READ; }
+ bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
string ns = dbname;
ns += ".";
{
@@ -937,8 +939,8 @@ namespace mongo {
scoped_ptr<ClientCursor> cc (new ClientCursor(QueryOption_NoCursorTimeout, cursor, ns.c_str()));
int n = 0;
- while ( cursor->ok() ){
- if ( ! cursor->matcher()->matchesCurrent( cursor.get() ) ){
+ while ( cursor->ok() ) {
+ if ( ! cursor->matcher()->matchesCurrent( cursor.get() ) ) {
log() << "**** NOT MATCHING ****" << endl;
PRINT(cursor->current());
cursor->advance();
@@ -954,7 +956,7 @@ namespace mongo {
BSONElement ne = obj["n"];
assert(ne.isNumber());
int myn = ne.numberInt();
- if ( n != myn ){
+ if ( n != myn ) {
log() << "should have chunk: " << n << " have:" << myn << endl;
DBDirectClient client;
@@ -972,12 +974,13 @@ namespace mongo {
md5_append( &st , (const md5_byte_t*)(data) , len );
n++;
- } catch (...) {
+ }
+ catch (...) {
yield.relock(); // needed before yield goes out of scope
throw;
}
- if ( ! yield.stillOk() ){
+ if ( ! yield.stillOk() ) {
uasserted(13281, "File deleted during filemd5 command");
}
}
@@ -1002,15 +1005,15 @@ namespace mongo {
public:
CmdDatasize() : Command( "dataSize", false, "datasize" ) {}
virtual bool slaveOk() const { return true; }
- virtual LockType locktype() const { return READ; }
+ virtual LockType locktype() const { return READ; }
virtual void help( stringstream &help ) const {
help <<
- "determine data size for a set of data in a certain range"
- "\nexample: { dataSize:\"blog.posts\", keyPattern:{x:1}, min:{x:10}, max:{x:55} }"
- "\nkeyPattern, min, and max parameters are optional."
- "\nnote: This command may take a while to run";
+ "determine data size for a set of data in a certain range"
+ "\nexample: { dataSize:\"blog.posts\", keyPattern:{x:1}, min:{x:10}, max:{x:55} }"
+ "\nkeyPattern, min, and max parameters are optional."
+ "\nnote: This command may take a while to run";
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
Timer timer;
string ns = jsobj.firstElement().String();
@@ -1021,38 +1024,38 @@ namespace mongo {
Client::Context ctx( ns );
NamespaceDetails *d = nsdetails(ns.c_str());
-
- if ( ! d || d->stats.nrecords == 0 ){
+
+ if ( ! d || d->stats.nrecords == 0 ) {
result.appendNumber( "size" , 0 );
result.appendNumber( "numObjects" , 0 );
result.append( "millis" , timer.millis() );
return true;
}
-
+
result.appendBool( "estimate" , estimate );
shared_ptr<Cursor> c;
if ( min.isEmpty() && max.isEmpty() ) {
- if ( estimate ){
+ if ( estimate ) {
result.appendNumber( "size" , d->stats.datasize );
result.appendNumber( "numObjects" , d->stats.nrecords );
result.append( "millis" , timer.millis() );
return 1;
}
c = theDataFileMgr.findAll( ns.c_str() );
- }
+ }
else if ( min.isEmpty() || max.isEmpty() ) {
errmsg = "only one of min or max specified";
return false;
- }
+ }
else {
IndexDetails *idx = cmdIndexDetailsForRange( ns.c_str(), errmsg, min, max, keyPattern );
if ( idx == 0 )
return false;
-
+
c.reset( new BtreeCursor( d, d->idxNo(*idx), *idx, min, max, false, 1 ) );
}
-
+
long long avgObjSize = d->stats.datasize / d->stats.nrecords;
long long maxSize = jsobj["maxSize"].numberLong();
@@ -1066,11 +1069,11 @@ namespace mongo {
size += avgObjSize;
else
size += c->currLoc().rec()->netLength();
-
+
numObjects++;
-
- if ( ( maxSize && size > maxSize ) ||
- ( maxObjects && numObjects > maxObjects ) ){
+
+ if ( ( maxSize && size > maxSize ) ||
+ ( maxObjects && numObjects > maxObjects ) ) {
result.appendBool( "maxReached" , true );
break;
}
@@ -1080,7 +1083,7 @@ namespace mongo {
ostringstream os;
os << "Finding size for ns: " << ns;
- if ( ! min.isEmpty() ){
+ if ( ! min.isEmpty() ) {
os << " between " << min << " and " << max;
}
logIfSlow( timer , os.str() );
@@ -1093,21 +1096,21 @@ namespace mongo {
} cmdDatasize;
namespace {
- long long getIndexSizeForCollection(string db, string ns, BSONObjBuilder* details=NULL, int scale = 1 ){
+ long long getIndexSizeForCollection(string db, string ns, BSONObjBuilder* details=NULL, int scale = 1 ) {
dbMutex.assertAtLeastReadLocked();
NamespaceDetails * nsd = nsdetails( ns.c_str() );
if ( ! nsd )
return 0;
-
- long long totalSize = 0;
+
+ long long totalSize = 0;
NamespaceDetails::IndexIterator ii = nsd->ii();
- while ( ii.more() ){
+ while ( ii.more() ) {
IndexDetails& d = ii.next();
string collNS = d.indexNamespace();
NamespaceDetails * mine = nsdetails( collNS.c_str() );
- if ( ! mine ){
+ if ( ! mine ) {
log() << "error: have index [" << collNS << "] but no NamespaceDetails" << endl;
continue;
}
@@ -1123,32 +1126,32 @@ namespace mongo {
public:
CollectionStats() : Command( "collStats", false, "collstats" ) {}
virtual bool slaveOk() const { return true; }
- virtual LockType locktype() const { return READ; }
+ virtual LockType locktype() const { return READ; }
virtual void help( stringstream &help ) const {
help << "{ collStats:\"blog.posts\" , scale : 1 } scale divides sizes e.g. for KB use 1024";
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
string ns = dbname + "." + jsobj.firstElement().valuestr();
Client::Context cx( ns );
-
+
NamespaceDetails * nsd = nsdetails( ns.c_str() );
- if ( ! nsd ){
+ if ( ! nsd ) {
errmsg = "ns not found";
return false;
}
result.append( "ns" , ns.c_str() );
-
+
int scale = 1;
- if ( jsobj["scale"].isNumber() ){
+ if ( jsobj["scale"].isNumber() ) {
scale = jsobj["scale"].numberInt();
- if ( scale <= 0 ){
+ if ( scale <= 0 ) {
errmsg = "scale has to be > 0";
return false;
}
-
+
}
- else if ( jsobj["scale"].trueValue() ){
+ else if ( jsobj["scale"].trueValue() ) {
errmsg = "scale has to be a number > 0";
return false;
}
@@ -1163,7 +1166,7 @@ namespace mongo {
int numExtents;
BSONArrayBuilder extents;
-
+
result.appendNumber( "storageSize" , nsd->storageSize( &numExtents , verbose ? &extents : 0 ) / scale );
result.append( "numExtents" , numExtents );
result.append( "nindexes" , nsd->nIndexes );
@@ -1174,12 +1177,12 @@ namespace mongo {
BSONObjBuilder indexSizes;
result.appendNumber( "totalIndexSize" , getIndexSizeForCollection(dbname, ns, &indexSizes, scale) / scale );
result.append("indexSizes", indexSizes.obj());
-
- if ( nsd->capped ){
+
+ if ( nsd->capped ) {
result.append( "capped" , nsd->capped );
result.append( "max" , nsd->max );
}
-
+
if ( verbose )
result.appendArray( "extents" , extents.arr() );
@@ -1191,11 +1194,11 @@ namespace mongo {
public:
DBStats() : Command( "dbStats", false, "dbstats" ) {}
virtual bool slaveOk() const { return true; }
- virtual LockType locktype() const { return READ; }
+ virtual LockType locktype() const { return READ; }
virtual void help( stringstream &help ) const {
help << " example: { dbStats:1 } ";
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
list<string> collections;
Database* d = cc().database();
if ( d )
@@ -1209,11 +1212,11 @@ namespace mongo {
long long indexes = 0;
long long indexSize = 0;
- for (list<string>::const_iterator it = collections.begin(); it != collections.end(); ++it){
+ for (list<string>::const_iterator it = collections.begin(); it != collections.end(); ++it) {
const string ns = *it;
NamespaceDetails * nsd = nsdetails( ns.c_str() );
- if ( ! nsd ){
+ if ( ! nsd ) {
errmsg = "missing ns: ";
errmsg += ns;
return false;
@@ -1250,11 +1253,11 @@ namespace mongo {
public:
CmdCloneCollectionAsCapped() : Command( "cloneCollectionAsCapped" ) {}
virtual bool slaveOk() const { return false; }
- virtual LockType locktype() const { return WRITE; }
+ virtual LockType locktype() const { return WRITE; }
virtual void help( stringstream &help ) const {
help << "{ cloneCollectionAsCapped:<fromName>, toCollection:<toName>, size:<sizeInBytes> }";
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
string from = jsobj.getStringField( "cloneCollectionAsCapped" );
string to = jsobj.getStringField( "toCollection" );
long long size = (long long)jsobj.getField( "size" ).number();
@@ -1302,20 +1305,20 @@ namespace mongo {
}
} cmdCloneCollectionAsCapped;
- /* jan2010:
- Converts the given collection to a capped collection w/ the specified size.
- This command is not highly used, and is not currently supported with sharded
- environments.
+ /* jan2010:
+ Converts the given collection to a capped collection w/ the specified size.
+ This command is not highly used, and is not currently supported with sharded
+ environments.
*/
class CmdConvertToCapped : public Command {
public:
CmdConvertToCapped() : Command( "convertToCapped" ) {}
virtual bool slaveOk() const { return false; }
- virtual LockType locktype() const { return WRITE; }
+ virtual LockType locktype() const { return WRITE; }
virtual void help( stringstream &help ) const {
help << "{ convertToCapped:<fromCollectionName>, size:<sizeInBytes> }";
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
BackgroundOperation::assertNoBgOpInProgForDb(dbname.c_str());
string from = jsobj.getStringField( "convertToCapped" );
@@ -1334,8 +1337,8 @@ namespace mongo {
BSONObj info;
if ( !client.runCommand( dbname ,
- BSON( "cloneCollectionAsCapped" << from << "toCollection" << shortTmpName << "size" << double( size ) ),
- info ) ) {
+ BSON( "cloneCollectionAsCapped" << from << "toCollection" << shortTmpName << "size" << double( size ) ),
+ info ) ) {
errmsg = "cloneCollectionAsCapped failed: " + info.toString();
return false;
}
@@ -1348,7 +1351,7 @@ namespace mongo {
if ( !client.runCommand( "admin",
BSON( "renameCollection" << longTmpName <<
"to" << ( dbname + "." + from ) ),
- info ) ) {
+ info ) ) {
errmsg = "renameCollection failed: " + info.toString();
return false;
}
@@ -1361,11 +1364,11 @@ namespace mongo {
class CmdFindAndModify : public Command {
public:
virtual void help( stringstream &help ) const {
- help <<
- "{ findAndModify: \"collection\", query: {processed:false}, update: {$set: {processed:true}}, new: true}\n"
- "{ findAndModify: \"collection\", query: {processed:false}, remove: true, sort: {priority:-1}}\n"
- "Either update or remove is required, all other fields have default values.\n"
- "Output is in the \"value\" field\n";
+ help <<
+ "{ findAndModify: \"collection\", query: {processed:false}, update: {$set: {processed:true}}, new: true}\n"
+ "{ findAndModify: \"collection\", query: {processed:false}, remove: true, sort: {priority:-1}}\n"
+ "Either update or remove is required, all other fields have default values.\n"
+ "Output is in the \"value\" field\n";
}
CmdFindAndModify() : Command("findAndModify", false, "findandmodify") { }
@@ -1375,7 +1378,7 @@ namespace mongo {
virtual bool slaveOk() const {
return false;
}
- virtual LockType locktype() const { return WRITE; }
+ virtual LockType locktype() const { return WRITE; }
virtual bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
static DBDirectClient db;
@@ -1393,8 +1396,8 @@ namespace mongo {
const BSONObj* fields = (fieldsHolder.isEmpty() ? NULL : &fieldsHolder);
BSONObj out = db.findOne(ns, q, fields);
- if (out.isEmpty()){
- if (!upsert){
+ if (out.isEmpty()) {
+ if (!upsert) {
errmsg = "No matching object found";
return false;
}
@@ -1405,12 +1408,12 @@ namespace mongo {
db.update(ns, origQuery, update.embeddedObjectUserCheck(), true);
BSONObj gle = db.getLastErrorDetailed();
- if (gle["err"].type() == String){
+ if (gle["err"].type() == String) {
errmsg = gle["err"].String();
return false;
}
- if (cmdObj["new"].trueValue()){
+ if (cmdObj["new"].trueValue()) {
BSONElement _id = gle["upserted"];
if (_id.eoo())
_id = origQuery["_id"];
@@ -1418,22 +1421,24 @@ namespace mongo {
out = db.findOne(ns, QUERY("_id" << _id), fields);
}
- } else {
+ }
+ else {
- if (cmdObj["remove"].trueValue()){
+ if (cmdObj["remove"].trueValue()) {
uassert(12515, "can't remove and update", cmdObj["update"].eoo());
db.remove(ns, QUERY("_id" << out["_id"]), 1);
- } else { // update
+ }
+ else { // update
BSONElement queryId = origQuery["_id"];
- if (queryId.eoo() || getGtLtOp(queryId) != BSONObj::Equality){
+ if (queryId.eoo() || getGtLtOp(queryId) != BSONObj::Equality) {
// need to include original query for $ positional operator
BSONObjBuilder b;
b.append(out["_id"]);
BSONObjIterator it(origQuery);
- while (it.more()){
+ while (it.more()) {
BSONElement e = it.next();
if (strcmp(e.fieldName(), "_id"))
b.append(e);
@@ -1446,7 +1451,7 @@ namespace mongo {
db.update(ns, q, update.embeddedObjectUserCheck());
BSONObj gle = db.getLastErrorDetailed();
- if (gle["err"].type() == String){
+ if (gle["err"].type() == String) {
errmsg = gle["err"].String();
return false;
}
@@ -1461,7 +1466,7 @@ namespace mongo {
return true;
}
} cmdFindAndModify;
-
+
/* Returns client's uri */
class CmdWhatsMyUri : public Command {
public:
@@ -1469,20 +1474,20 @@ namespace mongo {
virtual bool slaveOk() const {
return true;
}
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual bool requiresAuth() {
return false;
}
virtual void help( stringstream &help ) const {
help << "{whatsmyuri:1}";
- }
+ }
virtual bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
BSONObj info = cc().curop()->infoNoauth();
result << "you" << info[ "client" ];
return true;
}
} cmdWhatsMyUri;
-
+
/* For testing only, not for general use */
class GodInsert : public Command {
public:
@@ -1499,7 +1504,7 @@ namespace mongo {
}
virtual void help( stringstream &help ) const {
help << "internal. for testing only.";
- }
+ }
virtual bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string coll = cmdObj[ "godinsert" ].valuestrsafe();
uassert( 13049, "godinsert must specify a collection", !coll.empty() );
@@ -1512,32 +1517,32 @@ namespace mongo {
class DBHashCmd : public Command {
public:
- DBHashCmd() : Command( "dbHash", false, "dbhash" ){}
+ DBHashCmd() : Command( "dbHash", false, "dbhash" ) {}
virtual bool slaveOk() const { return true; }
virtual LockType locktype() const { return READ; }
- virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
list<string> colls;
Database* db = cc().database();
if ( db )
db->namespaceIndex.getNamespaces( colls );
colls.sort();
-
+
result.appendNumber( "numCollections" , (long long)colls.size() );
result.append( "host" , prettyHostName() );
-
+
md5_state_t globalState;
md5_init(&globalState);
BSONObjBuilder bb( result.subobjStart( "collections" ) );
- for ( list<string>::iterator i=colls.begin(); i != colls.end(); i++ ){
+ for ( list<string>::iterator i=colls.begin(); i != colls.end(); i++ ) {
string c = *i;
if ( c.find( ".system.profil" ) != string::npos )
continue;
-
+
shared_ptr<Cursor> cursor;
NamespaceDetails * nsd = nsdetails( c.c_str() );
-
+
// debug SERVER-761
NamespaceDetails::IndexIterator ii = nsd->ii();
while( ii.more() ) {
@@ -1549,15 +1554,15 @@ namespace mongo {
log() << endl;
}
}
-
+
int idNum = nsd->findIdIndex();
- if ( idNum >= 0 ){
+ if ( idNum >= 0 ) {
cursor.reset( new BtreeCursor( nsd , idNum , nsd->idx( idNum ) , BSONObj() , BSONObj() , false , 1 ) );
}
- else if ( c.find( ".system." ) != string::npos ){
+ else if ( c.find( ".system." ) != string::npos ) {
continue;
}
- else if ( nsd->capped ){
+ else if ( nsd->capped ) {
cursor = findTableScan( c.c_str() , BSONObj() );
}
else {
@@ -1568,9 +1573,9 @@ namespace mongo {
md5_state_t st;
md5_init(&st);
-
+
long long n = 0;
- while ( cursor->ok() ){
+ while ( cursor->ok() ) {
BSONObj c = cursor->current();
md5_append( &st , (const md5_byte_t*)c.objdata() , c.objsize() );
n++;
@@ -1579,7 +1584,7 @@ namespace mongo {
md5digest d;
md5_finish(&st, d);
string hash = digestToString( d );
-
+
bb.append( c.c_str() + ( dbname.size() + 1 ) , hash );
md5_append( &globalState , (const md5_byte_t*)hash.c_str() , hash.size() );
@@ -1598,9 +1603,9 @@ namespace mongo {
} dbhashCmd;
/* for diagnostic / testing purposes. */
- class CmdSleep : public Command {
+ class CmdSleep : public Command {
public:
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual bool adminOnly() const { return true; }
virtual bool logTheOp() { return false; }
virtual bool slaveOk() const { return true; }
@@ -1610,13 +1615,13 @@ namespace mongo {
}
CmdSleep() : Command("sleep") { }
bool run(const string& ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
-
-
+
+
int secs = 100;
if ( cmdObj["secs"].isNumber() )
secs = cmdObj["secs"].numberInt();
-
- if( cmdObj.getBoolField("w") ) {
+
+ if( cmdObj.getBoolField("w") ) {
writelock lk("");
sleepsecs(secs);
}
@@ -1631,24 +1636,24 @@ namespace mongo {
class AvailableQueryOptions : public Command {
public:
- AvailableQueryOptions() : Command( "availablequeryoptions" ){}
+ AvailableQueryOptions() : Command( "availablequeryoptions" ) {}
virtual bool slaveOk() const { return true; }
virtual LockType locktype() const { return NONE; }
virtual bool requiresAuth() { return false; }
- virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
result << "options" << QueryOption_AllSupported;
return true;
}
- } availableQueryOptionsCmd;
-
+ } availableQueryOptionsCmd;
+
// just for testing
class CapTrunc : public Command {
public:
- CapTrunc() : Command( "captrunc" ){}
+ CapTrunc() : Command( "captrunc" ) {}
virtual bool slaveOk() const { return false; }
virtual LockType locktype() const { return WRITE; }
virtual bool requiresAuth() { return true; }
- virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string coll = cmdObj[ "captrunc" ].valuestrsafe();
uassert( 13416, "captrunc must specify a collection", !coll.empty() );
string ns = dbname + "." + coll;
@@ -1666,16 +1671,16 @@ namespace mongo {
nsd->cappedTruncateAfter( ns.c_str(), end, inc );
return true;
}
- } capTruncCmd;
-
+ } capTruncCmd;
+
// just for testing
class EmptyCapped : public Command {
public:
- EmptyCapped() : Command( "emptycapped" ){}
+ EmptyCapped() : Command( "emptycapped" ) {}
virtual bool slaveOk() const { return false; }
virtual LockType locktype() const { return WRITE; }
virtual bool requiresAuth() { return true; }
- virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string coll = cmdObj[ "emptycapped" ].valuestrsafe();
uassert( 13428, "emptycapped must specify a collection", !coll.empty() );
string ns = dbname + "." + coll;
@@ -1684,9 +1689,9 @@ namespace mongo {
nsd->emptyCappedCollection( ns.c_str() );
return true;
}
- } emptyCappedCmd;
-
- /**
+ } emptyCappedCmd;
+
+ /**
* this handles
- auth
- locking
@@ -1694,17 +1699,17 @@ namespace mongo {
then calls run()
*/
bool execCommand( Command * c ,
- Client& client , int queryOptions ,
- const char *cmdns, BSONObj& cmdObj ,
- BSONObjBuilder& result,
- bool fromRepl ){
-
+ Client& client , int queryOptions ,
+ const char *cmdns, BSONObj& cmdObj ,
+ BSONObjBuilder& result,
+ bool fromRepl ) {
+
string dbname = nsToDatabase( cmdns );
-
- AuthenticationInfo *ai = client.getAuthenticationInfo();
- if( c->adminOnly() && c->localHostOnlyIfNoAuth( cmdObj ) && noauth && !ai->isLocalHost ) {
- result.append( "errmsg" ,
+ AuthenticationInfo *ai = client.getAuthenticationInfo();
+
+ if( c->adminOnly() && c->localHostOnlyIfNoAuth( cmdObj ) && noauth && !ai->isLocalHost ) {
+ result.append( "errmsg" ,
"unauthorized: this command must run from localhost when running db without auth" );
log() << "command denied: " << cmdObj.toString() << endl;
return false;
@@ -1714,32 +1719,32 @@ namespace mongo {
result.append( "errmsg" , "access denied; use admin db" );
log() << "command denied: " << cmdObj.toString() << endl;
return false;
- }
+ }
- if ( cmdObj["help"].trueValue() ){
+ if ( cmdObj["help"].trueValue() ) {
stringstream ss;
ss << "help for: " << c->name << " ";
c->help( ss );
result.append( "help" , ss.str() );
result.append( "lockType" , c->locktype() );
return true;
- }
+ }
- bool canRunHere =
+ bool canRunHere =
isMaster( dbname.c_str() ) ||
c->slaveOk() ||
( c->slaveOverrideOk() && ( queryOptions & QueryOption_SlaveOk ) ) ||
fromRepl;
- if ( ! canRunHere ){
+ if ( ! canRunHere ) {
result.append( "errmsg" , "not master" );
return false;
}
if ( c->adminOnly() )
log( 2 ) << "command: " << cmdObj << endl;
-
- if ( c->locktype() == Command::NONE ){
+
+ if ( c->locktype() == Command::NONE ) {
// we also trust that this won't crash
string errmsg;
int ok = c->run( dbname , cmdObj , errmsg , result , fromRepl );
@@ -1747,35 +1752,35 @@ namespace mongo {
result.append( "errmsg" , errmsg );
return ok;
}
-
+
bool needWriteLock = c->locktype() == Command::WRITE;
-
- if ( ! needWriteLock ){
+
+ if ( ! needWriteLock ) {
assert( ! c->logTheOp() );
}
mongolock lk( needWriteLock );
Client::Context ctx( dbname , dbpath , &lk , c->requiresAuth() );
-
+
try {
string errmsg;
- if ( ! c->run(dbname, cmdObj, errmsg, result, fromRepl ) ){
+ if ( ! c->run(dbname, cmdObj, errmsg, result, fromRepl ) ) {
result.append( "errmsg" , errmsg );
return false;
}
}
- catch ( DBException& e ){
+ catch ( DBException& e ) {
stringstream ss;
ss << "exception: " << e.what();
result.append( "errmsg" , ss.str() );
result.append( "code" , e.getCode() );
return false;
}
-
- if ( c->logTheOp() && ! fromRepl ){
+
+ if ( c->logTheOp() && ! fromRepl ) {
logOp("c", cmdns, cmdObj);
}
-
+
return true;
}
@@ -1791,9 +1796,9 @@ namespace mongo {
cc().curop()->ensureStarted();
string dbname = nsToDatabase( ns );
- if( logLevel >= 1 )
+ if( logLevel >= 1 )
log() << "run command " << ns << ' ' << _cmdobj << endl;
-
+
const char *p = strchr(ns, '.');
if ( !p ) return false;
if ( strcmp(p, ".$cmd") != 0 ) return false;
@@ -1813,10 +1818,10 @@ namespace mongo {
bool ok = false;
BSONElement e = jsobj.firstElement();
-
+
Command * c = e.type() ? Command::findCommand( e.fieldName() ) : 0;
- if ( c ){
+ if ( c ) {
ok = execCommand( c , client , queryOptions , ns , jsobj , anObjBuilder , fromRepl );
}
else {
@@ -1832,5 +1837,5 @@ namespace mongo {
return true;
}
-
+
} // namespace mongo
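
The execCommand() hunks above are brace-style cleanups, but the function's check ordering is the interesting part: localhost/admin gating first, then the help shortcut, then the master/slave check, and only then lock acquisition and run(). A minimal sketch of that ordering, using a simplified stand-in struct rather than the real mongo::Command interface:

// Hedged sketch of the execCommand() check ordering shown above.
// CommandSketch is invented for illustration; it is not mongo::Command.
#include <string>

struct CommandSketch {
    bool adminOnly;      // must run against the admin db
    bool slaveOk;        // may run on a non-master node
    bool needWriteLock;  // locktype() == WRITE in the real code
    bool (*run)(std::string& errmsg);
};

bool execCommandSketch(const CommandSketch& c, const std::string& dbname,
                       bool isMaster, bool fromRepl, std::string& errmsg) {
    // 1. authorization / admin-db checks come first
    if (c.adminOnly && dbname != "admin") {
        errmsg = "access denied; use admin db";
        return false;
    }
    // 2. replication state: refuse on a slave unless the command allows it
    bool canRunHere = isMaster || c.slaveOk || fromRepl;
    if (!canRunHere) {
        errmsg = "not master";
        return false;
    }
    // 3. a write lock would be taken here iff c.needWriteLock; run() then
    //    executes with the context set up, and exceptions from it are
    //    converted into { errmsg, code } fields as in the hunk above
    return c.run(errmsg);
}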
diff --git a/db/dbcommands_admin.cpp b/db/dbcommands_admin.cpp
index 14cac9998de..c1e43e66590 100644
--- a/db/dbcommands_admin.cpp
+++ b/db/dbcommands_admin.cpp
@@ -37,22 +37,22 @@ namespace mongo {
class CleanCmd : public Command {
public:
- CleanCmd() : Command( "clean" ){}
+ CleanCmd() : Command( "clean" ) {}
virtual bool slaveOk() const { return true; }
- virtual LockType locktype() const { return WRITE; }
-
+ virtual LockType locktype() const { return WRITE; }
+
virtual void help(stringstream& h) const { h << "internal"; }
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
string dropns = dbname + "." + cmdObj.firstElement().valuestrsafe();
-
+
if ( !cmdLine.quiet )
tlog() << "CMD: clean " << dropns << endl;
-
+
NamespaceDetails *d = nsdetails(dropns.c_str());
-
- if ( ! d ){
+
+ if ( ! d ) {
errmsg = "ns not found";
return 0;
}
@@ -63,39 +63,39 @@ namespace mongo {
result.append("ns", dropns.c_str());
return 1;
}
-
+
} cleanCmd;
-
+
class ValidateCmd : public Command {
public:
- ValidateCmd() : Command( "validate" ){}
+ ValidateCmd() : Command( "validate" ) {}
virtual bool slaveOk() const {
return true;
}
-
+
virtual void help(stringstream& h) const { h << "Validate contents of a namespace by scanning its data structures for correctness. Slow."; }
- virtual LockType locktype() const { return READ; }
+ virtual LockType locktype() const { return READ; }
//{ validate: "collectionnamewithoutthedbpart" [, scandata: <bool>] } */
-
- bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+
+ bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
string ns = dbname + "." + cmdObj.firstElement().valuestrsafe();
NamespaceDetails * d = nsdetails( ns.c_str() );
if ( !cmdLine.quiet )
tlog() << "CMD: validate " << ns << endl;
- if ( ! d ){
+ if ( ! d ) {
errmsg = "ns not found";
return 0;
}
-
+
result.append( "ns", ns );
result.append( "result" , validateNS( ns.c_str() , d, &cmdObj ) );
return 1;
}
-
-
+
+
string validateNS(const char *ns, NamespaceDetails *d, BSONObj *cmdObj) {
bool scanData = true;
if( cmdObj && cmdObj->hasElement("scandata") && !cmdObj->getBoolField("scandata") )
@@ -106,13 +106,13 @@ namespace mongo {
//ss << " details: " << hex << d << " ofs:" << nsindex(ns)->detailsOffset(d) << dec << endl;
if ( d->capped )
ss << " capped:" << d->capped << " max:" << d->max << '\n';
-
+
ss << " firstExtent:" << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->nsDiagnostic.toString()<< '\n';
ss << " lastExtent:" << d->lastExtent.toString() << " ns:" << d->lastExtent.ext()->nsDiagnostic.toString() << '\n';
try {
d->firstExtent.ext()->assertOk();
d->lastExtent.ext()->assertOk();
-
+
DiskLoc el = d->firstExtent;
int ne = 0;
while( !el.isNull() ) {
@@ -123,7 +123,8 @@ namespace mongo {
killCurrentOp.checkForInterrupt();
}
ss << " # extents:" << ne << '\n';
- } catch (...) {
+ }
+ catch (...) {
valid=false;
ss << " extent asserted ";
}
@@ -198,7 +199,7 @@ namespace mongo {
ndel++;
if ( loc.questionable() ) {
- if( d->capped && !loc.isValid() && i == 1 ) {
+ if( d->capped && !loc.isValid() && i == 1 ) {
/* the constructor for NamespaceDetails intentionally sets deletedList[1] to invalid
see comments in namespace.h
*/
@@ -218,7 +219,8 @@ namespace mongo {
k++;
killCurrentOp.checkForInterrupt();
}
- } catch (...) {
+ }
+ catch (...) {
ss <<" ?exception in deleted chain for bucket " << i << endl;
valid = false;
}
@@ -236,7 +238,7 @@ namespace mongo {
while( i.more() ) {
IndexDetails& id = i.next();
ss << " " << id.indexNamespace() << " keys:" <<
- id.head.btree()->fullValidate(id.head, id.keyPattern()) << endl;
+ id.head.btree()->fullValidate(id.head, id.keyPattern()) << endl;
}
}
catch (...) {
@@ -261,36 +263,36 @@ namespace mongo {
extern unsigned lockedForWriting;
extern mongo::mutex lockedForWritingMutex;
-/*
- class UnlockCommand : public Command {
- public:
- UnlockCommand() : Command( "unlock" ) { }
- virtual bool readOnly() { return true; }
- virtual bool slaveOk() const { return true; }
- virtual bool adminOnly() const { return true; }
- virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- if( lockedForWriting ) {
- log() << "command: unlock requested" << endl;
- errmsg = "unlock requested";
- unlockRequested = true;
- }
- else {
- errmsg = "not locked, so cannot unlock";
- return 0;
+ /*
+ class UnlockCommand : public Command {
+ public:
+ UnlockCommand() : Command( "unlock" ) { }
+ virtual bool readOnly() { return true; }
+ virtual bool slaveOk() const { return true; }
+ virtual bool adminOnly() const { return true; }
+ virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if( lockedForWriting ) {
+ log() << "command: unlock requested" << endl;
+ errmsg = "unlock requested";
+ unlockRequested = true;
+ }
+ else {
+ errmsg = "not locked, so cannot unlock";
+ return 0;
+ }
+ return 1;
}
- return 1;
- }
-
- } unlockCommand;
-*/
+
+ } unlockCommand;
+ */
/* see unlockFsync() for unlocking:
db.$cmd.sys.unlock.findOne()
*/
class FSyncCommand : public Command {
- class LockDBJob : public BackgroundJob {
+ class LockDBJob : public BackgroundJob {
protected:
string name() { return "lockdbjob"; }
- void run() {
+ void run() {
Client::initThread("fsyncjob");
Client& c = cc();
{
@@ -301,8 +303,8 @@ namespace mongo {
MemoryMappedFile::flushAll(true);
log() << "db is now locked for snapshotting, no writes allowed. use db.$cmd.sys.unlock.findOne() to unlock" << endl;
_ready = true;
- while( 1 ) {
- if( unlockRequested ) {
+ while( 1 ) {
+ if( unlockRequested ) {
unlockRequested = false;
break;
}
@@ -316,16 +318,16 @@ namespace mongo {
}
public:
bool& _ready;
- LockDBJob(bool& ready) : BackgroundJob( true /* delete self */ ), _ready(ready){
+ LockDBJob(bool& ready) : BackgroundJob( true /* delete self */ ), _ready(ready) {
_ready = false;
}
};
public:
- FSyncCommand() : Command( "fsync" ){}
- virtual LockType locktype() const { return WRITE; }
+ FSyncCommand() : Command( "fsync" ) {}
+ virtual LockType locktype() const { return WRITE; }
virtual bool slaveOk() const { return true; }
virtual bool adminOnly() const { return true; }
- /*virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) {
+ /*virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) {
string x = cmdObj["exec"].valuestrsafe();
return !x.empty();
}*/
@@ -336,12 +338,12 @@ namespace mongo {
bool lock = cmdObj["lock"].trueValue();
log() << "CMD fsync: sync:" << sync << " lock:" << lock << endl;
- if( lock ) {
+ if( lock ) {
uassert(12034, "fsync: can't lock while an unlock is pending", !unlockRequested);
uassert(12032, "fsync: sync option must be true when using lock", sync);
- /* With releaseEarly(), we must be extremely careful we don't do anything
- where we would have assumed we were locked. profiling is one of those things.
- Perhaps at profile time we could check if we released early -- however,
+ /* With releaseEarly(), we must be extremely careful we don't do anything
+ where we would have assumed we were locked. profiling is one of those things.
+ Perhaps at profile time we could check if we released early -- however,
              we need to be careful to keep that code very fast; it's a very common code path when on.
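PLACEHOLDER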
*/
uassert(12033, "fsync: profiling must be off to enter locked mode", cc().database()->profile == 0);
@@ -350,7 +352,7 @@ namespace mongo {
dbMutex.releaseEarly();
l->go();
// don't return until background thread has acquired the write lock
- while( !ready ) {
+ while( !ready ) {
sleepmillis(10);
}
result.append("info", "now locked against writes, use db.$cmd.sys.unlock.findOne() to unlock");
@@ -360,9 +362,9 @@ namespace mongo {
}
return 1;
}
-
+
} fsyncCmd;
-
+
}
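
The FSyncCommand hunks above implement a small handshake: run() spawns LockDBJob, spins until _ready, and the job then holds the database locked until unlockRequested flips. A hedged standard-library analogue, with std::thread and atomics standing in for BackgroundJob and the db mutex:

// Minimal sketch of the fsync+lock handshake, assuming the same two flags.
#include <atomic>
#include <chrono>
#include <thread>

std::atomic<bool> ready(false);
std::atomic<bool> unlockRequested(false);

void lockDBJob() {               // plays the role of LockDBJob::run()
    // ... acquire the global write lock and flush files to disk here ...
    ready = true;                // lets the command return to the client
    while (!unlockRequested)     // hold until db.$cmd.sys.unlock.findOne()
        std::this_thread::sleep_for(std::chrono::milliseconds(20));
    unlockRequested = false;     // ... release the lock ...
}

int main() {
    std::thread t(lockDBJob);
    while (!ready)               // mirrors the while( !ready ) sleepmillis(10)
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    // db is now locked against writes; a later request flips the flag
    unlockRequested = true;
    t.join();
}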
diff --git a/db/dbcommands_generic.cpp b/db/dbcommands_generic.cpp
index 34ee229e7bf..b8cabd46064 100644
--- a/db/dbcommands_generic.cpp
+++ b/db/dbcommands_generic.cpp
@@ -52,12 +52,12 @@ namespace mongo {
CmdBuildInfo() : Command( "buildInfo", true, "buildinfo" ) {}
virtual bool slaveOk() const { return true; }
virtual bool adminOnly() const { return true; }
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual void help( stringstream &help ) const {
help << "get version #, etc.\n";
help << "{ buildinfo:1 }";
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
result << "version" << versionString << "gitVersion" << gitVersion() << "sysInfo" << sysInfo();
result << "bits" << ( sizeof( int* ) == 4 ? 32 : 64 );
result.appendBool( "debug" , debug );
@@ -66,8 +66,8 @@ namespace mongo {
}
} cmdBuildInfo;
- /** experimental. either remove or add support in repl sets also. in a repl set, getting this setting from the
- repl set config could make sense.
+ /** experimental. either remove or add support in repl sets also. in a repl set, getting this setting from the
+ repl set config could make sense.
*/
unsigned replApplyBatchSize = 1;
@@ -76,7 +76,7 @@ namespace mongo {
CmdGet() : Command( "getParameter" ) { }
virtual bool slaveOk() const { return true; }
virtual bool adminOnly() const { return true; }
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual void help( stringstream &help ) const {
help << "get administrative option(s)\nexample:\n";
help << "{ getParameter:1, notablescan:1 }\n";
@@ -89,9 +89,9 @@ namespace mongo {
}
bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
bool all = *cmdObj.firstElement().valuestrsafe() == '*';
-
+
int before = result.len();
-
+
if( all || cmdObj.hasElement("quiet") ) {
result.append("quiet", cmdLine.quiet );
}
@@ -106,7 +106,7 @@ namespace mongo {
}
if( all || cmdObj.hasElement("replApplyBatchSize") ) {
result.append("replApplyBatchSize", replApplyBatchSize);
- }
+ }
if ( before == result.len() ) {
errmsg = "no option found to get";
@@ -121,7 +121,7 @@ namespace mongo {
CmdSet() : Command( "setParameter" ) { }
virtual bool slaveOk() const { return true; }
virtual bool adminOnly() const { return true; }
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual void help( stringstream &help ) const {
help << "set administrative option(s)\nexample:\n";
help << "{ setParameter:1, notablescan:true }\n";
@@ -130,7 +130,7 @@ namespace mongo {
help << " logLevel\n";
help << " quiet\n";
}
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
int s = 0;
if( cmdObj.hasElement("notablescan") ) {
result.append("was", cmdLine.noTableScan);
@@ -174,63 +174,63 @@ namespace mongo {
class PingCommand : public Command {
public:
- PingCommand() : Command( "ping" ){}
+ PingCommand() : Command( "ping" ) {}
virtual bool slaveOk() const { return true; }
virtual void help( stringstream &help ) const { help << "a way to check that the server is alive. responds immediately even if server is in a db lock."; }
virtual LockType locktype() const { return NONE; }
virtual bool requiresAuth() { return false; }
- virtual bool run(const string& badns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ virtual bool run(const string& badns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
// IMPORTANT: Don't put anything in here that might lock db - including authentication
return true;
}
} pingCmd;
-
+
class FeaturesCmd : public Command {
public:
- FeaturesCmd() : Command( "features", true ){}
+ FeaturesCmd() : Command( "features", true ) {}
void help(stringstream& h) const { h << "return build level feature settings"; }
virtual bool slaveOk() const { return true; }
- virtual bool readOnly(){ return true; }
+ virtual bool readOnly() { return true; }
virtual LockType locktype() const { return NONE; }
- virtual bool run(const string& ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl){
- if ( globalScriptEngine ){
+ virtual bool run(const string& ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if ( globalScriptEngine ) {
BSONObjBuilder bb( result.subobjStart( "js" ) );
result.append( "utf8" , globalScriptEngine->utf8Ok() );
bb.done();
}
- if ( cmdObj["oidReset"].trueValue() ){
+ if ( cmdObj["oidReset"].trueValue() ) {
result.append( "oidMachineOld" , OID::getMachineId() );
OID::regenMachineId();
}
result.append( "oidMachine" , OID::getMachineId() );
return true;
}
-
+
} featuresCmd;
class LogRotateCmd : public Command {
public:
- LogRotateCmd() : Command( "logRotate" ){}
- virtual LockType locktype() const { return NONE; }
+ LogRotateCmd() : Command( "logRotate" ) {}
+ virtual LockType locktype() const { return NONE; }
virtual bool slaveOk() const { return true; }
virtual bool adminOnly() const { return true; }
virtual bool run(const string& ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
rotateLogs();
return 1;
- }
-
+ }
+
} logRotateCmd;
-
+
class ListCommandsCmd : public Command {
public:
virtual void help( stringstream &help ) const { help << "get a list of all db commands"; }
- ListCommandsCmd() : Command( "listCommands", false ){}
- virtual LockType locktype() const { return NONE; }
+ ListCommandsCmd() : Command( "listCommands", false ) {}
+ virtual LockType locktype() const { return NONE; }
virtual bool slaveOk() const { return true; }
virtual bool adminOnly() const { return false; }
virtual bool run(const string& ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
BSONObjBuilder b( result.subobjStart( "commands" ) );
- for ( map<string,Command*>::iterator i=_commands->begin(); i!=_commands->end(); ++i ){
+ for ( map<string,Command*>::iterator i=_commands->begin(); i!=_commands->end(); ++i ) {
Command * c = i->second;
// don't show oldnames
@@ -252,10 +252,10 @@ namespace mongo {
b.done();
return 1;
- }
+ }
} listCommandsCmd;
-
+
class CmdShutdown : public Command {
public:
virtual bool requiresAuth() { return true; }
@@ -267,7 +267,7 @@ namespace mongo {
virtual bool slaveOk() const {
return true;
}
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual void help( stringstream& help ) const {
help << "shutdown the database. must be ran against admin db and either (1) ran from localhost or (2) authenticated.\n";
}
@@ -277,7 +277,7 @@ namespace mongo {
if ( c ) {
c->shutdown();
}
-
+
log() << "terminating, shutdown command received" << endl;
dbexit( EXIT_CLEAN , "shutdown called" , true ); // this never returns
@@ -298,7 +298,7 @@ namespace mongo {
virtual bool slaveOk() const {
return true;
}
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
CmdForceError() : Command("forceerror") {}
bool run(const string& dbnamne, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
uassert( 10038 , "forced error", false);
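
CmdGet above relies on a small trick: it records result.len() before appending, appends only the requested (or all) parameters, and reports "no option found to get" when the length is unchanged. The same idea sketched with a plain map standing in for BSONObjBuilder:

// Hedged sketch of the getParameter "appended anything?" pattern.
#include <map>
#include <string>

typedef std::map<std::string, std::string> Obj;

bool getParameterSketch(const Obj& settings, const Obj& request, Obj& result,
                        std::string& errmsg) {
    bool all = request.count("*") > 0;     // { getParameter: '*' } form
    size_t before = result.size();         // plays the role of result.len()
    for (Obj::const_iterator i = settings.begin(); i != settings.end(); ++i)
        if (all || request.count(i->first))
            result[i->first] = i->second;
    if (result.size() == before) {         // nothing matched the request
        errmsg = "no option found to get";
        return false;
    }
    return true;
}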
diff --git a/db/dbeval.cpp b/db/dbeval.cpp
index 14d9b37e910..31d52609b6e 100644
--- a/db/dbeval.cpp
+++ b/db/dbeval.cpp
@@ -66,7 +66,7 @@ namespace mongo {
errmsg = (string)"compile failed: " + s->getError();
return false;
}
-
+
if ( e.type() == CodeWScope )
s->init( e.codeWScopeScopeData() );
s->localConnect( dbName.c_str() );
@@ -100,7 +100,7 @@ namespace mongo {
errmsg += s->getError();
return false;
}
-
+
s->append( result , "retval" , "return" );
return true;
@@ -122,18 +122,18 @@ namespace mongo {
virtual LockType locktype() const { return NONE; }
CmdEval() : Command("eval", false, "$eval") { }
bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
-
+
AuthenticationInfo *ai = cc().getAuthenticationInfo();
uassert( 12598 , "$eval reads unauthorized", ai->isAuthorizedReads(dbname.c_str()) );
-
- if ( cmdObj["nolock"].trueValue() ){
+
+ if ( cmdObj["nolock"].trueValue() ) {
return dbEval(dbname, cmdObj, result, errmsg);
}
-
+
// write security will be enforced in DBDirectClient
mongolock lk( ai->isAuthorized( dbname.c_str() ) );
Client::Context ctx( dbname );
-
+
return dbEval(dbname, cmdObj, result, errmsg);
}
} cmdeval;
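
CmdEval::run above takes the global lock only when {nolock:true} is absent. A tiny sketch of that conditional-locking shape, with a deferred std::unique_lock and std::mutex standing in for mongolock:

// Minimal sketch, assuming a single global lock object.
#include <mutex>

std::mutex globalLock;   // stand-in for the db-wide lock

bool runEvalSketch(bool nolock) {
    std::unique_lock<std::mutex> lk(globalLock, std::defer_lock);
    if (!nolock)
        lk.lock();       // mongolock lk(...) in the real code
    // ... execute the JavaScript with or without the lock held ...
    return true;
}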
diff --git a/db/dbhelpers.cpp b/db/dbhelpers.cpp
index dc979dc2f06..9162415ab0a 100644
--- a/db/dbhelpers.cpp
+++ b/db/dbhelpers.cpp
@@ -41,7 +41,7 @@ namespace mongo {
}
}
- if( d->nIndexes >= NamespaceDetails::NIndexesMax ) {
+ if( d->nIndexes >= NamespaceDetails::NIndexesMax ) {
problem() << "Helper::ensureIndex fails, MaxIndexes exceeded " << ns << '\n';
return;
}
@@ -78,7 +78,8 @@ namespace mongo {
one_ = c_->current();
loc_ = c_->currLoc();
setStop();
- } else {
+ }
+ else {
c_->advance();
}
}
@@ -96,11 +97,11 @@ namespace mongo {
BSONObj one_;
DiskLoc loc_;
};
-
- /* fetch a single object from collection ns that matches query
+
+ /* fetch a single object from collection ns that matches query
set your db SavedContext first
*/
- bool Helpers::findOne(const char *ns, const BSONObj &query, BSONObj& result, bool requireIndex) {
+ bool Helpers::findOne(const char *ns, const BSONObj &query, BSONObj& result, bool requireIndex) {
MultiPlanScanner s( ns, query, BSONObj(), 0, !requireIndex );
FindOne original( requireIndex );
shared_ptr< FindOne > res = s.runOp( original );
@@ -112,10 +113,10 @@ namespace mongo {
return true;
}
- /* fetch a single object from collection ns that matches query
+ /* fetch a single object from collection ns that matches query
set your db SavedContext first
*/
- DiskLoc Helpers::findOne(const char *ns, const BSONObj &query, bool requireIndex) {
+ DiskLoc Helpers::findOne(const char *ns, const BSONObj &query, bool requireIndex) {
MultiPlanScanner s( ns, query, BSONObj(), 0, !requireIndex );
FindOne original( requireIndex );
shared_ptr< FindOne > res = s.runOp( original );
@@ -125,7 +126,7 @@ namespace mongo {
}
bool Helpers::findById(Client& c, const char *ns, BSONObj query, BSONObj& result ,
- bool * nsFound , bool * indexFound ){
+ bool * nsFound , bool * indexFound ) {
dbMutex.assertAtLeastReadLocked();
Database *database = c.database();
assert( database );
@@ -134,7 +135,7 @@ namespace mongo {
return false;
if ( nsFound )
*nsFound = 1;
-
+
int idxNo = d->findIdIndex();
if ( idxNo < 0 )
return false;
@@ -142,9 +143,9 @@ namespace mongo {
*indexFound = 1;
IndexDetails& i = d->idx( idxNo );
-
+
BSONObj key = i.getKeyFromQuery( query );
-
+
DiskLoc loc = i.head.btree()->findSingle( i , i.head , key );
if ( loc.isNull() )
return false;
@@ -152,12 +153,12 @@ namespace mongo {
return true;
}
- DiskLoc Helpers::findById(NamespaceDetails *d, BSONObj idquery) {
- int idxNo = d->findIdIndex();
- uassert(13430, "no _id index", idxNo>=0);
- IndexDetails& i = d->idx( idxNo );
- BSONObj key = i.getKeyFromQuery( idquery );
- return i.head.btree()->findSingle( i , i.head , key );
+ DiskLoc Helpers::findById(NamespaceDetails *d, BSONObj idquery) {
+ int idxNo = d->findIdIndex();
+ uassert(13430, "no _id index", idxNo>=0);
+ IndexDetails& i = d->idx( idxNo );
+ BSONObj key = i.getKeyFromQuery( idquery );
+ return i.head.btree()->findSingle( i , i.head , key );
}
bool Helpers::isEmpty(const char *ns, bool doAuth) {
@@ -185,17 +186,17 @@ namespace mongo {
bool Helpers::getLast(const char *ns, BSONObj& result) {
Client::Context ctx(ns);
shared_ptr<Cursor> c = findTableScan(ns, reverseNaturalObj);
- if( !c->ok() )
+ if( !c->ok() )
return false;
result = c->current();
return true;
}
- void Helpers::upsert( const string& ns , const BSONObj& o ){
+ void Helpers::upsert( const string& ns , const BSONObj& o ) {
BSONElement e = o["_id"];
assert( e.type() );
BSONObj id = e.wrap();
-
+
OpDebug debug;
Client::Context context(ns);
updateObjects(ns.c_str(), o, /*pattern=*/id, /*upsert=*/true, /*multi=*/false , /*logtheop=*/true , debug );
@@ -213,12 +214,12 @@ namespace mongo {
_updateObjects(/*god=*/true, ns, obj, /*pattern=*/BSONObj(), /*upsert=*/true, /*multi=*/false , logTheOp , debug );
}
- BSONObj Helpers::toKeyFormat( const BSONObj& o , BSONObj& key ){
+ BSONObj Helpers::toKeyFormat( const BSONObj& o , BSONObj& key ) {
BSONObjBuilder me;
BSONObjBuilder k;
BSONObjIterator i( o );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
k.append( e.fieldName() , 1 );
me.appendAs( e , "" );
@@ -226,8 +227,8 @@ namespace mongo {
key = k.obj();
return me.obj();
}
-
- long long Helpers::removeRange( const string& ns , const BSONObj& min , const BSONObj& max , bool yield , bool maxInclusive , RemoveCallback * callback ){
+
+ long long Helpers::removeRange( const string& ns , const BSONObj& min , const BSONObj& max , bool yield , bool maxInclusive , RemoveCallback * callback ) {
BSONObj keya , keyb;
BSONObj minClean = toKeyFormat( min , keya );
BSONObj maxClean = toKeyFormat( max , keyb );
@@ -240,32 +241,32 @@ namespace mongo {
int ii = nsd->findIndexByKeyPattern( keya );
assert( ii >= 0 );
-
+
long long num = 0;
-
+
IndexDetails& i = nsd->idx( ii );
shared_ptr<Cursor> c( new BtreeCursor( nsd , ii , i , minClean , maxClean , maxInclusive, 1 ) );
auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
cc->setDoingDeletes( true );
-
- while ( c->ok() ){
+
+ while ( c->ok() ) {
DiskLoc rloc = c->currLoc();
BSONObj key = c->currKey();
if ( callback )
callback->goingToDelete( c->current() );
-
+
c->advance();
c->noteLocation();
-
+
logOp( "d" , ns.c_str() , rloc.obj()["_id"].wrap() );
theDataFileMgr.deleteRecord(ns.c_str() , rloc.rec(), rloc);
num++;
c->checkLocation();
- if ( yield && ! cc->yieldSometimes() ){
+ if ( yield && ! cc->yieldSometimes() ) {
// cursor got finished by someone else, so we're done
cc.release(); // if the collection/db is dropped, cc may be deleted
break;
@@ -290,11 +291,12 @@ namespace mongo {
BSONObjBuilder result;
dropCollection( name_, errmsg, result );
}
- } catch ( ... ) {
+ }
+ catch ( ... ) {
problem() << "exception cleaning up DbSet" << endl;
}
}
-
+
void DbSet::reset( const string &name, const BSONObj &key ) {
if ( !name.empty() )
name_ = name;
@@ -303,74 +305,77 @@ namespace mongo {
Client::Context c( name_.c_str() );
if ( nsdetails( name_.c_str() ) ) {
Helpers::emptyCollection( name_.c_str() );
- } else {
+ }
+ else {
string err;
massert( 10303 , err, userCreateNS( name_.c_str(), fromjson( "{autoIndexId:false}" ), err, false ) );
}
- Helpers::ensureIndex( name_.c_str(), key_, true, "setIdx" );
+ Helpers::ensureIndex( name_.c_str(), key_, true, "setIdx" );
}
-
+
bool DbSet::get( const BSONObj &obj ) const {
Client::Context c( name_.c_str() );
BSONObj temp;
return Helpers::findOne( name_.c_str(), obj, temp, true );
}
-
+
void DbSet::set( const BSONObj &obj, bool val ) {
Client::Context c( name_.c_str() );
if ( val ) {
try {
BSONObj k = obj;
theDataFileMgr.insertWithObjMod( name_.c_str(), k, false );
- } catch ( DBException& ) {
+ }
+ catch ( DBException& ) {
// dup key - already in set
}
- } else {
+ }
+ else {
deleteObjects( name_.c_str(), obj, true, false, false );
- }
+ }
}
- RemoveSaver::RemoveSaver( const string& a , const string& b , const string& why) : _out(0){
+ RemoveSaver::RemoveSaver( const string& a , const string& b , const string& why) : _out(0) {
static int NUM = 0;
-
+
_root = dbpath;
if ( a.size() )
_root /= a;
if ( b.size() )
_root /= b;
assert( a.size() || b.size() );
-
+
_file = _root;
-
+
stringstream ss;
ss << why << "." << terseCurrentTime(false) << "." << NUM++ << ".bson";
_file /= ss.str();
}
-
- RemoveSaver::~RemoveSaver(){
- if ( _out ){
+
+ RemoveSaver::~RemoveSaver() {
+ if ( _out ) {
_out->close();
delete _out;
_out = 0;
}
}
-
- void RemoveSaver::goingToDelete( const BSONObj& o ){
- if ( ! _out ){
+
+ void RemoveSaver::goingToDelete( const BSONObj& o ) {
+ if ( ! _out ) {
create_directories( _root );
_out = new ofstream();
_out->open( _file.string().c_str() , ios_base::out | ios_base::binary );
- if ( ! _out->good() ){
+ if ( ! _out->good() ) {
log( LL_WARNING ) << "couldn't create file: " << _file.string() << " for remove saving" << endl;
delete _out;
_out = 0;
return;
}
-
+
}
_out->write( o.objdata() , o.objsize() );
}
-
-
+
+
} // namespace mongo
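
The Helpers::removeRange loop above is careful to advance() and noteLocation() before deleting the record the cursor points at, so the deletion cannot invalidate the cursor's position. The same advance-before-erase idiom in self-contained form, with std::map standing in for the btree cursor:

// Hedged analogue: delete matching entries while iterating safely.
#include <map>

template <typename K, typename V, typename Pred>
long long removeRangeSketch(std::map<K, V>& coll, Pred shouldDelete) {
    long long num = 0;
    for (typename std::map<K, V>::iterator it = coll.begin(); it != coll.end(); ) {
        typename std::map<K, V>::iterator cur = it++;  // advance first,
        if (shouldDelete(cur->first)) {                // then it is safe to
            coll.erase(cur);                           // delete behind us
            ++num;
        }
    }
    return num;
}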
diff --git a/db/dbhelpers.h b/db/dbhelpers.h
index d952613396e..e793d3f6c29 100644
--- a/db/dbhelpers.h
+++ b/db/dbhelpers.h
@@ -36,7 +36,7 @@ namespace mongo {
/**
all helpers assume locking is handled above them
*/
- struct Helpers {
+ struct Helpers {
/* ensure the specified index exists.
@@ -54,7 +54,7 @@ namespace mongo {
/* fetch a single object from collection ns that matches query.
set your db SavedContext first.
- @param query - the query to perform. note this is the low level portion of query so "orderby : ..."
+ @param query - the query to perform. note this is the low level portion of query so "orderby : ..."
won't work.
@param requireIndex if true, complain if no index for the query. a way to guard against
@@ -63,16 +63,16 @@ namespace mongo {
@return true if object found
*/
static bool findOne(const char *ns, const BSONObj &query, BSONObj& result, bool requireIndex = false);
- static DiskLoc findOne(const char *ns, const BSONObj &query, bool requireIndex);
+ static DiskLoc findOne(const char *ns, const BSONObj &query, bool requireIndex);
/**
* @param foundIndex if passed in will be set to 1 if ns and index found
* @return true if object found
*/
- static bool findById(Client&, const char *ns, BSONObj query, BSONObj& result ,
+ static bool findById(Client&, const char *ns, BSONObj query, BSONObj& result ,
bool * nsFound = 0 , bool * indexFound = 0 );
- /* uasserts if no _id index.
+ /* uasserts if no _id index.
@return null loc if not found */
static DiskLoc findById(NamespaceDetails *d, BSONObj query);
@@ -87,7 +87,7 @@ namespace mongo {
static void putSingleton(const char *ns, BSONObj obj);
static void putSingletonGod(const char *ns, BSONObj obj, bool logTheOp);
static bool getFirst(const char *ns, BSONObj& result) { return getSingleton(ns, result); }
- static bool getLast(const char *ns, BSONObj& result); // get last object int he collection; e.g. {$natural : -1}
+        static bool getLast(const char *ns, BSONObj& result); // get last object in the collection; e.g. {$natural : -1}
/**
* you have to lock
@@ -106,7 +106,7 @@ namespace mongo {
class RemoveCallback {
public:
- virtual ~RemoveCallback(){}
+ virtual ~RemoveCallback() {}
virtual void goingToDelete( const BSONObj& o ) = 0;
};
/* removeRange: operation is oplog'd */
@@ -147,13 +147,13 @@ namespace mongo {
~RemoveSaver();
void goingToDelete( const BSONObj& o );
-
+
private:
path _root;
path _file;
ofstream* _out;
-
+
};
-
+
} // namespace mongo
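
RemoveCallback above is a small observer interface: removeRange() hands each document to goingToDelete() before removing it, and RemoveSaver (in dbhelpers.cpp) is the implementation that archives them to a .bson file. The shape, sketched with strings in place of BSONObj:

// Illustrative sketch of the callback hook, not the real interface.
#include <iostream>
#include <string>

class RemoveCallbackSketch {
public:
    virtual ~RemoveCallbackSketch() {}
    virtual void goingToDelete(const std::string& doc) = 0;
};

class LoggingSaver : public RemoveCallbackSketch {  // RemoveSaver analogue
public:
    void goingToDelete(const std::string& doc) {
        std::cout << "archiving before delete: " << doc << std::endl;
    }
};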
diff --git a/db/dbmessage.h b/db/dbmessage.h
index 53d6c478234..80bf133c24c 100644
--- a/db/dbmessage.h
+++ b/db/dbmessage.h
@@ -35,7 +35,7 @@ namespace mongo {
*/
extern bool objcheck;
-
+
#pragma pack(1)
struct QueryResult : public MsgData {
long long cursorId;
@@ -50,7 +50,7 @@ namespace mongo {
int& _resultFlags() {
return dataAsInt();
}
- void setResultFlagsToOk() {
+ void setResultFlagsToOk() {
_resultFlags() = ResultFlag_AwaitCapable;
}
};
@@ -63,8 +63,7 @@ namespace mongo {
*/
class DbMessage {
public:
- DbMessage(const Message& _m) : m(_m)
- {
+ DbMessage(const Message& _m) : m(_m) {
// for received messages, Message has only one buffer
theEnd = _m.singleData()->_data + _m.header()->dataLen();
char *r = _m.singleData()->_data;
@@ -86,7 +85,7 @@ namespace mongo {
const char * afterNS() const {
return data + strlen( data ) + 1;
}
-
+
int getInt( int num ) const {
const int * foo = (const int*)afterNS();
return foo[num];
@@ -96,7 +95,7 @@ namespace mongo {
return getInt( 1 );
}
- void resetPull(){ nextjsobj = data; }
+ void resetPull() { nextjsobj = data; }
        int pullInt() const { return const_cast<DbMessage*>(this)->pullInt(); } // call the non-const overload; plain pullInt() here would recurse
int& pullInt() {
if ( nextjsobj == data )
@@ -140,10 +139,10 @@ namespace mongo {
BSONObj js(nextjsobj);
massert( 10305 , "Client Error: Invalid object size", js.objsize() > 3 );
massert( 10306 , "Client Error: Next object larger than space left in message",
- js.objsize() < ( theEnd - data ) );
+ js.objsize() < ( theEnd - data ) );
if ( objcheck && !js.valid() ) {
massert( 10307 , "Client Error: bad object in message", false);
- }
+ }
nextjsobj += js.objsize();
if ( nextjsobj >= theEnd )
nextjsobj = 0;
@@ -152,11 +151,11 @@ namespace mongo {
const Message& msg() const { return m; }
- void markSet(){
+ void markSet() {
mark = nextjsobj;
}
-
- void markReset(){
+
+ void markReset() {
nextjsobj = mark;
}
@@ -180,7 +179,7 @@ namespace mongo {
int queryOptions;
BSONObj query;
BSONObj fields;
-
+
/* parses the message into the above fields */
QueryMessage(DbMessage& d) {
ns = d.getns();
@@ -232,8 +231,7 @@ namespace mongo {
/* object reply helper. */
inline void replyToQuery(int queryResultFlags,
AbstractMessagingPort* p, Message& requestMsg,
- BSONObj& responseObj)
- {
+ BSONObj& responseObj) {
replyToQuery(queryResultFlags,
p, requestMsg,
(void *) responseObj.objdata(), responseObj.objsize(), 1);
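
DbMessage above is a forward-only parser over a single received buffer: every pull*/next* call verifies the read stays inside [data, theEnd) and then advances the read pointer. A minimal sketch of that bounds-checked cursor idiom, with asserts standing in for massert:

// Self-contained sketch of the forward-only wire-buffer reader.
#include <cassert>
#include <cstring>

class BufReader {
    const char* p;
    const char* end;
public:
    BufReader(const char* buf, std::size_t len) : p(buf), end(buf + len) {}
    int pullInt() {
        assert(p + sizeof(int) <= end);   // massert(...) in the real code
        int v;
        std::memcpy(&v, p, sizeof(int));  // unaligned-safe read
        p += sizeof(int);
        return v;
    }
    const char* pullCString() {           // like getns()/afterNS()
        const char* s = p;
        while (p < end && *p) ++p;
        assert(p < end);                  // must find the NUL terminator
        ++p;
        return s;
    }
};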
diff --git a/db/dbwebserver.cpp b/db/dbwebserver.cpp
index 0f3489580d5..7aa6148f201 100644
--- a/db/dbwebserver.cpp
+++ b/db/dbwebserver.cpp
@@ -53,14 +53,14 @@ namespace mongo {
};
bool execCommand( Command * c ,
- Client& client , int queryOptions ,
- const char *ns, BSONObj& cmdObj ,
- BSONObjBuilder& result,
+ Client& client , int queryOptions ,
+ const char *ns, BSONObj& cmdObj ,
+ BSONObjBuilder& result,
bool fromRepl );
class DbWebServer : public MiniWebServer {
public:
- DbWebServer(const string& ip, int port, const AdminAccess* webUsers)
+ DbWebServer(const string& ip, int port, const AdminAccess* webUsers)
: MiniWebServer(ip, port), _webUsers(webUsers) {
WebStatusPlugin::initAll();
}
@@ -87,26 +87,26 @@ namespace mongo {
string auth = getHeader( rq , "Authorization" );
- if ( auth.size() > 0 && auth.find( "Digest " ) == 0 ){
+ if ( auth.size() > 0 && auth.find( "Digest " ) == 0 ) {
auth = auth.substr( 7 ) + ", ";
map<string,string> parms;
pcrecpp::StringPiece input( auth );
-
+
string name, val;
pcrecpp::RE re("(\\w+)=\"?(.*?)\"?, ");
- while ( re.Consume( &input, &name, &val) ){
+ while ( re.Consume( &input, &name, &val) ) {
parms[name] = val;
}
BSONObj user = _webUsers->getAdminUser( parms["username"] );
- if ( ! user.isEmpty() ){
+ if ( ! user.isEmpty() ) {
string ha1 = user["pwd"].str();
string ha2 = md5simpledigest( (string)"GET" + ":" + parms["uri"] );
-
+
stringstream r;
r << ha1 << ':' << parms["nonce"];
- if ( parms["nc"].size() && parms["cnonce"].size() && parms["qop"].size() ){
+ if ( parms["nc"].size() && parms["cnonce"].size() && parms["qop"].size() ) {
r << ':';
r << parms["nc"];
r << ':';
@@ -117,20 +117,20 @@ namespace mongo {
r << ':';
r << ha2;
string r1 = md5simpledigest( r.str() );
-
+
if ( r1 == parms["response"] )
return true;
}
}
-
+
stringstream authHeader;
- authHeader
- << "WWW-Authenticate: "
- << "Digest realm=\"mongo\", "
- << "nonce=\"abc\", "
- << "algorithm=MD5, qop=\"auth\" "
- ;
-
+ authHeader
+ << "WWW-Authenticate: "
+ << "Digest realm=\"mongo\", "
+ << "nonce=\"abc\", "
+ << "algorithm=MD5, qop=\"auth\" "
+ ;
+
headers.push_back( authHeader.str() );
return 0;
}
@@ -143,16 +143,15 @@ namespace mongo {
int& responseCode,
vector<string>& headers, // if completely empty, content-type: text/html will be added
const SockAddr &from
- )
- {
+ ) {
if ( url.size() > 1 ) {
-
+
if ( ! allowed( rq , headers, from ) ) {
responseCode = 401;
headers.push_back( "Content-Type: text/plain" );
responseMsg = "not allowed\n";
return;
- }
+ }
{
BSONObj params;
@@ -163,16 +162,17 @@ namespace mongo {
}
DbWebHandler * handler = DbWebHandler::findHandler( url );
- if ( handler ){
- if ( handler->requiresREST( url ) && ! cmdLine.rest ){
+ if ( handler ) {
+ if ( handler->requiresREST( url ) && ! cmdLine.rest ) {
_rejectREST( responseMsg , responseCode , headers );
- }else{
+ }
+ else {
string callback = params.getStringField("jsonp");
uassert(13453, "server not started with --jsonp", callback.empty() || cmdLine.jsonp);
handler->handle( rq , url , params , responseMsg , responseCode , headers , from );
- if (responseCode == 200 && !callback.empty()){
+ if (responseCode == 200 && !callback.empty()) {
responseMsg = callback + '(' + responseMsg + ')';
}
}
@@ -185,20 +185,20 @@ namespace mongo {
_rejectREST( responseMsg , responseCode , headers );
return;
}
-
+
responseCode = 404;
headers.push_back( "Content-Type: text/html" );
responseMsg = "<html><body>unknown url</body></html>\n";
return;
}
-
+
// generate home page
- if ( ! allowed( rq , headers, from ) ){
+ if ( ! allowed( rq , headers, from ) ) {
responseCode = 401;
responseMsg = "not allowed\n";
return;
- }
+ }
responseCode = 200;
stringstream ss;
@@ -216,14 +216,14 @@ namespace mongo {
{
const map<string, Command*> *m = Command::webCommands();
if( m ) {
- ss <<
- a("",
- "These read-only context-less commands can be executed from the web interface. "
- "Results are json format, unless ?text=1 is appended in which case the result is output as text "
- "for easier human viewing",
- "Commands")
- << ": ";
- for( map<string, Command*>::const_iterator i = m->begin(); i != m->end(); i++ ) {
+ ss <<
+ a("",
+ "These read-only context-less commands can be executed from the web interface. "
+ "Results are json format, unless ?text=1 is appended in which case the result is output as text "
+ "for easier human viewing",
+ "Commands")
+ << ": ";
+ for( map<string, Command*>::const_iterator i = m->begin(); i != m->end(); i++ ) {
stringstream h;
i->second->help(h);
string help = h.str();
@@ -236,67 +236,67 @@ namespace mongo {
}
}
ss << '\n';
- /*
- ss << "HTTP <a "
- "title=\"click for documentation on this http interface\""
- "href=\"http://www.mongodb.org/display/DOCS/Http+Interface\">admin port</a>:" << _port << "<p>\n";
- */
+ /*
+ ss << "HTTP <a "
+ "title=\"click for documentation on this http interface\""
+ "href=\"http://www.mongodb.org/display/DOCS/Http+Interface\">admin port</a>:" << _port << "<p>\n";
+ */
doUnlockedStuff(ss);
WebStatusPlugin::runAll( ss );
-
+
ss << "</body></html>\n";
responseMsg = ss.str();
}
- void _rejectREST( string& responseMsg , int& responseCode, vector<string>& headers ){
- responseCode = 403;
- stringstream ss;
- ss << "REST is not enabled. use --rest to turn on.\n";
- ss << "check that port " << _port << " is secured for the network too.\n";
- responseMsg = ss.str();
- headers.push_back( "Content-Type: text/plain" );
+ void _rejectREST( string& responseMsg , int& responseCode, vector<string>& headers ) {
+ responseCode = 403;
+ stringstream ss;
+ ss << "REST is not enabled. use --rest to turn on.\n";
+ ss << "check that port " << _port << " is secured for the network too.\n";
+ responseMsg = ss.str();
+ headers.push_back( "Content-Type: text/plain" );
}
};
// ---
-
- bool prisort( const Prioritizable * a , const Prioritizable * b ){
+
+ bool prisort( const Prioritizable * a , const Prioritizable * b ) {
return a->priority() < b->priority();
}
// -- status framework ---
- WebStatusPlugin::WebStatusPlugin( const string& secionName , double priority , const string& subheader )
+ WebStatusPlugin::WebStatusPlugin( const string& secionName , double priority , const string& subheader )
: Prioritizable(priority), _name( secionName ) , _subHeading( subheader ) {
if ( ! _plugins )
_plugins = new vector<WebStatusPlugin*>();
_plugins->push_back( this );
}
- void WebStatusPlugin::initAll(){
+ void WebStatusPlugin::initAll() {
if ( ! _plugins )
return;
-
+
sort( _plugins->begin(), _plugins->end() , prisort );
-
+
for ( unsigned i=0; i<_plugins->size(); i++ )
(*_plugins)[i]->init();
}
- void WebStatusPlugin::runAll( stringstream& ss ){
+ void WebStatusPlugin::runAll( stringstream& ss ) {
if ( ! _plugins )
return;
-
- for ( unsigned i=0; i<_plugins->size(); i++ ){
+
+ for ( unsigned i=0; i<_plugins->size(); i++ ) {
WebStatusPlugin * p = (*_plugins)[i];
- ss << "<hr>\n"
+ ss << "<hr>\n"
<< "<b>" << p->_name << "</b>";
-
+
ss << " " << p->_subHeading;
ss << "<br>\n";
-
+
p->run(ss);
}
@@ -308,29 +308,30 @@ namespace mongo {
class LogPlugin : public WebStatusPlugin {
public:
- LogPlugin() : WebStatusPlugin( "Log" , 100 ), _log(0){
+ LogPlugin() : WebStatusPlugin( "Log" , 100 ), _log(0) {
}
-
- virtual void init(){
+
+ virtual void init() {
assert( ! _log );
_log = new RamLog();
Logstream::get().addGlobalTee( _log );
}
- virtual void run( stringstream& ss ){
+ virtual void run( stringstream& ss ) {
_log->toHTML( ss );
}
RamLog * _log;
};
-
+
LogPlugin * logPlugin = new LogPlugin();
// -- handler framework ---
DbWebHandler::DbWebHandler( const string& name , double priority , bool requiresREST )
- : Prioritizable(priority), _name(name) , _requiresREST(requiresREST){
+ : Prioritizable(priority), _name(name) , _requiresREST(requiresREST) {
- { // setup strings
+ {
+ // setup strings
_defaultUrl = "/";
_defaultUrl += name;
@@ -338,8 +339,9 @@ namespace mongo {
ss << name << " priority: " << priority << " rest: " << requiresREST;
_toString = ss.str();
}
-
- { // add to handler list
+
+ {
+ // add to handler list
if ( ! _handlers )
_handlers = new vector<DbWebHandler*>();
_handlers->push_back( this );
@@ -347,11 +349,11 @@ namespace mongo {
}
}
- DbWebHandler * DbWebHandler::findHandler( const string& url ){
+ DbWebHandler * DbWebHandler::findHandler( const string& url ) {
if ( ! _handlers )
return 0;
-
- for ( unsigned i=0; i<_handlers->size(); i++ ){
+
+ for ( unsigned i=0; i<_handlers->size(); i++ ) {
DbWebHandler * h = (*_handlers)[i];
if ( h->handles( url ) )
return h;
@@ -359,71 +361,71 @@ namespace mongo {
return 0;
}
-
+
vector<DbWebHandler*> * DbWebHandler::_handlers = 0;
// --- basic handlers ---
class FavIconHandler : public DbWebHandler {
public:
- FavIconHandler() : DbWebHandler( "favicon.ico" , 0 , false ){}
+ FavIconHandler() : DbWebHandler( "favicon.ico" , 0 , false ) {}
virtual void handle( const char *rq, string url, BSONObj params,
string& responseMsg, int& responseCode,
- vector<string>& headers, const SockAddr &from ){
+ vector<string>& headers, const SockAddr &from ) {
responseCode = 404;
headers.push_back( "Content-Type: text/plain" );
responseMsg = "no favicon\n";
}
} faviconHandler;
-
+
class StatusHandler : public DbWebHandler {
public:
- StatusHandler() : DbWebHandler( "_status" , 1 , false ){}
-
+ StatusHandler() : DbWebHandler( "_status" , 1 , false ) {}
+
virtual void handle( const char *rq, string url, BSONObj params,
string& responseMsg, int& responseCode,
- vector<string>& headers, const SockAddr &from ){
+ vector<string>& headers, const SockAddr &from ) {
headers.push_back( "Content-Type: application/json" );
responseCode = 200;
-
+
static vector<string> commands;
- if ( commands.size() == 0 ){
+ if ( commands.size() == 0 ) {
commands.push_back( "serverStatus" );
commands.push_back( "buildinfo" );
}
-
+
BSONObjBuilder buf(1024);
-
- for ( unsigned i=0; i<commands.size(); i++ ){
+
+ for ( unsigned i=0; i<commands.size(); i++ ) {
string cmd = commands[i];
Command * c = Command::findCommand( cmd );
assert( c );
assert( c->locktype() == 0 );
-
+
BSONObj co;
{
BSONObjBuilder b;
b.append( cmd , 1 );
-
- if ( cmd == "serverStatus" && params["repl"].type() ){
+
+ if ( cmd == "serverStatus" && params["repl"].type() ) {
b.append( "repl" , atoi( params["repl"].valuestr() ) );
}
-
+
co = b.obj();
}
-
+
string errmsg;
-
+
BSONObjBuilder sub;
if ( ! c->run( "admin.$cmd" , co , errmsg , sub , false ) )
buf.append( cmd , errmsg );
else
buf.append( cmd , sub.obj() );
}
-
+
responseMsg = buf.obj().jsonString();
}
@@ -432,14 +434,14 @@ namespace mongo {
class CommandListHandler : public DbWebHandler {
public:
- CommandListHandler() : DbWebHandler( "_commands" , 1 , true ){}
-
+ CommandListHandler() : DbWebHandler( "_commands" , 1 , true ) {}
+
virtual void handle( const char *rq, string url, BSONObj params,
string& responseMsg, int& responseCode,
- vector<string>& headers, const SockAddr &from ){
+ vector<string>& headers, const SockAddr &from ) {
headers.push_back( "Content-Type: text/html" );
responseCode = 200;
-
+
stringstream ss;
ss << start("Commands List");
ss << p( a("/", "back", "Home") );
@@ -448,18 +450,18 @@ namespace mongo {
ss << "S:slave-ok R:read-lock W:write-lock A:admin-only<br>\n";
ss << table();
ss << "<tr><th>Command</th><th>Attributes</th><th>Help</th></tr>\n";
- for( map<string, Command*>::const_iterator i = m->begin(); i != m->end(); i++ )
+ for( map<string, Command*>::const_iterator i = m->begin(); i != m->end(); i++ )
i->second->htmlHelp(ss);
ss << _table() << _end();
-
+
responseMsg = ss.str();
}
} commandListHandler;
class CommandsHandler : public DbWebHandler {
public:
- CommandsHandler() : DbWebHandler( "DUMMY COMMANDS" , 2 , true ){}
-
+ CommandsHandler() : DbWebHandler( "DUMMY COMMANDS" , 2 , true ) {}
+
bool _cmd( const string& url , string& cmd , bool& text, bo params ) const {
cmd = str::after(url, '/');
text = params["text"].boolean();
@@ -470,25 +472,25 @@ namespace mongo {
const map<string,Command*> *m = Command::webCommands();
if( ! m )
return 0;
-
+
map<string,Command*>::const_iterator i = m->find(cmd);
if ( i == m->end() )
return 0;
-
+
return i->second;
}
- virtual bool handles( const string& url ) const {
+ virtual bool handles( const string& url ) const {
string cmd;
bool text;
if ( ! _cmd( url , cmd , text, bo() ) )
return false;
return _cmd(cmd) != 0;
}
-
+
virtual void handle( const char *rq, string url, BSONObj params,
string& responseMsg, int& responseCode,
- vector<string>& headers, const SockAddr &from ) {
+ vector<string>& headers, const SockAddr &from ) {
string cmd;
bool text = false;
assert( _cmd( url , cmd , text, params ) );
@@ -497,16 +499,16 @@ namespace mongo {
BSONObj cmdObj = BSON( cmd << 1 );
Client& client = cc();
-
+
BSONObjBuilder result;
execCommand(c, client, 0, "admin.", cmdObj , result, false);
-
+
responseCode = 200;
-
+
string j = result.done().jsonString(Strict, text );
responseMsg = j;
-
- if( text ){
+
+ if( text ) {
headers.push_back( "Content-Type: text/plain" );
responseMsg += '\n';
}
@@ -515,13 +517,13 @@ namespace mongo {
}
}
-
+
} commandsHandler;
// --- external ----
void webServerThread(const AdminAccess* adminAccess) {
- boost::scoped_ptr<const AdminAccess> adminAccessPtr(adminAccess); // adminAccess is owned here
+ boost::scoped_ptr<const AdminAccess> adminAccessPtr(adminAccess); // adminAccess is owned here
Client::initThread("websvr");
const int p = cmdLine.port + 1000;
DbWebServer mini(cmdLine.bind_ip, p, adminAccessPtr.get());
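
DbWebHandler and WebStatusPlugin in the hunks above share one pattern: a file-scope instance registers itself in its constructor by pushing this onto a lazily allocated static vector, which findHandler()/initAll() later walk. A compact sketch of that constructor self-registration:

// Hedged sketch of the self-registering handler pattern.
#include <string>
#include <vector>

class Handler {
public:
    Handler(const std::string& name, double priority)
        : _name(name), _priority(priority) {
        if (!_handlers) _handlers = new std::vector<Handler*>();
        _handlers->push_back(this);       // register at static-init time
    }
    virtual ~Handler() {}
    double priority() const { return _priority; }
    static Handler* find(const std::string& url) {
        if (!_handlers) return 0;
        for (size_t i = 0; i < _handlers->size(); i++)
            if ((*_handlers)[i]->_name == url)
                return (*_handlers)[i];
        return 0;
    }
private:
    std::string _name;
    double _priority;
    static std::vector<Handler*>* _handlers;
};
std::vector<Handler*>* Handler::_handlers = 0;

Handler faviconHandler("favicon.ico", 0);  // mirrors `} faviconHandler;`

Allocating the vector on first use, as the real code does, sidesteps the static initialization order problem between translation units.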
diff --git a/db/dbwebserver.h b/db/dbwebserver.h
index 4f99bbe0c39..bdbcba2c07d 100644
--- a/db/dbwebserver.h
+++ b/db/dbwebserver.h
@@ -23,16 +23,16 @@ namespace mongo {
class Prioritizable {
public:
- Prioritizable( double p ) : _priority(p){}
+ Prioritizable( double p ) : _priority(p) {}
double priority() const { return _priority; }
private:
double _priority;
};
-
+
class DbWebHandler : public Prioritizable {
public:
DbWebHandler( const string& name , double priority , bool requiresREST );
- virtual ~DbWebHandler(){}
+ virtual ~DbWebHandler() {}
virtual bool handles( const string& url ) const { return url == _defaultUrl; }
@@ -46,15 +46,15 @@ namespace mongo {
int& responseCode,
vector<string>& headers, // if completely empty, content-type: text/html will be added
const SockAddr &from
- ) = 0;
-
+ ) = 0;
+
string toString() const { return _toString; }
static DbWebHandler * findHandler( const string& url );
private:
string _name;
bool _requiresREST;
-
+
string _defaultUrl;
string _toString;
@@ -64,8 +64,8 @@ namespace mongo {
class WebStatusPlugin : public Prioritizable {
public:
WebStatusPlugin( const string& secionName , double priority , const string& subheader = "" );
- virtual ~WebStatusPlugin(){}
-
+ virtual ~WebStatusPlugin() {}
+
virtual void run( stringstream& ss ) = 0;
        /** called when web server starts up */
virtual void init() = 0;
@@ -76,10 +76,10 @@ namespace mongo {
string _name;
string _subHeading;
static vector<WebStatusPlugin*> * _plugins;
-
+
};
void webServerThread( const AdminAccess* admins );
string prettyHostName();
-
+
};
diff --git a/db/diskloc.h b/db/diskloc.h
index 4e887001c29..f356c73c64b 100644
--- a/db/diskloc.h
+++ b/db/diskloc.h
@@ -33,22 +33,22 @@ namespace mongo {
class MongoDataFile;
#pragma pack(1)
- /** represents a disk location/offset on disk in a database. 64 bits.
- it is assumed these will be passed around by value a lot so don't do anything to make them large
- (such as adding a virtual function)
- */
+ /** represents a disk location/offset on disk in a database. 64 bits.
+ it is assumed these will be passed around by value a lot so don't do anything to make them large
+ (such as adding a virtual function)
+ */
class DiskLoc {
int _a; // this will be volume, file #, etc. but is a logical value could be anything depending on storage engine
int ofs;
public:
- enum SentinelValues {
+ enum SentinelValues {
NullOfs = -1,
MaxFiles=16000 // thus a limit of about 32TB of data per db
- };
+ };
- DiskLoc(int a, int b) : _a(a), ofs(b) { }
+ DiskLoc(int a, int b) : _a(a), ofs(b) { }
DiskLoc() { Null(); }
DiskLoc(const DiskLoc& l) {
_a=l._a;
@@ -68,7 +68,7 @@ namespace mongo {
}
void assertOk() { assert(!isNull()); }
void setInvalid() {
- _a = -2;
+ _a = -2;
ofs = 0;
}
bool isValid() const { return _a != -2; }
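
DiskLoc above fits a location into 8 packed bytes and encodes its states in-band: ofs == -1 is the null sentinel and _a == -2 marks an invalid location, which is why the class must stay small and non-virtual. A sketch assuming the same sentinel scheme:

// Minimal analogue of the packed 64-bit disk location.
#include <cassert>

#pragma pack(1)
struct DiskLocSketch {
    int a;    // logical file number
    int ofs;  // byte offset within that file
    void setNull()       { ofs = -1; }
    bool isNull() const  { return ofs == -1; }
    void setInvalid()    { a = -2; ofs = 0; }
    bool isValid() const { return a != -2; }
};
#pragma pack()

int main() {
    assert(sizeof(DiskLocSketch) == 8);  // cheap to pass by value
    DiskLocSketch l = {0, 0};
    l.setNull();
    assert(l.isNull() && l.isValid());   // null but not invalid
}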
diff --git a/db/driverHelpers.cpp b/db/driverHelpers.cpp
index ce89ca475af..d98a33b25c5 100644
--- a/db/driverHelpers.cpp
+++ b/db/driverHelpers.cpp
@@ -36,18 +36,18 @@ namespace mongo {
class BasicDriverHelper : public Command {
public:
- BasicDriverHelper( const char * name ) : Command( name ){}
-
+ BasicDriverHelper( const char * name ) : Command( name ) {}
+
virtual LockType locktype() const { return NONE; }
virtual bool slaveOk() const { return true; }
- virtual bool slaveOverrideOk(){ return true; }
+ virtual bool slaveOverrideOk() { return true; }
};
class ObjectIdTest : public BasicDriverHelper {
public:
- ObjectIdTest() : BasicDriverHelper( "driverOIDTest" ){}
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl){
- if ( cmdObj.firstElement().type() != jstOID ){
+ ObjectIdTest() : BasicDriverHelper( "driverOIDTest" ) {}
+ virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if ( cmdObj.firstElement().type() != jstOID ) {
errmsg = "not oid";
return false;
}
diff --git a/db/dur.cpp b/db/dur.cpp
index a8b451ba5a4..ca8b0d78048 100644
--- a/db/dur.cpp
+++ b/db/dur.cpp
@@ -16,10 +16,10 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-/*
+/*
phases
- PREPLOGBUFFER
+ PREPLOGBUFFER
          we will build an output buffer ourselves and then use O_DIRECT
we could be in read lock for this
for very large objects write directly to redo log in situ?
@@ -30,10 +30,10 @@
WRITETODATAFILES
apply the writes back to the non-private MMF after they are for certain in redo log
REMAPPRIVATEVIEW
- we could in a write lock quickly flip readers back to the main view, then stay in read lock and do our real
- remapping. with many files (e.g., 1000), remapping could be time consuming (several ms), so we don't want
+ we could in a write lock quickly flip readers back to the main view, then stay in read lock and do our real
+ remapping. with many files (e.g., 1000), remapping could be time consuming (several ms), so we don't want
to be too frequent. tracking time for this step would be wise.
- there could be a slow down immediately after remapping as fresh copy-on-writes for commonly written pages will
+ there could be a slow down immediately after remapping as fresh copy-on-writes for commonly written pages will
be required. so doing these remaps more incrementally in the future might make sense - but have to be careful
not to introduce bugs.
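
The phase comment above is the clearest statement of the group-commit pipeline. As a skeleton, with illustrative names rather than the real internal entry points, the ordering is:

// Hedged outline of one group commit; bodies elided, names are assumptions.
void prepLogBuffer()    {} // gather declared write intents into one buffer
void writeToJournal()   {} // append + fsync the buffer to the redo log
void writeToDataFiles() {} // apply the now-durable writes to the shared view
void remapPrivateView() {} // occasionally re-map private views to stay small

void groupCommitSketch() {
    prepLogBuffer();
    writeToJournal();   // after this returns, the commit is crash-safe
    writeToDataFiles();
    remapPrivateView();
}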
@@ -54,7 +54,7 @@
using namespace mongoutils;
-namespace mongo {
+namespace mongo {
namespace dur {
@@ -64,7 +64,7 @@ namespace mongo {
/** declared later in this file
only used in this file -- use DurableInterface::commitNow() outside
*/
- static void groupCommit();
+ static void groupCommit();
CommitJob commitJob;
@@ -74,41 +74,41 @@ namespace mongo {
memset(this, 0, sizeof(*this));
}
- Stats::Stats() {
+ Stats::Stats() {
_a.reset();
_b.reset();
curr = &_a;
_intervalMicros = 3000000;
}
- Stats::S * Stats::other() {
+ Stats::S * Stats::other() {
return curr == &_a ? &_b : &_a;
}
- BSONObj Stats::S::_asObj() {
+ BSONObj Stats::S::_asObj() {
return BSON(
- "commits" << _commits <<
- "journaledMB" << _journaledBytes / 1000000.0 <<
- "writeToDataFilesMB" << _writeToDataFilesBytes / 1000000.0 <<
- "commitsInWriteLock" << _commitsInWriteLock <<
- "timeMs" <<
- BSON( "dt" << _dtMillis <<
- "prepLogBuffer" << (unsigned) (_prepLogBufferMicros/1000) <<
- "writeToJournal" << (unsigned) (_writeToJournalMicros/1000) <<
- "writeToDataFiles" << (unsigned) (_writeToDataFilesMicros/1000) <<
- "remapPrivateView" << (unsigned) (_remapPrivateViewMicros/1000)
- )
- );
- }
-
- BSONObj Stats::asObj() {
+ "commits" << _commits <<
+ "journaledMB" << _journaledBytes / 1000000.0 <<
+ "writeToDataFilesMB" << _writeToDataFilesBytes / 1000000.0 <<
+ "commitsInWriteLock" << _commitsInWriteLock <<
+ "timeMs" <<
+ BSON( "dt" << _dtMillis <<
+ "prepLogBuffer" << (unsigned) (_prepLogBufferMicros/1000) <<
+ "writeToJournal" << (unsigned) (_writeToJournalMicros/1000) <<
+ "writeToDataFiles" << (unsigned) (_writeToDataFilesMicros/1000) <<
+ "remapPrivateView" << (unsigned) (_remapPrivateViewMicros/1000)
+ )
+ );
+ }
+
+ BSONObj Stats::asObj() {
return other()->_asObj();
}
void Stats::rotate() {
unsigned long long now = curTimeMicros64();
unsigned long long dt = now - _lastRotate;
- if( dt >= _intervalMicros && _intervalMicros ) {
+ if( dt >= _intervalMicros && _intervalMicros ) {
// rotate
curr->_dtMillis = (unsigned) (dt/1000);
_lastRotate = now;
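
Stats above double-buffers its counters: writers bump fields in *curr, rotate() flips curr between _a and _b once per interval, and asObj() reads the completed bucket via other(), so readers never see a half-filled interval. A minimal sketch of the flip:

// Hedged sketch of the two-bucket stats rotation.
#include <cstring>

struct StatsSketch {
    struct S { long long commits; } a, b;
    S* curr;
    long long lastRotate, intervalMicros;
    StatsSketch() : curr(&a), lastRotate(0), intervalMicros(3000000) {
        std::memset(&a, 0, sizeof(a));
        std::memset(&b, 0, sizeof(b));
    }
    S* other() { return curr == &a ? &b : &a; } // the completed bucket
    void rotate(long long nowMicros) {
        if (nowMicros - lastRotate >= intervalMicros) {
            lastRotate = nowMicros;
            curr = other();                      // flip buckets
            std::memset(curr, 0, sizeof(S));     // reset the new current one
        }
    }
};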
@@ -116,17 +116,17 @@ namespace mongo {
}
}
- void NonDurableImpl::setNoJournal(void *dst, void *src, unsigned len) {
+ void NonDurableImpl::setNoJournal(void *dst, void *src, unsigned len) {
memcpy(dst, src, len);
}
- void DurableImpl::setNoJournal(void *dst, void *src, unsigned len) {
+ void DurableImpl::setNoJournal(void *dst, void *src, unsigned len) {
// for now, journalled
memcpy( writingPtr(dst, len), src, len );
/* todo before doing this:
- finish implementation of _switchToReachableView
- - performance test it. privateViews.find() uses a mutex, so that could make
+ - performance test it. privateViews.find() uses a mutex, so that could make
it slow.
*/
/*
@@ -166,15 +166,15 @@ namespace mongo {
TempDisableDurability::TempDisableDurability() : _wasDur(cmdLine.dur) {
dbMutex.assertWriteLocked();
- if (_wasDur){
+ if (_wasDur) {
DurableInterface::disableDurability();
cmdLine.dur = false;
}
}
- TempDisableDurability::~TempDisableDurability(){
+ TempDisableDurability::~TempDisableDurability() {
dbMutex.assertWriteLocked();
- if (_wasDur){
+ if (_wasDur) {
cmdLine.dur = true;
DurableInterface::enableDurability();
}
@@ -185,23 +185,23 @@ namespace mongo {
return true;
}
- bool DurableImpl::awaitCommit() {
+ bool DurableImpl::awaitCommit() {
commitJob.awaitNextCommit();
return true;
}
- /** Declare that a file has been created
- Normally writes are applied only after journalling, for safety. But here the file
- is created first, and the journal will just replay the creation if the create didn't
+ /** Declare that a file has been created
+ Normally writes are applied only after journalling, for safety. But here the file
+ is created first, and the journal will just replay the creation if the create didn't
happen because of crashing.
*/
- void DurableImpl::createdFile(string filename, unsigned long long len) {
+ void DurableImpl::createdFile(string filename, unsigned long long len) {
shared_ptr<DurOp> op( new FileCreatedOp(filename, len) );
commitJob.noteOp(op);
}
/** indicate that a database is about to be dropped. call before the actual drop. */
- void DurableImpl::droppingDb(string db) {
+ void DurableImpl::droppingDb(string db) {
shared_ptr<DurOp> op( new DropDbOp(db) );
// DropDbOp must be in a commit group by itself to ensure proper
@@ -211,7 +211,7 @@ namespace mongo {
groupCommit();
}
- void* DurableImpl::writingPtr(void *x, unsigned len) {
+ void* DurableImpl::writingPtr(void *x, unsigned len) {
void *p = x;
if( testIntent )
p = MongoMMF::switchToPrivateView(x);
@@ -231,13 +231,13 @@ namespace mongo {
declareWriteIntent(p+ofs, len);
return p;
}
-
+
void* DurableImpl::writingRangesAtOffsets(void *buf, const vector< pair< long long, unsigned > > &ranges ) {
char *p = (char *) buf;
if( testIntent )
p = (char *) MongoMMF::switchToPrivateView(buf);
for( vector< pair< long long, unsigned > >::const_iterator i = ranges.begin();
- i != ranges.end(); ++i ) {
+ i != ranges.end(); ++i ) {
declareWriteIntent( p + i->first, i->second );
}
return p;
@@ -257,7 +257,7 @@ namespace mongo {
SLOW
*/
#if 0
- void DurableImpl::debugCheckLastDeclaredWrite() {
+ void DurableImpl::debugCheckLastDeclaredWrite() {
if( testIntent )
return;
@@ -270,21 +270,21 @@ namespace mongo {
const WriteIntent &i = commitJob.lastWrite();
size_t ofs;
MongoMMF *mmf = privateViews.find(i.start(), ofs);
- if( mmf == 0 )
+ if( mmf == 0 )
return;
size_t past = ofs + i.length();
- if( mmf->length() < past + 8 )
+ if( mmf->length() < past + 8 )
return; // too close to end of view
char *priv = (char *) mmf->getView();
char *writ = (char *) mmf->view_write();
unsigned long long *a = (unsigned long long *) (priv+past);
unsigned long long *b = (unsigned long long *) (writ+past);
- if( *a != *b ) {
- for( set<WriteIntent>::iterator it(commitJob.writes().begin()), end((commitJob.writes().begin())); it != end; ++it ) {
+ if( *a != *b ) {
+ for( set<WriteIntent>::iterator it(commitJob.writes().begin()), end((commitJob.writes().begin())); it != end; ++it ) {
const WriteIntent& wi = *it;
char *r1 = (char*) wi.start();
char *r2 = (char*) wi.end();
- if( r1 <= (((char*)a)+8) && r2 > (char*)a ) {
+ if( r1 <= (((char*)a)+8) && r2 > (char*)a ) {
//log() << "it's ok " << wi.p << ' ' << wi.len << endl;
return;
}
@@ -301,7 +301,7 @@ namespace mongo {
/** write the buffer we have built to the journal and fsync it.
outside of lock as that could be slow.
*/
- static void WRITETOJOURNAL(AlignedBuilder& ab) {
+ static void WRITETOJOURNAL(AlignedBuilder& ab) {
Timer t;
journal(ab);
stats.curr->_writeToJournalMicros += t.micros();
@@ -312,8 +312,8 @@ namespace mongo {
class validateSingleMapMatches {
public:
validateSingleMapMatches(unsigned long long& bytes) :_bytes(bytes) {}
- void operator () (MongoFile *mf){
- if( mf->isMongoMMF() ) {
+ void operator () (MongoFile *mf) {
+ if( mf->isMongoMMF() ) {
MongoMMF *mmf = (MongoMMF*) mf;
const char *p = (const char *) mmf->getView();
const char *w = (const char *) mmf->view_write();
@@ -335,8 +335,8 @@ namespace mongo {
int logged = 0;
unsigned lastMismatch = 0xffffffff;
for( unsigned i = 0; i < mmf->length(); i++ ) {
- if( p[i] != w[i] ) {
- if( lastMismatch != 0xffffffff && lastMismatch+1 != i )
+ if( p[i] != w[i] ) {
+ if( lastMismatch != 0xffffffff && lastMismatch+1 != i )
log() << endl; // separate blocks of mismatches
lastMismatch= i;
if( ++logged < 60 ) {
@@ -352,7 +352,7 @@ namespace mongo {
if( i > high ) high = i;
}
}
- if( low != 0xffffffff ) {
+ if( low != 0xffffffff ) {
std::stringstream ss;
ss << "dur error warning views mismatch " << mmf->filename() << ' ' << (hex) << low << ".." << high << " len:" << high-low+1;
log() << ss.str() << endl;
@@ -364,7 +364,7 @@ namespace mongo {
}
}
}
- private:
+ private:
unsigned long long& _bytes;
};
@@ -372,7 +372,7 @@ namespace mongo {
*/
void debugValidateAllMapsMatch() {
if( ! (cmdLine.durOptions & CmdLine::DurParanoid) )
- return;
+ return;
unsigned long long bytes = 0;
Timer t;
@@ -383,7 +383,7 @@ namespace mongo {
/** We need to remap the private views periodically. otherwise they would become very large.
Call within write lock.
*/
- void _REMAPPRIVATEVIEW() {
+ void _REMAPPRIVATEVIEW() {
static unsigned startAt;
static unsigned long long lastRemap;
@@ -391,14 +391,14 @@ namespace mongo {
dbMutex._remapPrivateViewRequested = false;
assert( !commitJob.hasWritten() );
- if( 0 ) {
+ if( 0 ) {
log() << "TEMP remapprivateview disabled for testing - will eventually run oom in this mode if db bigger than ram" << endl;
return;
}
- // we want to remap all private views about every 2 seconds. there could be ~1000 views so
- // we do a little each pass; beyond the remap time, more significantly, there will be copy on write
- // faults after remapping, so doing a little bit at a time will avoid big load spikes on
+ // we want to remap all private views about every 2 seconds. there could be ~1000 views so
+ // we do a little each pass; beyond the remap time, more significantly, there will be copy on write
+ // faults after remapping, so doing a little bit at a time will avoid big load spikes on
// remapping.
unsigned long long now = curTimeMicros64();
double fraction = (now-lastRemap)/20000000.0;
@@ -406,7 +406,7 @@ namespace mongo {
rwlock lk(MongoFile::mmmutex, false);
set<MongoFile*>& files = MongoFile::getAllFiles();
unsigned sz = files.size();
- if( sz == 0 )
+ if( sz == 0 )
return;
unsigned ntodo = (unsigned) (sz * fraction);
@@ -437,13 +437,13 @@ namespace mongo {
}
}
}
- void REMAPPRIVATEVIEW() {
+ void REMAPPRIVATEVIEW() {
Timer t;
_REMAPPRIVATEVIEW();
stats.curr->_remapPrivateViewMicros += t.micros();
}
- void drainSome() {
+ void drainSome() {
Writes& writes = commitJob.wi();
writes._deferred.invoke();
}
@@ -461,7 +461,7 @@ namespace mongo {
WRITETOJOURNAL(commitJob._ab);
- // data is now in the journal, which is sufficient for acknowledging getLastError.
+ // data is now in the journal, which is sufficient for acknowledging getLastError.
// (ok to crash after that)
commitJob.notifyCommitted();
@@ -470,28 +470,28 @@ namespace mongo {
commitJob.reset();
// REMAPPRIVATEVIEW
- //
- // remapping private views must occur after WRITETODATAFILES otherwise
+ //
+ // remapping private views must occur after WRITETODATAFILES otherwise
// we wouldn't see newly written data on reads.
- //
+ //
DEV assert( !commitJob.hasWritten() );
- if( !dbMutex.isWriteLocked() ) {
- // this needs done in a write lock thus we do it on the next acquisition of that
- // instead of here (there is no rush if you aren't writing anyway -- but it must happen,
+ if( !dbMutex.isWriteLocked() ) {
+ // this needs to be done in a write lock, thus we do it on the next acquisition of that
+ // instead of here (there is no rush if you aren't writing anyway -- but it must happen,
// if it is done, before any uncommitted writes occur).
//
dbMutex._remapPrivateViewRequested = true;
}
- else {
+ else {
stats.curr->_commitsInWriteLock++;
- // however, if we are already write locked, we must do it now -- up the call tree someone
+ // however, if we are already write locked, we must do it now -- up the call tree someone
// may do a write without a new lock acquisition. this can happen when MongoMMF::close() calls
// this method when a file (and its views) is about to go away.
//
REMAPPRIVATEVIEW();
}
}
- /** locking in read lock when called
+ /** locking in read lock when called
@see MongoMMF::close()
*/
static void groupCommit() {
@@ -517,35 +517,35 @@ namespace mongo {
}
}
- // starvation on read locks could occur. so if read lock acquisition is slow, try to get a
+ // starvation on read locks could occur. so if read lock acquisition is slow, try to get a
// write lock instead. otherwise writes could use too much RAM.
writelock lk;
groupCommit();
}
- /** called when a MongoMMF is closing -- we need to go ahead and group commit in that case before its
- views disappear
+ /** called when a MongoMMF is closing -- we need to go ahead and group commit in that case before its
+ views disappear
*/
void closingFileNotification() {
if( dbMutex.atLeastReadLocked() ) {
- groupCommit();
+ groupCommit();
- if (!inShutdown()){
+ if (!inShutdown()) {
RecoveryJob::get().close();
}
}
else {
assert( inShutdown() );
- if( commitJob.hasWritten() ) {
+ if( commitJob.hasWritten() ) {
log() << "dur warning files are closing outside locks with writes pending" << endl;
}
}
}
- static void durThread() {
+ static void durThread() {
Client::initThread("dur");
const int HowOftenToGroupCommitMs = 100;
- while( 1 ) {
+ while( 1 ) {
try {
int millis = HowOftenToGroupCommitMs;
{
@@ -566,7 +566,7 @@ namespace mongo {
go();
stats.rotate();
}
- catch(std::exception& e) {
+ catch(std::exception& e) {
log() << "exception in durThread causing immediate shutdown: " << e.what() << endl;
abort(); // based on myTerminate()
}
@@ -599,7 +599,7 @@ namespace mongo {
try {
recover();
}
- catch(...) {
+ catch(...) {
log() << "exception during recovery" << endl;
throw;
}
@@ -607,5 +607,5 @@ namespace mongo {
}
} // namespace dur
-
+
} // namespace mongo
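
The reformatted functions above make the commit pipeline easier to follow: PREPLOGBUFFER builds the buffer, WRITETOJOURNAL makes it durable, WRITETODATAFILES copies it into the shared view, and REMAPPRIVATEVIEW trims the private views. A minimal standalone sketch of why that ordering matters; the function bodies below are stand-ins, not the mongod implementation:

    #include <iostream>

    static void prepLogBuffer()    { std::cout << "build journal buffer from write intents\n"; }
    static void writeToJournal()   { std::cout << "append buffer to journal file and fsync\n"; }
    static void writeToDataFiles() { std::cout << "copy committed bytes to the shared view\n"; }
    static void remapPrivateView() { std::cout << "remap private views so they stay small\n"; }

    static void groupCommit() {
        prepLogBuffer();
        writeToJournal();   // after this returns, getLastError can be acknowledged;
                            // a crash beyond this point is recoverable from the journal
        writeToDataFiles();
        remapPrivateView(); // must follow writeToDataFiles, otherwise reads through
                            // the fresh mapping would miss the newly written data
    }

    int main() { groupCommit(); }
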
diff --git a/db/dur.h b/db/dur.h
index 88a732d0d72..caed4457401 100644
--- a/db/dur.h
+++ b/db/dur.h
@@ -8,10 +8,10 @@
namespace mongo {
class NamespaceDetails;
-
+
namespace dur {
- /** Call during startup so durability module can initialize
+ /** Call during startup so durability module can initialize
Throws if fatal error
Does nothing if cmdLine.dur is false
*/
@@ -25,13 +25,13 @@ namespace mongo {
const bool _wasDur;
};
- class DurableInterface : boost::noncopyable {
+ class DurableInterface : boost::noncopyable {
public:
virtual ~DurableInterface() { log() << "ERROR warning ~DurableInterface not intended to be called" << endl; }
- /** Declare that a file has been created
- Normally writes are applied only after journalling, for safety. But here the file
- is created first, and the journal will just replay the creation if the create didn't
+ /** Declare that a file has been created
+ Normally writes are applied only after journalling, for safety. But here the file
+ is created first, and the journal will just replay the creation if the create didn't
happen because of crashing.
*/
virtual void createdFile(string filename, unsigned long long len) = 0;
@@ -40,20 +40,20 @@ namespace mongo {
virtual void droppingDb(string db) = 0;
/** Declarations of write intent.
-
- Use these methods to declare "i'm about to write to x and it should be logged for redo."
-
+
+ Use these methods to declare "i'm about to write to x and it should be logged for redo."
+
Failure to call writing...() is checked in _DEBUG mode by using a read only mapped view
- (i.e., you'll segfault if the code is covered in that situation). The _DEBUG check doesn't
+ (i.e., you'll segfault if the code is covered in that situation). The _DEBUG check doesn't
verify that your length is correct though.
*/
- /** declare intent to write to x for up to len
+ /** declare intent to write to x for up to len
@return pointer where to write. this is modified when testIntent is true.
*/
virtual void* writingPtr(void *x, unsigned len) = 0;
- /** declare write intent; should already be in the write view to work correctly when testIntent is true.
+ /** declare write intent; should already be in the write view to work correctly when testIntent is true.
if you aren't, use writingPtr() instead.
*/
virtual void declareWriteIntent(void *x, unsigned len) = 0;
@@ -64,7 +64,7 @@ namespace mongo {
@return new buffer pointer. this is modified when testIntent is true.
*/
virtual void* writingAtOffset(void *buf, unsigned ofs, unsigned len) = 0;
-
+
/** declare intent to write
@param ranges vector of pairs representing ranges. Each pair
comprises an offset from buf where a range begins, then the
@@ -72,8 +72,8 @@ namespace mongo {
@return new buffer pointer. this is modified when testIntent is true.
*/
virtual void* writingRangesAtOffsets(void *buf, const vector< pair< long long, unsigned > > &ranges ) = 0;
-
- /** Wait for acknowledgement of the next group commit.
+
+ /** Wait for acknowledgement of the next group commit.
@return true if --dur is on. There will be delay.
@return false if --dur is off.
*/
@@ -81,12 +81,12 @@ namespace mongo {
/** Commit immediately.
- Generally, you do not want to do this often, as highly granular committing may affect
+ Generally, you do not want to do this often, as highly granular committing may affect
performance.
-
+
Does not return until the commit is complete.
- You must be at least read locked when you call this. Ideally, you are not write locked
+ You must be at least read locked when you call this. Ideally, you are not write locked
and then read operations can occur concurrently.
@return true if --dur is on.
@@ -114,18 +114,18 @@ namespace mongo {
*/
template <typename T>
inline
- T* alreadyDeclared(T *x) {
+ T* alreadyDeclared(T *x) {
#if defined(_TESTINTENT)
return (T*) MongoMMF::switchToPrivateView(x);
#else
- return x;
+ return x;
#endif
}
/** declare intent to write to x for sizeof(*x) */
- template <typename T>
- inline
- T* writing(T *x) {
+ template <typename T>
+ inline
+ T* writing(T *x) {
return (T*) writingPtr(x, sizeof(T));
}
@@ -136,15 +136,15 @@ namespace mongo {
virtual void setNoJournal(void *dst, void *src, unsigned len) = 0;
/* assert that we have not (at least so far) declared write intent for p */
- inline void assertReading(void *p) {
- dassert( !testIntent || MongoMMF::switchToPrivateView(p) != p );
+ inline void assertReading(void *p) {
+ dassert( !testIntent || MongoMMF::switchToPrivateView(p) != p );
}
static DurableInterface& getDur() { return *_impl; }
private:
/** Intentionally unimplemented method.
- It's very easy to manipulate Record::data open ended. Thus a call to writing(Record*) is suspect.
+ It's very easy to manipulate Record::data open ended. Thus a call to writing(Record*) is suspect.
This will override the templated version and yield an unresolved external.
*/
Record* writing(Record* r);
@@ -152,7 +152,7 @@ namespace mongo {
BtreeBucket* writing( BtreeBucket* );
/** Intentionally unimplemented method. NamespaceDetails may be based on references to 'Extra' objects. */
NamespaceDetails* writing( NamespaceDetails* );
-
+
static DurableInterface* _impl; // NonDurableImpl at startup()
static void enableDurability(); // makes _impl a DurableImpl
static void disableDurability(); // makes _impl a NonDurableImpl
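
The interface above enforces a declare-then-write protocol: callers announce intent via writingPtr()/writing() and only then mutate memory. A toy sketch of that call pattern, assuming a hypothetical Durability class in place of DurableInterface:

    #include <cassert>
    #include <cstddef>
    #include <utility>
    #include <vector>

    class Durability {
    public:
        // remember [p, p+len) so a later group commit can journal those bytes
        void* writingPtr(void* p, unsigned len) {
            _intents.push_back(std::make_pair(p, len));
            return p; // the real interface may hand back a different (private) view
        }
        template <typename T>
        T* writing(T* x) { return static_cast<T*>(writingPtr(x, sizeof(T))); }
        std::size_t intents() const { return _intents.size(); }
    private:
        std::vector< std::pair<void*, unsigned> > _intents;
    };

    int main() {
        Durability dur;
        int record = 0;
        *dur.writing(&record) = 42; // declare intent first, then mutate through it
        assert(record == 42 && dur.intents() == 1);
    }
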
diff --git a/db/dur_commitjob.cpp b/db/dur_commitjob.cpp
index 007e04c07c2..1f9f729c10b 100644
--- a/db/dur_commitjob.cpp
+++ b/db/dur_commitjob.cpp
@@ -23,11 +23,11 @@
namespace mongo {
namespace dur {
- void Writes::D::go(const Writes::D& d) {
+ void Writes::D::go(const Writes::D& d) {
commitJob.wi()._insertWriteIntent(d.p, d.len);
}
- void WriteIntent::absorb(const WriteIntent& other){
+ void WriteIntent::absorb(const WriteIntent& other) {
dassert(overlaps(other));
void* newStart = min(start(), other.start());
@@ -37,7 +37,7 @@ namespace mongo {
dassert(contains(other));
}
- void Writes::clear() {
+ void Writes::clear() {
dbMutex.assertAtLeastReadLocked();
_alreadyNoted.clear();
@@ -51,7 +51,7 @@ namespace mongo {
}
#if defined(DEBUG_WRITE_INTENT)
- void assertAlreadyDeclared(void *p, int len) {
+ void assertAlreadyDeclared(void *p, int len) {
if( commitJob.wi()._debug[p] >= len )
return;
log() << "assertAlreadyDeclared fails " << (void*)p << " len:" << len << ' ' << commitJob.wi()._debug[p] << endl;
@@ -60,10 +60,10 @@ namespace mongo {
}
#endif
- void Writes::_insertWriteIntent(void* p, int len){
+ void Writes::_insertWriteIntent(void* p, int len) {
WriteIntent wi(p, len);
- if (_writes.empty()){
+ if (_writes.empty()) {
_writes.insert(wi);
return;
}
@@ -74,8 +74,7 @@ namespace mongo {
// closest.end() >= wi.end()
if ((closest != _writes.end() && closest->overlaps(wi)) || // high end
- (closest != _writes.begin() && (--closest)->overlaps(wi))) // low end
- {
+ (closest != _writes.begin() && (--closest)->overlaps(wi))) { // low end
if (closest->contains(wi))
return; // nothing to do
@@ -87,7 +86,7 @@ namespace mongo {
if (!begin->overlaps(wi)) ++begin; // make inclusive
DEV { // ensure we're not deleting anything we shouldn't
- for (iterator it(begin); it != end; ++it){
+ for (iterator it(begin); it != end; ++it) {
assert(wi.contains(*it));
}
}
@@ -96,7 +95,7 @@ namespace mongo {
_writes.insert(wi);
DEV { // ensure there are no overlaps
- for (iterator it(_writes.begin()), end(boost::prior(_writes.end())); it != end; ++it){
+ for (iterator it(_writes.begin()), end(boost::prior(_writes.end())); it != end; ++it) {
assert(!it->overlaps(*boost::next(it)));
}
}
@@ -118,7 +117,7 @@ namespace mongo {
_wi._ops.push_back(p);
}
- void CommitJob::reset() {
+ void CommitJob::reset() {
_hasWritten = false;
_wi.clear();
_ab.reset();
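
_insertWriteIntent() above keeps the intent set minimal by absorbing any overlapping ranges into one. A simplified sketch of the same coalescing over integer offsets, using a linear scan; the real code is keyed the same way (by end pointer) but locates neighbours with lower_bound:

    #include <cassert>
    #include <set>

    struct Range {
        long start, end; // half-open [start, end)
        bool operator<(const Range& r) const { return end < r.end; }
        bool overlaps(const Range& r) const { return start < r.end && r.start < end; }
    };

    static void insertRange(std::set<Range>& s, Range w) {
        // erase every range that overlaps w, widening w to absorb each one
        for (std::set<Range>::iterator it = s.begin(); it != s.end(); ) {
            if (it->overlaps(w)) {
                if (it->start < w.start) w.start = it->start;
                if (it->end   > w.end)   w.end   = it->end;
                s.erase(it++);
            }
            else ++it;
        }
        s.insert(w);
    }

    int main() {
        std::set<Range> s;
        Range a = { 0, 10 }; insertRange(s, a);
        Range b = { 5, 20 }; insertRange(s, b); // overlaps a, coalesces to [0, 20)
        assert(s.size() == 1 && s.begin()->start == 0 && s.begin()->end == 20);
    }
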
diff --git a/db/dur_commitjob.h b/db/dur_commitjob.h
index 77523a08a90..fa49fcfd5eb 100644
--- a/db/dur_commitjob.h
+++ b/db/dur_commitjob.h
@@ -29,7 +29,7 @@
//#define DEBUG_WRITE_INTENT 1
-namespace mongo {
+namespace mongo {
namespace dur {
/** declaration of an intent to write to a region of a memory mapped view
@@ -37,7 +37,7 @@ namespace mongo {
* We store the end rather than the start pointer to make operator< faster
* since that is heavily used in set lookup.
*/
- struct WriteIntent /* copyable */ {
+ struct WriteIntent { /* copyable */
WriteIntent() : w_ptr(0), p(0) { }
WriteIntent(void *a, unsigned b) : w_ptr(0), p((char*)a+b), len(b) { }
@@ -65,7 +65,7 @@ namespace mongo {
}
mutable void *w_ptr; // writable mapping of p.
- // mutable because set::iterator is const but this isn't used in op<
+ // mutable because set::iterator is const but this isn't used in op<
#if defined(_EXPERIMENTAL)
mutable unsigned ofsInJournalBuffer;
#endif
@@ -74,7 +74,7 @@ namespace mongo {
unsigned len; // up to this len
};
- /** try to remember things we have already marked for journalling. false negatives are ok if infrequent -
+ /** try to remember things we have already marked for journalling. false negatives are ok if infrequent -
we will just log them twice.
*/
template<int Prime>
@@ -90,7 +90,7 @@ namespace mongo {
bool checkAndSet(void* p, int len) {
unsigned x = mongoutils::hashPointer(p);
pair<void*, int> nd = nodes[x % N];
- if( nd.first == p ) {
+ if( nd.first == p ) {
if( nd.second < len ) {
nd.second = len;
return false; // haven't indicated this len yet
@@ -110,7 +110,7 @@ namespace mongo {
/** our record of pending/uncommitted write intents */
class Writes : boost::noncopyable {
struct D {
- void *p;
+ void *p;
unsigned len;
static void go(const D& d);
};
@@ -125,9 +125,9 @@ namespace mongo {
void clear();
/** merges into set (ie non-deferred version) */
- void _insertWriteIntent(void* p, int len);
+ void _insertWriteIntent(void* p, int len);
- void insertWriteIntent(void* p, int len) {
+ void insertWriteIntent(void* p, int len) {
#if defined(DEBUG_WRITE_INTENT)
if( _debug[p] < len )
_debug[p] = len;
@@ -135,7 +135,7 @@ namespace mongo {
D d;
d.p = p;
d.len = len;
- _deferred.defer(d);
+ _deferred.defer(d);
}
#ifdef _DEBUG
@@ -158,7 +158,7 @@ namespace mongo {
for example note() invocations are from the write lock.
other uses are in a read lock from a single thread (durThread)
*/
- class CommitJob : boost::noncopyable {
+ class CommitJob : boost::noncopyable {
public:
AlignedBuilder _ab; // for direct i/o writes to journal
@@ -170,19 +170,19 @@ namespace mongo {
/** note an operation other than a "basic write" */
void noteOp(shared_ptr<DurOp> p);
- set<WriteIntent>& writes() {
+ set<WriteIntent>& writes() {
if( !_wi._drained ) {
// generally, you don't want to use the set until it is prepared (after deferred ops are applied)
// thus this assert here.
- assert(false);
+ assert(false);
}
- return _wi._writes;
+ return _wi._writes;
}
vector< shared_ptr<DurOp> >& ops() { return _wi._ops; }
- /** this method is safe to call outside of locks. when haswritten is false we don't do any group commit and avoid even
- trying to acquire a lock, which might be helpful at times.
+ /** this method is safe to call outside of locks. when haswritten is false we don't do any group commit and avoid even
+ trying to acquire a lock, which might be helpful at times.
*/
bool hasWritten() const { return _hasWritten; }
@@ -192,10 +192,10 @@ namespace mongo {
/** the commit code calls this when data reaches the journal (on disk) */
void notifyCommitted() { _notify.notifyAll(); }
- /** Wait until the next group commit occurs. That is, wait until someone calls notifyCommitted. */
- void awaitNextCommit() {
+ /** Wait until the next group commit occurs. That is, wait until someone calls notifyCommitted. */
+ void awaitNextCommit() {
if( hasWritten() )
- _notify.wait();
+ _notify.wait();
}
/** we check how much written and if it is getting to be a lot, we commit sooner. */
@@ -217,8 +217,8 @@ namespace mongo {
// inlines
inline void CommitJob::note(void* p, int len) {
- // from the point of view of the dur module, it would be fine (i think) to only
- // be read locked here. but must be at least read locked to avoid race with
+ // from the point of view of the dur module, it would be fine (i think) to only
+ // be read locked here. but must be at least read locked to avoid race with
// remapprivateview
DEV dbMutex.assertWriteLocked();
dassert( cmdLine.dur );
@@ -232,25 +232,25 @@ namespace mongo {
}
/** tips for debugging:
- if you have an incorrect diff between data files in different folders
+ if you have an incorrect diff between data files in different folders
(see jstests/dur/quick.js for example),
- turn this on and see what is logged. if you have a copy of its output from before the
+ turn this on and see what is logged. if you have a copy of its output from before the
regression, a simple diff of these lines would likely tell you a lot.
*/
#if 0 && defined(_DEBUG)
- {
+ {
static int n;
- if( ++n < 10000 ) {
+ if( ++n < 10000 ) {
size_t ofs;
MongoMMF *mmf = privateViews._find(w.p, ofs);
if( mmf ) {
log() << "DEBUG note write intent " << w.p << ' ' << mmf->filename() << " ofs:" << hex << ofs << " len:" << w.len << endl;
}
- else {
+ else {
log() << "DEBUG note write intent " << w.p << ' ' << w.len << " NOT FOUND IN privateViews" << endl;
}
}
- else if( n == 10000 ) {
+ else if( n == 10000 ) {
log() << "DEBUG stopping write intent logging, too much to log" << endl;
}
}
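
Already<Prime> above is a lossy fixed-size cache: forgetting an entry is harmless, since the worst case is journalling the same write twice. A sketch of that idea, with a stand-in pointer hash in place of mongoutils::hashPointer:

    #include <cassert>
    #include <cstddef>
    #include <utility>

    template <int N>
    class SeenRecently {
        std::pair<void*, int> nodes[N];
    public:
        SeenRecently() {
            for (int i = 0; i < N; i++) nodes[i] = std::make_pair((void*)0, 0);
        }
        // true if (p, len) was already recorded; records it and returns false otherwise
        bool checkAndSet(void* p, int len) {
            std::size_t x = reinterpret_cast<std::size_t>(p) / sizeof(void*);
            std::pair<void*, int>& nd = nodes[x % N];
            if (nd.first == p && nd.second >= len)
                return true;             // already noted at this length or longer
            nd = std::make_pair(p, len); // evict whatever hashed to this slot
            return false;
        }
    };

    int main() {
        SeenRecently<127> seen;
        int a = 0;
        assert(!seen.checkAndSet(&a, 4)); // first declaration: not seen yet
        assert( seen.checkAndSet(&a, 4)); // duplicate suppressed
    }
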
diff --git a/db/dur_journal.cpp b/db/dur_journal.cpp
index 0c2f4f2cfe3..c761436ee3f 100644
--- a/db/dur_journal.cpp
+++ b/db/dur_journal.cpp
@@ -45,29 +45,29 @@ namespace mongo {
BOOST_STATIC_ASSERT( sizeof(JEntry) == 12 );
BOOST_STATIC_ASSERT( sizeof(LSNFile) == 88 );
- filesystem::path getJournalDir() {
+ filesystem::path getJournalDir() {
filesystem::path p(dbpath);
p /= "journal";
return p;
}
- path lsnPath() {
+ path lsnPath() {
return getJournalDir()/"lsn";
}
/** this should be called when something really bad happens so that we can flag appropriately
*/
- void journalingFailure(const char *msg) {
+ void journalingFailure(const char *msg) {
/** todo:
(1) don't log too much
- (2) make an indicator in the journal dir that something bad happened.
+ (2) make an indicator in the journal dir that something bad happened.
(2b) refuse to do a recovery startup if that is there without manual override.
- */
+ */
log() << "journaling error " << msg << endl;
assert(false);
}
- JHeader::JHeader(string fname) {
+ JHeader::JHeader(string fname) {
magic[0] = 'j'; magic[1] = '\n';
_version = CurrentVersion;
memset(ts, 0, sizeof(ts));
@@ -85,24 +85,23 @@ namespace mongo {
const unsigned long long LsnShutdownSentinel = ~((unsigned long long)0);
- Journal::Journal() :
- _curLogFileMutex("JournalLfMutex")
- {
+ Journal::Journal() :
+ _curLogFileMutex("JournalLfMutex") {
_written = 0;
_nextFileNumber = 0;
- _curLogFile = 0;
+ _curLogFile = 0;
_preFlushTime = 0;
_lastFlushTime = 0;
_writeToLSNNeeded = false;
}
- path Journal::getFilePathFor(int filenumber) const {
+ path Journal::getFilePathFor(int filenumber) const {
filesystem::path p(dir);
p /= string(str::stream() << "j._" << filenumber);
return p;
}
- bool Journal::tryToCloseCurJournalFile() {
+ bool Journal::tryToCloseCurJournalFile() {
mutex::try_lock lk(_curLogFileMutex, 2000);
if( lk.ok ) {
closeCurrentJournalFile();
@@ -110,14 +109,14 @@ namespace mongo {
return lk.ok;
}
- /** never throws
+ /** never throws
@return true if journal dir is not empty
*/
- bool haveJournalFiles() {
+ bool haveJournalFiles() {
try {
for ( boost::filesystem::directory_iterator i( getJournalDir() );
- i != boost::filesystem::directory_iterator();
- ++i ) {
+ i != boost::filesystem::directory_iterator();
+ ++i ) {
string fileName = boost::filesystem::path(*i).leaf();
if( str::startsWith(fileName, "j._") )
return true;
@@ -126,14 +125,14 @@ namespace mongo {
catch(...) { }
return false;
}
-
+
/** throws */
- void removeJournalFiles() {
+ void removeJournalFiles() {
log() << "removeJournalFiles" << endl;
try {
for ( boost::filesystem::directory_iterator i( getJournalDir() );
- i != boost::filesystem::directory_iterator();
- ++i ) {
+ i != boost::filesystem::directory_iterator();
+ ++i ) {
string fileName = boost::filesystem::path(*i).leaf();
if( str::startsWith(fileName, "j._") ) {
try {
@@ -148,12 +147,12 @@ namespace mongo {
try {
boost::filesystem::remove(lsnPath());
}
- catch(...) {
+ catch(...) {
log() << "couldn't remove " << lsnPath().string() << endl;
throw;
}
}
- catch( std::exception& e ) {
+ catch( std::exception& e ) {
log() << "error removing journal files " << e.what() << endl;
throw;
}
@@ -162,17 +161,17 @@ namespace mongo {
/** at clean shutdown */
bool okToCleanUp = false; // failed recovery would set this to false
- void journalCleanupAtShutdown() {
- if( testIntent )
+ void journalCleanupAtShutdown() {
+ if( testIntent )
return;
- if( !okToCleanUp )
+ if( !okToCleanUp )
return;
if( !j.tryToCloseCurJournalFile() ) {
return;
}
- try {
- removeJournalFiles();
+ try {
+ removeJournalFiles();
}
catch(std::exception& e) {
log() << "error couldn't remove journal file during shutdown " << e.what() << endl;
@@ -191,12 +190,12 @@ namespace mongo {
try {
create_directory(j.dir);
}
- catch(std::exception& e) {
+ catch(std::exception& e) {
log() << "error creating directory " << j.dir << ' ' << e.what() << endl;
throw;
}
}
- }
+ }
void Journal::_open() {
assert( _curLogFile == 0 );
@@ -223,12 +222,12 @@ namespace mongo {
_open();
}
- void LSNFile::set(unsigned long long x) {
+ void LSNFile::set(unsigned long long x) {
lsn = x;
checkbytes = ~x;
}
- /** logs details of the situation, and returns 0, if anything surprising in the LSNFile
+ /** logs details of the situation, and returns 0, if anything surprising in the LSNFile
if something highly surprising, throws to abort
*/
unsigned long long LSNFile::get() {
@@ -243,19 +242,19 @@ namespace mongo {
/** called during recovery (the error message text below assumes that)
*/
unsigned long long journalReadLSN() {
- if( !debug ) {
+ if( !debug ) {
// in a nondebug build, for now, be conservative until more tests are written, and apply the whole journal.
// however we will still write the lsn file to exercise that code, and use in _DEBUG build.
return 0;
}
- if( !MemoryMappedFile::exists(lsnPath()) ) {
+ if( !MemoryMappedFile::exists(lsnPath()) ) {
log() << "info no lsn file in journal/ directory" << endl;
return 0;
}
try {
- // os can flush as it likes. if it flushes slowly, we will just do extra work on recovery.
+ // os can flush as it likes. if it flushes slowly, we will just do extra work on recovery.
// however, given we actually close the file when writing, that seems unlikely.
MemoryMappedFile f;
LSNFile *L = static_cast<LSNFile*>(f.map(lsnPath().string().c_str()));
@@ -263,13 +262,13 @@ namespace mongo {
unsigned long long lsn = L->get();
return lsn;
}
- catch(std::exception& e) {
+ catch(std::exception& e) {
uasserted(13611, str::stream() << "can't read lsn file in journal directory : " << e.what());
}
return 0;
}
- /** remember "last sequence number" to speed recoveries
+ /** remember "last sequence number" to speed recoveries
concurrency: called by durThread only.
*/
void Journal::updateLSNFile() {
@@ -277,7 +276,7 @@ namespace mongo {
return;
_writeToLSNNeeded = false;
try {
- // os can flush as it likes. if it flushes slowly, we will just do extra work on recovery.
+ // os can flush as it likes. if it flushes slowly, we will just do extra work on recovery.
// however, given we actually close the file, that seems unlikely.
MemoryMappedFile f; // not a MongoMMF so no closing notification
unsigned long long length = sizeof(LSNFile);
@@ -285,23 +284,23 @@ namespace mongo {
assert(lsnf);
lsnf->set(_lastFlushTime);
}
- catch(std::exception& e) {
+ catch(std::exception& e) {
log() << "write to lsn file fails " << e.what() << endl;
// don't care if this fails
}
}
- void Journal::preFlush() {
+ void Journal::preFlush() {
j._preFlushTime = Listener::getElapsedTimeMillis();
}
- void Journal::postFlush() {
+ void Journal::postFlush() {
j._lastFlushTime = j._preFlushTime;
j._writeToLSNNeeded = true;
}
// call from within _curLogFileMutex
- void Journal::closeCurrentJournalFile() {
+ void Journal::closeCurrentJournalFile() {
assert(_curLogFile);
JFile jf;
@@ -314,14 +313,14 @@ namespace mongo {
_written = 0;
}
- /** remove older journal files.
+ /** remove older journal files.
be in _curLogFileMutex but not dbMutex when calling
*/
- void Journal::removeUnneededJournalFiles() {
+ void Journal::removeUnneededJournalFiles() {
while( !_oldJournalFiles.empty() ) {
JFile f = _oldJournalFiles.front();
- if( f.lastEventTimeMs < _lastFlushTime + ExtraKeepTimeMs ) {
+ if( f.lastEventTimeMs < _lastFlushTime + ExtraKeepTimeMs ) {
// eligible for deletion
path p( f.filename );
log() << "old journal file will be removed: " << f.filename << endl;
@@ -329,19 +328,19 @@ namespace mongo {
remove(p);
}
else {
- break;
+ break;
}
_oldJournalFiles.pop_front();
}
}
- /** check if time to rotate files. assure a file is open.
+ /** check if time to rotate files. assure a file is open.
done separately from the journal() call as we can do this part
outside of lock.
thread: durThread()
*/
- void journalRotate() {
+ void journalRotate() {
j.rotate();
}
void Journal::rotate() {
@@ -349,14 +348,14 @@ namespace mongo {
j.updateLSNFile();
- if( _curLogFile && _written < DataLimit )
+ if( _curLogFile && _written < DataLimit )
return;
scoped_lock lk(_curLogFileMutex);
- if( _curLogFile && _written < DataLimit )
+ if( _curLogFile && _written < DataLimit )
return;
- if( _curLogFile ) {
+ if( _curLogFile ) {
closeCurrentJournalFile();
@@ -367,11 +366,11 @@ namespace mongo {
Timer t;
_open();
int ms = t.millis();
- if( ms >= 200 ) {
+ if( ms >= 200 ) {
log() << "DR101 latency warning on journal file open " << ms << "ms" << endl;
}
}
- catch(std::exception& e) {
+ catch(std::exception& e) {
log() << "warning exception opening journal file " << e.what() << endl;
throw;
}
@@ -392,7 +391,7 @@ namespace mongo {
_written += b.len();
_curLogFile->synchronousAppend((void *) b.buf(), b.len());
}
- catch(std::exception& e) {
+ catch(std::exception& e) {
log() << "warning exception in dur::journal " << e.what() << endl;
throw;
}
@@ -401,7 +400,7 @@ namespace mongo {
}
}
-/* todo
+/* todo
test (and handle) disk full on journal append. best quick thing to do is to terminate.
if we roll back operations, there are nuances such as is ReplSetImpl::lastOpTimeWritten too new in ram then?
*/
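
LSNFile::set() above stores both the value and its bitwise complement so that get() can reject a torn or garbage read. A compact sketch of that self-check with hypothetical names:

    #include <cassert>

    struct LsnRecord {
        unsigned long long lsn;
        unsigned long long checkbytes;
        void set(unsigned long long x) { lsn = x; checkbytes = ~x; }
        // returns 0 (meaning "replay everything") if the record looks corrupt
        unsigned long long get() const { return (~lsn == checkbytes) ? lsn : 0; }
    };

    int main() {
        LsnRecord r;
        r.set(123456);
        assert(r.get() == 123456);
        r.checkbytes = 0;     // simulate a torn write
        assert(r.get() == 0); // corruption detected, fall back to full replay
    }
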
diff --git a/db/dur_journal.h b/db/dur_journal.h
index 5f8ac1fe468..e7fa47f3700 100644
--- a/db/dur_journal.h
+++ b/db/dur_journal.h
@@ -23,7 +23,7 @@ namespace mongo {
namespace dur {
- /** true if ok to cleanup journal files at termination. otherwise, files journal will be retained.
+ /** true if ok to cleanup journal files at termination. otherwise, files journal will be retained.
*/
extern bool okToCleanUp;
@@ -33,7 +33,7 @@ namespace mongo {
/** assure journal/ dir exists. throws */
void journalMakeDir();
- /** check if time to rotate files; assure a file is open.
+ /** check if time to rotate files; assure a file is open.
done separately from the journal() call as we can do this part
outside of lock.
only called by durThread.
@@ -47,7 +47,7 @@ namespace mongo {
void journal(const AlignedBuilder& buf);
/** flag that something has gone wrong during writing to the journal
- (not for recovery mode)
+ (not for recovery mode)
*/
void journalingFailure(const char *msg);
@@ -56,13 +56,13 @@ namespace mongo {
unsigned long long getLastDataFileFlushTime();
- /** never throws.
- @return true if there are any journal files in the journal dir.
+ /** never throws.
+ @return true if there are any journal files in the journal dir.
*/
bool haveJournalFiles();
// in case disk controller buffers writes
- const long long ExtraKeepTimeMs = 10000;
+ const long long ExtraKeepTimeMs = 10000;
}
}
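
haveJournalFiles() declared above just probes the journal directory for files named j._<n>. A sketch of that probe written against plain C++17 std::filesystem rather than the boost::filesystem API this tree actually uses:

    #include <filesystem>
    #include <iostream>
    #include <string>

    static bool haveJournalFilesIn(const std::filesystem::path& dir) {
        if (!std::filesystem::exists(dir))
            return false;
        for (const auto& entry : std::filesystem::directory_iterator(dir)) {
            const std::string name = entry.path().filename().string();
            if (name.rfind("j._", 0) == 0) // names look like j._0, j._1, ...
                return true;
        }
        return false;
    }

    int main() {
        std::cout << haveJournalFilesIn("journal") << '\n';
    }
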
diff --git a/db/dur_journalformat.h b/db/dur_journalformat.h
index 7eb11b44bb1..7256aae886d 100644
--- a/db/dur_journalformat.h
+++ b/db/dur_journalformat.h
@@ -25,7 +25,7 @@ namespace mongo {
namespace dur {
#pragma pack(1)
- /** beginning header for a journal/j._<n> file
+ /** beginning header for a journal/j._<n> file
there is nothing important in this header at this time, except perhaps the version #.
*/
struct JHeader {
@@ -34,7 +34,7 @@ namespace mongo {
char magic[2]; // "j\n". j means journal, then a linefeed, fwiw if you were to run "less" on the file or something...
- // x4142 is asci--readable if you look at the file with head/less -- thus the starting values were near
+ // x4142 is ascii-readable if you look at the file with head/less -- thus the starting values were near
// that. simply incrementing the version # is safe on a fwd basis.
enum { CurrentVersion = 0x4146 };
unsigned short _version;
@@ -43,7 +43,7 @@ namespace mongo {
char n1; // '\n'
char ts[20]; // ascii timestamp of file generation. for user reading, not used by code.
char n2; // '\n'
- char dbpath[128]; // path/filename of this file for human reading and diagnostics. not used by code.
+ char dbpath[128]; // path/filename of this file for human reading and diagnostics. not used by code.
char n3, n4; // '\n', '\n'
char reserved3[8034]; // 8KB total for the file header
@@ -61,7 +61,7 @@ namespace mongo {
unsigned long long seqNumber; // sequence number that can be used on recovery to not do too much work
};
- /** an individual write operation within a group commit section. Either the entire section should
+ /** an individual write operation within a group commit section. Either the entire section should
be applied, or nothing. (We check the md5 for the whole section before doing anything on recovery.)
*/
struct JEntry {
@@ -80,14 +80,14 @@ namespace mongo {
unsigned ofs; // offset in file
// sentinel and masks for _fileNo
- enum {
+ enum {
DotNsSuffix = 0x7fffffff, // ".ns" file
LocalDbBit = 0x80000000 // assuming "local" db instead of using the JDbContext
};
int _fileNo; // high bit is set to indicate it should be the <dbpath>/local database
// char data[len] follows
- const char * srcData() const {
+ const char * srcData() const {
const int *i = &_fileNo;
return (const char *) (i+1);
}
@@ -109,7 +109,7 @@ namespace mongo {
};
/** group commit section footer. md5 is a key field. */
- struct JSectFooter {
+ struct JSectFooter {
JSectFooter(const void* begin, int len) { // needs buffer to compute hash
sentinel = JEntry::OpCode_Footer;
reserved = 0;
@@ -137,14 +137,14 @@ namespace mongo {
};
/** declares "the next entry(s) are for this database / file path prefix" */
- struct JDbContext {
+ struct JDbContext {
JDbContext() : sentinel(JEntry::OpCode_DbContext) { }
const unsigned sentinel; // compare to JEntry::len -- zero is our sentinel
//char dbname[];
};
/** "last sequence number" */
- struct LSNFile {
+ struct LSNFile {
unsigned ver;
unsigned reserved2;
unsigned long long lsn;
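
The structs above sit inside #pragma pack(1) and have their sizes pinned by BOOST_STATIC_ASSERT (JEntry is asserted to be 12 bytes), because they are written to disk byte-for-byte. A toy example of the same discipline; ToyEntry is illustrative, not the real JEntry:

    #include <cstring>

    #pragma pack(1)
    struct ToyEntry {
        unsigned len;    // length of the data that follows
        unsigned ofs;    // offset within the target data file
        int      fileNo; // which data file to apply this write to
    };
    #pragma pack()

    static_assert(sizeof(ToyEntry) == 12, "on-disk layout must not have padding");

    int main() {
        ToyEntry e = { 4, 0, 1 };
        char buf[sizeof(ToyEntry)];
        std::memcpy(buf, &e, sizeof e); // byte-for-byte serialization is safe
    }
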
diff --git a/db/dur_journalimpl.h b/db/dur_journalimpl.h
index f026ae16206..b56ce6bf335 100644
--- a/db/dur_journalimpl.h
+++ b/db/dur_journalimpl.h
@@ -38,7 +38,7 @@ namespace mongo {
boost::filesystem::path getFilePathFor(int filenumber) const;
/** used at shutdown.
- @return false if can't close in a timely manner.
+ @return false if can't close in a timely manner.
*/
bool tryToCloseCurJournalFile();
@@ -59,7 +59,7 @@ namespace mongo {
LogFile *_curLogFile; // use _curLogFileMutex
- struct JFile {
+ struct JFile {
string filename;
unsigned long long lastEventTimeMs;
};
diff --git a/db/dur_preplogbuffer.cpp b/db/dur_preplogbuffer.cpp
index fd994f57ebf..2ca8c2712b5 100644
--- a/db/dur_preplogbuffer.cpp
+++ b/db/dur_preplogbuffer.cpp
@@ -1,4 +1,4 @@
-// @file dur_preplogbuffer.cpp
+// @file dur_preplogbuffer.cpp
/**
* Copyright (C) 2009 10gen Inc.
@@ -16,8 +16,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-/*
- PREPLOGBUFFER
+/*
+ PREPLOGBUFFER
we will build an output buffer ourself and then use O_DIRECT
we could be in read lock for this
for very large objects write directly to redo log in situ?
@@ -37,7 +37,7 @@
using namespace mongoutils;
-namespace mongo {
+namespace mongo {
namespace dur {
RelativePath local = RelativePath::fromRelativePath("local");
@@ -57,13 +57,13 @@ namespace mongo {
dassert( i->w_ptr == 0 );
if( !mmf->willNeedRemap() ) {
- // tag this mmf as needed a remap of its private view later.
+ // tag this mmf as needing a remap of its private view later.
// usually it will already be dirty/already set, so we do the if above first
// to avoid possibility of cpu cache line contention
mmf->willNeedRemap() = true;
}
- // since we have already looked up the mmf, we go ahead and remember the write view location
+ // since we have already looked up the mmf, we go ahead and remember the write view location
// so we don't have to find the MongoMMF again later in WRITETODATAFILES()
dassert( i->w_ptr == 0 );
i->w_ptr = ((char*)mmf->view_write()) + ofs;
@@ -73,10 +73,10 @@ namespace mongo {
assert( ofs <= 0x80000000 );
e.ofs = (unsigned) ofs;
e.setFileNo( mmf->fileSuffixNo() );
- if( mmf->relativePath() == local ) {
+ if( mmf->relativePath() == local ) {
e.setLocalDbContextBit();
}
- else if( mmf->relativePath() != lastDbPath ) {
+ else if( mmf->relativePath() != lastDbPath ) {
lastDbPath = mmf->relativePath();
JDbContext c;
bb.appendStruct(c);
@@ -88,7 +88,7 @@ namespace mongo {
#endif
bb.appendBuf(i->start(), e.len);
- if (e.len != (unsigned)i->length()){
+ if (e.len != (unsigned)i->length()) {
// This only happens if we write to the last byte in a file and
// the first byte in another file that is mapped adjacently. I
// think most OSs leave at least a one page gap between
@@ -99,12 +99,12 @@ namespace mongo {
}
}
- /** basic write ops / write intents. note there is no particular order to these : if we have
+ /** basic write ops / write intents. note there is no particular order to these : if we have
two writes to the same location during the group commit interval, it is likely
(although not assured) that it is journaled here once.
- */
+ */
void prepBasicWrites(AlignedBuilder& bb) {
- // each time events switch to a different database we journal a JDbContext
+ // each time events switch to a different database we journal a JDbContext
RelativePath lastDbPath;
for( set<WriteIntent>::iterator i = commitJob.writes().begin(); i != commitJob.writes().end(); i++ ) {
@@ -127,7 +127,7 @@ namespace mongo {
we could be in read lock for this
caller handles locking
*/
- void _PREPLOGBUFFER() {
+ void _PREPLOGBUFFER() {
assert( cmdLine.dur );
{
@@ -143,7 +143,7 @@ namespace mongo {
// ops other than basic writes (DurOp's)
{
- for( vector< shared_ptr<DurOp> >::iterator i = commitJob.ops().begin(); i != commitJob.ops().end(); ++i ) {
+ for( vector< shared_ptr<DurOp> >::iterator i = commitJob.ops().begin(); i != commitJob.ops().end(); ++i ) {
(*i)->serialize(bb);
}
}
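
prepBasicWrites() above emits a JDbContext marker whenever the database changes, then an entry header plus the raw bytes for each intent. A rough standalone sketch of that loop; the sentinel constant, header layout, and plain std::vector<char> are stand-ins for JEntry::OpCode_DbContext and AlignedBuilder:

    #include <cstddef>
    #include <cstring>
    #include <string>
    #include <vector>

    struct Intent { std::string db; unsigned ofs; std::string data; };

    static void appendBytes(std::vector<char>& bb, const void* p, std::size_t n) {
        const char* c = static_cast<const char*>(p);
        bb.insert(bb.end(), c, c + n);
    }

    static void prepLogBuffer(const std::vector<Intent>& intents, std::vector<char>& bb) {
        const unsigned kDbContext = 0xfffff000; // stand-in opcode, not the real one
        std::string lastDb;
        for (std::size_t i = 0; i < intents.size(); i++) {
            const Intent& w = intents[i];
            if (w.db != lastDb) { // events switched databases: journal a context marker
                lastDb = w.db;
                appendBytes(bb, &kDbContext, sizeof kDbContext);
                appendBytes(bb, w.db.c_str(), w.db.size() + 1); // include the '\0'
            }
            unsigned len = static_cast<unsigned>(w.data.size());
            appendBytes(bb, &len, sizeof len);
            appendBytes(bb, &w.ofs, sizeof w.ofs);
            appendBytes(bb, w.data.data(), w.data.size());
        }
    }

    int main() {
        std::vector<Intent> v;
        Intent a = { "test", 0u, "abcd" };
        v.push_back(a);
        std::vector<char> bb;
        prepLogBuffer(v, bb);
        return bb.empty() ? 1 : 0;
    }
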
diff --git a/db/dur_recover.cpp b/db/dur_recover.cpp
index 00f37ef1b9c..e9a77e2ca51 100644
--- a/db/dur_recover.cpp
+++ b/db/dur_recover.cpp
@@ -38,16 +38,16 @@
using namespace mongoutils;
-namespace mongo {
+namespace mongo {
- namespace dur {
+ namespace dur {
- struct ParsedJournalEntry /*copyable*/ {
+ struct ParsedJournalEntry { /*copyable*/
ParsedJournalEntry() : e(0) { }
// relative path of database for the operation.
// might be a pointer into mmaped Journal file
- const char *dbName;
+ const char *dbName;
// these are pointers into the memory mapped journal file
const JEntry *e; // local db sentinel is already parsed out here into dbName
@@ -60,32 +60,32 @@ namespace mongo {
path getJournalDir();
/** get journal filenames, in order. throws if unexpected content found */
- static void getFiles(path dir, vector<path>& files) {
+ static void getFiles(path dir, vector<path>& files) {
map<unsigned,path> m;
for ( filesystem::directory_iterator i( dir );
- i != filesystem::directory_iterator();
- ++i ) {
+ i != filesystem::directory_iterator();
+ ++i ) {
filesystem::path filepath = *i;
string fileName = filesystem::path(*i).leaf();
if( str::startsWith(fileName, "j._") ) {
unsigned u = str::toUnsigned( str::after(fileName, '_') );
- if( m.count(u) ) {
+ if( m.count(u) ) {
uasserted(13531, str::stream() << "unexpected files in journal directory " << dir.string() << " : " << fileName);
}
- if( !m.empty() && !m.count(u-1) ) {
- uasserted(13532,
- str::stream() << "unexpected file in journal directory " << dir.string()
- << " : " << fileName << " : can't find its preceeding file");
+ if( !m.empty() && !m.count(u-1) ) {
+ uasserted(13532,
+ str::stream() << "unexpected file in journal directory " << dir.string()
+ << " : " << fileName << " : can't find its preceeding file");
}
m.insert( pair<unsigned,path>(u,filepath) );
}
}
- for( map<unsigned,path>::iterator i = m.begin(); i != m.end(); ++i )
+ for( map<unsigned,path>::iterator i = m.begin(); i != m.end(); ++i )
files.push_back(i->second);
}
/** read through the memory mapped data of a journal file (journal/j._<n> file)
- throws
+ throws
*/
class JournalSectionIterator : boost::noncopyable {
public:
@@ -102,53 +102,50 @@ namespace mongo {
/** get the next entry from the log. this function parses and combines JDbContext and JEntry's.
* @return true if got an entry. false at successful end of section (and no entry returned).
- * throws on premature end of section.
+ * throws on premature end of section.
*/
- bool next(ParsedJournalEntry& e) {
+ bool next(ParsedJournalEntry& e) {
unsigned lenOrOpCode;
_br.read(lenOrOpCode);
- if (lenOrOpCode > JEntry::OpCode_Min){
+ if (lenOrOpCode > JEntry::OpCode_Min) {
switch( lenOrOpCode ) {
- case JEntry::OpCode_Footer:
- {
- if (_doDurOps) {
- const char* pos = (const char*) _br.pos();
- pos -= sizeof(lenOrOpCode); // rewind to include OpCode
- const JSectFooter& footer = *(const JSectFooter*)pos;
- int len = pos - (char*)_sectHead;
- if (!footer.checkHash(_sectHead, len)){
- massert(13594, str::stream() << "Journal checksum doesn't match. recorded: "
- << toHex(footer.hash, sizeof(footer.hash))
- << " actual: " << md5simpledigest(_sectHead, len)
- , false);
- }
+ case JEntry::OpCode_Footer: {
+ if (_doDurOps) {
+ const char* pos = (const char*) _br.pos();
+ pos -= sizeof(lenOrOpCode); // rewind to include OpCode
+ const JSectFooter& footer = *(const JSectFooter*)pos;
+ int len = pos - (char*)_sectHead;
+ if (!footer.checkHash(_sectHead, len)) {
+ massert(13594, str::stream() << "Journal checksum doesn't match. recorded: "
+ << toHex(footer.hash, sizeof(footer.hash))
+ << " actual: " << md5simpledigest(_sectHead, len)
+ , false);
}
- return false; // false return value denotes end of section
}
+ return false; // false return value denotes end of section
+ }
case JEntry::OpCode_FileCreated:
- case JEntry::OpCode_DropDb:
- {
- e.dbName = 0;
- boost::shared_ptr<DurOp> op = DurOp::read(lenOrOpCode, _br);
- if (_doDurOps) {
- e.op = op;
- }
- return true;
+ case JEntry::OpCode_DropDb: {
+ e.dbName = 0;
+ boost::shared_ptr<DurOp> op = DurOp::read(lenOrOpCode, _br);
+ if (_doDurOps) {
+ e.op = op;
}
+ return true;
+ }
- case JEntry::OpCode_DbContext:
- {
- _lastDbName = (const char*) _br.pos();
- const unsigned limit = std::min((unsigned)Namespace::MaxNsLen, _br.remaining());
- const unsigned len = strnlen(_lastDbName, limit);
- massert(13533, "problem processing journal file during recovery", _lastDbName[len] == '\0');
- _br.skip(len+1); // skip '\0' too
- _br.read(lenOrOpCode);
- }
- // fall through as a basic operation always follows jdbcontext, and we don't have anything to return yet
+ case JEntry::OpCode_DbContext: {
+ _lastDbName = (const char*) _br.pos();
+ const unsigned limit = std::min((unsigned)Namespace::MaxNsLen, _br.remaining());
+ const unsigned len = strnlen(_lastDbName, limit);
+ massert(13533, "problem processing journal file during recovery", _lastDbName[len] == '\0');
+ _br.skip(len+1); // skip '\0' too
+ _br.read(lenOrOpCode);
+ }
+ // fall through as a basic operation always follows jdbcontext, and we don't have anything to return yet
default:
// fall through
@@ -172,7 +169,7 @@ namespace mongo {
const bool _doDurOps;
};
-
+
/** retrieve the file for the specified dbName plus file number.
open if not yet open.
*/
@@ -187,11 +184,11 @@ namespace mongo {
assert( fileNo >= 0 );
if( fileNo == JEntry::DotNsSuffix )
ss << "ns";
- else
+ else
ss << fileNo;
/* todo: do we need to create file here if DNE?
- we need to know what its length should be for that though.
- however does this happen? FileCreatedOp should have been in the journal and
+ we need to know what its length should be for that though.
+ however does this happen? FileCreatedOp should have been in the journal and
already applied if the file is new.
*/
fn = ss.str();
@@ -203,38 +200,38 @@ namespace mongo {
full /= fn;
file.open(full.string().c_str());
}
- catch(DBException&) {
+ catch(DBException&) {
log() << "recover error opening file " << full.string() << endl;
throw;
}
uassert(13534, str::stream() << "recovery error couldn't open " << fn, file.is_open());
- if( cmdLine.durOptions & CmdLine::DurDumpJournal )
+ if( cmdLine.durOptions & CmdLine::DurDumpJournal )
log() << " opened " << fn << ' ' << file.len()/1024.0/1024.0 << endl;
}
return file;
}
- RecoveryJob::~RecoveryJob() {
+ RecoveryJob::~RecoveryJob() {
if( !_files.empty() )
close();
}
- void RecoveryJob::close() {
+ void RecoveryJob::close() {
scoped_lock lk(_mx);
_close();
}
- void RecoveryJob::_close() {
- for(FileMap::iterator it(_files.begin()), end(_files.end()); it!=end; ++it){
+ void RecoveryJob::_close() {
+ for(FileMap::iterator it(_files.begin()), end(_files.end()); it!=end; ++it) {
it->second.fsync();
}
_files.clear(); // closes files
}
- void RecoveryJob::applyEntry(const ParsedJournalEntry& entry, bool apply, bool dump) {
+ void RecoveryJob::applyEntry(const ParsedJournalEntry& entry, bool apply, bool dump) {
if( entry.e ) {
if( dump ) {
stringstream ss;
@@ -243,20 +240,20 @@ namespace mongo {
ss << "ns";
else
ss << setw(2) << entry.e->getFileNo();
- ss << ' ' << setw(6) << entry.e->len << ' ' << /*hex << setw(8) << (size_t) fqe.srcData << dec <<*/
- " " << hexdump(entry.e->srcData(), entry.e->len);
+ ss << ' ' << setw(6) << entry.e->len << ' ' << /*hex << setw(8) << (size_t) fqe.srcData << dec <<*/
+ " " << hexdump(entry.e->srcData(), entry.e->len);
log() << ss.str() << endl;
- }
+ }
if( apply ) {
File& file = getFile(entry.dbName, entry.e->getFileNo());
file.write(entry.e->ofs, entry.e->srcData(), entry.e->len);
}
- }
+ }
else if(entry.op) {
// a DurOp subclass operation
if( dump ) {
log() << " OP " << entry.op->toString() << endl;
- }
+ }
if( apply ) {
if( entry.op->needFilesClosed() ) {
_close(); // locked in processSection
@@ -266,15 +263,15 @@ namespace mongo {
}
}
- void RecoveryJob::applyEntries(const vector<ParsedJournalEntry> &entries) {
+ void RecoveryJob::applyEntries(const vector<ParsedJournalEntry> &entries) {
bool apply = (cmdLine.durOptions & CmdLine::DurScanOnly) == 0;
bool dump = cmdLine.durOptions & CmdLine::DurDumpJournal;
if( dump )
log() << "BEGIN section" << endl;
-
- for( vector<ParsedJournalEntry>::const_iterator i = entries.begin(); i != entries.end(); ++i ) {
+
+ for( vector<ParsedJournalEntry>::const_iterator i = entries.begin(); i != entries.end(); ++i ) {
applyEntry(*i, apply, dump);
- }
+ }
if( dump )
log() << "END section" << endl;
@@ -286,7 +283,7 @@ namespace mongo {
vector<ParsedJournalEntry> entries;
JournalSectionIterator i(p, len, doDurOps);
- if( _lastDataSyncedFromLastRun > i.seqNumber() + ExtraKeepTimeMs ) {
+ if( _lastDataSyncedFromLastRun > i.seqNumber() + ExtraKeepTimeMs ) {
log() << "recover skipping application of section " << i.seqNumber() << " < lsn:" << _lastDataSyncedFromLastRun << endl;
// TODO: shouldn't we return here?
@@ -299,19 +296,20 @@ namespace mongo {
entries.push_back(e);
}
- // got all the entries for one group commit. apply them:
+ // got all the entries for one group commit. apply them:
applyEntries(entries);
}
/** apply a specific journal file, that is already mmap'd
@param p start of the memory mapped file
- @return true if this is detected to be the last file (ends abruptly)
+ @return true if this is detected to be the last file (ends abruptly)
*/
bool RecoveryJob::processFileBuffer(const void *p, unsigned len) {
try {
BufReader br(p,len);
- { // read file header
+ {
+ // read file header
JHeader h;
br.read(h);
if( !h.versionOk() ) {
@@ -331,7 +329,7 @@ namespace mongo {
killCurrentOp.checkForInterrupt(false);
}
}
- catch( BufReader::eof& ) {
+ catch( BufReader::eof& ) {
if( cmdLine.durOptions & CmdLine::DurDumpJournal )
log() << "ABRUPT END" << endl;
return true; // abrupt end
@@ -350,16 +348,16 @@ namespace mongo {
}
/** @param files all the j._0 style files we need to apply for recovery */
- void RecoveryJob::go(vector<path>& files) {
+ void RecoveryJob::go(vector<path>& files) {
log() << "recover begin" << endl;
// load the last sequence number synced to the datafiles on disk before the last crash
_lastDataSyncedFromLastRun = journalReadLSN();
log() << "recover lsn: " << _lastDataSyncedFromLastRun << endl;
- for( unsigned i = 0; i != files.size(); ++i ) {
+ for( unsigned i = 0; i != files.size(); ++i ) {
bool abruptEnd = processFile(files[i]);
- if( abruptEnd && i+1 < files.size() ) {
+ if( abruptEnd && i+1 < files.size() ) {
log() << "recover error: abrupt end to file " << files[i].string() << ", yet it isn't the last journal file" << endl;
close();
uasserted(13535, "recover abrupt journal file end");
@@ -382,7 +380,7 @@ namespace mongo {
assert( cmdLine.dur );
filesystem::path p = getJournalDir();
- if( !exists(p) ) {
+ if( !exists(p) ) {
log() << "directory " << p.string() << " does not exist, there will be no recovery startup step" << endl;
okToCleanUp = true;
return;
@@ -391,7 +389,7 @@ namespace mongo {
vector<path> journalFiles;
getFiles(p, journalFiles);
- if( journalFiles.empty() ) {
+ if( journalFiles.empty() ) {
log() << "recover : no journal files present, no recovery needed" << endl;
okToCleanUp = true;
return;
@@ -401,9 +399,9 @@ namespace mongo {
}
/** recover from a crash
- throws on error
+ throws on error
*/
- void recover() {
+ void recover() {
// we use a lock so that exitCleanly will wait for us
// to finish (or at least to notice what is up and stop)
readlock lk;
@@ -413,19 +411,19 @@ namespace mongo {
struct BufReaderY { int a,b; };
class BufReaderUnitTest : public UnitTest {
public:
- void run() {
+ void run() {
BufReader r((void*) "abcdabcdabcd", 12);
char x;
BufReaderY y;
r.read(x); //cout << x; // a
assert( x == 'a' );
r.read(y);
- r.read(x);
+ r.read(x);
assert( x == 'b' );
}
} brunittest;
-
+
RecoveryJob RecoveryJob::_instance;
} // namespace dur
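
RecoveryJob::go() above tolerates an abrupt end only in the final journal file; an abrupt end anywhere else means files are missing or corrupt. A condensed sketch of that driver logic, with processFile() stubbed out:

    #include <iostream>
    #include <stdexcept>
    #include <string>
    #include <vector>

    // stand-in: returns true when the file ends mid-section (crash while appending)
    static bool processFile(const std::string& name) { return name == "j._2"; }

    static void recoverFrom(const std::vector<std::string>& files) {
        for (std::size_t i = 0; i < files.size(); i++) {
            bool abruptEnd = processFile(files[i]);
            if (abruptEnd && i + 1 < files.size())
                throw std::runtime_error("abrupt end in a non-final journal file: "
                                         + files[i]);
        }
    }

    int main() {
        std::vector<std::string> files;
        files.push_back("j._0");
        files.push_back("j._1");
        files.push_back("j._2"); // abrupt end here is fine: it is the last file
        recoverFrom(files);
        std::cout << "recover done\n";
    }
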
diff --git a/db/dur_recover.h b/db/dur_recover.h
index 6bac34ebb7a..dd8447b6ee0 100644
--- a/db/dur_recover.h
+++ b/db/dur_recover.h
@@ -11,9 +11,9 @@ namespace mongo {
namespace dur {
struct ParsedJournalEntry;
- /** call go() to execute a recovery from existing journal files.
- */
- class RecoveryJob : boost::noncopyable {
+ /** call go() to execute a recovery from existing journal files.
+ */
+ class RecoveryJob : boost::noncopyable {
public:
RecoveryJob() :_lastDataSyncedFromLastRun(0), _mx("recovery") {}
void go(vector<path>& files);
diff --git a/db/dur_stats.h b/db/dur_stats.h
index 8a9b421b4c2..f5a78972357 100644
--- a/db/dur_stats.h
+++ b/db/dur_stats.h
@@ -1,45 +1,45 @@
-// @file dur_stats.h
-
-namespace mongo {
- namespace dur {
-
- /** journalling stats. the model here is that the commit thread is the only writer, and that reads are
- uncommon (from a serverStatus command and such). Thus, there should not be multicore chatter overhead.
- */
- struct Stats {
- Stats();
- void rotate();
- BSONObj asObj();
- unsigned _intervalMicros;
- struct S {
- BSONObj _asObj();
- void reset();
-
- unsigned _commits;
- unsigned long long _journaledBytes;
- unsigned long long _writeToDataFilesBytes;
-
- unsigned long long _prepLogBufferMicros;
- unsigned long long _writeToJournalMicros;
- unsigned long long _writeToDataFilesMicros;
- unsigned long long _remapPrivateViewMicros;
-
- // undesirable to be in write lock for the group commit (it can be done in a read lock), so good if we
- // have visibility when this happens. can happen for a couple reasons
- // - read lock starvation
- // - file being closed
- // - data being written faster than the normal group commit interval
- unsigned _commitsInWriteLock;
-
- unsigned _dtMillis;
- };
- S *curr;
- private:
- S _a,_b;
- unsigned long long _lastRotate;
- S* other();
- };
- extern Stats stats;
-
- }
-}
+// @file dur_stats.h
+
+namespace mongo {
+ namespace dur {
+
+ /** journalling stats. the model here is that the commit thread is the only writer, and that reads are
+ uncommon (from a serverStatus command and such). Thus, there should not be multicore chatter overhead.
+ */
+ struct Stats {
+ Stats();
+ void rotate();
+ BSONObj asObj();
+ unsigned _intervalMicros;
+ struct S {
+ BSONObj _asObj();
+ void reset();
+
+ unsigned _commits;
+ unsigned long long _journaledBytes;
+ unsigned long long _writeToDataFilesBytes;
+
+ unsigned long long _prepLogBufferMicros;
+ unsigned long long _writeToJournalMicros;
+ unsigned long long _writeToDataFilesMicros;
+ unsigned long long _remapPrivateViewMicros;
+
+ // undesirable to be in write lock for the group commit (it can be done in a read lock), so good if we
+ // have visibility when this happens. can happen for a couple reasons
+ // - read lock starvation
+ // - file being closed
+ // - data being written faster than the normal group commit interval
+ unsigned _commitsInWriteLock;
+
+ unsigned _dtMillis;
+ };
+ S *curr;
+ private:
+ S _a,_b;
+ unsigned long long _lastRotate;
+ S* other();
+ };
+ extern Stats stats;
+
+ }
+}
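
Stats above keeps two S buffers and flips curr on rotate(), so the single writer (the commit thread) fills one interval while readers snapshot the other, fully written one. A sketch of that double-buffer rotation under the same single-writer assumption:

    #include <cassert>

    struct Counters { unsigned commits; void reset() { commits = 0; } };

    class RotatingStats {
        Counters _a, _b;
    public:
        Counters* curr;
        RotatingStats() : curr(&_a) { _a.reset(); _b.reset(); }
        Counters* other() { return curr == &_a ? &_b : &_a; }
        void rotate() {    // called periodically by the single writer thread
            curr = other();
            curr->reset(); // start the new interval from zero
        }
    };

    int main() {
        RotatingStats s;
        s.curr->commits++;
        s.rotate();
        assert(s.other()->commits == 1); // last full interval is readable here
        assert(s.curr->commits == 0);
    }
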
diff --git a/db/dur_writetodatafiles.cpp b/db/dur_writetodatafiles.cpp
index 8cbc4d6c76d..de3085e63da 100644
--- a/db/dur_writetodatafiles.cpp
+++ b/db/dur_writetodatafiles.cpp
@@ -1,5 +1,5 @@
-// @file dur_writetodatafiles.cpp apply the writes back to the non-private MMF after they are for certain in redo log
-
+// @file dur_writetodatafiles.cpp apply the writes back to the non-private MMF after they are for certain in redo log
+
/**
* Copyright (C) 2009 10gen Inc.
*
@@ -22,24 +22,24 @@
#include "dur_recover.h"
#include "../util/timer.h"
-namespace mongo {
+namespace mongo {
namespace dur {
void debugValidateAllMapsMatch();
- /** apply the writes back to the non-private MMF after they are for certain in redo log
+ /** apply the writes back to the non-private MMF after they are for certain in redo log
(1) todo we don't need to write back everything every group commit. we MUST write back
- that which is going to be a remapped on its private view - but that might not be all
+ that which is going to be remapped on its private view - but that might not be all
views.
(2) todo should we do this using N threads? would be quite easy
see Hackenberg paper table 5 and 6. 2 threads might be a good balance.
- (3) with enough work, we could do this outside the read lock. it's a bit tricky though.
- - we couldn't do it from the private views then as they may be changing. would have to then
+ (3) with enough work, we could do this outside the read lock. it's a bit tricky though.
+ - we couldn't do it from the private views then as they may be changing. would have to then
be from the journal alignedbuffer.
- - we need to be careful the file isn't unmapped on us -- perhaps a mutex or something
+ - we need to be careful the file isn't unmapped on us -- perhaps a mutex or something
with MongoMMF on closes or something to coordinate that.
locking: in read lock when called
@@ -52,11 +52,11 @@ namespace mongo {
}
// the old implementation
- void WRITETODATAFILES_Impl2() {
+ void WRITETODATAFILES_Impl2() {
MongoFile::markAllWritable(); // for _DEBUG. normally we don't write in a read lock
/* we go backwards as what is at the end is most likely in the cpu cache. it won't be much, but we'll take it. */
- for( set<WriteIntent>::const_iterator it(commitJob.writes().begin()), end(commitJob.writes().end()); it != end; ++it ){
+ for( set<WriteIntent>::const_iterator it(commitJob.writes().begin()), end(commitJob.writes().end()); it != end; ++it ) {
const WriteIntent& intent = *it;
stats.curr->_writeToDataFilesBytes += intent.length();
dassert(intent.w_ptr);
@@ -68,17 +68,17 @@ namespace mongo {
}
#if defined(_EXPERIMENTAL)
- void WRITETODATAFILES_Impl3() {
+ void WRITETODATAFILES_Impl3() {
MongoFile::markAllWritable(); // for _DEBUG. normally we don't write in a read lock
/* we go backwards as what is at the end is most likely in the cpu cache. it won't be much, but we'll take it. */
- for( set<WriteIntent>::const_iterator it(commitJob.writes().begin()), end(commitJob.writes().end()); it != end; ++it ){
+ for( set<WriteIntent>::const_iterator it(commitJob.writes().begin()), end(commitJob.writes().end()); it != end; ++it ) {
const WriteIntent& intent = *it;
stats.curr->_writeToDataFilesBytes += intent.length();
dassert(intent.w_ptr);
- memcpy(intent.w_ptr,
- commitJob._ab.atOfs(intent.ofsInJournalBuffer),
- intent.length());
+ memcpy(intent.w_ptr,
+ commitJob._ab.atOfs(intent.ofsInJournalBuffer),
+ intent.length());
}
if (!dbMutex.isWriteLocked())
@@ -86,7 +86,7 @@ namespace mongo {
}
#endif
- void WRITETODATAFILES() {
+ void WRITETODATAFILES() {
dbMutex.assertAtLeastReadLocked();
Timer t;
#if defined(_EXPERIMENTAL)
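
WRITETODATAFILES above walks the committed WriteIntent set and memcpy's each range back into the shared mapping, which is only safe once the journal already holds the same bytes. A self-contained sketch of that copy-back loop, with a hypothetical Intent struct standing in for dur::WriteIntent:

    #include <cstring>
    #include <set>

    // hypothetical stand-in for dur::WriteIntent: a destination pointer in the
    // shared view, a source pointer in the private view, and a byte count
    struct Intent {
        char *dst;           // like w_ptr: target in the non-private mapping
        const char *src;     // source in the private (copy-on-write) view
        unsigned len;
        bool operator<(const Intent &o) const { return dst < o.dst; }
    };

    unsigned long long applyIntents(const std::set<Intent> &writes) {
        unsigned long long bytes = 0;
        // replay every committed write against the real files
        for (std::set<Intent>::const_iterator it = writes.begin(); it != writes.end(); ++it) {
            memcpy(it->dst, it->src, it->len);
            bytes += it->len;
        }
        return bytes;       // like stats.curr->_writeToDataFilesBytes
    }
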
diff --git a/db/durop.cpp b/db/durop.cpp
index 59728d6fc6c..3c64edfe3ca 100644
--- a/db/durop.cpp
+++ b/db/durop.cpp
@@ -26,20 +26,20 @@
using namespace mongoutils;
-namespace mongo {
+namespace mongo {
extern string dbpath; // --dbpath parm
void _deleteDataFiles(const char *);
namespace dur {
-
+
/** read a durop from journal file referenced by br.
@param opcode the opcode which has already been written from the bufreader
*/
- shared_ptr<DurOp> DurOp::read(unsigned opcode, BufReader& br) {
+ shared_ptr<DurOp> DurOp::read(unsigned opcode, BufReader& br) {
shared_ptr<DurOp> op;
- switch( opcode ) {
+ switch( opcode ) {
case JEntry::OpCode_FileCreated:
op = shared_ptr<DurOp>( new FileCreatedOp(br) );
break;
@@ -52,7 +52,7 @@ namespace mongo {
return op;
}
- void DurOp::serialize(AlignedBuilder& ab) {
+ void DurOp::serialize(AlignedBuilder& ab) {
ab.appendNum(_opcode);
_serialize(ab);
}
@@ -66,7 +66,7 @@ namespace mongo {
log.readStr(reservedStr);
}
- void DropDbOp::_serialize(AlignedBuilder& ab) {
+ void DropDbOp::_serialize(AlignedBuilder& ab) {
ab.appendNum((unsigned long long) 0); // reserved for future use
ab.appendNum((unsigned long long) 0); // reserved for future use
ab.appendStr(_db);
@@ -74,20 +74,18 @@ namespace mongo {
}
/** throws */
- void DropDbOp::replay() {
+ void DropDbOp::replay() {
log() << "recover replay drop db " << _db << endl;
_deleteDataFiles(_db.c_str());
}
- FileCreatedOp::FileCreatedOp(string f, unsigned long long l) :
- DurOp(JEntry::OpCode_FileCreated)
- {
+ FileCreatedOp::FileCreatedOp(string f, unsigned long long l) :
+ DurOp(JEntry::OpCode_FileCreated) {
_p = RelativePath::fromFullPath(f);
_len = l;
}
- FileCreatedOp::FileCreatedOp(BufReader& log) : DurOp(JEntry::OpCode_FileCreated)
- {
+ FileCreatedOp::FileCreatedOp(BufReader& log) : DurOp(JEntry::OpCode_FileCreated) {
unsigned long long reserved;
log.read(reserved);
log.read(reserved);
@@ -103,8 +101,8 @@ namespace mongo {
ab.appendNum(_len);
ab.appendStr(_p.toString());
}
-
- string FileCreatedOp::toString() {
+
+ string FileCreatedOp::toString() {
return str::stream() << "FileCreatedOp " << _p.toString() << ' ' << _len/1024.0/1024.0 << "MB";
}
@@ -113,16 +111,16 @@ namespace mongo {
return exists( _p.asFullPath() );
}
- void FileCreatedOp::replay() {
+ void FileCreatedOp::replay() {
            // I believe the code assumes new files are filled with zeros. thus we have to recreate the file,
            // or at least rewrite it, even if it were already the right length. perhaps one day we should
            // change that, although it is easier to avoid defects if we assume it is zeros.
string full = _p.asFullPath();
if( exists(full) ) {
- try {
+ try {
remove(full);
}
- catch(std::exception& e) {
+ catch(std::exception& e) {
log(1) << "recover info FileCreateOp::replay unlink " << e.what() << endl;
}
}
@@ -133,7 +131,7 @@ namespace mongo {
try {
remove(full);
}
- catch(...) {
+ catch(...) {
log() << "warning could not delete file " << full << endl;
}
}
@@ -145,7 +143,7 @@ namespace mongo {
scoped_array<char> v( new char[blksz] );
memset( v.get(), 0, blksz );
fileofs ofs = 0;
- while( left ) {
+ while( left ) {
unsigned long long w = left < blksz ? left : blksz;
f.write(ofs, v.get(), (unsigned) w);
left -= w;
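
The zero-fill loop at the end of this hunk recreates a journaled file in fixed-size blocks. A stand-alone sketch of the same pattern using plain ofstream rather than mongo's File class:

    #include <fstream>
    #include <vector>

    // write `len` zero bytes to `path` in fixed-size blocks, as the recovery
    // code above does when recreating a file (sketch, not mongo's File API)
    void zeroFill(const char *path, unsigned long long len, unsigned blksz = 64 * 1024) {
        std::vector<char> block(blksz, 0);
        std::ofstream f(path, std::ios_base::out | std::ios_base::binary);
        unsigned long long left = len;
        while (left) {
            unsigned long long w = left < blksz ? left : blksz;
            f.write(&block[0], (std::streamsize)w);
            left -= w;
        }
    }
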
diff --git a/db/durop.h b/db/durop.h
index 79a81cdeca1..c4574c2e3cb 100644
--- a/db/durop.h
+++ b/db/durop.h
@@ -30,15 +30,15 @@ namespace mongo {
const unsigned Alignment = 8192;
- /** DurOp - Operations we journal that aren't just basic writes.
+ /** DurOp - Operations we journal that aren't just basic writes.
*
* Basic writes are logged as JEntry's, and indicated in ram temporarily as struct dur::WriteIntent.
- * We don't make WriteIntent inherit from DurOp to keep it as lean as possible as there will be millions of
+ * We don't make WriteIntent inherit from DurOp to keep it as lean as possible as there will be millions of
* them (we don't want a vtable for example there).
- *
+ *
* For each op we want to journal, we define a subclass.
*/
- class DurOp /* copyable */ {
+ class DurOp { /* copyable */
public:
// @param opcode a sentinel value near max unsigned which uniquely identifies the operation.
// @see dur::JEntry
@@ -54,7 +54,7 @@ namespace mongo {
*/
static shared_ptr<DurOp> read(unsigned opcode, BufReader& br);
- /** replay the operation (during recovery)
+ /** replay the operation (during recovery)
throws
For now, these are not replayed during the normal WRITETODATAFILES phase, since these
@@ -76,7 +76,7 @@ namespace mongo {
};
/** indicates creation of a new file */
- class FileCreatedOp : public DurOp {
+ class FileCreatedOp : public DurOp {
public:
FileCreatedOp(BufReader& log);
/** param f filename to create with path */
@@ -92,11 +92,11 @@ namespace mongo {
};
/** record drop of a database */
- class DropDbOp : public DurOp {
+ class DropDbOp : public DurOp {
public:
DropDbOp(BufReader& log);
- DropDbOp(string db) :
- DurOp(JEntry::OpCode_DropDb), _db(db) { }
+ DropDbOp(string db) :
+ DurOp(JEntry::OpCode_DropDb), _db(db) { }
virtual void replay();
virtual string toString() { return string("DropDbOp ") + _db; }
virtual bool needFilesClosed() { return true; }
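
Adding a new journaled operation follows the DropDbOp/FileCreatedOp pattern above: a BufReader constructor that parses exactly what _serialize writes, plus a replay() for recovery. A hedged sketch; DropCollectionOp, OpCode_DropCollection, and the drop itself are hypothetical, shown only to illustrate the contract:

    class DropCollectionOp : public DurOp {
    public:
        // (a) parse the payload written by (b) below
        DropCollectionOp(BufReader& log) : DurOp(JEntry::OpCode_DropCollection) {
            log.readStr(_ns);                // payload: just the namespace
        }
        DropCollectionOp(string ns) : DurOp(JEntry::OpCode_DropCollection), _ns(ns) { }
        // (c) re-apply the effect during recovery
        virtual void replay() {
            log() << "recover replay drop collection " << _ns << endl;
            // ... perform the drop here ...
        }
        virtual string toString() { return string("DropCollectionOp ") + _ns; }
        virtual bool needFilesClosed() { return true; }
    protected:
        // (b) opcode header is written by DurOp::serialize; only the payload here
        virtual void _serialize(AlignedBuilder& ab) { ab.appendStr(_ns); }
    private:
        string _ns;
    };

DurOp::read() in durop.cpp would also need a case for the new opcode.
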
diff --git a/db/extsort.cpp b/db/extsort.cpp
index 14a599f3731..2e6d8d8c753 100644
--- a/db/extsort.cpp
+++ b/db/extsort.cpp
@@ -26,153 +26,153 @@
#include <fcntl.h>
namespace mongo {
-
+
BSONObj BSONObjExternalSorter::extSortOrder;
unsigned long long BSONObjExternalSorter::_compares = 0;
-
+
BSONObjExternalSorter::BSONObjExternalSorter( const BSONObj & order , long maxFileSize )
- : _order( order.getOwned() ) , _maxFilesize( maxFileSize ) ,
- _arraySize(1000000), _cur(0), _curSizeSoFar(0), _sorted(0){
-
+ : _order( order.getOwned() ) , _maxFilesize( maxFileSize ) ,
+ _arraySize(1000000), _cur(0), _curSizeSoFar(0), _sorted(0) {
+
stringstream rootpath;
rootpath << dbpath;
if ( dbpath[dbpath.size()-1] != '/' )
rootpath << "/";
rootpath << "_tmp/esort." << time(0) << "." << rand() << "/";
_root = rootpath.str();
-
+
log(1) << "external sort root: " << _root.string() << endl;
create_directories( _root );
_compares = 0;
}
-
- BSONObjExternalSorter::~BSONObjExternalSorter(){
- if ( _cur ){
+
+ BSONObjExternalSorter::~BSONObjExternalSorter() {
+ if ( _cur ) {
delete _cur;
_cur = 0;
}
-
+
unsigned long removed = remove_all( _root );
wassert( removed == 1 + _files.size() );
}
- void BSONObjExternalSorter::_sortInMem(){
+ void BSONObjExternalSorter::_sortInMem() {
        // extSortComp needs to use globals
        // qsort_r only seems available on bsd, which is what I really want to use
dblock l;
extSortOrder = _order;
_cur->sort( BSONObjExternalSorter::extSortComp );
}
-
- void BSONObjExternalSorter::sort(){
+
+ void BSONObjExternalSorter::sort() {
uassert( 10048 , "already sorted" , ! _sorted );
-
+
_sorted = true;
- if ( _cur && _files.size() == 0 ){
+ if ( _cur && _files.size() == 0 ) {
_sortInMem();
log(1) << "\t\t not using file. size:" << _curSizeSoFar << " _compares:" << _compares << endl;
return;
}
-
- if ( _cur ){
+
+ if ( _cur ) {
finishMap();
}
-
- if ( _cur ){
+
+ if ( _cur ) {
delete _cur;
_cur = 0;
}
-
+
if ( _files.size() == 0 )
return;
-
+
}
- void BSONObjExternalSorter::add( const BSONObj& o , const DiskLoc & loc ){
+ void BSONObjExternalSorter::add( const BSONObj& o , const DiskLoc & loc ) {
uassert( 10049 , "sorted already" , ! _sorted );
-
- if ( ! _cur ){
+
+ if ( ! _cur ) {
_cur = new InMemory( _arraySize );
}
-
+
Data& d = _cur->getNext();
d.first = o.getOwned();
d.second = loc;
-
+
long size = o.objsize();
_curSizeSoFar += size + sizeof( DiskLoc ) + sizeof( BSONObj );
-
- if ( _cur->hasSpace() == false || _curSizeSoFar > _maxFilesize ){
+
+ if ( _cur->hasSpace() == false || _curSizeSoFar > _maxFilesize ) {
finishMap();
log(1) << "finishing map" << endl;
}
}
-
- void BSONObjExternalSorter::finishMap(){
+
+ void BSONObjExternalSorter::finishMap() {
uassert( 10050 , "bad" , _cur );
-
+
_curSizeSoFar = 0;
if ( _cur->size() == 0 )
return;
-
+
_sortInMem();
-
+
stringstream ss;
ss << _root.string() << "/file." << _files.size();
string file = ss.str();
-
+
ofstream out;
out.open( file.c_str() , ios_base::out | ios_base::binary );
assertStreamGood( 10051 , (string)"couldn't open file: " + file , out );
-
+
int num = 0;
- for ( InMemory::iterator i=_cur->begin(); i != _cur->end(); ++i ){
+ for ( InMemory::iterator i=_cur->begin(); i != _cur->end(); ++i ) {
Data p = *i;
out.write( p.first.objdata() , p.first.objsize() );
out.write( (char*)(&p.second) , sizeof( DiskLoc ) );
num++;
}
-
+
_cur->clear();
-
+
_files.push_back( file );
out.close();
log(2) << "Added file: " << file << " with " << num << "objects for external sort" << endl;
}
-
+
// ---------------------------------
BSONObjExternalSorter::Iterator::Iterator( BSONObjExternalSorter * sorter ) :
- _cmp( sorter->_order ) , _in( 0 ){
-
- for ( list<string>::iterator i=sorter->_files.begin(); i!=sorter->_files.end(); i++ ){
+ _cmp( sorter->_order ) , _in( 0 ) {
+
+ for ( list<string>::iterator i=sorter->_files.begin(); i!=sorter->_files.end(); i++ ) {
_files.push_back( new FileIterator( *i ) );
_stash.push_back( pair<Data,bool>( Data( BSONObj() , DiskLoc() ) , false ) );
}
-
- if ( _files.size() == 0 && sorter->_cur ){
+
+ if ( _files.size() == 0 && sorter->_cur ) {
_in = sorter->_cur;
_it = sorter->_cur->begin();
}
-
+
}
-
- BSONObjExternalSorter::Iterator::~Iterator(){
+
+ BSONObjExternalSorter::Iterator::~Iterator() {
for ( vector<FileIterator*>::iterator i=_files.begin(); i!=_files.end(); i++ )
delete *i;
_files.clear();
}
-
- bool BSONObjExternalSorter::Iterator::more(){
+
+ bool BSONObjExternalSorter::Iterator::more() {
if ( _in )
return _it != _in->end();
-
+
for ( vector<FileIterator*>::iterator i=_files.begin(); i!=_files.end(); i++ )
if ( (*i)->more() )
return true;
@@ -181,34 +181,34 @@ namespace mongo {
return true;
return false;
}
-
- BSONObjExternalSorter::Data BSONObjExternalSorter::Iterator::next(){
-
- if ( _in ){
+
+ BSONObjExternalSorter::Data BSONObjExternalSorter::Iterator::next() {
+
+ if ( _in ) {
Data& d = *_it;
++_it;
return d;
}
-
+
Data best;
int slot = -1;
-
- for ( unsigned i=0; i<_stash.size(); i++ ){
- if ( ! _stash[i].second ){
+ for ( unsigned i=0; i<_stash.size(); i++ ) {
+
+ if ( ! _stash[i].second ) {
if ( _files[i]->more() )
_stash[i] = pair<Data,bool>( _files[i]->next() , true );
else
continue;
}
-
- if ( slot == -1 || _cmp( best , _stash[i].first ) == 0 ){
+
+ if ( slot == -1 || _cmp( best , _stash[i].first ) == 0 ) {
best = _stash[i].first;
slot = i;
}
-
+
}
-
+
assert( slot >= 0 );
_stash[slot].second = false;
@@ -216,8 +216,8 @@ namespace mongo {
}
// -----------------------------------
-
- BSONObjExternalSorter::FileIterator::FileIterator( string file ){
+
+ BSONObjExternalSorter::FileIterator::FileIterator( string file ) {
unsigned long long length;
_buf = (char*)_file.map( file.c_str() , length , MemoryMappedFile::SEQUENTIAL );
massert( 10308 , "mmap failed" , _buf );
@@ -225,17 +225,17 @@ namespace mongo {
_end = _buf + length;
}
BSONObjExternalSorter::FileIterator::~FileIterator() {}
-
- bool BSONObjExternalSorter::FileIterator::more(){
+
+ bool BSONObjExternalSorter::FileIterator::more() {
return _buf < _end;
}
-
- BSONObjExternalSorter::Data BSONObjExternalSorter::FileIterator::next(){
+
+ BSONObjExternalSorter::Data BSONObjExternalSorter::FileIterator::next() {
BSONObj o( _buf );
_buf += o.objsize();
DiskLoc * l = (DiskLoc*)_buf;
_buf += 8;
return Data( o , *l );
}
-
+
}
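
Iterator::next() above merges the sorted runs by stashing one buffered element per file, picking the best, and marking that slot for refill on the following call. The same k-way merge in a self-contained sketch over ints (standing in for the BSONObj/DiskLoc pairs):

    #include <cstddef>
    #include <utility>
    #include <vector>

    struct Run {                               // one pre-sorted run in memory
        const int *cur, *end;
        bool more() const { return cur != end; }
        int next() { return *cur++; }
    };

    bool mergeNext(std::vector<Run> &runs, std::vector<std::pair<int, bool> > &stash, int &out) {
        int slot = -1;
        for (size_t i = 0; i < runs.size(); i++) {
            if (!stash[i].second) {            // slot empty: refill from its run
                if (runs[i].more())
                    stash[i] = std::make_pair(runs[i].next(), true);
                else
                    continue;                  // this run is exhausted
            }
            if (slot == -1 || stash[i].first < stash[slot].first)
                slot = (int)i;                 // current best across the runs
        }
        if (slot < 0) return false;            // every run exhausted
        out = stash[slot].first;
        stash[slot].second = false;            // consume: refilled next call
        return true;
    }
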
diff --git a/db/extsort.h b/db/extsort.h
index 7595c81ad60..c0791dbb9ee 100644
--- a/db/extsort.h
+++ b/db/extsort.h
@@ -32,13 +32,13 @@ namespace mongo {
*/
class BSONObjExternalSorter : boost::noncopyable {
public:
-
+
typedef pair<BSONObj,DiskLoc> Data;
private:
static BSONObj extSortOrder;
- static int extSortComp( const void *lv, const void *rv ){
+ static int extSortComp( const void *lv, const void *rv ) {
RARELY killCurrentOp.checkForInterrupt();
_compares++;
Data * l = (Data*)lv;
@@ -54,7 +54,7 @@ namespace mongo {
FileIterator( string file );
~FileIterator();
bool more();
- Data next();
+ Data next();
private:
MemoryMappedFile _file;
char * _buf;
@@ -63,7 +63,7 @@ namespace mongo {
class MyCmp {
public:
- MyCmp( const BSONObj & order = BSONObj() ) : _order( order ){}
+ MyCmp( const BSONObj & order = BSONObj() ) : _order( order ) {}
bool operator()( const Data &l, const Data &r ) const {
RARELY killCurrentOp.checkForInterrupt();
_compares++;
@@ -78,50 +78,50 @@ namespace mongo {
};
public:
-
+
typedef FastArray<Data> InMemory;
class Iterator : boost::noncopyable {
public:
-
+
Iterator( BSONObjExternalSorter * sorter );
~Iterator();
bool more();
Data next();
-
+
private:
MyCmp _cmp;
vector<FileIterator*> _files;
vector< pair<Data,bool> > _stash;
-
+
InMemory * _in;
InMemory::iterator _it;
-
+
};
-
+
BSONObjExternalSorter( const BSONObj & order = BSONObj() , long maxFileSize = 1024 * 1024 * 100 );
~BSONObjExternalSorter();
-
+
void add( const BSONObj& o , const DiskLoc & loc );
- void add( const BSONObj& o , int a , int b ){
+ void add( const BSONObj& o , int a , int b ) {
add( o , DiskLoc( a , b ) );
}
/* call after adding values, and before fetching the iterator */
void sort();
-
- auto_ptr<Iterator> iterator(){
+
+ auto_ptr<Iterator> iterator() {
uassert( 10052 , "not sorted" , _sorted );
return auto_ptr<Iterator>( new Iterator( this ) );
}
-
- int numFiles(){
+
+ int numFiles() {
return _files.size();
}
-
- long getCurSizeSoFar(){ return _curSizeSoFar; }
- void hintNumObjects( long long numObjects ){
+ long getCurSizeSoFar() { return _curSizeSoFar; }
+
+ void hintNumObjects( long long numObjects ) {
if ( numObjects < _arraySize )
_arraySize = (int)(numObjects + 100);
}
@@ -129,18 +129,18 @@ namespace mongo {
private:
void _sortInMem();
-
+
void sort( string file );
void finishMap();
-
+
BSONObj _order;
long _maxFilesize;
path _root;
-
+
int _arraySize;
InMemory * _cur;
long _curSizeSoFar;
-
+
list<string> _files;
bool _sorted;
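
Usage of the interface declared above follows add, then sort, then iterator; a hedged sketch assuming the usual mongo headers and an initialized dbpath:

    // hedged usage sketch of the API above, not a test from the tree
    void externalSortExample() {
        BSONObjExternalSorter sorter( BSON( "a" << 1 ) );   // sort by "a" ascending
        sorter.add( BSON( "a" << 2 ) , DiskLoc( 0 , 0 ) );
        sorter.add( BSON( "a" << 1 ) , DiskLoc( 0 , 16 ) );
        sorter.sort();                                      // required before iterator()
        auto_ptr<BSONObjExternalSorter::Iterator> it = sorter.iterator();
        while ( it->more() ) {
            BSONObjExternalSorter::Data d = it->next();     // pair<BSONObj,DiskLoc>
            // d.first is the key object, d.second its disk location
        }
    }
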
diff --git a/db/filever.h b/db/filever.h
index 4aa18d49354..e89a8243dcf 100644
--- a/db/filever.h
+++ b/db/filever.h
@@ -20,11 +20,11 @@
namespace mongo {
-inline void checkDataFileVersion(NamespaceDetails& d) {
-}
+ inline void checkDataFileVersion(NamespaceDetails& d) {
+ }
-inline void checkIndexFileVersion(NamespaceDetails& d) {
-}
+ inline void checkIndexFileVersion(NamespaceDetails& d) {
+ }
}
diff --git a/db/geo/2d.cpp b/db/geo/2d.cpp
index 6e9aac7c6ba..3102b8a3469 100644
--- a/db/geo/2d.cpp
+++ b/db/geo/2d.cpp
@@ -34,7 +34,7 @@ namespace mongo {
#if 0
# define GEODEBUG(x) cout << x << endl;
# define GEODEBUGPRINT(x) PRINT(x)
- inline void PREFIXDEBUG(GeoHash prefix, const GeoConvert* g){
+ inline void PREFIXDEBUG(GeoHash prefix, const GeoConvert* g) {
if (!prefix.constrains()) {
cout << "\t empty prefix" << endl;
return ;
@@ -47,14 +47,14 @@ namespace mongo {
Point center ( (ll._x+tr._x)/2, (ll._y+tr._y)/2 );
double radius = fabs(ll._x - tr._x) / 2;
- cout << "\t ll: " << ll.toString() << " tr: " << tr.toString()
+ cout << "\t ll: " << ll.toString() << " tr: " << tr.toString()
<< " center: " << center.toString() << " radius: " << radius << endl;
}
#else
-# define GEODEBUG(x)
-# define GEODEBUGPRINT(x)
-# define PREFIXDEBUG(x, y)
+# define GEODEBUG(x)
+# define GEODEBUGPRINT(x)
+# define PREFIXDEBUG(x, y)
#endif
const double EARTH_RADIUS_KM = 6371;
@@ -65,9 +65,9 @@ namespace mongo {
GEO_SPHERE
};
- inline double computeXScanDistance(double y, double maxDistDegrees){
+ inline double computeXScanDistance(double y, double maxDistDegrees) {
        // TODO: this overestimates for large maxDistDegrees far from the equator
- return maxDistDegrees / min(cos(deg2rad(min(+89.0, y + maxDistDegrees))),
+ return maxDistDegrees / min(cos(deg2rad(min(+89.0, y + maxDistDegrees))),
cos(deg2rad(max(-89.0, y - maxDistDegrees))));
}
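
computeXScanDistance widens the longitude window by the inverse cosine of the worst-case latitude in the window (clamped to plus or minus 89 degrees). A quick numeric check, assuming deg2rad from these headers:

    // at the equator the window is essentially unchanged; at 60 degrees north a
    // degree of longitude covers about half the ground distance, so it doubles
    double w0  = computeXScanDistance(  0.0 , 1.0 );   // 1/cos(1 deg)  ~= 1.0002
    double w60 = computeXScanDistance( 60.0 , 1.0 );   // 1/cos(61 deg) ~= 2.06
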
@@ -78,14 +78,14 @@ namespace mongo {
class Geo2dType : public IndexType , public GeoConvert {
public:
Geo2dType( const IndexPlugin * plugin , const IndexSpec* spec )
- : IndexType( plugin , spec ){
-
+ : IndexType( plugin , spec ) {
+
BSONObjBuilder orderBuilder;
BSONObjIterator i( spec->keyPattern );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
- if ( e.type() == String && GEO2DNAME == e.valuestr() ){
+ if ( e.type() == String && GEO2DNAME == e.valuestr() ) {
                    uassert( 13022 , "can't have 2 geo fields" , _geo.size() == 0 );
uassert( 13023 , "2d has to be first in index" , _other.size() == 0 );
_geo = e.fieldName();
@@ -95,16 +95,16 @@ namespace mongo {
}
orderBuilder.append( "" , 1 );
}
-
+
uassert( 13024 , "no geo field specified" , _geo.size() );
-
+
_bits = _configval( spec , "bits" , 26 ); // for lat/long, ~ 1ft
uassert( 13028 , "can't have more than 32 bits in geo index" , _bits <= 32 );
_max = _configval( spec , "max" , 180 );
_min = _configval( spec , "min" , -180 );
-
+
_scaling = (1024*1024*1024*4.0)/(_max-_min);
_order = orderBuilder.obj();
@@ -115,30 +115,30 @@ namespace mongo {
_error = distance(a, b);
}
- int _configval( const IndexSpec* spec , const string& name , int def ){
+ int _configval( const IndexSpec* spec , const string& name , int def ) {
BSONElement e = spec->info[name];
if ( e.isNumber() )
return e.numberInt();
return def;
}
- ~Geo2dType(){
-
+ ~Geo2dType() {
+
}
- virtual BSONObj fixKey( const BSONObj& in ) {
+ virtual BSONObj fixKey( const BSONObj& in ) {
if ( in.firstElement().type() == BinData )
return in;
BSONObjBuilder b(in.objsize()+16);
-
+
if ( in.firstElement().isABSONObj() )
_hash( in.firstElement().embeddedObject() ).append( b , "" );
else if ( in.firstElement().type() == String )
GeoHash( in.firstElement().valuestr() ).append( b , "" );
else if ( in.firstElement().type() == RegEx )
GeoHash( in.firstElement().regex() ).append( b , "" );
- else
+ else
return in;
BSONObjIterator i(in);
@@ -164,7 +164,7 @@ namespace mongo {
_hash( embed ).append( b , "" );
- for ( size_t i=0; i<_other.size(); i++ ){
+ for ( size_t i=0; i<_other.size(); i++ ) {
BSONElement e = obj.getFieldDotted(_other[i]);
if ( e.eoo() )
e = _spec->missingField();
@@ -172,11 +172,11 @@ namespace mongo {
}
keys.insert( b.obj() );
}
-
+
GeoHash _tohash( const BSONElement& e ) const {
if ( e.isABSONObj() )
return _hash( e.embeddedObject() );
-
+
return GeoHash( e , _bits );
}
@@ -186,7 +186,7 @@ namespace mongo {
BSONElement x = i.next();
uassert( 13068 , "geo field only has 1 element" , i.more() );
BSONElement y = i.next();
-
+
uassert( 13026 , "geo values have to be numbers: " + o.toString() , x.isNumber() && y.isNumber() );
return hash( x.number() , y.number() );
@@ -204,33 +204,33 @@ namespace mongo {
b.append( "y" , _unconvert( y ) );
return b.obj();
}
-
+
unsigned _convert( double in ) const {
uassert( 13027 , "point not in range" , in <= (_max + _error) && in >= (_min - _error) );
in -= _min;
assert( in > 0 );
return (unsigned)(in * _scaling);
}
-
+
double _unconvert( unsigned in ) const {
double x = in;
x /= _scaling;
x += _min;
return x;
}
-
+
void unhash( const GeoHash& h , double& x , double& y ) const {
unsigned a,b;
h.unhash(a,b);
x = _unconvert( a );
y = _unconvert( b );
}
-
+
double distance( const GeoHash& a , const GeoHash& b ) const {
double ax,ay,bx,by;
unhash( a , ax , ay );
unhash( b , bx , by );
-
+
double dx = bx - ax;
double dy = by - ay;
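
The _convert/_unconvert pair above is a linear quantization of [min,max) onto the full 32-bit unsigned range; a round trip loses at most one quantum, which is the _error the constructor computes. A stand-alone sketch with the default bounds of plus or minus 180:

    const double MIN = -180, MAX = 180;
    const double SCALING = (1024 * 1024 * 1024 * 4.0) / (MAX - MIN);   // 2^32 / range

    unsigned convert(double in)  { return (unsigned)((in - MIN) * SCALING); }
    double unconvert(unsigned in) { return (double)in / SCALING + MIN; }

    // e.g. convert(0.0) == 0x80000000 (midpoint of the range);
    // unconvert(convert(45.0)) ~= 45.0 to within one quantum
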
@@ -265,10 +265,10 @@ namespace mongo {
virtual IndexSuitability suitability( const BSONObj& query , const BSONObj& order ) const {
BSONElement e = query.getFieldDotted(_geo.c_str());
- switch ( e.type() ){
+ switch ( e.type() ) {
case Object: {
BSONObj sub = e.embeddedObject();
- switch ( sub.firstElement().getGtLtOp() ){
+ switch ( sub.firstElement().getGtLtOp() ) {
case BSONObj::opNEAR:
case BSONObj::opWITHIN:
return OPTIMAL;
@@ -284,7 +284,7 @@ namespace mongo {
string _geo;
vector<string> _other;
-
+
unsigned _bits;
int _max;
int _min;
@@ -296,38 +296,38 @@ namespace mongo {
class Box {
public:
-
+
Box( const Geo2dType * g , const GeoHash& hash )
- : _min( g , hash ) ,
- _max( _min._x + g->sizeEdge( hash ) , _min._y + g->sizeEdge( hash ) ){
+ : _min( g , hash ) ,
+ _max( _min._x + g->sizeEdge( hash ) , _min._y + g->sizeEdge( hash ) ) {
}
-
+
Box( double x , double y , double size )
- : _min( x , y ) ,
- _max( x + size , y + size ){
+ : _min( x , y ) ,
+ _max( x + size , y + size ) {
}
Box( Point min , Point max )
- : _min( min ) , _max( max ){
+ : _min( min ) , _max( max ) {
}
- Box(){}
+ Box() {}
string toString() const {
StringBuilder buf(64);
buf << _min.toString() << " -->> " << _max.toString();
return buf.str();
}
-
+
bool between( double min , double max , double val , double fudge=0) const {
return val + fudge >= min && val <= max + fudge;
}
-
+
bool mid( double amin , double amax , double bmin , double bmax , bool min , double& res ) const {
assert( amin <= amax );
assert( bmin <= bmax );
- if ( amin < bmin ){
+ if ( amin < bmin ) {
if ( amax < bmin )
return false;
res = min ? bmin : amax;
@@ -340,16 +340,16 @@ namespace mongo {
}
double intersects( const Box& other ) const {
-
+
Point boundMin(0,0);
Point boundMax(0,0);
-
+
if ( mid( _min._x , _max._x , other._min._x , other._max._x , true , boundMin._x ) == false ||
- mid( _min._x , _max._x , other._min._x , other._max._x , false , boundMax._x ) == false ||
- mid( _min._y , _max._y , other._min._y , other._max._y , true , boundMin._y ) == false ||
- mid( _min._y , _max._y , other._min._y , other._max._y , false , boundMax._y ) == false )
+ mid( _min._x , _max._x , other._min._x , other._max._x , false , boundMax._x ) == false ||
+ mid( _min._y , _max._y , other._min._y , other._max._y , true , boundMin._y ) == false ||
+ mid( _min._y , _max._y , other._min._y , other._max._y , false , boundMax._y ) == false )
return 0;
-
+
Box intersection( boundMin , boundMax );
return intersection.area() / ( ( area() + other.area() ) / 2 );
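
mid() computes, per axis, one edge of the overlap of two intervals, and intersects() then divides the overlap box's area by the mean area of the two inputs. The one-dimensional core in a stand-alone sketch:

    // 1-D core of Box::mid()/intersects(): the overlap of [amin,amax] and
    // [bmin,bmax] is [max(amin,bmin), min(amax,bmax)], empty when inverted
    bool overlap1d(double amin, double amax, double bmin, double bmax,
                   double &lo, double &hi) {
        lo = amin > bmin ? amin : bmin;
        hi = amax < bmax ? amax : bmax;
        return lo <= hi;              // false => the boxes cannot intersect
    }
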
@@ -364,49 +364,49 @@ namespace mongo {
( _min._y + _max._y ) / 2 );
}
- bool inside( Point p , double fudge = 0 ){
+ bool inside( Point p , double fudge = 0 ) {
bool res = inside( p._x , p._y , fudge );
//cout << "is : " << p.toString() << " in " << toString() << " = " << res << endl;
return res;
}
-
- bool inside( double x , double y , double fudge = 0 ){
- return
+
+ bool inside( double x , double y , double fudge = 0 ) {
+ return
between( _min._x , _max._x , x , fudge ) &&
between( _min._y , _max._y , y , fudge );
}
- bool contains(const Box& other, double fudge=0){
+ bool contains(const Box& other, double fudge=0) {
return inside(other._min, fudge) && inside(other._max, fudge);
}
-
+
Point _min;
Point _max;
};
-
+
class Geo2dPlugin : public IndexPlugin {
public:
- Geo2dPlugin() : IndexPlugin( GEO2DNAME ){
+ Geo2dPlugin() : IndexPlugin( GEO2DNAME ) {
}
-
+
virtual IndexType* generate( const IndexSpec* spec ) const {
return new Geo2dType( this , spec );
}
} geo2dplugin;
-
+
struct GeoUnitTest : public UnitTest {
-
- int round( double d ){
+
+ int round( double d ) {
return (int)(.5+(d*1000));
}
-
+
#define GEOHEQ(a,b) if ( a.toString() != b ){ cout << "[" << a.toString() << "] != [" << b << "]" << endl; assert( a == GeoHash(b) ); }
- void run(){
+ void run() {
assert( ! GeoHash::isBitSet( 0 , 0 ) );
assert( ! GeoHash::isBitSet( 0 , 31 ) );
assert( GeoHash::isBitSet( 1 , 31 ) );
-
+
IndexSpec i( BSON( "loc" << "2d" ) );
Geo2dType g( &geo2dplugin , &i );
{
@@ -432,7 +432,7 @@ namespace mongo {
assert( round( in["x"].number() ) == round( out["x"].number() ) );
assert( round( in["y"].number() ) == round( out["y"].number() ) );
}
-
+
{
GeoHash h( "0000" );
h.move( 0 , 1 );
@@ -445,13 +445,13 @@ namespace mongo {
GEOHEQ( h , "0100" );
h.move( 0 , -1 );
GEOHEQ( h , "0001" );
-
+
h.init( "0000" );
h.move( 1 , 0 );
GEOHEQ( h , "0010" );
}
-
+
{
Box b( 5 , 5 , 2 );
assert( "(5,5) -->> (7,7)" == b.toString() );
@@ -465,7 +465,7 @@ namespace mongo {
b = g.hash( 42 , 44 );
assert( round(10) == round(g.distance( a , b )) );
}
-
+
{
GeoHash x("0000");
assert( 0 == x.getHash() );
@@ -475,7 +475,7 @@ namespace mongo {
assert( GeoHash( "1100").hasPrefix( GeoHash( "11" ) ) );
assert( ! GeoHash( "1000").hasPrefix( GeoHash( "11" ) ) );
}
-
+
{
GeoHash x("1010");
GEOHEQ( x , "1010" );
@@ -483,8 +483,8 @@ namespace mongo {
GEOHEQ( y , "101001" );
}
- {
-
+ {
+
GeoHash a = g.hash( 5 , 5 );
GeoHash b = g.hash( 5 , 7 );
GeoHash c = g.hash( 100 , 100 );
@@ -530,13 +530,13 @@ namespace mongo {
assert( entry.hasPrefix( GeoHash( "1100" ) ) );
assert( entry.hasPrefix( prefix ) );
}
-
+
{
GeoHash a = g.hash( 50 , 50 );
GeoHash b = g.hash( 48 , 54 );
assert( round( 4.47214 ) == round( g.distance( a , b ) ) );
}
-
+
{
Box b( Point( 29.762283 , -95.364271 ) , Point( 29.764283000000002 , -95.36227099999999 ) );
@@ -555,7 +555,7 @@ namespace mongo {
int N = 10000;
{
Timer t;
- for ( int i=0; i<N; i++ ){
+ for ( int i=0; i<N; i++ ) {
unsigned x = (unsigned)rand();
unsigned y = (unsigned)rand();
GeoHash h( x , y );
@@ -569,7 +569,7 @@ namespace mongo {
{
Timer t;
- for ( int i=0; i<N; i++ ){
+ for ( int i=0; i<N; i++ ) {
unsigned x = (unsigned)rand();
unsigned y = (unsigned)rand();
GeoHash h( x , y );
@@ -600,7 +600,7 @@ namespace mongo {
{
Point BNA (-1.5127, 0.6304);
Point LAX (-2.0665, 0.5924);
-
+
double dist1 = spheredist_rad(BNA, LAX);
double dist2 = spheredist_rad(LAX, BNA);
@@ -611,7 +611,7 @@ namespace mongo {
{
Point JFK (-73.77694444, 40.63861111 );
Point LAX (-118.40, 33.94);
-
+
double dist = spheredist_deg(JFK, LAX) * EARTH_RADIUS_MILES;
assert( dist > 2469 && dist < 2470 );
}
@@ -635,18 +635,18 @@ namespace mongo {
}
}
} geoUnitTest;
-
+
class GeoPoint {
public:
- GeoPoint(){
+ GeoPoint() {
}
GeoPoint( const KeyNode& node , double distance )
- : _key( node.key ) , _loc( node.recordLoc ) , _o( node.recordLoc.obj() ) , _distance( distance ){
+ : _key( node.key ) , _loc( node.recordLoc ) , _o( node.recordLoc.obj() ) , _distance( distance ) {
}
GeoPoint( const BSONObj& key , DiskLoc loc , double distance )
- : _key(key) , _loc(loc) , _o( loc.obj() ) , _distance( distance ){
+ : _key(key) , _loc(loc) , _o( loc.obj() ) , _distance( distance ) {
}
bool operator<( const GeoPoint& other ) const {
@@ -667,44 +667,44 @@ namespace mongo {
public:
GeoAccumulator( const Geo2dType * g , const BSONObj& filter )
: _g(g) , _lookedAt(0) , _objectsLoaded(0) , _found(0) {
- if ( ! filter.isEmpty() ){
+ if ( ! filter.isEmpty() ) {
_matcher.reset( new CoveredIndexMatcher( filter , g->keyPattern() ) );
}
}
- virtual ~GeoAccumulator(){
+ virtual ~GeoAccumulator() {
}
- virtual void add( const KeyNode& node ){
+ virtual void add( const KeyNode& node ) {
// when looking at other boxes, don't want to look at some object twice
pair<set<DiskLoc>::iterator,bool> seenBefore = _seen.insert( node.recordLoc );
- if ( ! seenBefore.second ){
+ if ( ! seenBefore.second ) {
GEODEBUG( "\t\t\t\t already seen : " << node.recordLoc.obj()["_id"] );
return;
}
_lookedAt++;
-
+
// distance check
double d = 0;
- if ( ! checkDistance( GeoHash( node.key.firstElement() ) , d ) ){
+ if ( ! checkDistance( GeoHash( node.key.firstElement() ) , d ) ) {
GEODEBUG( "\t\t\t\t bad distance : " << node.recordLoc.obj() << "\t" << d );
return;
- }
+ }
GEODEBUG( "\t\t\t\t good distance : " << node.recordLoc.obj() << "\t" << d );
-
+
// matcher
MatchDetails details;
- if ( _matcher.get() ){
+ if ( _matcher.get() ) {
bool good = _matcher->matches( node.key , node.recordLoc , &details );
if ( details.loadedObject )
_objectsLoaded++;
-
- if ( ! good ){
+
+ if ( ! good ) {
GEODEBUG( "\t\t\t\t didn't match : " << node.recordLoc.obj()["_id"] );
return;
}
}
-
+
            if ( ! details.loadedObject ) // don't double count
_objectsLoaded++;
@@ -718,7 +718,7 @@ namespace mongo {
long long found() const {
return _found;
}
-
+
const Geo2dType * _g;
set<DiskLoc> _seen;
auto_ptr<CoveredIndexMatcher> _matcher;
@@ -727,7 +727,7 @@ namespace mongo {
long long _objectsLoaded;
long long _found;
};
-
+
class GeoHopper : public GeoAccumulator {
public:
typedef multiset<GeoPoint> Holder;
@@ -736,33 +736,34 @@ namespace mongo {
: GeoAccumulator( g , filter ) , _max( max ) , _near( n ), _maxDistance( maxDistance ), _type( type ), _farthest(-1)
{}
- virtual bool checkDistance( const GeoHash& h , double& d ){
- switch (_type){
- case GEO_PLAIN:
- d = _near.distance( Point(_g, h) );
- break;
- case GEO_SPHERE:
- d = spheredist_deg(_near, Point(_g, h));
- break;
- default:
- assert(0);
+ virtual bool checkDistance( const GeoHash& h , double& d ) {
+ switch (_type) {
+ case GEO_PLAIN:
+ d = _near.distance( Point(_g, h) );
+ break;
+ case GEO_SPHERE:
+ d = spheredist_deg(_near, Point(_g, h));
+ break;
+ default:
+ assert(0);
}
bool good = d < _maxDistance && ( _points.size() < _max || d < farthest() );
- GEODEBUG( "\t\t\t\t\t\t\t checkDistance " << _near.toString() << "\t" << h << "\t" << d
+ GEODEBUG( "\t\t\t\t\t\t\t checkDistance " << _near.toString() << "\t" << h << "\t" << d
<< " ok: " << good << " farthest: " << farthest() );
return good;
}
-
- virtual void addSpecific( const KeyNode& node , double d ){
+
+ virtual void addSpecific( const KeyNode& node , double d ) {
GEODEBUG( "\t\t" << GeoHash( node.key.firstElement() ) << "\t" << node.recordLoc.obj() << "\t" << d );
_points.insert( GeoPoint( node.key , node.recordLoc , d ) );
- if ( _points.size() > _max ){
+ if ( _points.size() > _max ) {
_points.erase( --_points.end() );
-
+
Holder::iterator i = _points.end();
i--;
_farthest = i->_distance;
- } else {
+ }
+ else {
if (d > _farthest)
_farthest = d;
}
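
checkDistance above dispatches between the flat distance and spheredist_deg. For reference, a hedged sketch of a great-circle distance in the haversine form; the spheredist_* helpers in this tree may use a different but equivalent formulation. Inputs in radians, result in radians (multiply by the earth radius constants in this file for km or miles):

    #include <cmath>

    double sphereDistRad(double lon1, double lat1, double lon2, double lat2) {
        double dlat = lat2 - lat1, dlon = lon2 - lon1;
        double a = sin(dlat / 2) * sin(dlat / 2) +
                   cos(lat1) * cos(lat2) * sin(dlon / 2) * sin(dlon / 2);
        return 2 * atan2(sqrt(a), sqrt(1 - a));   // central angle in radians
    }
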
@@ -780,42 +781,42 @@ namespace mongo {
GeoDistType _type;
double _farthest;
};
-
+
struct BtreeLocation {
int pos;
bool found;
DiskLoc bucket;
-
- BSONObj key(){
+
+ BSONObj key() {
if ( bucket.isNull() )
return BSONObj();
return bucket.btree()->keyNode( pos ).key;
}
-
- bool hasPrefix( const GeoHash& hash ){
+
+ bool hasPrefix( const GeoHash& hash ) {
BSONElement e = key().firstElement();
if ( e.eoo() )
return false;
return GeoHash( e ).hasPrefix( hash );
}
-
- bool advance( int direction , int& totalFound , GeoAccumulator* all ){
+
+ bool advance( int direction , int& totalFound , GeoAccumulator* all ) {
if ( bucket.isNull() )
return false;
bucket = bucket.btree()->advance( bucket , pos , direction , "btreelocation" );
-
+
if ( all )
return checkCur( totalFound , all );
-
+
return ! bucket.isNull();
}
- bool checkCur( int& totalFound , GeoAccumulator* all ){
+ bool checkCur( int& totalFound , GeoAccumulator* all ) {
if ( bucket.isNull() )
return false;
- if ( bucket.btree()->isUsed(pos) ){
+ if ( bucket.btree()->isUsed(pos) ) {
totalFound++;
all->add( bucket.btree()->keyNode( pos ) );
}
@@ -826,31 +827,30 @@ namespace mongo {
return true;
}
- string toString(){
+ string toString() {
stringstream ss;
ss << "bucket: " << bucket.toString() << " pos: " << pos << " found: " << found;
return ss.str();
}
- static bool initial( const IndexDetails& id , const Geo2dType * spec ,
- BtreeLocation& min , BtreeLocation& max ,
+ static bool initial( const IndexDetails& id , const Geo2dType * spec ,
+ BtreeLocation& min , BtreeLocation& max ,
GeoHash start ,
- int & found , GeoAccumulator * hopper )
- {
-
+ int & found , GeoAccumulator * hopper ) {
+
Ordering ordering = Ordering::make(spec->_order);
- min.bucket = id.head.btree()->locate( id , id.head , start.wrap() ,
+ min.bucket = id.head.btree()->locate( id , id.head , start.wrap() ,
ordering , min.pos , min.found , minDiskLoc );
if (hopper) min.checkCur( found , hopper );
max = min;
-
- if ( min.bucket.isNull() || ( hopper && !(hopper->found()) ) ){
- min.bucket = id.head.btree()->locate( id , id.head , start.wrap() ,
+
+ if ( min.bucket.isNull() || ( hopper && !(hopper->found()) ) ) {
+ min.bucket = id.head.btree()->locate( id , id.head , start.wrap() ,
ordering , min.pos , min.found , minDiskLoc , -1 );
if (hopper) min.checkCur( found , hopper );
}
-
+
return ! min.bucket.isNull() || ! max.bucket.isNull();
}
};
@@ -860,29 +860,31 @@ namespace mongo {
GeoSearch( const Geo2dType * g , const GeoHash& n , int numWanted=100 , BSONObj filter=BSONObj() , double maxDistance = numeric_limits<double>::max() , GeoDistType type=GEO_PLAIN)
: _spec( g ) ,_startPt(g,n), _start( n ) ,
_numWanted( numWanted ) , _filter( filter ) , _maxDistance( maxDistance ) ,
- _hopper( new GeoHopper( g , numWanted , _startPt , filter , maxDistance, type ) ), _type(type)
- {
+ _hopper( new GeoHopper( g , numWanted , _startPt , filter , maxDistance, type ) ), _type(type) {
assert( g->getDetails() );
_nscanned = 0;
_found = 0;
-
- if (type == GEO_PLAIN){
+
+ if (type == GEO_PLAIN) {
_scanDistance = maxDistance;
- } else if (type == GEO_SPHERE) {
- if (maxDistance == numeric_limits<double>::max()){
+ }
+ else if (type == GEO_SPHERE) {
+ if (maxDistance == numeric_limits<double>::max()) {
_scanDistance = maxDistance;
- } else {
+ }
+ else {
//TODO: consider splitting into x and y scan distances
_scanDistance = computeXScanDistance(_startPt._y, rad2deg(maxDistance));
}
- } else {
+ }
+ else {
assert(0);
}
}
-
- void exec(){
+
+ void exec() {
const IndexDetails& id = *_spec->getDetails();
-
+
const BtreeBucket * head = id.head.btree();
assert( head );
/*
@@ -892,20 +894,20 @@ namespace mongo {
* 3) find optimal set of boxes that complete circle
* 4) use regular btree cursors to scan those boxes
*/
-
+
GeoHopper * hopper = _hopper.get();
_prefix = _start;
BtreeLocation min,max;
- { // 1 regular geo hash algorithm
-
+ {
+ // 1 regular geo hash algorithm
+
if ( ! BtreeLocation::initial( id , _spec , min , max , _start , _found , NULL ) )
return;
-
+
while ( !_prefix.constrains() || // if next pass would cover universe, just keep going
- ( _hopper->found() < _numWanted && _spec->sizeEdge( _prefix ) <= _scanDistance))
- {
+ ( _hopper->found() < _numWanted && _spec->sizeEdge( _prefix ) <= _scanDistance)) {
GEODEBUG( _prefix << "\t" << _found << "\t DESC" );
while ( min.hasPrefix(_prefix) && min.checkCur(_found, hopper) && min.advance(-1, _found, NULL) )
_nscanned++;
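
The loop this hunk reformats implements the first steps of the plan in the comment above: scan outward from the start cell, and when the current prefix has not produced enough points, shorten it with up(), doubling the cell edge. As hedged pseudocode (scanBothDirectionsWithin is a hypothetical stand-in for the min/max btree walks):

    // pseudocode sketch, not compilable as-is
    GeoHash prefix = start;
    while ( hopper.found() < numWanted && spec.sizeEdge( prefix ) <= scanDistance ) {
        scanBothDirectionsWithin( prefix );  // collect keys sharing this prefix
        if ( ! prefix.constrains() )         // prefix already covers everything
            break;
        prefix = prefix.up();                // parent cell: edge length doubles
    }
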
@@ -913,7 +915,7 @@ namespace mongo {
while ( max.hasPrefix(_prefix) && max.checkCur(_found, hopper) && max.advance(+1, _found, NULL) )
_nscanned++;
- if ( ! _prefix.constrains() ){
+ if ( ! _prefix.constrains() ) {
GEODEBUG( "done search w/o part 2" )
return;
}
@@ -927,10 +929,11 @@ namespace mongo {
// 2
double farthest = hopper->farthest();
GEODEBUGPRINT(hopper->farthest());
- if (farthest == -1){
+ if (farthest == -1) {
// Nothing found in Phase 1
farthest = _scanDistance;
- } else if (_type == GEO_SPHERE) {
+ }
+ else if (_type == GEO_SPHERE) {
farthest = std::min(_scanDistance, computeXScanDistance(_startPt._y, rad2deg(farthest)));
}
GEODEBUGPRINT(farthest);
@@ -939,13 +942,13 @@ namespace mongo {
GEODEBUGPRINT(want.toString());
_prefix = _start;
- while (_prefix.constrains() && _spec->sizeEdge( _prefix ) < farthest ){
+ while (_prefix.constrains() && _spec->sizeEdge( _prefix ) < farthest ) {
_prefix = _prefix.up();
}
PREFIXDEBUG(_prefix, _spec);
- if (_prefix.getBits() <= 1){
+ if (_prefix.getBits() <= 1) {
// TODO consider walking in $natural order
while ( min.checkCur(_found, hopper) && min.advance(-1, _found, NULL) )
@@ -954,16 +957,16 @@ namespace mongo {
_nscanned++;
GEODEBUG( "done search after scanning whole collection" )
- return;
+ return;
}
- if ( logLevel > 0 ){
- log(1) << "want: " << want << " found:" << _found << " nscanned: " << _nscanned << " hash size:" << _spec->sizeEdge( _prefix )
+ if ( logLevel > 0 ) {
+ log(1) << "want: " << want << " found:" << _found << " nscanned: " << _nscanned << " hash size:" << _spec->sizeEdge( _prefix )
<< " farthest: " << farthest << " using box: " << Box( _spec , _prefix ).toString() << endl;
}
- for ( int x=-1; x<=1; x++ ){
- for ( int y=-1; y<=1; y++ ){
+ for ( int x=-1; x<=1; x++ ) {
+ for ( int y=-1; y<=1; y++ ) {
GeoHash toscan = _prefix;
toscan.move( x , y );
@@ -973,51 +976,52 @@ namespace mongo {
}
}
GEODEBUG( "done search" )
-
+
}
- void doBox( const IndexDetails& id , const Box& want , const GeoHash& toscan , int depth = 0 ){
+ void doBox( const IndexDetails& id , const Box& want , const GeoHash& toscan , int depth = 0 ) {
Box testBox( _spec , toscan );
- if ( logLevel > 2 ){
+ if ( logLevel > 2 ) {
cout << "\t";
for ( int i=0; i<depth; i++ )
cout << "\t";
cout << " doBox: " << testBox.toString() << "\t" << toscan.toString() << " scanned so far: " << _nscanned << endl;
- } else {
+ }
+ else {
GEODEBUGPRINT(testBox.toString());
}
- if (_alreadyScanned.contains(testBox, _spec->_error)){
+ if (_alreadyScanned.contains(testBox, _spec->_error)) {
GEODEBUG("skipping box: already scanned");
return; // been here, done this
}
double intPer = testBox.intersects( want );
-
- if ( intPer <= 0 ){
+
+ if ( intPer <= 0 ) {
GEODEBUG("skipping box: not in want");
return;
}
-
+
bool goDeeper = intPer < .5 && depth < 2;
long long myscanned = 0;
-
+
BtreeLocation loc;
- loc.bucket = id.head.btree()->locate( id , id.head , toscan.wrap() , Ordering::make(_spec->_order) ,
- loc.pos , loc.found , minDiskLoc );
+ loc.bucket = id.head.btree()->locate( id , id.head , toscan.wrap() , Ordering::make(_spec->_order) ,
+ loc.pos , loc.found , minDiskLoc );
loc.checkCur( _found , _hopper.get() );
- while ( loc.hasPrefix( toscan ) && loc.advance( 1 , _found , _hopper.get() ) ){
+ while ( loc.hasPrefix( toscan ) && loc.advance( 1 , _found , _hopper.get() ) ) {
_nscanned++;
- if ( ++myscanned > 100 && goDeeper ){
+ if ( ++myscanned > 100 && goDeeper ) {
doBox( id , want , toscan + "00" , depth + 1);
doBox( id , want , toscan + "01" , depth + 1);
doBox( id , want , toscan + "10" , depth + 1);
doBox( id , want , toscan + "11" , depth + 1);
- return;
+ return;
}
}
-
+
}
@@ -1042,17 +1046,17 @@ namespace mongo {
class GeoCursorBase : public Cursor {
public:
GeoCursorBase( const Geo2dType * spec )
- : _spec( spec ), _id( _spec->getDetails() ){
+ : _spec( spec ), _id( _spec->getDetails() ) {
}
- virtual DiskLoc refLoc(){ return DiskLoc(); }
+ virtual DiskLoc refLoc() { return DiskLoc(); }
virtual BSONObj indexKeyPattern() {
return _spec->keyPattern();
}
- virtual void noteLocation() {
+ virtual void noteLocation() {
// no-op since these are meant to be safe
}
@@ -1063,12 +1067,12 @@ namespace mongo {
virtual bool supportGetMore() { return false; }
virtual bool supportYields() { return false; }
-
+
virtual bool getsetdup(DiskLoc loc) { return false; }
virtual bool modifiedKeys() const { return true; }
virtual bool isMultiKey() const { return false; }
-
+
const Geo2dType * _spec;
const IndexDetails * _id;
@@ -1077,23 +1081,23 @@ namespace mongo {
class GeoSearchCursor : public GeoCursorBase {
public:
GeoSearchCursor( shared_ptr<GeoSearch> s )
- : GeoCursorBase( s->_spec ) ,
+ : GeoCursorBase( s->_spec ) ,
_s( s ) , _cur( s->_hopper->_points.begin() ) , _end( s->_hopper->_points.end() ), _nscanned() {
- if ( _cur != _end ) {
- ++_nscanned;
- }
+ if ( _cur != _end ) {
+ ++_nscanned;
+ }
}
-
+
virtual ~GeoSearchCursor() {}
-
- virtual bool ok(){
+
+ virtual bool ok() {
return _cur != _end;
}
-
- virtual Record* _current(){ assert(ok()); return _cur->_loc.rec(); }
- virtual BSONObj current(){ assert(ok()); return _cur->_o; }
- virtual DiskLoc currLoc(){ assert(ok()); return _cur->_loc; }
- virtual bool advance(){ _cur++; incNscanned(); return ok(); }
+
+ virtual Record* _current() { assert(ok()); return _cur->_loc.rec(); }
+ virtual BSONObj current() { assert(ok()); return _cur->_o; }
+ virtual DiskLoc currLoc() { assert(ok()); return _cur->_loc; }
+ virtual bool advance() { _cur++; incNscanned(); return ok(); }
virtual BSONObj currKey() const { return _cur->_key; }
virtual string toString() {
@@ -1101,21 +1105,21 @@ namespace mongo {
}
- virtual BSONObj prettyStartKey() const {
- return BSON( _s->_spec->_geo << _s->_prefix.toString() );
+ virtual BSONObj prettyStartKey() const {
+ return BSON( _s->_spec->_geo << _s->_prefix.toString() );
}
- virtual BSONObj prettyEndKey() const {
+ virtual BSONObj prettyEndKey() const {
GeoHash temp = _s->_prefix;
temp.move( 1 , 1 );
- return BSON( _s->_spec->_geo << temp.toString() );
+ return BSON( _s->_spec->_geo << temp.toString() );
}
-
+
virtual long long nscanned() { return _nscanned; }
shared_ptr<GeoSearch> _s;
GeoHopper::Holder::iterator _cur;
GeoHopper::Holder::iterator _end;
-
+
void incNscanned() { if ( ok() ) { ++_nscanned; } }
long long _nscanned;
};
@@ -1126,14 +1130,14 @@ namespace mongo {
: GeoCursorBase( g ) ,GeoAccumulator( g , filter ) ,
_type( type ) , _filter( filter ) , _firstCall(true), _nscanned() {
}
-
+
virtual string toString() {
return (string)"GeoBrowse-" + _type;
}
- virtual bool ok(){
+ virtual bool ok() {
bool first = _firstCall;
- if ( _firstCall ){
+ if ( _firstCall ) {
fillStack();
_firstCall = false;
}
@@ -1144,7 +1148,7 @@ namespace mongo {
return true;
}
- while ( moreToDo() ){
+ while ( moreToDo() ) {
fillStack();
if ( ! _cur.isEmpty() ) {
if ( first ) {
@@ -1153,38 +1157,38 @@ namespace mongo {
return true;
}
}
-
+
return false;
}
-
- virtual bool advance(){
+
+ virtual bool advance() {
_cur._o = BSONObj();
-
- if ( _stack.size() ){
+
+ if ( _stack.size() ) {
_cur = _stack.front();
_stack.pop_front();
++_nscanned;
return true;
}
-
+
if ( ! moreToDo() )
return false;
-
+
while ( _cur.isEmpty() && moreToDo() )
fillStack();
return ! _cur.isEmpty() && ++_nscanned;
}
-
- virtual Record* _current(){ assert(ok()); return _cur._loc.rec(); }
- virtual BSONObj current(){ assert(ok()); return _cur._o; }
- virtual DiskLoc currLoc(){ assert(ok()); return _cur._loc; }
+
+ virtual Record* _current() { assert(ok()); return _cur._loc.rec(); }
+ virtual BSONObj current() { assert(ok()); return _cur._o; }
+ virtual DiskLoc currLoc() { assert(ok()); return _cur._loc; }
virtual BSONObj currKey() const { return _cur._key; }
virtual bool moreToDo() = 0;
virtual void fillStack() = 0;
- virtual void addSpecific( const KeyNode& node , double d ){
+ virtual void addSpecific( const KeyNode& node , double d ) {
if ( _cur.isEmpty() )
_cur = GeoPoint( node , d );
else
@@ -1196,31 +1200,31 @@ namespace mongo {
ok();
}
return _nscanned;
- }
-
+ }
+
string _type;
BSONObj _filter;
list<GeoPoint> _stack;
GeoPoint _cur;
bool _firstCall;
-
+
long long _nscanned;
};
class GeoCircleBrowse : public GeoBrowse {
public:
-
+
enum State {
- START ,
+ START ,
DOING_EXPAND ,
DOING_AROUND ,
DONE
} _state;
GeoCircleBrowse( const Geo2dType * g , const BSONObj& circle , BSONObj filter = BSONObj() , const string& type="$center")
- : GeoBrowse( g , "circle" , filter ){
+ : GeoBrowse( g , "circle" , filter ) {
uassert( 13060 , "$center needs 2 fields (middle,max distance)" , circle.nFields() == 2 );
BSONObjIterator i(circle);
@@ -1235,40 +1239,42 @@ namespace mongo {
_state = START;
_found = 0;
- if (type == "$center"){
+ if (type == "$center") {
_type = GEO_PLAIN;
_xScanDistance = _maxDistance;
_yScanDistance = _maxDistance;
- } else if (type == "$centerSphere") {
+ }
+ else if (type == "$centerSphere") {
uassert(13461, "Spherical MaxDistance > PI. Are you sure you are using radians?", _maxDistance < M_PI);
_type = GEO_SPHERE;
_yScanDistance = rad2deg(_maxDistance);
_xScanDistance = computeXScanDistance(_startPt._y, _yScanDistance);
- uassert(13462, "Spherical distance would require wrapping, which isn't implemented yet",
- (_startPt._x + _xScanDistance < 180) && (_startPt._x - _xScanDistance > -180) &&
- (_startPt._y + _yScanDistance < 90) && (_startPt._y - _yScanDistance > -90));
+ uassert(13462, "Spherical distance would require wrapping, which isn't implemented yet",
+ (_startPt._x + _xScanDistance < 180) && (_startPt._x - _xScanDistance > -180) &&
+ (_startPt._y + _yScanDistance < 90) && (_startPt._y - _yScanDistance > -90));
GEODEBUGPRINT(_maxDistance);
GEODEBUGPRINT(_xScanDistance);
GEODEBUGPRINT(_yScanDistance);
- } else {
+ }
+ else {
uassert(13460, "invalid $center query type: " + type, false);
}
ok();
}
- virtual bool moreToDo(){
+ virtual bool moreToDo() {
return _state != DONE;
}
-
- virtual void fillStack(){
- if ( _state == START ){
- if ( ! BtreeLocation::initial( *_id , _spec , _min , _max ,
- _prefix , _found , this ) ){
+ virtual void fillStack() {
+
+ if ( _state == START ) {
+ if ( ! BtreeLocation::initial( *_id , _spec , _min , _max ,
+ _prefix , _found , this ) ) {
_state = DONE;
return;
}
@@ -1276,10 +1282,10 @@ namespace mongo {
}
- if ( _state == DOING_AROUND ){
+ if ( _state == DOING_AROUND ) {
// TODO could rework and return rather than looping
- for (int i=-1; i<=1; i++){
- for (int j=-1; j<=1; j++){
+ for (int i=-1; i<=1; i++) {
+ for (int j=-1; j<=1; j++) {
if (i == 0 && j == 0)
continue; // main box
@@ -1287,10 +1293,11 @@ namespace mongo {
newBox.move(i, j);
PREFIXDEBUG(newBox, _g);
- if (needToCheckBox(newBox)){
+ if (needToCheckBox(newBox)) {
// TODO consider splitting into quadrants
getPointsForPrefix(newBox);
- } else {
+ }
+ else {
GEODEBUG("skipping box");
}
}
@@ -1299,54 +1306,56 @@ namespace mongo {
_state = DONE;
return;
}
-
- if (_state == DOING_EXPAND){
+
+ if (_state == DOING_EXPAND) {
GEODEBUG( "circle prefix [" << _prefix << "]" );
PREFIXDEBUG(_prefix, _g);
while ( _min.hasPrefix( _prefix ) && _min.advance( -1 , _found , this ) );
while ( _max.hasPrefix( _prefix ) && _max.advance( 1 , _found , this ) );
- if ( ! _prefix.constrains() ){
+ if ( ! _prefix.constrains() ) {
GEODEBUG( "\t exhausted the btree" );
_state = DONE;
return;
}
-
+
Point ll (_g, _prefix);
GeoHash trHash = _prefix;
trHash.move( 1 , 1 );
Point tr (_g, trHash);
double sideLen = fabs(tr._x - ll._x);
- if (sideLen > std::max(_xScanDistance, _yScanDistance)){ // circle must be contained by surrounding squares
- if ( (ll._x + _xScanDistance < _startPt._x && ll._y + _yScanDistance < _startPt._y) &&
- (tr._x - _xScanDistance > _startPt._x && tr._y - _yScanDistance > _startPt._y) )
- {
+ if (sideLen > std::max(_xScanDistance, _yScanDistance)) { // circle must be contained by surrounding squares
+ if ( (ll._x + _xScanDistance < _startPt._x && ll._y + _yScanDistance < _startPt._y) &&
+ (tr._x - _xScanDistance > _startPt._x && tr._y - _yScanDistance > _startPt._y) ) {
GEODEBUG("square fully contains circle");
_state = DONE;
- } else if (_prefix.getBits() > 1){
+ }
+ else if (_prefix.getBits() > 1) {
GEODEBUG("checking surrounding squares");
_state = DOING_AROUND;
- } else {
+ }
+ else {
GEODEBUG("using simple search");
_prefix = _prefix.up();
}
- } else {
+ }
+ else {
_prefix = _prefix.up();
}
return;
}
-
+
/* Clients are expected to use moreToDo before calling
* fillStack, so DONE is checked for there. If any more
* State values are defined, you should handle them
- * here. */
+ * here. */
assert(0);
}
- bool needToCheckBox(const GeoHash& prefix){
+ bool needToCheckBox(const GeoHash& prefix) {
Point ll (_g, prefix);
if (fabs(ll._x - _startPt._x) <= _xScanDistance) return true;
if (fabs(ll._y - _startPt._y) <= _yScanDistance) return true;
@@ -1361,8 +1370,8 @@ namespace mongo {
return false;
}
- void getPointsForPrefix(const GeoHash& prefix){
- if ( ! BtreeLocation::initial( *_id , _spec , _min , _max , prefix , _found , this ) ){
+ void getPointsForPrefix(const GeoHash& prefix) {
+ if ( ! BtreeLocation::initial( *_id , _spec , _min , _max , prefix , _found , this ) ) {
return;
}
@@ -1370,17 +1379,17 @@ namespace mongo {
while ( _max.hasPrefix( prefix ) && _max.advance( 1 , _found , this ) );
}
-
- virtual bool checkDistance( const GeoHash& h , double& d ){
- switch (_type){
- case GEO_PLAIN:
- d = _g->distance( _start , h );
- break;
- case GEO_SPHERE:
- d = spheredist_deg(_startPt, Point(_g, h));
- break;
- default:
- assert(0);
+
+ virtual bool checkDistance( const GeoHash& h , double& d ) {
+ switch (_type) {
+ case GEO_PLAIN:
+ d = _g->distance( _start , h );
+ break;
+ case GEO_SPHERE:
+ d = spheredist_deg(_startPt, Point(_g, h));
+ break;
+ default:
+ assert(0);
}
GEODEBUG( "\t " << h << "\t" << d );
@@ -1393,27 +1402,27 @@ namespace mongo {
double _maxDistance; // user input
        double _xScanDistance; // affected by GeoDistType
        double _yScanDistance; // affected by GeoDistType
-
+
int _found;
-
- GeoHash _prefix;
+
+ GeoHash _prefix;
BtreeLocation _min;
BtreeLocation _max;
- };
+ };
class GeoBoxBrowse : public GeoBrowse {
public:
-
+
enum State {
- START ,
+ START ,
DOING_EXPAND ,
DONE
} _state;
- GeoBoxBrowse( const Geo2dType * g , const BSONObj& box , BSONObj filter = BSONObj() )
- : GeoBrowse( g , "box" , filter ){
-
+ GeoBoxBrowse( const Geo2dType * g , const BSONObj& box , BSONObj filter = BSONObj() )
+ : GeoBrowse( g , "box" , filter ) {
+
uassert( 13063 , "$box needs 2 fields (bottomLeft,topRight)" , box.nFields() == 2 );
BSONObjIterator i(box);
_bl = g->_tohash( i.next() );
@@ -1429,7 +1438,7 @@ namespace mongo {
Point center = _want.center();
_prefix = _g->hash( center._x , center._y );
-
+
GEODEBUG( "center : " << center.toString() << "\t" << _prefix );
{
@@ -1444,42 +1453,43 @@ namespace mongo {
ok();
}
- virtual bool moreToDo(){
+ virtual bool moreToDo() {
return _state != DONE;
}
-
- virtual void fillStack(){
- if ( _state == START ){
- if ( ! BtreeLocation::initial( *_id , _spec , _min , _max ,
- _prefix , _found , this ) ){
+ virtual void fillStack() {
+ if ( _state == START ) {
+
+ if ( ! BtreeLocation::initial( *_id , _spec , _min , _max ,
+ _prefix , _found , this ) ) {
_state = DONE;
return;
}
_state = DOING_EXPAND;
}
-
- if ( _state == DOING_EXPAND ){
+
+ if ( _state == DOING_EXPAND ) {
int started = _found;
- while ( started == _found || _state == DONE ){
+ while ( started == _found || _state == DONE ) {
GEODEBUG( "box prefix [" << _prefix << "]" );
while ( _min.hasPrefix( _prefix ) && _min.advance( -1 , _found , this ) );
while ( _max.hasPrefix( _prefix ) && _max.advance( 1 , _found , this ) );
-
+
if ( _state == DONE )
return;
- if ( ! _prefix.constrains() ){
+ if ( ! _prefix.constrains() ) {
GEODEBUG( "box exhausted" );
_state = DONE;
return;
}
- if (_g->sizeEdge(_prefix) < _wantLen){
+ if (_g->sizeEdge(_prefix) < _wantLen) {
_prefix = _prefix.up();
- } else {
- for (int i=-1; i<=1; i++){
- for (int j=-1; j<=1; j++){
+ }
+ else {
+ for (int i=-1; i<=1; i++) {
+ for (int j=-1; j<=1; j++) {
if (i == 0 && j == 0)
continue; // main box
@@ -1490,36 +1500,37 @@ namespace mongo {
PREFIXDEBUG(newBox, _g);
Box cur( _g , newBox );
- if (_want.intersects(cur)){
+ if (_want.intersects(cur)) {
// TODO consider splitting into quadrants
getPointsForPrefix(newBox);
- } else {
+ }
+ else {
GEODEBUG("skipping box");
}
}
}
_state = DONE;
}
-
+
}
return;
}
}
- void getPointsForPrefix(const GeoHash& prefix){
- if ( ! BtreeLocation::initial( *_id , _spec , _min , _max , prefix , _found , this ) ){
+ void getPointsForPrefix(const GeoHash& prefix) {
+ if ( ! BtreeLocation::initial( *_id , _spec , _min , _max , prefix , _found , this ) ) {
return;
}
while ( _min.hasPrefix( prefix ) && _min.advance( -1 , _found , this ) );
while ( _max.hasPrefix( prefix ) && _max.advance( 1 , _found , this ) );
}
-
- virtual bool checkDistance( const GeoHash& h , double& d ){
+
+ virtual bool checkDistance( const GeoHash& h , double& d ) {
bool res = _want.inside( Point( _g , h ) , _fudge );
- GEODEBUG( "\t want : " << _want.toString()
- << " point: " << Point( _g , h ).toString()
+ GEODEBUG( "\t want : " << _want.toString()
+ << " point: " << Point( _g , h ).toString()
<< " in : " << res );
return res;
}
@@ -1530,23 +1541,23 @@ namespace mongo {
double _wantLen;
int _found;
-
- GeoHash _prefix;
+
+ GeoHash _prefix;
BtreeLocation _min;
BtreeLocation _max;
double _fudge;
- };
+ };
shared_ptr<Cursor> Geo2dType::newCursor( const BSONObj& query , const BSONObj& order , int numWanted ) const {
if ( numWanted < 0 )
numWanted = numWanted * -1;
else if ( numWanted == 0 )
- numWanted = 100;
-
+ numWanted = 100;
+
BSONObjIterator i(query);
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
if ( _geo != e.fieldName() )
@@ -1554,8 +1565,8 @@ namespace mongo {
if ( e.type() != Object )
continue;
-
- switch ( e.embeddedObject().firstElement().getGtLtOp() ){
+
+ switch ( e.embeddedObject().firstElement().getGtLtOp() ) {
case BSONObj::opNEAR: {
BSONObj n = e.embeddedObject();
e = n.firstElement();
@@ -1564,15 +1575,17 @@ namespace mongo {
GeoDistType type;
if (suffix[0] == '\0') {
type = GEO_PLAIN;
- } else if (strcmp(suffix, "Sphere") == 0) {
+ }
+ else if (strcmp(suffix, "Sphere") == 0) {
type = GEO_SPHERE;
- } else {
+ }
+ else {
uassert(13464, string("invalid $near search type: ") + e.fieldName(), false);
type = GEO_PLAIN; // prevents uninitialized warning
}
double maxDistance = numeric_limits<double>::max();
- if ( e.isABSONObj() && e.embeddedObject().nFields() > 2 ){
+ if ( e.isABSONObj() && e.embeddedObject().nFields() > 2 ) {
BSONObjIterator i(e.embeddedObject());
i.next();
i.next();
@@ -1589,25 +1602,26 @@ namespace mongo {
s->exec();
shared_ptr<Cursor> c;
c.reset( new GeoSearchCursor( s ) );
- return c;
+ return c;
}
case BSONObj::opWITHIN: {
e = e.embeddedObject().firstElement();
uassert( 13057 , "$within has to take an object or array" , e.isABSONObj() );
e = e.embeddedObject().firstElement();
string type = e.fieldName();
- if ( startsWith(type, "$center") ){
+ if ( startsWith(type, "$center") ) {
uassert( 13059 , "$center has to take an object or array" , e.isABSONObj() );
shared_ptr<Cursor> c( new GeoCircleBrowse( this , e.embeddedObjectUserCheck() , query , type) );
- return c;
- } else if ( type == "$box" ){
+ return c;
+ }
+ else if ( type == "$box" ) {
uassert( 13065 , "$box has to take an object or array" , e.isABSONObj() );
shared_ptr<Cursor> c( new GeoBoxBrowse( this , e.embeddedObjectUserCheck() , query ) );
- return c;
+ return c;
}
                throw UserException( 13058 , (string)"unknown $within type: " + type );
}
- default:
+ default:
break;
}
}
@@ -1621,41 +1635,41 @@ namespace mongo {
class Geo2dFindNearCmd : public Command {
public:
- Geo2dFindNearCmd() : Command( "geoNear" ){}
- virtual LockType locktype() const { return READ; }
+ Geo2dFindNearCmd() : Command( "geoNear" ) {}
+ virtual LockType locktype() const { return READ; }
bool slaveOk() const { return true; }
void help(stringstream& h) const { h << "http://www.mongodb.org/display/DOCS/Geospatial+Indexing#GeospatialIndexing-geoNearCommand"; }
bool slaveOverrideOk() { return true; }
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl){
+ bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
string ns = dbname + "." + cmdObj.firstElement().valuestr();
NamespaceDetails * d = nsdetails( ns.c_str() );
- if ( ! d ){
+ if ( ! d ) {
errmsg = "can't find ns";
return false;
}
vector<int> idxs;
d->findIndexByType( GEO2DNAME , idxs );
-
- if ( idxs.size() > 1 ){
+
+ if ( idxs.size() > 1 ) {
errmsg = "more than 1 geo indexes :(";
return false;
}
-
- if ( idxs.size() == 0 ){
+
+ if ( idxs.size() == 0 ) {
errmsg = "no geo index :(";
return false;
}
int geoIdx = idxs[0];
-
+
result.append( "ns" , ns );
IndexDetails& id = d->idx( geoIdx );
Geo2dType * g = (Geo2dType*)id.getSpec().getType();
assert( &id == g->getDetails() );
-
+
int numWanted = 100;
if ( cmdObj["num"].isNumber() )
numWanted = cmdObj["num"].numberInt();
@@ -1678,35 +1692,35 @@ namespace mongo {
GeoSearch gs( g , n , numWanted , filter , maxDistance , type);
- if ( cmdObj["start"].type() == String){
+ if ( cmdObj["start"].type() == String) {
GeoHash start ((string) cmdObj["start"].valuestr());
gs._start = start;
}
-
+
gs.exec();
double distanceMultiplier = 1;
if ( cmdObj["distanceMultiplier"].isNumber() )
distanceMultiplier = cmdObj["distanceMultiplier"].number();
-
+
double totalDistance = 0;
BSONObjBuilder arr( result.subarrayStart( "results" ) );
int x = 0;
- for ( GeoHopper::Holder::iterator i=gs._hopper->_points.begin(); i!=gs._hopper->_points.end(); i++ ){
+ for ( GeoHopper::Holder::iterator i=gs._hopper->_points.begin(); i!=gs._hopper->_points.end(); i++ ) {
const GeoPoint& p = *i;
-
+
double dis = distanceMultiplier * p._distance;
totalDistance += dis;
-
+
BSONObjBuilder bb( arr.subobjStart( BSONObjBuilder::numStr( x++ ) ) );
bb.append( "dis" , dis );
bb.append( "obj" , p._o );
bb.done();
}
arr.done();
-
+
BSONObjBuilder stats( result.subobjStart( "stats" ) );
stats.append( "time" , cc().curop()->elapsedMillis() );
stats.appendNumber( "btreelocs" , gs._nscanned );
@@ -1715,23 +1729,23 @@ namespace mongo {
stats.append( "avgDistance" , totalDistance / x );
stats.append( "maxDistance" , gs._hopper->farthest() );
stats.done();
-
+
return true;
}
-
+
} geo2dFindNearCmd;
class GeoWalkCmd : public Command {
public:
- GeoWalkCmd() : Command( "geoWalk" ){}
- virtual LockType locktype() const { return READ; }
+ GeoWalkCmd() : Command( "geoWalk" ) {}
+ virtual LockType locktype() const { return READ; }
bool slaveOk() const { return true; }
bool slaveOverrideOk() { return true; }
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl){
+ bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
string ns = dbname + "." + cmdObj.firstElement().valuestr();
NamespaceDetails * d = nsdetails( ns.c_str() );
- if ( ! d ){
+ if ( ! d ) {
errmsg = "can't find ns";
return false;
}
@@ -1739,10 +1753,10 @@ namespace mongo {
int geoIdx = -1;
{
NamespaceDetails::IndexIterator ii = d->ii();
- while ( ii.more() ){
+ while ( ii.more() ) {
IndexDetails& id = ii.next();
- if ( id.getSpec().getTypeName() == GEO2DNAME ){
- if ( geoIdx >= 0 ){
+ if ( id.getSpec().getTypeName() == GEO2DNAME ) {
+ if ( geoIdx >= 0 ) {
errmsg = "2 geo indexes :(";
return false;
}
@@ -1750,12 +1764,12 @@ namespace mongo {
}
}
}
-
- if ( geoIdx < 0 ){
+
+ if ( geoIdx < 0 ) {
errmsg = "no geo index :(";
return false;
}
-
+
IndexDetails& id = d->idx( geoIdx );
Geo2dType * g = (Geo2dType*)id.getSpec().getType();
@@ -1764,12 +1778,12 @@ namespace mongo {
int max = 100000;
BtreeCursor c( d , geoIdx , id , BSONObj() , BSONObj() , true , 1 );
- while ( c.ok() && max-- ){
+ while ( c.ok() && max-- ) {
GeoHash h( c.currKey().firstElement() );
int len;
cout << "\t" << h.toString()
- << "\t" << c.current()[g->_geo]
- << "\t" << hex << h.getHash()
+ << "\t" << c.current()[g->_geo]
+ << "\t" << hex << h.getHash()
<< "\t" << hex << ((long long*)c.currKey().firstElement().binData(len))[0]
<< "\t" << c.current()["_id"]
<< endl;
@@ -1778,7 +1792,7 @@ namespace mongo {
return true;
}
-
+
} geoWalkCmd;
}
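
The stats block above scales each raw hopper distance by distanceMultiplier, then reports the average and farthest distance over the returned points. A minimal standalone sketch of that arithmetic; the sample distances and the multiplier value are hypothetical (in practice the client supplies the multiplier, e.g. an earth radius when the index stores radians):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Mirrors the result loop in Geo2dFindNearCmd: scale each raw distance,
    // accumulate the total, and report avg/max over the returned points.
    int main() {
        std::vector<double> raw;                    // hypothetical hopper output
        raw.push_back(0.001); raw.push_back(0.004); raw.push_back(0.002);
        double multiplier = 6371.0;                 // e.g. earth radius in km

        double total = 0, maxDis = 0;
        for (size_t i = 0; i < raw.size(); i++) {
            double dis = multiplier * raw[i];
            total += dis;
            maxDis = std::max(maxDis, dis);
        }
        std::printf("avgDistance=%f maxDistance=%f\n",
                    total / raw.size(), maxDis);    // assumes >= 1 result
        return 0;
    }
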
diff --git a/db/geo/core.h b/db/geo/core.h
index e0e39c13b1c..602b513a77e 100644
--- a/db/geo/core.h
+++ b/db/geo/core.h
@@ -31,23 +31,23 @@ namespace mongo {
class GeoBitSets {
public:
- GeoBitSets(){
- for ( int i=0; i<32; i++ ){
+ GeoBitSets() {
+ for ( int i=0; i<32; i++ ) {
masks32[i] = ( 1 << ( 31 - i ) );
}
- for ( int i=0; i<64; i++ ){
+ for ( int i=0; i<64; i++ ) {
masks64[i] = ( 1LL << ( 63 - i ) );
}
-
- for ( unsigned i=0; i<16; i++ ){
+
+ for ( unsigned i=0; i<16; i++ ) {
unsigned fixed = 0;
- for ( int j=0; j<4; j++ ){
+ for ( int j=0; j<4; j++ ) {
if ( i & ( 1 << j ) )
fixed |= ( 1 << ( j * 2 ) );
}
hashedToNormal[fixed] = i;
}
-
+
}
int masks32[32];
long long masks64[64];
@@ -56,24 +56,24 @@ namespace mongo {
};
extern GeoBitSets geoBitSets;
-
+
class GeoHash {
public:
GeoHash()
- : _hash(0),_bits(0){
+ : _hash(0),_bits(0) {
}
- explicit GeoHash( const char * hash ){
+ explicit GeoHash( const char * hash ) {
init( hash );
}
- explicit GeoHash( const string& hash ){
+ explicit GeoHash( const string& hash ) {
init( hash );
}
- explicit GeoHash( const BSONElement& e , unsigned bits=32 ){
+ explicit GeoHash( const BSONElement& e , unsigned bits=32 ) {
_bits = bits;
- if ( e.type() == BinData ){
+ if ( e.type() == BinData ) {
int len = 0;
_copy( (char*)&_hash , e.binData( len ) );
assert( len == 8 );
@@ -85,26 +85,26 @@ namespace mongo {
}
_fix();
}
-
- GeoHash( unsigned x , unsigned y , unsigned bits=32){
+
+ GeoHash( unsigned x , unsigned y , unsigned bits=32) {
init( x , y , bits );
}
- GeoHash( const GeoHash& old ){
+ GeoHash( const GeoHash& old ) {
_hash = old._hash;
_bits = old._bits;
}
GeoHash( long long hash , unsigned bits )
- : _hash( hash ) , _bits( bits ){
+ : _hash( hash ) , _bits( bits ) {
_fix();
}
- void init( unsigned x , unsigned y , unsigned bits ){
+ void init( unsigned x , unsigned y , unsigned bits ) {
assert( bits <= 32 );
_hash = 0;
_bits = bits;
- for ( unsigned i=0; i<bits; i++ ){
+ for ( unsigned i=0; i<bits; i++ ) {
if ( isBitSet( x , i ) ) _hash |= geoBitSets.masks64[i*2];
if ( isBitSet( y , i ) ) _hash |= geoBitSets.masks64[(i*2)+1];
}
@@ -114,7 +114,7 @@ namespace mongo {
x = 0;
y = 0;
char * c = (char*)(&_hash);
- for ( int i=0; i<8; i++ ){
+ for ( int i=0; i<8; i++ ) {
unsigned t = (unsigned)(c[i]) & 0x55;
y |= ( geoBitSets.hashedToNormal[t] << (4*(i)) );
@@ -126,7 +126,7 @@ namespace mongo {
void unhash_slow( unsigned& x , unsigned& y ) const {
x = 0;
y = 0;
- for ( unsigned i=0; i<_bits; i++ ){
+ for ( unsigned i=0; i<_bits; i++ ) {
if ( getBitX(i) )
x |= geoBitSets.masks32[i];
if ( getBitY(i) )
@@ -141,14 +141,14 @@ namespace mongo {
/**
         * @param bit  0 = the high-order bit
*/
- static bool isBitSet( unsigned val , unsigned bit ){
+ static bool isBitSet( unsigned val , unsigned bit ) {
return geoBitSets.masks32[bit] & val;
}
-
+
GeoHash up() const {
return GeoHash( _hash , _bits - 1 );
}
-
+
bool hasPrefix( const GeoHash& other ) const {
assert( other._bits <= _bits );
if ( other._bits == 0 )
@@ -157,9 +157,9 @@ namespace mongo {
x = x >> (64-(other._bits*2));
return x == 0;
}
-
- string toString() const {
+
+ string toString() const {
StringBuilder buf( _bits * 2 );
for ( unsigned x=0; x<_bits*2; x++ )
buf.append( _hash & geoBitSets.masks64[x] ? "1" : "0" );
@@ -172,7 +172,7 @@ namespace mongo {
return ss.str();
}
- void init( const string& s ){
+ void init( const string& s ) {
_hash = 0;
_bits = s.size() / 2;
for ( unsigned pos=0; pos<s.size(); pos++ )
@@ -180,14 +180,14 @@ namespace mongo {
setBit( pos , 1 );
}
- void setBit( unsigned pos , bool one ){
+ void setBit( unsigned pos , bool one ) {
assert( pos < _bits * 2 );
if ( one )
_hash |= geoBitSets.masks64[pos];
else if ( _hash & geoBitSets.masks64[pos] )
_hash &= ~geoBitSets.masks64[pos];
}
-
+
bool getBit( unsigned pos ) const {
return _hash & geoBitSets.masks64[pos];
}
@@ -201,7 +201,7 @@ namespace mongo {
assert( pos < 32 );
return getBit( ( pos * 2 ) + 1 );
}
-
+
BSONObj wrap() const {
BSONObjBuilder b(20);
append( b , "" );
@@ -213,20 +213,20 @@ namespace mongo {
bool constrains() const {
return _bits > 0;
}
-
- void move( int x , int y ){
+
+ void move( int x , int y ) {
assert( _bits );
_move( 0 , x );
_move( 1 , y );
}
- void _move( unsigned offset , int d ){
+ void _move( unsigned offset , int d ) {
if ( d == 0 )
return;
assert( d <= 1 && d>= -1 ); // TEMP
-
+
bool from, to;
- if ( d > 0 ){
+ if ( d > 0 ) {
from = 0;
to = 1;
}
@@ -238,34 +238,34 @@ namespace mongo {
unsigned pos = ( _bits * 2 ) - 1;
if ( offset == 0 )
pos--;
- while ( true ){
- if ( getBit(pos) == from ){
+ while ( true ) {
+ if ( getBit(pos) == from ) {
setBit( pos , to );
return;
}
- if ( pos < 2 ){
+ if ( pos < 2 ) {
// overflow
- for ( ; pos < ( _bits * 2 ) ; pos += 2 ){
+ for ( ; pos < ( _bits * 2 ) ; pos += 2 ) {
setBit( pos , from );
}
return;
}
-
+
setBit( pos , from );
pos -= 2;
}
-
+
assert(0);
}
- GeoHash& operator=(const GeoHash& h) {
+ GeoHash& operator=(const GeoHash& h) {
_hash = h._hash;
_bits = h._bits;
return *this;
}
-
- bool operator==(const GeoHash& h ){
+
+ bool operator==(const GeoHash& h ) {
return _hash == h._hash && _bits == h._bits;
}
@@ -273,7 +273,7 @@ namespace mongo {
unsigned pos = _bits * 2;
_bits += strlen(s) / 2;
assert( _bits <= 32 );
- while ( s[0] ){
+ while ( s[0] ) {
if ( s[0] == '1' )
setBit( pos , 1 );
pos++;
@@ -288,19 +288,19 @@ namespace mongo {
n+=s;
return n;
}
-
- void _fix(){
+
+ void _fix() {
static long long FULL = 0xFFFFFFFFFFFFFFFFLL;
long long mask = FULL << ( 64 - ( _bits * 2 ) );
_hash &= mask;
}
-
+
void append( BSONObjBuilder& b , const char * name ) const {
char buf[8];
_copy( buf , (char*)&_hash );
b.appendBinData( name , 8 , bdtCustom , buf );
}
-
+
long long getHash() const {
return _hash;
}
@@ -311,9 +311,9 @@ namespace mongo {
GeoHash commonPrefix( const GeoHash& other ) const {
unsigned i=0;
- for ( ; i<_bits && i<other._bits; i++ ){
+ for ( ; i<_bits && i<other._bits; i++ ) {
if ( getBitX( i ) == other.getBitX( i ) &&
- getBitY( i ) == other.getBitY( i ) )
+ getBitY( i ) == other.getBitY( i ) )
continue;
break;
}
@@ -323,7 +323,7 @@ namespace mongo {
private:
void _copy( char * dst , const char * src ) const {
- for ( unsigned a=0; a<8; a++ ){
+ for ( unsigned a=0; a<8; a++ ) {
dst[a] = src[7-a];
}
}
@@ -332,14 +332,14 @@ namespace mongo {
unsigned _bits; // bits per field, so 1 to 32
};
- inline ostream& operator<<( ostream &s, const GeoHash &h ){
+ inline ostream& operator<<( ostream &s, const GeoHash &h ) {
s << h.toString();
return s;
- }
+ }
class GeoConvert {
public:
- virtual ~GeoConvert(){}
+ virtual ~GeoConvert() {}
virtual void unhash( const GeoHash& h , double& x , double& y ) const = 0;
virtual GeoHash hash( double x , double y ) const = 0;
@@ -347,31 +347,31 @@ namespace mongo {
class Point {
public:
-
- Point( const GeoConvert * g , const GeoHash& hash ){
+
+ Point( const GeoConvert * g , const GeoHash& hash ) {
g->unhash( hash , _x , _y );
}
-
- explicit Point( const BSONElement& e ){
+
+ explicit Point( const BSONElement& e ) {
BSONObjIterator i(e.Obj());
_x = i.next().number();
_y = i.next().number();
}
- explicit Point( const BSONObj& o ){
+ explicit Point( const BSONObj& o ) {
BSONObjIterator i(o);
_x = i.next().number();
_y = i.next().number();
}
Point( double x , double y )
- : _x( x ) , _y( y ){
+ : _x( x ) , _y( y ) {
}
-
- Point() : _x(0),_y(0){
+
+ Point() : _x(0),_y(0) {
}
- GeoHash hash( const GeoConvert * g ){
+ GeoHash hash( const GeoConvert * g ) {
return g->hash( _x , _y );
}
@@ -380,12 +380,12 @@ namespace mongo {
double b = _y - p._y;
return sqrt( ( a * a ) + ( b * b ) );
}
-
+
string toString() const {
StringBuilder buf(32);
buf << "(" << _x << "," << _y << ")";
return buf.str();
-
+
}
double _x;
@@ -410,13 +410,13 @@ namespace mongo {
double sin_y1(sin(p1._y)), cos_y1(cos(p1._y));
double sin_x2(sin(p2._x)), cos_x2(cos(p2._x));
double sin_y2(sin(p2._y)), cos_y2(cos(p2._y));
-
- double cross_prod =
+
+ double cross_prod =
(cos_y1*cos_x1 * cos_y2*cos_x2) +
(cos_y1*sin_x1 * cos_y2*sin_x2) +
(sin_y1 * sin_y2);
-
- if (cross_prod >= 1 || cross_prod <= -1){
+
+ if (cross_prod >= 1 || cross_prod <= -1) {
// fun with floats
assert( fabs(cross_prod)-1 < 1e-6 );
return cross_prod > 0 ? 0 : M_PI;
@@ -428,8 +428,8 @@ namespace mongo {
// note: return is still in radians as that can be multiplied by radius to get arc length
inline double spheredist_deg( const Point& p1, const Point& p2 ) {
return spheredist_rad(
- Point( deg2rad(p1._x), deg2rad(p1._y) ),
- Point( deg2rad(p2._x), deg2rad(p2._y) )
+ Point( deg2rad(p1._x), deg2rad(p1._y) ),
+ Point( deg2rad(p2._x), deg2rad(p2._y) )
);
}
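
GeoHash above packs an (x, y) pair into one 64-bit value by interleaving bits from the high end: bit i of x lands at hash bit 2*i and bit i of y at 2*i+1, which is what makes prefix comparisons (hasPrefix, commonPrefix) meaningful as quadtree containment tests. A self-contained sketch of the same interleaving using plain shifts in place of the precomputed geoBitSets masks:

    #include <cassert>
    #include <cstdio>

    // Interleave: bit i of x (counted from the most significant end) goes to
    // hash bit 2*i, bit i of y to 2*i+1, matching GeoHash::init above.
    long long interleave(unsigned x, unsigned y, unsigned bits) {
        long long hash = 0;
        for (unsigned i = 0; i < bits; i++) {
            if (x & (1u << (31 - i))) hash |= 1LL << (63 - 2 * i);
            if (y & (1u << (31 - i))) hash |= 1LL << (63 - (2 * i + 1));
        }
        return hash;
    }

    // Inverse, matching unhash_slow above.
    void deinterleave(long long hash, unsigned bits, unsigned& x, unsigned& y) {
        x = y = 0;
        for (unsigned i = 0; i < bits; i++) {
            if (hash & (1LL << (63 - 2 * i)))       x |= 1u << (31 - i);
            if (hash & (1LL << (63 - (2 * i + 1)))) y |= 1u << (31 - i);
        }
    }

    int main() {
        unsigned x = 0xDEADBEEFu, y = 0x12345678u, rx, ry;
        long long h = interleave(x, y, 32);
        deinterleave(h, 32, rx, ry);
        assert(rx == x && ry == y);   // lossless round trip at full precision
        std::printf("hash=%llx\n", (unsigned long long)h);
        return 0;
    }
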
diff --git a/db/geo/haystack.cpp b/db/geo/haystack.cpp
index 092258fcb3b..7f278cafa23 100644
--- a/db/geo/haystack.cpp
+++ b/db/geo/haystack.cpp
@@ -38,29 +38,29 @@
* should not be used for finding the closest restaurants that are open
*/
namespace mongo {
-
+
string GEOSEARCHNAME = "geoHaystack";
-
+
class GeoHaystackSearchHopper {
public:
GeoHaystackSearchHopper( const BSONObj& n , double maxDistance , unsigned limit , const string& geoField )
- : _near( n ) , _maxDistance( maxDistance ) , _limit( limit ) , _geoField(geoField){
-
+ : _near( n ) , _maxDistance( maxDistance ) , _limit( limit ) , _geoField(geoField) {
+
}
-
- void got( const DiskLoc& loc ){
+
+ void got( const DiskLoc& loc ) {
Point p( loc.obj().getFieldDotted( _geoField ) );
if ( _near.distance( p ) > _maxDistance )
return;
_locs.push_back( loc );
}
- int append( BSONArrayBuilder& b ){
+ int append( BSONArrayBuilder& b ) {
for ( unsigned i=0; i<_locs.size() && i<_limit; i++ )
b.append( _locs[i].obj() );
return _locs.size();
}
-
+
Point _near;
double _maxDistance;
unsigned _limit;
@@ -70,22 +70,22 @@ namespace mongo {
};
class GeoHaystackSearchIndex : public IndexType {
-
+
public:
-
+
GeoHaystackSearchIndex( const IndexPlugin* plugin , const IndexSpec* spec )
- : IndexType( plugin , spec ){
-
+ : IndexType( plugin , spec ) {
+
BSONElement e = spec->info["bucketSize"];
uassert( 13321 , "need bucketSize" , e.isNumber() );
_bucketSize = e.numberDouble();
-
+
BSONObjBuilder orderBuilder;
-
+
BSONObjIterator i( spec->keyPattern );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
- if ( e.type() == String && GEOSEARCHNAME == e.valuestr() ){
+ if ( e.type() == String && GEOSEARCHNAME == e.valuestr() ) {
uassert( 13314 , "can't have 2 geo fields" , _geo.size() == 0 );
uassert( 13315 , "2d has to be first in index" , _other.size() == 0 );
_geo = e.fieldName();
@@ -95,13 +95,13 @@ namespace mongo {
}
orderBuilder.append( "" , 1 );
}
-
+
uassert( 13316 , "no geo field specified" , _geo.size() );
uassert( 13317 , "no other fields specified" , _other.size() );
uassert( 13326 , "quadrant search can only have 1 other field for now" , _other.size() == 1 );
_order = orderBuilder.obj();
}
-
+
int hash( const BSONElement& e ) const {
uassert( 13322 , "not a number" , e.isNumber() );
return hash( e.numberDouble() );
@@ -126,18 +126,18 @@ namespace mongo {
buf.appendNull( "" );
else
buf.appendAs( e , "" );
-
+
BSONObj key = buf.obj();
GEOQUADDEBUG( obj << "\n\t" << root << "\n\t" << key );
keys.insert( key );
}
void getKeys( const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const {
-
+
BSONElement loc = obj.getFieldDotted( _geo );
if ( loc.eoo() )
return;
-
+
uassert( 13323 , "latlng not an array" , loc.isABSONObj() );
string root;
{
@@ -146,34 +146,34 @@ namespace mongo {
BSONElement y = i.next();
root = makeString( hash(x) , hash(y) );
}
-
-
+
+
assert( _other.size() == 1 );
-
+
BSONElementSet all;
obj.getFieldsDotted( _other[0] , all );
-
- if ( all.size() == 0 ){
+
+ if ( all.size() == 0 ) {
_add( obj , root , BSONElement() , keys );
}
else {
- for ( BSONElementSet::iterator i=all.begin(); i!=all.end(); ++i ){
+ for ( BSONElementSet::iterator i=all.begin(); i!=all.end(); ++i ) {
_add( obj , root , *i , keys );
}
}
-
+
}
-
+
shared_ptr<Cursor> newCursor( const BSONObj& query , const BSONObj& order , int numWanted ) const {
shared_ptr<Cursor> c;
assert(0);
return c;
}
-
- void searchCommand( NamespaceDetails* nsd , int idxNo ,
- const BSONObj& n /*near*/ , double maxDistance , const BSONObj& search ,
- BSONObjBuilder& result , unsigned limit ){
-
+
+ void searchCommand( NamespaceDetails* nsd , int idxNo ,
+ const BSONObj& n /*near*/ , double maxDistance , const BSONObj& search ,
+ BSONObjBuilder& result , unsigned limit ) {
+
Timer t;
log(1) << "SEARCH near:" << n << " maxDistance:" << maxDistance << " search: " << search << endl;
@@ -184,33 +184,33 @@ namespace mongo {
y = hash( i.next() );
}
int scale = (int)ceil( maxDistance / _bucketSize );
-
+
GeoHaystackSearchHopper hopper(n,maxDistance,limit,_geo);
-
+
long long btreeMatches = 0;
- for ( int a=-scale; a<=scale; a++ ){
- for ( int b=-scale; b<=scale; b++ ){
+ for ( int a=-scale; a<=scale; a++ ) {
+ for ( int b=-scale; b<=scale; b++ ) {
BSONObjBuilder bb;
bb.append( "" , makeString( x + a , y + b ) );
- for ( unsigned i=0; i<_other.size(); i++ ){
+ for ( unsigned i=0; i<_other.size(); i++ ) {
BSONElement e = search.getFieldDotted( _other[i] );
if ( e.eoo() )
bb.appendNull( "" );
else
bb.appendAs( e , "" );
}
-
+
BSONObj key = bb.obj();
-
+
GEOQUADDEBUG( "KEY: " << key );
-
+
set<DiskLoc> thisPass;
BtreeCursor cursor( nsd , idxNo , *getDetails() , key , key , true , 1 );
- while ( cursor.ok() ){
+ while ( cursor.ok() ) {
pair<set<DiskLoc>::iterator, bool> p = thisPass.insert( cursor.currLoc() );
- if ( p.second ){
+ if ( p.second ) {
hopper.got( cursor.currLoc() );
GEOQUADDEBUG( "\t" << cursor.current() );
btreeMatches++;
@@ -221,10 +221,10 @@ namespace mongo {
}
- BSONArrayBuilder arr( result.subarrayStart( "results" ) );
+ BSONArrayBuilder arr( result.subarrayStart( "results" ) );
int num = hopper.append( arr );
arr.done();
-
+
{
BSONObjBuilder b( result.subobjStart( "stats" ) );
b.append( "time" , t.millis() );
@@ -237,20 +237,20 @@ namespace mongo {
const IndexDetails* getDetails() const {
return _spec->getDetails();
}
-
+
string _geo;
vector<string> _other;
-
+
BSONObj _order;
double _bucketSize;
};
-
+
class GeoHaystackSearchIndexPlugin : public IndexPlugin {
public:
- GeoHaystackSearchIndexPlugin() : IndexPlugin( GEOSEARCHNAME ){
+ GeoHaystackSearchIndexPlugin() : IndexPlugin( GEOSEARCHNAME ) {
}
-
+
virtual IndexType* generate( const IndexSpec* spec ) const {
return new GeoHaystackSearchIndex( this , spec );
}
@@ -259,38 +259,38 @@ namespace mongo {
class GeoHaystackSearchCommand : public Command {
- public:
- GeoHaystackSearchCommand() : Command( "geoSearch" ){}
- virtual LockType locktype() const { return READ; }
+ public:
+ GeoHaystackSearchCommand() : Command( "geoSearch" ) {}
+ virtual LockType locktype() const { return READ; }
bool slaveOk() const { return true; }
bool slaveOverrideOk() const { return true; }
- bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl){
-
+ bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+
string ns = dbname + "." + cmdObj.firstElement().valuestr();
-
+
NamespaceDetails * d = nsdetails( ns.c_str() );
- if ( ! d ){
+ if ( ! d ) {
errmsg = "can't find ns";
return false;
}
-
+
vector<int> idxs;
d->findIndexByType( GEOSEARCHNAME , idxs );
- if ( idxs.size() == 0 ){
+ if ( idxs.size() == 0 ) {
errmsg = "no geoSearch index";
return false;
}
- if ( idxs.size() > 1 ){
+ if ( idxs.size() > 1 ) {
errmsg = "more than 1 geosearch index";
return false;
}
-
+
int idxNum = idxs[0];
-
+
IndexDetails& id = d->idx( idxNum );
GeoHaystackSearchIndex * si = (GeoHaystackSearchIndex*)id.getSpec().getType();
- assert( &id == si->getDetails() );
-
+ assert( &id == si->getDetails() );
+
BSONElement n = cmdObj["near"];
BSONElement maxDistance = cmdObj["maxDistance"];
BSONElement search = cmdObj["search"];
@@ -298,20 +298,20 @@ namespace mongo {
uassert( 13318 , "near needs to be an array" , n.isABSONObj() );
uassert( 13319 , "maxDistance needs a number" , maxDistance.isNumber() );
uassert( 13320 , "search needs to be an object" , search.type() == Object );
-
+
unsigned limit = 50;
if ( cmdObj["limit"].isNumber() )
limit = (unsigned)cmdObj["limit"].numberInt();
si->searchCommand( d , idxNum , n.Obj() , maxDistance.numberDouble() , search.Obj() , result , limit );
-
+
return 1;
}
-
- } nameSearchCommand;
+
+ } nameSearchCommand;
+
+
-
-
}
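
searchCommand above probes a (2*scale+1)^2 grid of btree cells around the query point, where scale = ceil(maxDistance / bucketSize). A sketch of that cell enumeration; the truncation in cell() and the "_" key separator are assumptions standing in for the index's hash() and makeString():

    #include <cmath>
    #include <cstdio>
    #include <sstream>
    #include <string>
    #include <vector>

    // Cell id for one coordinate, derived from the "bucketSize" index option
    // (the exact rounding used by the real hash() is an assumption here).
    int cell(double d, double bucketSize) { return (int)(d / bucketSize); }

    // Enumerate the keys the search probes: every cell within scale steps
    // of the query point's cell, in both dimensions.
    std::vector<std::string> candidateCells(double qx, double qy,
                                            double maxDistance, double bucketSize) {
        int x = cell(qx, bucketSize), y = cell(qy, bucketSize);
        int scale = (int)std::ceil(maxDistance / bucketSize);
        std::vector<std::string> keys;
        for (int a = -scale; a <= scale; a++) {
            for (int b = -scale; b <= scale; b++) {
                std::ostringstream ss;
                ss << (x + a) << "_" << (y + b);  // stand-in for makeString()
                keys.push_back(ss.str());
            }
        }
        return keys;
    }

    int main() {
        std::vector<std::string> keys = candidateCells(33.5, -118.2, 2.0, 1.0);
        std::printf("%u cells probed\n", (unsigned)keys.size());  // (2*2+1)^2 = 25
        return 0;
    }

Distance filtering still happens per document in the hopper; the grid only bounds which btree ranges get scanned.
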
diff --git a/db/helpers/dblogger.h b/db/helpers/dblogger.h
index 572169b0239..4d6ee6d78c4 100644
--- a/db/helpers/dblogger.h
+++ b/db/helpers/dblogger.h
@@ -18,14 +18,14 @@
#pragma once
-namespace mongo {
+namespace mongo {
/** helper to log (and read log) of a capped collection in the database */
class DBLogger {
bool _inited;
public:
const string _ns;
- DBLogger(string ns) : _inited(false), _ns(ns){ }
+ DBLogger(string ns) : _inited(false), _ns(ns) { }
};
}
diff --git a/db/index.cpp b/db/index.cpp
index 7326430096d..c696e278098 100644
--- a/db/index.cpp
+++ b/db/index.cpp
@@ -26,7 +26,7 @@
namespace mongo {
- int removeFromSysIndexes(const char *ns, const char *idxName) {
+ int removeFromSysIndexes(const char *ns, const char *idxName) {
string system_indexes = cc().database()->name + ".system.indexes";
BSONObjBuilder b;
b.append("ns", ns);
@@ -35,20 +35,20 @@ namespace mongo {
return (int) deleteObjects(system_indexes.c_str(), cond, false, false, true);
}
- /* this is just an attempt to clean up old orphaned stuff on a delete all indexes
- call. repair database is the clean solution, but this gives one a lighter weight
+ /* this is just an attempt to clean up old orphaned stuff on a delete all indexes
+ call. repair database is the clean solution, but this gives one a lighter weight
partial option. see dropIndexes()
*/
- void assureSysIndexesEmptied(const char *ns, IndexDetails *idIndex) {
+ void assureSysIndexesEmptied(const char *ns, IndexDetails *idIndex) {
string system_indexes = cc().database()->name + ".system.indexes";
BSONObjBuilder b;
b.append("ns", ns);
- if( idIndex ) {
+ if( idIndex ) {
b.append("name", BSON( "$ne" << idIndex->indexName().c_str() ));
}
BSONObj cond = b.done();
int n = (int) deleteObjects(system_indexes.c_str(), cond, false, false, true);
- if( n ) {
+ if( n ) {
log() << "info: assureSysIndexesEmptied cleaned up " << n << " entries" << endl;
}
}
@@ -56,7 +56,7 @@ namespace mongo {
int IndexDetails::keyPatternOffset( const string& key ) const {
BSONObjIterator i( keyPattern() );
int n = 0;
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
if ( key == e.fieldName() )
return n;
@@ -77,18 +77,18 @@ namespace mongo {
string ns = indexNamespace(); // e.g. foo.coll.$ts_1
try {
- string pns = parentNS(); // note we need a copy, as parentNS() won't work after the drop() below
-
+ string pns = parentNS(); // note we need a copy, as parentNS() won't work after the drop() below
+
// clean up parent namespace index cache
NamespaceDetailsTransient::get_w( pns.c_str() ).deletedIndex();
string name = indexName();
/* important to catch exception here so we can finish cleanup below. */
- try {
+ try {
dropNS(ns.c_str());
}
- catch(DBException& ) {
+ catch(DBException& ) {
log(2) << "IndexDetails::kill(): couldn't drop ns " << ns << endl;
}
head.setInvalid();
@@ -97,12 +97,13 @@ namespace mongo {
// clean up in system.indexes. we do this last on purpose.
int n = removeFromSysIndexes(pns.c_str(), name.c_str());
wassert( n == 1 );
-
- } catch ( DBException &e ) {
+
+ }
+ catch ( DBException &e ) {
log() << "exception in kill_idx: " << e << ", ns: " << ns << endl;
}
}
-
+
void IndexDetails::getKeysFromObject( const BSONObj& obj, BSONObjSetDefaultOrder& keys) const {
getSpec().getKeys( obj, keys );
}
@@ -123,7 +124,7 @@ namespace mongo {
}
}
- void getIndexChanges(vector<IndexChanges>& v, NamespaceDetails& d, BSONObj newObj, BSONObj oldObj, bool &changedId) {
+ void getIndexChanges(vector<IndexChanges>& v, NamespaceDetails& d, BSONObj newObj, BSONObj oldObj, bool &changedId) {
int z = d.nIndexesBeingBuilt();
v.resize(z);
NamespaceDetails::IndexIterator i = d.ii();
@@ -133,7 +134,7 @@ namespace mongo {
IndexChanges& ch = v[i];
idx.getKeysFromObject(oldObj, ch.oldkeys);
idx.getKeysFromObject(newObj, ch.newkeys);
- if( ch.newkeys.size() > 1 )
+ if( ch.newkeys.size() > 1 )
d.setIndexIsMultikey(i);
setDifference(ch.oldkeys, ch.newkeys, ch.removed);
setDifference(ch.newkeys, ch.oldkeys, ch.added);
@@ -151,12 +152,12 @@ namespace mongo {
}
}
- // should be { <something> : <simpletype[1|-1]>, .keyp.. }
- static bool validKeyPattern(BSONObj kp) {
+ // should be { <something> : <simpletype[1|-1]>, .keyp.. }
+ static bool validKeyPattern(BSONObj kp) {
BSONObjIterator i(kp);
- while( i.moreWithEOO() ) {
+ while( i.moreWithEOO() ) {
BSONElement e = i.next();
- if( e.type() == Object || e.type() == Array )
+ if( e.type() == Object || e.type() == Array )
return false;
}
return true;
@@ -180,14 +181,14 @@ namespace mongo {
sourceCollection = 0;
// logical name of the index. todo: get rid of the name, we don't need it!
- const char *name = io.getStringField("name");
+ const char *name = io.getStringField("name");
uassert(12523, "no index name specified", *name);
// the collection for which we are building an index
- sourceNS = io.getStringField("ns");
+ sourceNS = io.getStringField("ns");
uassert(10096, "invalid ns to index", sourceNS.find( '.' ) != string::npos);
- uassert(10097, "bad table to index name on add index attempt",
- cc().database()->name == nsToDatabase(sourceNS.c_str()));
+ uassert(10097, "bad table to index name on add index attempt",
+ cc().database()->name == nsToDatabase(sourceNS.c_str()));
BSONObj key = io.getObjectField("key");
@@ -199,7 +200,7 @@ namespace mongo {
if ( sourceNS.empty() || key.isEmpty() ) {
log(2) << "bad add index attempt name:" << (name?name:"") << "\n ns:" <<
- sourceNS << "\n idxobj:" << io.toString() << endl;
+ sourceNS << "\n idxobj:" << io.toString() << endl;
string s = "bad add index attempt " + sourceNS + " key:" + key.toString();
uasserted(12504, s);
}
@@ -234,13 +235,13 @@ namespace mongo {
uasserted(12505,s);
}
- /* we can't build a new index for the ns if a build is already in progress in the background -
+ /* we can't build a new index for the ns if a build is already in progress in the background -
EVEN IF this is a foreground build.
*/
- uassert(12588, "cannot add index with a background operation in progress",
- !BackgroundOperation::inProgForNs(sourceNS.c_str()));
+ uassert(12588, "cannot add index with a background operation in progress",
+ !BackgroundOperation::inProgForNs(sourceNS.c_str()));
- /* this is because we want key patterns like { _id : 1 } and { _id : <someobjid> } to
+ /* this is because we want key patterns like { _id : 1 } and { _id : <someobjid> } to
all be treated as the same pattern.
*/
if ( IndexDetails::isIdIndexPattern(key) ) {
@@ -249,21 +250,21 @@ namespace mongo {
return false;
}
}
- else {
- /* is buildIndexes:false set for this replica set member?
+ else {
+ /* is buildIndexes:false set for this replica set member?
if so we don't build any indexes except _id
*/
- if( theReplSet && !theReplSet->buildIndexes() )
+ if( theReplSet && !theReplSet->buildIndexes() )
return false;
}
-
+
string pluginName = IndexPlugin::findPluginName( key );
IndexPlugin * plugin = pluginName.size() ? IndexPlugin::get( pluginName ) : 0;
-
- if ( plugin ){
+
+ if ( plugin ) {
fixedIndexObject = plugin->adjustIndexSpec( io );
}
- else if ( io["v"].eoo() ) {
+ else if ( io["v"].eoo() ) {
// add "v" if it doesn't exist
// if it does - leave whatever value was there
// this is for testing and replication
@@ -277,12 +278,12 @@ namespace mongo {
}
- void IndexSpec::reset( const IndexDetails * details ){
+ void IndexSpec::reset( const IndexDetails * details ) {
_details = details;
reset( details->info );
}
- void IndexSpec::reset( const DiskLoc& loc ){
+ void IndexSpec::reset( const DiskLoc& loc ) {
info = loc.obj();
keyPattern = info["key"].embeddedObjectUserCheck();
if ( keyPattern.objsize() == 0 ) {
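
For reference, the condition assureSysIndexesEmptied deletes by is just { ns: <ns>, name: { $ne: <idIndexName> } }. A sketch of building it with the builder calls used above, assuming this tree's bson headers; the real code uses b.done() because the object is consumed immediately, while obj() is used here so the result owns its buffer:

    #include <string>
    #include "bson/bson.h"   // BSONObjBuilder and the BSON(...) macro in this tree

    mongo::BSONObj cleanupCond(const char* ns, const std::string* idIndexName) {
        mongo::BSONObjBuilder b;
        b.append("ns", ns);
        if (idIndexName)                  // spare the _id index, as above
            b.append("name", BSON("$ne" << idIndexName->c_str()));
        return b.obj();                   // obj() hands ownership to the caller
    }
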
diff --git a/db/index.h b/db/index.h
index f7c900947de..8578ed381c9 100644
--- a/db/index.h
+++ b/db/index.h
@@ -25,12 +25,12 @@
namespace mongo {
- /* Details about a particular index. There is one of these effectively for each object in
- system.namespaces (although this also includes the head pointer, which is not in that
- collection).
+ /* Details about a particular index. There is one of these effectively for each object in
+ system.namespaces (although this also includes the head pointer, which is not in that
+ collection).
** MemoryMapped Record ** (i.e., this is on disk data)
- */
+ */
class IndexDetails {
public:
/**
@@ -45,7 +45,7 @@ namespace mongo {
/* Location of index info object. Format:
{ name:"nameofindex", ns:"parentnsname", key: {keypattobject}
- [, unique: <bool>, background: <bool>]
+ [, unique: <bool>, background: <bool>]
}
This object is in the system.indexes collection. Note that since we
@@ -83,7 +83,7 @@ namespace mongo {
*/
int keyPatternOffset( const string& key ) const;
bool inKeyPattern( const string& key ) const { return keyPatternOffset( key ) >= 0; }
-
+
/* true if the specified key is in the index */
bool hasKey(const BSONObj& key);
bool wouldCreateDup(const BSONObj& key, DiskLoc self);
@@ -110,11 +110,11 @@ namespace mongo {
BSONObjIterator i(pattern);
BSONElement e = i.next();
if( strcmp(e.fieldName(), "_id") != 0 ) return false;
- return i.next().eoo();
+ return i.next().eoo();
}
-
+
/* returns true if this is the _id index. */
- bool isIdIndex() const {
+ bool isIdIndex() const {
return isIdIndexPattern( keyPattern() );
}
@@ -126,11 +126,11 @@ namespace mongo {
return io.getStringField("ns");
}
- bool unique() const {
+ bool unique() const {
BSONObj io = info.obj();
- return io["unique"].trueValue() ||
- /* temp: can we juse make unique:true always be there for _id and get rid of this? */
- isIdIndex();
+ return io["unique"].trueValue() ||
+                /* temp: can we just make unique:true always be there for _id and get rid of this? */
+ isIdIndex();
}
/* if set, when building index, if any duplicates, drop the duplicating object */
@@ -142,7 +142,7 @@ namespace mongo {
(system.indexes or system.namespaces) -- only NamespaceIndex.
*/
void kill_idx();
-
+
const IndexSpec& getSpec() const;
string toString() const {
@@ -150,13 +150,13 @@ namespace mongo {
}
};
- struct IndexChanges/*on an update*/ {
+ struct IndexChanges { /*on an update*/
BSONObjSetDefaultOrder oldkeys;
BSONObjSetDefaultOrder newkeys;
vector<BSONObj*> removed; // these keys were removed as part of the change
vector<BSONObj*> added; // these keys were added as part of the change
- /** @curObjLoc - the object we want to add's location. if it is already in the
+ /** @curObjLoc - the object we want to add's location. if it is already in the
index, that is allowed here (for bg indexing case).
*/
void dupCheck(IndexDetails& idx, DiskLoc curObjLoc) {
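
IndexChanges is filled by diffing the key sets generated from the old and new versions of a document: keys only in the old set get removed from the btree, keys only in the new set get inserted, and keys in both are left alone. A sketch of that diff with std::string standing in for BSONObj keys (the real code uses setDifference over BSONObjSetDefaultOrder):

    #include <algorithm>
    #include <cstdio>
    #include <iterator>
    #include <set>
    #include <string>
    #include <vector>

    int main() {
        std::set<std::string> oldkeys, newkeys;
        oldkeys.insert("apple"); oldkeys.insert("pear");   // keys before update
        newkeys.insert("pear");  newkeys.insert("plum");   // keys after update

        std::vector<std::string> removed, added;
        std::set_difference(oldkeys.begin(), oldkeys.end(),
                            newkeys.begin(), newkeys.end(),
                            std::back_inserter(removed));  // old - new
        std::set_difference(newkeys.begin(), newkeys.end(),
                            oldkeys.begin(), oldkeys.end(),
                            std::back_inserter(added));    // new - old

        std::printf("removed=%s added=%s\n",
                    removed[0].c_str(), added[0].c_str()); // removed=apple added=plum
        return 0;
    }
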
diff --git a/db/indexkey.cpp b/db/indexkey.cpp
index a35f78b5645..34f30fa45ab 100644
--- a/db/indexkey.cpp
+++ b/db/indexkey.cpp
@@ -28,69 +28,69 @@ namespace mongo {
map<string,IndexPlugin*> * IndexPlugin::_plugins;
IndexType::IndexType( const IndexPlugin * plugin , const IndexSpec * spec )
- : _plugin( plugin ) , _spec( spec ){
-
+ : _plugin( plugin ) , _spec( spec ) {
+
}
- IndexType::~IndexType(){
+ IndexType::~IndexType() {
}
-
- const BSONObj& IndexType::keyPattern() const {
- return _spec->keyPattern;
+
+ const BSONObj& IndexType::keyPattern() const {
+ return _spec->keyPattern;
}
IndexPlugin::IndexPlugin( const string& name )
- : _name( name ){
+ : _name( name ) {
if ( ! _plugins )
_plugins = new map<string,IndexPlugin*>();
(*_plugins)[name] = this;
}
- string IndexPlugin::findPluginName( const BSONObj& keyPattern ){
+ string IndexPlugin::findPluginName( const BSONObj& keyPattern ) {
string pluginName = "";
-
+
BSONObjIterator i( keyPattern );
-
+
while( i.more() ) {
BSONElement e = i.next();
if ( e.type() != String )
continue;
-
+
uassert( 13007 , "can only have 1 index plugin / bad index key pattern" , pluginName.size() == 0 || pluginName == e.String() );
pluginName = e.String();
}
-
+
return pluginName;
}
-
+
int IndexType::compare( const BSONObj& l , const BSONObj& r ) const {
return l.woCompare( r , _spec->keyPattern );
}
- void IndexSpec::_init(){
+ void IndexSpec::_init() {
assert( keyPattern.objsize() );
// some basics
_nFields = keyPattern.nFields();
_sparse = info["sparse"].trueValue();
uassert( 13529 , "sparse only works for single field keys" , ! _sparse || _nFields );
-
-
+
+
{
- // build _nullKey
-
+ // build _nullKey
+
BSONObjBuilder b;
BSONObjIterator i( keyPattern );
-
+
while( i.more() ) {
BSONElement e = i.next();
_fieldNames.push_back( e.fieldName() );
_fixed.push_back( BSONElement() );
- b.appendNull( "" );
+ b.appendNull( "" );
}
_nullKey = b.obj();
}
-
+
{
// _nullElt
BSONObjBuilder b;
@@ -99,26 +99,26 @@ namespace mongo {
_nullElt = _nullObj.firstElement();
}
- {
+ {
// handle plugins
- string pluginName = IndexPlugin::findPluginName( keyPattern );
- if ( pluginName.size() ){
+ string pluginName = IndexPlugin::findPluginName( keyPattern );
+ if ( pluginName.size() ) {
IndexPlugin * plugin = IndexPlugin::get( pluginName );
- if ( ! plugin ){
+ if ( ! plugin ) {
log() << "warning: can't find plugin [" << pluginName << "]" << endl;
}
else {
- _indexType.reset( plugin->generate( this ) );
+ _indexType.reset( plugin->generate( this ) );
}
}
}
-
+
_finishedInit = true;
}
-
+
void IndexSpec::getKeys( const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const {
- if ( _indexType.get() ){
+ if ( _indexType.get() ) {
_indexType->getKeys( obj , keys );
return;
}
@@ -139,12 +139,12 @@ namespace mongo {
continue;
BSONElement e = obj.getFieldDottedOrArray( fieldNames[ i ] );
-
- if ( e.eoo() ){
+
+ if ( e.eoo() ) {
e = _nullElt; // no matching field
numNotFound++;
}
-
+
if ( e.type() != Array )
fieldNames[ i ] = ""; // no matching field or non-array match
@@ -157,27 +157,27 @@ namespace mongo {
}
// enforce single array path here
- if ( e.type() == Array && e.rawdata() != arrElt.rawdata() ){
+ if ( e.type() == Array && e.rawdata() != arrElt.rawdata() ) {
stringstream ss;
ss << "cannot index parallel arrays [" << e.fieldName() << "] [" << arrElt.fieldName() << "]";
uasserted( 10088 , ss.str() );
}
}
-
+
bool allFound = true; // have we found elements for all field names in the key spec?
- for( vector<const char*>::const_iterator i = fieldNames.begin(); i != fieldNames.end(); ++i ){
- if ( **i != '\0' ){
+ for( vector<const char*>::const_iterator i = fieldNames.begin(); i != fieldNames.end(); ++i ) {
+ if ( **i != '\0' ) {
allFound = false;
break;
}
}
- if ( _sparse && numNotFound == _nFields ){
+ if ( _sparse && numNotFound == _nFields ) {
// we didn't find any fields
// so we're not going to index this document
return;
}
-
+
bool insertArrayNull = false;
if ( allFound ) {
@@ -187,11 +187,11 @@ namespace mongo {
for( vector< BSONElement >::iterator i = fixed.begin(); i != fixed.end(); ++i )
b.appendAs( *i, "" );
keys.insert( b.obj() );
- }
+ }
else {
// terminal array element to expand, so generate all keys
BSONObjIterator i( arrElt.embeddedObject() );
- if ( i.more() ){
+ if ( i.more() ) {
while( i.more() ) {
BSONObjBuilder b(_sizeTracker);
for( unsigned j = 0; j < fixed.size(); ++j ) {
@@ -203,18 +203,19 @@ namespace mongo {
keys.insert( b.obj() );
}
}
- else if ( fixed.size() > 1 ){
+ else if ( fixed.size() > 1 ) {
insertArrayNull = true;
}
}
- } else {
+ }
+ else {
// nonterminal array element to expand, so recurse
assert( !arrElt.eoo() );
BSONObjIterator i( arrElt.embeddedObject() );
- if ( i.more() ){
+ if ( i.more() ) {
while( i.more() ) {
BSONElement e = i.next();
- if ( e.type() == Object ){
+ if ( e.type() == Object ) {
_getKeys( fieldNames, fixed, e.embeddedObject(), keys );
}
}
@@ -223,12 +224,12 @@ namespace mongo {
insertArrayNull = true;
}
}
-
+
if ( insertArrayNull ) {
// x : [] - need to insert undefined
BSONObjBuilder b(_sizeTracker);
for( unsigned j = 0; j < fixed.size(); ++j ) {
- if ( j == arrIdx ){
+ if ( j == arrIdx ) {
b.appendUndefined( "" );
}
else {
@@ -243,12 +244,12 @@ namespace mongo {
}
}
- bool anyElementNamesMatch( const BSONObj& a , const BSONObj& b ){
+ bool anyElementNamesMatch( const BSONObj& a , const BSONObj& b ) {
BSONObjIterator x(a);
- while ( x.more() ){
+ while ( x.more() ) {
BSONElement e = x.next();
BSONObjIterator y(b);
- while ( y.more() ){
+ while ( y.more() ) {
BSONElement f = y.next();
FieldCompareResult res = compareDottedFieldNames( e.fieldName() , f.fieldName() );
if ( res == SAME || res == LEFT_SUBFIELD || res == RIGHT_SUBFIELD )
@@ -257,13 +258,13 @@ namespace mongo {
}
return false;
}
-
+
IndexSuitability IndexSpec::suitability( const BSONObj& query , const BSONObj& order ) const {
if ( _indexType.get() )
return _indexType->suitability( query , order );
return _suitability( query , order );
}
-
+
IndexSuitability IndexSpec::_suitability( const BSONObj& query , const BSONObj& order ) const {
// TODO: optimize
if ( anyElementNamesMatch( keyPattern , query ) == 0 && anyElementNamesMatch( keyPattern , order ) == 0 )
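
The array-expansion rule _getKeys implements above: for a pattern like { a:1, b:1 } where a is an array in the document, emit one key per array element with the non-array fields held fixed, which is why a second parallel array is rejected (uassert 10088) rather than producing a cross product. A simplified sketch; the "|" separator is purely illustrative:

    #include <cstdio>
    #include <set>
    #include <string>
    #include <vector>

    // Document: { a: ["red", "blue"], b: "x" }, index pattern: { a:1, b:1 }.
    // One key per element of the single array field, b held fixed.
    int main() {
        std::vector<std::string> a;
        a.push_back("red"); a.push_back("blue");
        std::string b = "x";

        std::set<std::string> keys;            // stands in for BSONObjSetDefaultOrder
        for (size_t i = 0; i < a.size(); i++)
            keys.insert(a[i] + "|" + b);       // illustrative key encoding

        for (std::set<std::string>::iterator it = keys.begin(); it != keys.end(); ++it)
            std::printf("key: %s\n", it->c_str());   // blue|x, red|x
        return 0;
    }
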
diff --git a/db/indexkey.h b/db/indexkey.h
index 2fe223d6c7e..be73171310b 100644
--- a/db/indexkey.h
+++ b/db/indexkey.h
@@ -46,16 +46,16 @@ namespace mongo {
virtual void getKeys( const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const = 0;
virtual shared_ptr<Cursor> newCursor( const BSONObj& query , const BSONObj& order , int numWanted ) const = 0;
-
+
/** optional op : changes query to match what's in the index */
virtual BSONObj fixKey( const BSONObj& in ) { return in; }
/** optional op : compare 2 objects with regards to this index */
- virtual int compare( const BSONObj& l , const BSONObj& r ) const;
+ virtual int compare( const BSONObj& l , const BSONObj& r ) const;
/** @return plugin */
const IndexPlugin * getPlugin() const { return _plugin; }
-
+
const BSONObj& keyPattern() const;
virtual IndexSuitability suitability( const BSONObj& query , const BSONObj& order ) const ;
@@ -66,7 +66,7 @@ namespace mongo {
const IndexPlugin * _plugin;
const IndexSpec * _spec;
};
-
+
/**
* this represents a plugin
* a plugin could be something like full text search, sparse index, etc...
@@ -76,21 +76,21 @@ namespace mongo {
class IndexPlugin : boost::noncopyable {
public:
IndexPlugin( const string& name );
- virtual ~IndexPlugin(){}
-
+ virtual ~IndexPlugin() {}
+
virtual IndexType* generate( const IndexSpec * spec ) const = 0;
-
+
string getName() const { return _name; }
/**
* @return new keyPattern
* if nothing changes, should return keyPattern
*/
- virtual BSONObj adjustIndexSpec( const BSONObj& spec ) const { return spec; }
+ virtual BSONObj adjustIndexSpec( const BSONObj& spec ) const { return spec; }
// ------- static below -------
- static IndexPlugin* get( const string& name ){
+ static IndexPlugin* get( const string& name ) {
if ( ! _plugins )
return 0;
map<string,IndexPlugin*>::iterator i = _plugins->find( name );
@@ -109,7 +109,7 @@ namespace mongo {
string _name;
static map<string,IndexPlugin*> * _plugins;
};
-
+
/* precomputed details about an index, used for inserting keys on updates
stored/cached in NamespaceDetailsTransient, or can be used standalone
*/
@@ -117,31 +117,31 @@ namespace mongo {
public:
BSONObj keyPattern; // e.g., { name : 1 }
BSONObj info; // this is the same as IndexDetails::info.obj()
-
+
IndexSpec()
- : _details(0) , _finishedInit(false){
+ : _details(0) , _finishedInit(false) {
}
IndexSpec( const BSONObj& k , const BSONObj& m = BSONObj() )
- : keyPattern(k) , info(m) , _details(0) , _finishedInit(false){
+ : keyPattern(k) , info(m) , _details(0) , _finishedInit(false) {
_init();
}
-
+
/**
           this is a DiskLoc of an IndexDetails info
- should have a key field
+ should have a key field
*/
- IndexSpec( const DiskLoc& loc ){
+ IndexSpec( const DiskLoc& loc ) {
reset( loc );
}
-
+
void reset( const DiskLoc& loc );
void reset( const IndexDetails * details );
-
+
void getKeys( const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const;
BSONElement missingField() const { return _nullElt; }
-
+
string getTypeName() const {
if ( _indexType.get() )
return _indexType->getPlugin()->getName();
@@ -163,24 +163,24 @@ namespace mongo {
IndexSuitability _suitability( const BSONObj& query , const BSONObj& order ) const ;
void _getKeys( vector<const char*> fieldNames , vector<BSONElement> fixed , const BSONObj &obj, BSONObjSetDefaultOrder &keys ) const;
-
+
BSONSizeTracker _sizeTracker;
vector<const char*> _fieldNames;
vector<BSONElement> _fixed;
BSONObj _nullKey; // a full key with all fields null
-
+
BSONObj _nullObj; // only used for _nullElt
BSONElement _nullElt; // jstNull
-
+
int _nFields; // number of fields in the index
bool _sparse; // if the index is sparse
shared_ptr<IndexType> _indexType;
const IndexDetails * _details;
-
+
void _init();
public:
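
IndexPlugin's registration scheme is a self-registering static: the constructor inserts this into a lazily allocated name map, so defining one static plugin instance (as geoHaystackSearchIndexPlugin does) is the whole hookup. A minimal sketch of the pattern; the Plugin/demo names are invented for illustration:

    #include <cstdio>
    #include <map>
    #include <string>

    class Plugin {
    public:
        explicit Plugin(const std::string& name) : _name(name) {
            // lazily create the registry so static init order doesn't matter
            if (!_plugins) _plugins = new std::map<std::string, Plugin*>();
            (*_plugins)[name] = this;
        }
        static Plugin* get(const std::string& name) {
            if (!_plugins) return 0;
            std::map<std::string, Plugin*>::iterator i = _plugins->find(name);
            return i == _plugins->end() ? 0 : i->second;
        }
        const std::string _name;
    private:
        static std::map<std::string, Plugin*>* _plugins;
    };
    std::map<std::string, Plugin*>* Plugin::_plugins = 0;

    static Plugin demo("2d");   // registers itself before main runs

    int main() {
        std::printf("found: %s\n", Plugin::get("2d") ? "yes" : "no");
        return 0;
    }
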
diff --git a/db/instance.cpp b/db/instance.cpp
index 165e752425b..ad3062ce63e 100644
--- a/db/instance.cpp
+++ b/db/instance.cpp
@@ -62,7 +62,7 @@ namespace mongo {
bool useCursors = true;
bool useHints = true;
-
+
void flushDiagLog() {
if( _diaglog.f && _diaglog.f->is_open() ) {
log() << "flushing diag log" << endl;
@@ -71,18 +71,18 @@ namespace mongo {
}
KillCurrentOp killCurrentOp;
-
+
int lockFile = 0;
// see FSyncCommand:
- unsigned lockedForWriting;
+ unsigned lockedForWriting;
mongo::mutex lockedForWritingMutex("lockedForWriting");
bool unlockRequested = false;
void inProgCmd( Message &m, DbResponse &dbresponse ) {
BSONObjBuilder b;
- if( ! cc().isAdmin() ){
+ if( ! cc().isAdmin() ) {
BSONObjBuilder b;
b.append("err", "unauthorized");
}
@@ -94,7 +94,7 @@ namespace mongo {
{
Client& me = cc();
scoped_lock bl(Client::clientsMutex);
- for( set<Client*>::iterator i = Client::clients.begin(); i != Client::clients.end(); i++ ) {
+ for( set<Client*>::iterator i = Client::clients.begin(); i != Client::clients.end(); i++ ) {
Client *c = *i;
assert( c );
CurOp* co = c->curop();
@@ -113,26 +113,26 @@ namespace mongo {
b.append("info", "use db.$cmd.sys.unlock.findOne() to terminate the fsync write/snapshot lock");
}
}
-
+
replyToQuery(0, m, dbresponse, b.obj());
}
-
+
void killOp( Message &m, DbResponse &dbresponse ) {
BSONObj obj;
- if( ! cc().isAdmin() ){
+ if( ! cc().isAdmin() ) {
obj = fromjson("{\"err\":\"unauthorized\"}");
}
- /*else if( !dbMutexInfo.isLocked() )
+ /*else if( !dbMutexInfo.isLocked() )
obj = fromjson("{\"info\":\"no op in progress/not locked\"}");
*/
else {
DbMessage d(m);
QueryMessage q(d);
BSONElement e = q.query.getField("op");
- if( !e.isNumber() ) {
+ if( !e.isNumber() ) {
obj = fromjson("{\"err\":\"no op number field specified?\"}");
}
- else {
+ else {
log() << "going to kill op: " << e << endl;
obj = fromjson("{\"info\":\"attempting to kill op\"}");
killCurrentOp.kill( (unsigned) e.number() );
@@ -143,23 +143,23 @@ namespace mongo {
void unlockFsync(const char *ns, Message& m, DbResponse &dbresponse) {
BSONObj obj;
- if( ! cc().isAdmin() || strncmp(ns, "admin.", 6) != 0 ) {
+ if( ! cc().isAdmin() || strncmp(ns, "admin.", 6) != 0 ) {
obj = fromjson("{\"err\":\"unauthorized\"}");
}
else {
- if( lockedForWriting ) {
- log() << "command: unlock requested" << endl;
+ if( lockedForWriting ) {
+ log() << "command: unlock requested" << endl;
obj = fromjson("{ok:1,\"info\":\"unlock requested\"}");
unlockRequested = true;
}
- else {
+ else {
obj = fromjson("{ok:0,\"errmsg\":\"not locked\"}");
}
}
replyToQuery(0, m, dbresponse, obj);
}
- static bool receivedQuery(Client& c, DbResponse& dbresponse, Message& m ){
+ static bool receivedQuery(Client& c, DbResponse& dbresponse, Message& m ) {
bool ok = true;
MSGID responseTo = m.header()->id;
@@ -168,7 +168,7 @@ namespace mongo {
auto_ptr< Message > resp( new Message() );
CurOp& op = *(c.curop());
-
+
try {
dbresponse.exhaust = runQuery(m, q, op, *resp);
assert( !resp->empty() );
@@ -176,9 +176,9 @@ namespace mongo {
catch ( AssertionException& e ) {
ok = false;
op.debug().str << " exception ";
- LOGSOME {
+ LOGSOME {
log() << "assertion " << e.toString() << " ns:" << q.ns << " query:" <<
- (q.query.valid() ? q.query.toString() : "query object is corrupt") << endl;
+ (q.query.valid() ? q.query.toString() : "query object is corrupt") << endl;
if( q.ntoskip || q.ntoreturn )
log() << " ntoskip:" << q.ntoskip << " ntoreturn:" << q.ntoreturn << endl;
}
@@ -207,13 +207,13 @@ namespace mongo {
resp->setData( msgdata, true );
}
- if ( op.shouldDBProfile( 0 ) ){
+ if ( op.shouldDBProfile( 0 ) ) {
op.debug().str << " bytes:" << resp->header()->dataLen();
}
-
+
dbresponse.response = resp.release();
dbresponse.responseTo = responseTo;
-
+
return ok;
}
@@ -228,16 +228,16 @@ namespace mongo {
if( strstr(ns, ".$cmd") ) {
isCommand = true;
opwrite(m);
- if( strstr(ns, ".$cmd.sys.") ) {
+ if( strstr(ns, ".$cmd.sys.") ) {
if( strstr(ns, "$cmd.sys.inprog") ) {
inProgCmd(m, dbresponse);
return true;
}
- if( strstr(ns, "$cmd.sys.killop") ) {
+ if( strstr(ns, "$cmd.sys.killop") ) {
killOp(m, dbresponse);
return true;
}
- if( strstr(ns, "$cmd.sys.unlock") ) {
+ if( strstr(ns, "$cmd.sys.unlock") ) {
unlockFsync(ns, m, dbresponse);
return true;
}
@@ -253,27 +253,27 @@ namespace mongo {
else {
opwrite(m);
}
-
+
globalOpCounters.gotOp( op , isCommand );
-
+
Client& c = cc();
-
+
auto_ptr<CurOp> nestedOp;
CurOp* currentOpP = c.curop();
- if ( currentOpP->active() ){
+ if ( currentOpP->active() ) {
nestedOp.reset( new CurOp( &c , currentOpP ) );
currentOpP = nestedOp.get();
}
CurOp& currentOp = *currentOpP;
currentOp.reset(client,op);
-
+
OpDebug& debug = currentOp.debug();
StringBuilder& ss = debug.str;
ss << opToString( op ) << " ";
int logThreshold = cmdLine.slowMS;
bool log = logLevel >= 1;
-
+
if ( op == dbQuery ) {
if ( handlePossibleShardedMessage( m , &dbresponse ) )
return true;
@@ -289,7 +289,7 @@ namespace mongo {
int len = strlen(p);
if ( len > 400 )
out() << curTimeMillis() % 10000 <<
- " long msg received, len:" << len << endl;
+ " long msg received, len:" << len << endl;
Message *resp = new Message();
if ( strcmp( "end" , p ) == 0 )
@@ -304,7 +304,7 @@ namespace mongo {
const char *ns = m.singleData()->_data + 4;
char cl[256];
nsToDatabase(ns, cl);
- if( ! c.getAuthenticationInfo()->isAuthorized(cl) ) {
+ if( ! c.getAuthenticationInfo()->isAuthorized(cl) ) {
uassert_nothrow("unauthorized");
}
else {
@@ -345,24 +345,25 @@ namespace mongo {
currentOp.done();
int ms = currentOp.totalTimeMillis();
- //DEV log = true;
+ //DEV log = true;
if ( log || ms > logThreshold ) {
if( logLevel < 3 && op == dbGetMore && strstr(ns, ".oplog.") && ms < 3000 && !log ) {
/* it's normal for getMore on the oplog to be slow because of use of awaitdata flag. */
- } else {
+ }
+ else {
ss << ' ' << ms << "ms";
mongo::tlog() << ss.str() << endl;
}
}
-
- if ( currentOp.shouldDBProfile( ms ) ){
+
+ if ( currentOp.shouldDBProfile( ms ) ) {
// performance profiling is on
- if ( dbMutex.getState() < 0 ){
+ if ( dbMutex.getState() < 0 ) {
mongo::log(1) << "note: not profiling because recursive read lock" << endl;
}
else {
writelock lk;
- if ( dbHolder.isLoaded( nsToDatabase( currentOp.getNS() ) , dbpath ) ){
+ if ( dbHolder.isLoaded( nsToDatabase( currentOp.getNS() ) , dbpath ) ) {
Client::Context c( currentOp.getNS() );
profile(ss.str().c_str(), ms);
}
@@ -380,17 +381,17 @@ namespace mongo {
x++; // reserved
int n = *x++;
- assert( m.dataSize() == 8 + ( 8 * n ) );
+ assert( m.dataSize() == 8 + ( 8 * n ) );
uassert( 13004 , "sent 0 cursors to kill" , n >= 1 );
if ( n > 2000 ) {
log( n < 30000 ? LL_WARNING : LL_ERROR ) << "receivedKillCursors, n=" << n << endl;
assert( n < 30000 );
}
-
+
int found = ClientCursor::erase(n, (long long *) x);
- if ( logLevel > 0 || found != n ){
+ if ( logLevel > 0 || found != n ) {
log( found == n ) << "killcursors: found " << found << " of " << n << endl;
}
@@ -401,16 +402,16 @@ namespace mongo {
*/
/*static*/ void Database::closeDatabase( const char *db, const string& path ) {
assertInWriteLock();
-
+
Client::Context * ctx = cc().getContext();
assert( ctx );
assert( ctx->inDB( db , path ) );
Database *database = ctx->db();
assert( database->name == db );
-
+
oplogCheckCloseDatabase( database ); // oplog caches some things, dirty its caches
- if( BackgroundOperation::inProgForDb(db) ) {
+ if( BackgroundOperation::inProgForDb(db) ) {
log() << "warning: bg op in prog during close db? " << db << endl;
}
@@ -446,13 +447,13 @@ namespace mongo {
bool broadcast = flags & UpdateOption_Broadcast;
{
string s = query.toString();
- /* todo: we shouldn't do all this ss stuff when we don't need it, it will slow us down.
- instead, let's just story the query BSON in the debug object, and it can toString()
+ /* todo: we shouldn't do all this ss stuff when we don't need it, it will slow us down.
+           instead, let's just store the query BSON in the debug object, and it can toString()
lazily
*/
op.debug().str << " query: " << s;
op.setQuery(query);
- }
+ }
writelock lk;
@@ -481,37 +482,37 @@ namespace mongo {
string s = pattern.toString();
op.debug().str << " query: " << s;
op.setQuery(pattern);
- }
+ }
writelock lk(ns);
// if this ever moves to outside of lock, need to adjust check Client::Context::_finishInit
if ( ! broadcast & handlePossibleShardedMessage( m , 0 ) )
return;
-
+
Client::Context ctx(ns);
-
+
long long n = deleteObjects(ns, pattern, justOne, true);
lastError.getSafe()->recordDelete( n );
}
-
+
QueryResult* emptyMoreResult(long long);
bool receivedGetMore(DbResponse& dbresponse, Message& m, CurOp& curop ) {
StringBuilder& ss = curop.debug().str;
bool ok = true;
-
+
DbMessage d(m);
const char *ns = d.getns();
int ntoreturn = d.pullInt();
long long cursorid = d.pullInt64();
-
+
ss << ns << " cid:" << cursorid;
- if( ntoreturn )
+ if( ntoreturn )
ss << " ntoreturn:" << ntoreturn;
- time_t start = 0;
- int pass = 0;
+ time_t start = 0;
+ int pass = 0;
bool exhaust = false;
QueryResult* msgdata;
while( 1 ) {
@@ -520,24 +521,24 @@ namespace mongo {
Client::Context ctx(ns);
msgdata = processGetMore(ns, ntoreturn, cursorid, curop, pass, exhaust);
}
- catch ( GetMoreWaitException& ) {
+ catch ( GetMoreWaitException& ) {
exhaust = false;
massert(13073, "shutting down", !inShutdown() );
- if( pass == 0 ) {
- start = time(0);
- }
- else {
- if( time(0) - start >= 4 ) {
- // after about 4 seconds, return. this is a sanity check. pass stops at 1000 normally
- // for DEV this helps and also if sleep is highly inaccurate on a platform. we want to
- // return occasionally so slave can checkpoint.
- pass = 10000;
- }
- }
+ if( pass == 0 ) {
+ start = time(0);
+ }
+ else {
+ if( time(0) - start >= 4 ) {
+ // after about 4 seconds, return. this is a sanity check. pass stops at 1000 normally
+ // for DEV this helps and also if sleep is highly inaccurate on a platform. we want to
+ // return occasionally so slave can checkpoint.
+ pass = 10000;
+ }
+ }
pass++;
- DEV
- sleepmillis(20);
- else
+ DEV
+ sleepmillis(20);
+ else
sleepmillis(2);
continue;
}
@@ -556,8 +557,8 @@ namespace mongo {
ss << " nreturned:" << msgdata->nReturned;
dbresponse.response = resp;
dbresponse.responseTo = m.header()->id;
- if( exhaust ) {
- ss << " exhaust ";
+ if( exhaust ) {
+ ss << " exhaust ";
dbresponse.exhaust = ns;
}
return ok;
@@ -565,8 +566,8 @@ namespace mongo {
void receivedInsert(Message& m, CurOp& op) {
DbMessage d(m);
- const char *ns = d.getns();
- assert(*ns);
+ const char *ns = d.getns();
+ assert(*ns);
uassert( 10058 , "not master", isMasterNs( ns ) );
op.debug().str << ns;
@@ -575,14 +576,15 @@ namespace mongo {
if ( handlePossibleShardedMessage( m , 0 ) )
return;
- Client::Context ctx(ns);
+ Client::Context ctx(ns);
while ( d.moreJSObjs() ) {
BSONObj js = d.nextJsObj();
uassert( 10059 , "object to insert too large", js.objsize() <= BSONObjMaxUserSize);
- { // check no $ modifiers
+ {
+ // check no $ modifiers
BSONObjIterator i( js );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
uassert( 13511 , "object to insert can't have $ modifiers" , e.fieldName()[0] != '$' );
}
@@ -604,7 +606,8 @@ namespace mongo {
p /= ( dbName + ".ns" );
if ( MMF::exists( p ) )
names.push_back( dbName );
- } else {
+ }
+ else {
string fileName = boost::filesystem::path(*i).leaf();
if ( fileName.length() > 3 && fileName.substr( fileName.length() - 3, 3 ) == ".ns" )
names.push_back( fileName.substr( 0, fileName.length() - 3 ) );
@@ -612,14 +615,14 @@ namespace mongo {
}
}
- /* returns true if there is data on this server. useful when starting replication.
+ /* returns true if there is data on this server. useful when starting replication.
local database does NOT count except for rsoplog collection.
*/
- bool replHasDatabases() {
+ bool replHasDatabases() {
vector<string> names;
getDatabaseNames(names);
if( names.size() >= 2 ) return true;
- if( names.size() == 1 ){
+ if( names.size() == 1 ) {
if( names[0] != "local" )
return true;
// we have a local database. return true if oplog isn't empty
@@ -652,8 +655,8 @@ namespace mongo {
}
auto_ptr<DBClientCursor> DBDirectClient::query(const string &ns, Query query, int nToReturn , int nToSkip ,
- const BSONObj *fieldsToReturn , int queryOptions ){
-
+ const BSONObj *fieldsToReturn , int queryOptions ) {
+
//if ( ! query.obj.isEmpty() || nToReturn != 0 || nToSkip != 0 || fieldsToReturn || queryOptions )
return DBClientBase::query( ns , query , nToReturn , nToSkip , fieldsToReturn , queryOptions );
//
@@ -661,41 +664,41 @@ namespace mongo {
//throw UserException( (string)"yay:" + ns );
}
- void DBDirectClient::killCursor( long long id ){
+ void DBDirectClient::killCursor( long long id ) {
ClientCursor::erase( id );
}
- DBClientBase * createDirectClient(){
+ DBClientBase * createDirectClient() {
return new DBDirectClient();
}
mongo::mutex exitMutex("exit");
int numExitCalls = 0;
- bool inShutdown(){
+ bool inShutdown() {
return numExitCalls > 0;
}
- void tryToOutputFatal( const string& s ){
+ void tryToOutputFatal( const string& s ) {
try {
rawOut( s );
return;
}
- catch ( ... ){}
+ catch ( ... ) {}
try {
cerr << s << endl;
return;
}
- catch ( ... ){}
-
+ catch ( ... ) {}
+
// uh - oh, not sure there is anything else we can do...
}
/** also called by ntservice.cpp */
void shutdownServer() {
- log() << "shutdown: going to close listening sockets..." << endl;
+ log() << "shutdown: going to close listening sockets..." << endl;
ListeningSockets::get()->closeAll();
log() << "shutdown: going to flush oplog..." << endl;
@@ -710,7 +713,7 @@ namespace mongo {
// synchronous signal, which we don't expect
log() << "shutdown: waiting for fs preallocator..." << endl;
theFileAllocator().waitUntilFinished();
-
+
log() << "shutdown: closing all files..." << endl;
if( cmdLine.dur ) {
/* is this useful? needed? helpful? perhaps even without _DURABLE. ifdef'd for now just to avoid behavior change short term */
@@ -730,12 +733,12 @@ namespace mongo {
}
#if !defined(_WIN32) && !defined(__sunos__)
- if ( lockFile ){
+ if ( lockFile ) {
log() << "shutdown: removing fs lock..." << endl;
/* This ought to be an unlink(), but Eliot says the last
time that was attempted, there was a race condition
with acquirePathLock(). */
- if( ftruncate( lockFile , 0 ) )
+ if( ftruncate( lockFile , 0 ) )
log() << "couldn't remove fs lock " << errnoWithDescription() << endl;
flock( lockFile, LOCK_UN );
}
@@ -743,19 +746,19 @@ namespace mongo {
}
/* not using log() herein in case we are already locked */
- void dbexit( ExitCode rc, const char *why, bool tryToGetLock ) {
-
+ void dbexit( ExitCode rc, const char *why, bool tryToGetLock ) {
+
auto_ptr<writelocktry> wlt;
- if ( tryToGetLock ){
+ if ( tryToGetLock ) {
wlt.reset( new writelocktry( "" , 2 * 60 * 1000 ) );
uassert( 13455 , "dbexit timed out getting lock" , wlt->got() );
}
-
+
Client * c = currentClient.get();
{
scoped_lock lk( exitMutex );
if ( numExitCalls++ > 0 ) {
- if ( numExitCalls > 5 ){
+ if ( numExitCalls > 5 ) {
// this means something horrible has happened
::_exit( rc );
}
@@ -763,33 +766,33 @@ namespace mongo {
ss << "dbexit: " << why << "; exiting immediately";
tryToOutputFatal( ss.str() );
if ( c ) c->shutdown();
- ::exit( rc );
+ ::exit( rc );
}
}
-
+
{
stringstream ss;
ss << "dbexit: " << why;
tryToOutputFatal( ss.str() );
}
-
+
try {
shutdownServer(); // gracefully shutdown instance
}
- catch ( ... ){
+ catch ( ... ) {
tryToOutputFatal( "shutdown failed with exception" );
}
- try {
+ try {
mutexDebugger.programEnding();
}
catch (...) { }
-
+
tryToOutputFatal( "dbexit: really exiting now" );
if ( c ) c->shutdown();
::exit(rc);
}
-
+
#if !defined(_WIN32) && !defined(__sunos__)
void writePid(int fd) {
stringstream ss;
@@ -800,7 +803,7 @@ namespace mongo {
}
void acquirePathLock() {
- string name = ( boost::filesystem::path( dbpath ) / "mongod.lock" ).native_file_string();
+ string name = ( boost::filesystem::path( dbpath ) / "mongod.lock" ).native_file_string();
bool oldFile = false;
@@ -809,8 +812,8 @@ namespace mongo {
}
lockFile = open( name.c_str(), O_RDWR | O_CREAT , S_IRWXU | S_IRWXG | S_IRWXO );
- if( lockFile <= 0 ) {
- uasserted( 10309 , str::stream() << "Unable to create / open lock file for lockfilepath: " << name << ' ' << errnoWithDescription());
+ if( lockFile <= 0 ) {
+ uasserted( 10309 , str::stream() << "Unable to create / open lock file for lockfilepath: " << name << ' ' << errnoWithDescription());
}
if (flock( lockFile, LOCK_EX | LOCK_NB ) != 0) {
close ( lockFile );
@@ -818,28 +821,28 @@ namespace mongo {
uassert( 10310 , "Unable to acquire lock for lockfilepath: " + name, 0 );
}
- if ( oldFile ){
+ if ( oldFile ) {
// we check this here because we want to see if we can get the lock
            // if we can't, then it's probably just another mongod running
string errmsg;
if (cmdLine.dur) {
- if (!dur::haveJournalFiles()){
+ if (!dur::haveJournalFiles()) {
errmsg = str::stream()
- << "************** \n"
- << "old lock file: " << name << ". probably means unclean shutdown\n"
- << "but there are no journal files to recover.\n"
- << "see: http://dochub.mongodb.org/core/repair for more information\n"
- << "*************";
+ << "************** \n"
+ << "old lock file: " << name << ". probably means unclean shutdown\n"
+ << "but there are no journal files to recover.\n"
+ << "see: http://dochub.mongodb.org/core/repair for more information\n"
+ << "*************";
}
}
else {
errmsg = str::stream()
- << "************** \n"
- << "old lock file: " << name << ". probably means unclean shutdown\n"
- << "recommend removing file and running --repair\n"
- << "see: http://dochub.mongodb.org/core/repair for more information\n"
- << "*************";
+ << "************** \n"
+ << "old lock file: " << name << ". probably means unclean shutdown\n"
+ << "recommend removing file and running --repair\n"
+ << "see: http://dochub.mongodb.org/core/repair for more information\n"
+ << "*************";
}
if (!errmsg.empty()) {
@@ -878,6 +881,6 @@ namespace mongo {
uasserted(13618, "can't start without --dur enabled when journal/ files are present");
}
}
-#endif
-
+#endif
+
} // namespace mongo
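
acquirePathLock above boils down to open-or-create plus a non-blocking exclusive flock: a lock that cannot be taken usually means another mongod owns the dbpath, while a pre-existing non-empty lock file with no journal files means unclean shutdown. A reduced POSIX-only sketch; the /tmp path is hypothetical, and the real code also writes the pid and inspects the journal directory:

    #include <cstdio>
    #include <fcntl.h>
    #include <sys/file.h>
    #include <sys/stat.h>
    #include <unistd.h>

    // Open or create the lock file, then try a non-blocking exclusive lock.
    // Returns the held fd on success, -1 if another process owns the lock.
    int tryLock(const char* name) {
        int fd = open(name, O_RDWR | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO);
        if (fd <= 0) return -1;                    // couldn't create/open
        if (flock(fd, LOCK_EX | LOCK_NB) != 0) {   // someone else holds it
            close(fd);
            return -1;
        }
        return fd;                                 // held until unlock/exit
    }

    int main() {
        int fd = tryLock("/tmp/mongod.lock");      // hypothetical path
        std::printf(fd > 0 ? "lock acquired\n" : "lock busy\n");
        if (fd > 0) {
            ftruncate(fd, 0);                      // same cleanup as shutdownServer
            flock(fd, LOCK_UN);
            close(fd);
        }
        return 0;
    }
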
diff --git a/db/instance.h b/db/instance.h
index 96fc9007496..570536ae09b 100644
--- a/db/instance.h
+++ b/db/instance.h
@@ -40,7 +40,7 @@ namespace mongo {
DiagLog() : f(0) , level(0), mutex("DiagLog") { }
void init() {
- if ( ! f && level ){
+ if ( ! f && level ) {
log() << "diagLogging = " << level << endl;
stringstream ss;
ss << dbpath << "/diaglog." << hex << time(0);
@@ -55,20 +55,20 @@ namespace mongo {
/**
* @return old
*/
- int setLevel( int newLevel ){
+ int setLevel( int newLevel ) {
int old = level;
level = newLevel;
init();
return old;
}
void flush() {
- if ( level ){
+ if ( level ) {
scoped_lock lk(mutex);
f->flush();
}
}
void write(char *data,int len) {
- if ( level & 1 ){
+ if ( level & 1 ) {
scoped_lock lk(mutex);
f->write(data,len);
}
@@ -77,7 +77,7 @@ namespace mongo {
if ( level & 2 ) {
bool log = (level & 4) == 0;
OCCASIONALLY log = true;
- if ( log ){
+ if ( log ) {
scoped_lock lk(mutex);
assert( f );
f->write(data,len);
@@ -102,29 +102,29 @@ namespace mongo {
}
~DbResponse() { delete response; }
};
-
+
bool assembleResponse( Message &m, DbResponse &dbresponse, const SockAddr &client = unknownAddress );
void getDatabaseNames( vector< string > &names , const string& usePath = dbpath );
- /* returns true if there is no data on this server. useful when starting replication.
- local database does NOT count.
+ /* returns true if there is no data on this server. useful when starting replication.
+ local database does NOT count.
*/
bool replHasDatabases();
/** "embedded" calls to the local server directly. */
- class DBDirectClient : public DBClientBase {
+ class DBDirectClient : public DBClientBase {
public:
virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn = 0, int nToSkip = 0,
const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
-
+
virtual bool isFailed() const {
return false;
}
virtual string toString() {
return "DBDirectClient";
}
- virtual string getServerAddress() const{
+ virtual string getServerAddress() const {
return "localhost"; // TODO: should this have the port?
}
virtual bool call( Message &toSend, Message &response, bool assertOk=true );
@@ -133,18 +133,18 @@ namespace mongo {
// don't need to piggy back when connected locally
return say( toSend );
}
-
+
virtual void killCursor( long long cursorID );
-
- virtual bool callRead( Message& toSend , Message& response ){
+
+ virtual bool callRead( Message& toSend , Message& response ) {
return call( toSend , response );
}
- virtual ConnectionString::ConnectionType type() const { return ConnectionString::MASTER; }
+ virtual ConnectionString::ConnectionType type() const { return ConnectionString::MASTER; }
};
extern int lockFile;
void acquirePathLock();
void maybeCreatePidFile();
-
+
} // namespace mongo
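
The DiagLog hunks above gate all logging on a level bitmask; from the code, bit 0 records writes, bit 1 records reads, and bit 2 downgrades read logging to occasional sampling via OCCASIONALLY. A stripped-down model of that gating, assuming those bit meanings:

    #include <fstream>
    #include <mutex>

    class MiniDiagLog {
        std::ofstream f;
        std::mutex m;
        int level;
    public:
        MiniDiagLog() : level(0) {}
        int setLevel(int newLevel) {           // returns the old level
            int old = level;
            level = newLevel;
            return old;
        }
        void writeOp(const char* data, int len) {
            if (level & 1) {                   // bit 0: record write traffic
                std::lock_guard<std::mutex> lk(m);
                f.write(data, len);
            }
        }
        void readOp(const char* data, int len) {
            if (level & 2) {                   // bit 1: record read traffic
                std::lock_guard<std::mutex> lk(m);
                f.write(data, len);
            }
        }
    };
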
diff --git a/db/introspect.cpp b/db/introspect.cpp
index d72bb3fcea7..cee0da869f4 100644
--- a/db/introspect.cpp
+++ b/db/introspect.cpp
@@ -26,8 +26,7 @@
namespace mongo {
- void profile( const char *str, int millis)
- {
+ void profile( const char *str, int millis) {
BSONObjBuilder b;
b.appendDate("ts", jsTime());
b.append("info", str);
diff --git a/db/jsobj.cpp b/db/jsobj.cpp
index e79a367887d..5b1da6344e4 100644
--- a/db/jsobj.cpp
+++ b/db/jsobj.cpp
@@ -86,7 +86,8 @@ namespace mongo {
//TODO: these should be utf16 code-units not bytes
char c = *i;
ret << "\\u00" << toHexLower(&c, 1);
- } else {
+ }
+ else {
ret << *i;
}
}
@@ -116,7 +117,8 @@ namespace mongo {
number() <= numeric_limits< double >::max() ) {
s.precision( 16 );
s << number();
- } else {
+ }
+ else {
StringBuilder ss;
ss << "Number " << number() << " cannot be represented in JSON";
string message = ss.str();
@@ -175,13 +177,15 @@ namespace mongo {
case jstOID:
if ( format == TenGen ) {
s << "ObjectId( ";
- } else {
+ }
+ else {
s << "{ \"$oid\" : ";
}
s << '"' << __oid() << '"';
if ( format == TenGen ) {
s << " )";
- } else {
+ }
+ else {
s << " }";
}
break;
@@ -208,7 +212,8 @@ namespace mongo {
if( d == 0 ) s << '0';
else
s << '"' << date().toString() << '"';
- } else
+ }
+ else
s << date();
if ( format == Strict )
s << " }";
@@ -216,13 +221,14 @@ namespace mongo {
s << " )";
break;
case RegEx:
- if ( format == Strict ){
+ if ( format == Strict ) {
s << "{ \"$regex\" : \"" << escape( regex() );
s << "\", \"$options\" : \"" << regexFlags() << "\" }";
- } else {
+ }
+ else {
s << "/" << escape( regex() , true ) << "/";
// FIXME Worry about alpha order?
- for ( const char *f = regexFlags(); *f; ++f ){
+ for ( const char *f = regexFlags(); *f; ++f ) {
switch ( *f ) {
case 'g':
case 'i':
@@ -237,7 +243,7 @@ namespace mongo {
case CodeWScope: {
BSONObj scope = codeWScopeObject();
- if ( ! scope.isEmpty() ){
+ if ( ! scope.isEmpty() ) {
s << "{ \"$code\" : " << _asCode() << " , "
<< " \"$scope\" : " << scope.jsonString() << " }";
break;
@@ -248,7 +254,7 @@ namespace mongo {
case Code:
s << _asCode();
break;
-
+
case Timestamp:
s << "{ \"t\" : " << timestampTime() << " , \"i\" : " << timestampInc() << " }";
break;
@@ -264,7 +270,7 @@ namespace mongo {
default:
StringBuilder ss;
ss << "Cannot create a properly formatted JSON string with "
- << "element: " << toString() << " of type: " << type();
+ << "element: " << toString() << " of type: " << type();
string message = ss.str();
massert( 10312 , message.c_str(), false );
}
@@ -284,13 +290,13 @@ namespace mongo {
else if ( fn[3] == 'e' && fn[4] == 0 ) return BSONObj::LTE;
}
}
- else if ( fn[1] == 'n' && fn[2] == 'e' ){
+ else if ( fn[1] == 'n' && fn[2] == 'e' ) {
if ( fn[3] == 0 )
return BSONObj::NE;
if ( fn[3] == 'a' && fn[4] == 'r') // matches anything with $near prefix
return BSONObj::opNEAR;
}
- else if ( fn[1] == 'm' ){
+ else if ( fn[1] == 'm' ) {
if ( fn[2] == 'o' && fn[3] == 'd' && fn[4] == 0 )
return BSONObj::opMOD;
if ( fn[2] == 'a' && fn[3] == 'x' && fn[4] == 'D' && fn[5] == 'i' && fn[6] == 's' && fn[7] == 't' && fn[8] == 'a' && fn[9] == 'n' && fn[10] == 'c' && fn[11] == 'e' && fn[12] == 0 )
@@ -306,7 +312,7 @@ namespace mongo {
return BSONObj::opALL;
else if ( fn[1] == 's' && fn[2] == 'i' && fn[3] == 'z' && fn[4] == 'e' && fn[5] == 0 )
return BSONObj::opSIZE;
- else if ( fn[1] == 'e' ){
+ else if ( fn[1] == 'e' ) {
if ( fn[2] == 'x' && fn[3] == 'i' && fn[4] == 's' && fn[5] == 't' && fn[6] == 's' && fn[7] == 0 )
return BSONObj::opEXISTS;
if ( fn[2] == 'l' && fn[3] == 'e' && fn[4] == 'm' && fn[5] == 'M' && fn[6] == 'a' && fn[7] == 't' && fn[8] == 'c' && fn[9] == 'h' && fn[10] == 0 )
@@ -375,22 +381,24 @@ namespace mongo {
double left = l.number();
double right = r.number();
bool lNan = !( left <= numeric_limits< double >::max() &&
- left >= -numeric_limits< double >::max() );
+ left >= -numeric_limits< double >::max() );
bool rNan = !( right <= numeric_limits< double >::max() &&
- right >= -numeric_limits< double >::max() );
+ right >= -numeric_limits< double >::max() );
if ( lNan ) {
if ( rNan ) {
return 0;
- } else {
+ }
+ else {
return -1;
}
- } else if ( rNan ) {
+ }
+ else if ( rNan ) {
return 1;
}
x = left - right;
if ( x < 0 ) return -1;
return x == 0 ? 0 : 1;
- }
+ }
case jstOID:
return memcmp(l.value(), r.value(), 12);
case Code:
@@ -413,8 +421,7 @@ namespace mongo {
if ( lsz - rsz != 0 ) return lsz - rsz;
return memcmp(l.value()+4, r.value()+4, lsz+1);
}
- case RegEx:
- {
+ case RegEx: {
int c = strcmp(l.regex(), r.regex());
if ( c )
return c;
@@ -467,14 +474,14 @@ namespace mongo {
return fe.getGtLtOp();
}
- FieldCompareResult compareDottedFieldNames( const string& l , const string& r ){
+ FieldCompareResult compareDottedFieldNames( const string& l , const string& r ) {
static int maxLoops = 1024 * 1024;
-
+
size_t lstart = 0;
size_t rstart = 0;
- for ( int i=0; i<maxLoops; i++ ){
- if ( lstart >= l.size() ){
+ for ( int i=0; i<maxLoops; i++ ) {
+ if ( lstart >= l.size() ) {
if ( rstart >= r.size() )
return SAME;
return RIGHT_SUBFIELD;
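
compareDottedFieldNames() above is cut off at the hunk boundary, but its shape is visible: walk both dotted paths one segment at a time, and if one side runs out first the other names a subfield. A self-contained version of that walk (the ordering values besides SAME and the two *_SUBFIELD results are assumed):

    #include <string>

    enum FieldCompareResult {
        LEFT_SUBFIELD, LEFT_BEFORE, SAME, RIGHT_BEFORE, RIGHT_SUBFIELD
    };

    FieldCompareResult compareDotted(const std::string& l, const std::string& r) {
        size_t ls = 0, rs = 0;
        while (true) {
            if (ls >= l.size())
                return rs >= r.size() ? SAME : RIGHT_SUBFIELD;
            if (rs >= r.size())
                return LEFT_SUBFIELD;
            size_t le = l.find('.', ls), re = r.find('.', rs);
            std::string a = l.substr(ls, le == std::string::npos ? le : le - ls);
            std::string b = r.substr(rs, re == std::string::npos ? re : re - rs);
            if (a < b) return LEFT_BEFORE;     // first differing segment decides
            if (a > b) return RIGHT_BEFORE;
            ls = (le == std::string::npos) ? l.size() : le + 1;
            rs = (re == std::string::npos) ? r.size() : re + 1;
        }
    }

The original compares segments with lexNumCmp; plain string comparison stands in here.
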
@@ -547,31 +554,34 @@ namespace mongo {
}
bool BSONObj::valid() const {
- try{
+ try {
BSONObjIterator it(*this);
- while( it.moreWithEOO() ){
+ while( it.moreWithEOO() ) {
// both throw exception on failure
BSONElement e = it.next(true);
e.validate();
- if (e.eoo()){
+ if (e.eoo()) {
if (it.moreWithEOO())
return false;
return true;
- }else if (e.isABSONObj()){
+ }
+ else if (e.isABSONObj()) {
if(!e.embeddedObject().valid())
return false;
- }else if (e.type() == CodeWScope){
+ }
+ else if (e.type() == CodeWScope) {
if(!e.codeWScopeObject().valid())
return false;
}
}
- } catch (...) {
+ }
+ catch (...) {
}
return false;
}
- int BSONObj::woCompare(const BSONObj& r, const Ordering &o, bool considerFieldName) const {
+ int BSONObj::woCompare(const BSONObj& r, const Ordering &o, bool considerFieldName) const {
if ( isEmpty() )
return r.isEmpty() ? 0 : -1;
if ( r.isEmpty() )
@@ -630,13 +640,13 @@ namespace mongo {
return 1;
int x;
-/*
- if( ordered && o.type() == String && strcmp(o.valuestr(), "ascii-proto") == 0 &&
- l.type() == String && r.type() == String ) {
- // note: no negative support yet, as this is just sort of a POC
- x = _stricmp(l.valuestr(), r.valuestr());
- }
- else*/ {
+ /*
+ if( ordered && o.type() == String && strcmp(o.valuestr(), "ascii-proto") == 0 &&
+ l.type() == String && r.type() == String ) {
+ // note: no negative support yet, as this is just sort of a POC
+ x = _stricmp(l.valuestr(), r.valuestr());
+ }
+ else*/ {
x = l.woCompare( r, considerFieldName );
if ( ordered && o.number() < 0 )
x = -x;
@@ -650,7 +660,7 @@ namespace mongo {
BSONObj staticNull = fromjson( "{'':null}" );
/* well ordered compare */
- int BSONObj::woSortOrder(const BSONObj& other, const BSONObj& sortKey , bool useDotted ) const{
+ int BSONObj::woSortOrder(const BSONObj& other, const BSONObj& sortKey , bool useDotted ) const {
if ( isEmpty() )
return other.isEmpty() ? 0 : -1;
if ( other.isEmpty() )
@@ -659,7 +669,7 @@ namespace mongo {
uassert( 10060 , "woSortOrder needs a non-empty sortKey" , ! sortKey.isEmpty() );
BSONObjIterator i(sortKey);
- while ( 1 ){
+ while ( 1 ) {
BSONElement f = i.next();
if ( f.eoo() )
return 0;
@@ -689,11 +699,12 @@ namespace mongo {
const char* next = p+1;
BSONElement e = getField( left.c_str() );
- if (e.type() == Object){
+ if (e.type() == Object) {
e.embeddedObject().getFieldsDotted(next, ret);
- } else if (e.type() == Array) {
+ }
+ else if (e.type() == Array) {
bool allDigits = false;
- if ( isdigit( *next ) ){
+ if ( isdigit( *next ) ) {
const char * temp = next + 1;
while ( isdigit( *temp ) )
temp++;
@@ -701,24 +712,28 @@ namespace mongo {
}
if (allDigits) {
e.embeddedObject().getFieldsDotted(next, ret);
- } else {
+ }
+ else {
BSONObjIterator i(e.embeddedObject());
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e2 = i.next();
if (e2.type() == Object || e2.type() == Array)
e2.embeddedObject().getFieldsDotted(next, ret);
}
}
- } else {
+ }
+ else {
// do nothing: no match
}
}
- } else {
- if (e.type() == Array){
+ }
+ else {
+ if (e.type() == Array) {
BSONObjIterator i(e.embeddedObject());
while ( i.more() )
ret.insert(i.next());
- } else {
+ }
+ else {
ret.insert(e);
}
}
@@ -730,7 +745,8 @@ namespace mongo {
if ( p ) {
left = string(name, p-name);
name = p + 1;
- } else {
+ }
+ else {
left = string(name);
name = name + strlen(name);
}
@@ -789,7 +805,7 @@ namespace mongo {
break;
BSONElement x = filter.getField( e.fieldName() );
if ( ( x.eoo() && !inFilter ) ||
- ( !x.eoo() && inFilter ) )
+ ( !x.eoo() && inFilter ) )
b.append( e );
}
return b.obj();
@@ -869,7 +885,8 @@ namespace mongo {
gotId = gotId || strcmp(fname, "_id")==0;
if ( n == N && gotId )
break;
- } else if ( strcmp(fname, "_id")==0 ) {
+ }
+ else if ( strcmp(fname, "_id")==0 ) {
b.append(e);
gotId = true;
if ( n == N && gotId )
@@ -893,20 +910,20 @@ namespace mongo {
if ( e.eoo() )
break;
switch( e.type() ) {
- case MinKey: {
- BSONObjBuilder m;
- m.append( "$minElement", 1 );
- b.append( e.fieldName(), m.done() );
- break;
- }
- case MaxKey: {
- BSONObjBuilder m;
- m.append( "$maxElement", 1 );
- b.append( e.fieldName(), m.done() );
- break;
- }
- default:
- b.append( e );
+ case MinKey: {
+ BSONObjBuilder m;
+ m.append( "$minElement", 1 );
+ b.append( e.fieldName(), m.done() );
+ break;
+ }
+ case MaxKey: {
+ BSONObjBuilder m;
+ m.append( "$maxElement", 1 );
+ b.append( e.fieldName(), m.done() );
+ break;
+ }
+ default:
+ b.append( e );
}
}
return b.obj();
@@ -924,7 +941,8 @@ namespace mongo {
if ( !f.eoo() ) {
b.appendAs( e, f.fieldName() );
f = j.next();
- } else {
+ }
+ else {
b.append( e );
}
}
@@ -933,20 +951,20 @@ namespace mongo {
bool BSONObj::okForStorage() const {
BSONObjIterator i( *this );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
const char * name = e.fieldName();
-
+
if ( strchr( name , '.' ) ||
- strchr( name , '$' ) ){
- return
+ strchr( name , '$' ) ) {
+ return
strcmp( name , "$ref" ) == 0 ||
strcmp( name , "$id" ) == 0
;
}
-
- if ( e.mayEncapsulate() ){
- switch ( e.type() ){
+
+ if ( e.mayEncapsulate() ) {
+ switch ( e.type() ) {
case Object:
case Array:
if ( ! e.embeddedObject().okForStorage() )
@@ -959,7 +977,7 @@ namespace mongo {
default:
uassert( 12579, "unhandled cases in BSONObj okForStorage" , 0 );
}
-
+
}
}
return true;
@@ -993,25 +1011,26 @@ namespace mongo {
return ss.str();
}
- void nested2dotted(BSONObjBuilder& b, const BSONObj& obj, const string& base){
+ void nested2dotted(BSONObjBuilder& b, const BSONObj& obj, const string& base) {
BSONObjIterator it(obj);
- while (it.more()){
+ while (it.more()) {
BSONElement e = it.next();
- if (e.type() == Object){
+ if (e.type() == Object) {
string newbase = base + e.fieldName() + ".";
nested2dotted(b, e.embeddedObject(), newbase);
- }else{
+ }
+ else {
string newbase = base + e.fieldName();
b.appendAs(e, newbase);
}
}
}
- void dotted2nested(BSONObjBuilder& b, const BSONObj& obj){
+ void dotted2nested(BSONObjBuilder& b, const BSONObj& obj) {
//use map to sort fields
BSONMap sorted = bson2map(obj);
EmbeddedBuilder eb(&b);
- for(BSONMap::const_iterator it=sorted.begin(); it!=sorted.end(); ++it){
+ for(BSONMap::const_iterator it=sorted.begin(); it!=sorted.end(); ++it) {
eb.appendAs(it->second, it->first);
}
eb.done();
@@ -1048,16 +1067,16 @@ namespace mongo {
} minkeydata;
BSONObj minKey((const char *) &minkeydata);
-/*
- struct JSObj0 {
- JSObj0() {
- totsize = 5;
- eoo = EOO;
- }
- int totsize;
- char eoo;
- } js0;
-*/
+ /*
+ struct JSObj0 {
+ JSObj0() {
+ totsize = 5;
+ eoo = EOO;
+ }
+ int totsize;
+ char eoo;
+ } js0;
+ */
#pragma pack()
struct BsonUnitTest : public UnitTest {
@@ -1089,7 +1108,7 @@ namespace mongo {
assert( b == id );
}
- void testbounds(){
+ void testbounds() {
BSONObj l , r;
{
BSONObjBuilder b;
@@ -1112,7 +1131,7 @@ namespace mongo {
assert( r.woCompare( l ) > 0 );
}
- void testorder(){
+ void testorder() {
{
BSONObj x,y,z;
{ BSONObjBuilder b; b.append( "x" , (long long)2 ); x = b.obj(); }
@@ -1201,21 +1220,20 @@ namespace mongo {
timestamp = OpTime::now().asDate();
}
- void BSONObjBuilder::appendMinForType( const StringData& fieldName , int t ){
- switch ( t ){
+ void BSONObjBuilder::appendMinForType( const StringData& fieldName , int t ) {
+ switch ( t ) {
case MinKey: appendMinKey( fieldName ); return;
case MaxKey: appendMinKey( fieldName ); return;
case NumberInt:
case NumberDouble:
case NumberLong:
append( fieldName , - numeric_limits<double>::max() ); return;
- case jstOID:
- {
- OID o;
- memset(&o, 0, sizeof(o));
- appendOID( fieldName , &o);
- return;
- }
+ case jstOID: {
+ OID o;
+ memset(&o, 0, sizeof(o));
+ appendOID( fieldName , &o);
+ return;
+ }
case Bool: appendBool( fieldName , false); return;
case Date: appendDate( fieldName , 0); return;
case jstNULL: appendNull( fieldName ); return;
@@ -1229,13 +1247,12 @@ namespace mongo {
case Undefined:
appendUndefined( fieldName ); return;
case RegEx: appendRegex( fieldName , "" ); return;
- case DBRef:
- {
- OID o;
- memset(&o, 0, sizeof(o));
- appendDBRef( fieldName , "" , o );
- return;
- }
+ case DBRef: {
+ OID o;
+ memset(&o, 0, sizeof(o));
+ appendDBRef( fieldName , "" , o );
+ return;
+ }
case Code: appendCode( fieldName , "" ); return;
case CodeWScope: appendCodeWScope( fieldName , "" , BSONObj() ); return;
case Timestamp: appendTimestamp( fieldName , 0); return;
@@ -1245,8 +1262,8 @@ namespace mongo {
uassert( 10061 , "type not supported for appendMinElementForType" , false );
}
- void BSONObjBuilder::appendMaxForType( const StringData& fieldName , int t ){
- switch ( t ){
+ void BSONObjBuilder::appendMaxForType( const StringData& fieldName , int t ) {
+ switch ( t ) {
case MinKey: appendMaxKey( fieldName ); break;
case MaxKey: appendMaxKey( fieldName ); break;
case NumberInt:
@@ -1257,13 +1274,12 @@ namespace mongo {
case BinData:
appendMinForType( fieldName , jstOID );
break;
- case jstOID:
- {
- OID o;
- memset(&o, 0xFF, sizeof(o));
- appendOID( fieldName , &o);
- break;
- }
+ case jstOID: {
+ OID o;
+ memset(&o, 0xFF, sizeof(o));
+ appendOID( fieldName , &o);
+ break;
+ }
case Undefined:
case jstNULL:
appendMinForType( fieldName , NumberInt );
@@ -1282,7 +1298,7 @@ namespace mongo {
}
const string BSONObjBuilder::numStrs[] = {
- "0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
+ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
"10", "11", "12", "13", "14", "15", "16", "17", "18", "19",
"20", "21", "22", "23", "24", "25", "26", "27", "28", "29",
"30", "31", "32", "33", "34", "35", "36", "37", "38", "39",
@@ -1294,77 +1310,77 @@ namespace mongo {
"90", "91", "92", "93", "94", "95", "96", "97", "98", "99",
};
- bool BSONObjBuilder::appendAsNumber( const StringData& fieldName , const string& data ){
+ bool BSONObjBuilder::appendAsNumber( const StringData& fieldName , const string& data ) {
if ( data.size() == 0 || data == "-")
return false;
-
+
unsigned int pos=0;
if ( data[0] == '-' )
pos++;
-
+
bool hasDec = false;
-
- for ( ; pos<data.size(); pos++ ){
+
+ for ( ; pos<data.size(); pos++ ) {
if ( isdigit(data[pos]) )
continue;
- if ( data[pos] == '.' ){
+ if ( data[pos] == '.' ) {
if ( hasDec )
return false;
hasDec = true;
continue;
}
-
+
return false;
}
-
- if ( hasDec ){
+
+ if ( hasDec ) {
double d = atof( data.c_str() );
append( fieldName , d );
return true;
}
-
- if ( data.size() < 8 ){
+
+ if ( data.size() < 8 ) {
append( fieldName , atoi( data.c_str() ) );
return true;
}
-
+
try {
long long num = boost::lexical_cast<long long>( data );
append( fieldName , num );
return true;
}
- catch(bad_lexical_cast &){
+ catch(bad_lexical_cast &) {
return false;
}
}
- void BSONObjBuilder::appendKeys( const BSONObj& keyPattern , const BSONObj& values ){
+ void BSONObjBuilder::appendKeys( const BSONObj& keyPattern , const BSONObj& values ) {
BSONObjIterator i(keyPattern);
BSONObjIterator j(values);
-
- while ( i.more() && j.more() ){
+
+ while ( i.more() && j.more() ) {
appendAs( j.next() , i.next().fieldName() );
}
-
+
assert( ! i.more() );
assert( ! j.more() );
}
- int BSONElementFieldSorter( const void * a , const void * b ){
+ int BSONElementFieldSorter( const void * a , const void * b ) {
const char * x = *((const char**)a);
const char * y = *((const char**)b);
x++; y++;
return lexNumCmp( x , y );
}
-
- BSONObjIteratorSorted::BSONObjIteratorSorted( const BSONObj& o ){
+
+ BSONObjIteratorSorted::BSONObjIteratorSorted( const BSONObj& o ) {
_nfields = o.nFields();
_fields = new const char*[_nfields];
int x = 0;
BSONObjIterator i( o );
- while ( i.more() ){
+ while ( i.more() ) {
_fields[x++] = i.next().rawdata();
assert( _fields[x-1] );
}
@@ -1374,10 +1390,10 @@ namespace mongo {
}
/** transform a BSON array into a vector of BSONElements.
- we match array # positions with their vector position, and ignore
- any fields with non-numeric field names.
+ we match array # positions with their vector position, and ignore
+ any fields with non-numeric field names.
*/
- vector<BSONElement> BSONElement::Array() const {
+ vector<BSONElement> BSONElement::Array() const {
chk(mongo::Array);
vector<BSONElement> v;
BSONObjIterator i(Obj());
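
One more mechanism worth spelling out from this file: appendAsNumber() above accepts a string only if it is all digits with an optional leading '-' and at most one decimal point, then appends a double for decimals, an int for short strings, and a long long otherwise. Its dispatch, modeled standalone:

    #include <cctype>
    #include <string>

    enum NumKind { NotNumeric, AsInt, AsLongLong, AsDouble };

    NumKind classifyNumericString(const std::string& data) {
        if (data.empty() || data == "-")
            return NotNumeric;
        size_t pos = (data[0] == '-') ? 1 : 0;
        bool hasDec = false;
        for (; pos < data.size(); pos++) {
            if (isdigit((unsigned char)data[pos]))
                continue;
            if (data[pos] == '.' && !hasDec) { // at most one decimal point
                hasDec = true;
                continue;
            }
            return NotNumeric;                 // anything else disqualifies
        }
        if (hasDec)
            return AsDouble;
        return data.size() < 8 ? AsInt : AsLongLong; // short strings fit an int
    }
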
diff --git a/db/jsobj.h b/db/jsobj.h
index fb866c4424a..a6472d5f36c 100644
--- a/db/jsobj.h
+++ b/db/jsobj.h
@@ -1,4 +1,4 @@
-/** @file jsobj.h
+/** @file jsobj.h
BSON classes
*/
diff --git a/db/jsobjmanipulator.h b/db/jsobjmanipulator.h
index 2b628cff4e5..0b3c0c240d3 100644
--- a/db/jsobjmanipulator.h
+++ b/db/jsobjmanipulator.h
@@ -36,7 +36,7 @@ namespace mongo {
OpTime::now().asDate()
*/
void initTimestamp();
-
+
/** Change the value, in place, of the number. */
void setNumber(double d) {
if ( _element.type() == NumberDouble ) *reinterpret_cast< double * >( value() ) = d;
@@ -44,30 +44,30 @@ namespace mongo {
else assert(0);
}
void SetNumber(double d) {
- if ( _element.type() == NumberDouble )
+ if ( _element.type() == NumberDouble )
*getDur().writing( reinterpret_cast< double * >( value() ) ) = d;
- else if ( _element.type() == NumberInt )
+ else if ( _element.type() == NumberInt )
*getDur().writing( reinterpret_cast< int * >( value() ) ) = (int) d;
else assert(0);
}
- void setLong(long long n) {
+ void setLong(long long n) {
assert( _element.type() == NumberLong );
*reinterpret_cast< long long * >( value() ) = n;
}
- void SetLong(long long n) {
+ void SetLong(long long n) {
assert( _element.type() == NumberLong );
*getDur().writing( reinterpret_cast< long long * >(value()) ) = n;
}
- void setInt(int n) {
+ void setInt(int n) {
assert( _element.type() == NumberInt );
*reinterpret_cast< int * >( value() ) = n;
}
- void SetInt(int n) {
+ void SetInt(int n) {
assert( _element.type() == NumberInt );
getDur().writingInt( *reinterpret_cast< int * >( value() ) ) = n;
}
-
+
/** Replace the type and value of the element with the type and value of e,
preserving the original fieldName */
void replaceTypeAndValue( const BSONElement &e ) {
@@ -86,18 +86,18 @@ namespace mongo {
*p = e.type();
memcpy( p + ofs, e.value(), valsize );
}
-
- static void lookForTimestamps( const BSONObj& obj ){
+
+ static void lookForTimestamps( const BSONObj& obj ) {
// If have a Timestamp field as the first or second element,
// update it to a Date field set to OpTime::now().asDate(). The
// replacement policy is a work in progress.
-
+
BSONObjIterator i( obj );
for( int j = 0; i.moreWithEOO() && j < 2; ++j ) {
BSONElement e = i.next();
if ( e.eoo() )
break;
- if ( e.type() == Timestamp ){
+ if ( e.type() == Timestamp ) {
BSONElementManipulator( e ).initTimestamp();
break;
}
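
The manipulator methods above patch numeric values in place by writing through pointers into the BSON buffer; the capitalized Set* variants route the same store through the durability layer (getDur().writing). A hedged illustration of the unguarded path, using memcpy since the original's reinterpret_cast store presumes suitably aligned storage:

    #include <cstring>

    // 'value' points at the payload bytes of a NumberLong element
    // inside a BSON buffer, just past the type byte and field name.
    void setLongInPlace(char* value, long long n) {
        std::memcpy(value, &n, sizeof(n));     // mutates the owning object
    }
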
diff --git a/db/json.cpp b/db/json.cpp
index de1b69a2805..4a6fad86595 100644
--- a/db/json.cpp
+++ b/db/json.cpp
@@ -43,12 +43,12 @@ using namespace boost::spirit;
namespace mongo {
struct ObjectBuilder : boost::noncopyable {
- ~ObjectBuilder(){
+ ~ObjectBuilder() {
unsigned i = builders.size();
- if ( i ){
+ if ( i ) {
i--;
- for ( ; i>=1; i-- ){
- if ( builders[i] ){
+ for ( ; i>=1; i-- ) {
+ if ( builders[i] ) {
builders[i]->done();
}
}
@@ -205,7 +205,8 @@ namespace mongo {
else if ( first < 0x08 ) {
b.ss << char( 0xc0 | ( ( first << 2 ) | ( second >> 6 ) ) );
b.ss << char( 0x80 | ( ~0xc0 & second ) );
- } else {
+ }
+ else {
b.ss << char( 0xe0 | ( first >> 4 ) );
b.ss << char( 0x80 | ( ~0xc0 & ( ( first << 2 ) | ( second >> 6 ) ) ) );
b.ss << char( 0x80 | ( ~0xc0 & second ) );
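
The branch above re-encodes a parsed \uXXXX escape as UTF-8, where first and second are the high and low bytes of the 16-bit code unit: one byte below 0x80, two below 0x800, three otherwise. The same encoding, written against the code point directly:

    #include <string>

    std::string utf8FromCodeUnit(unsigned cp) {  // cp <= 0xFFFF assumed
        std::string out;
        if (cp < 0x80) {
            out += char(cp);                     // plain ASCII
        }
        else if (cp < 0x800) {
            out += char(0xc0 | (cp >> 6));       // 110xxxxx
            out += char(0x80 | (cp & 0x3f));     // 10xxxxxx
        }
        else {
            out += char(0xe0 | (cp >> 12));      // 1110xxxx
            out += char(0x80 | ((cp >> 6) & 0x3f));
            out += char(0x80 | (cp & 0x3f));
        }
        return out;
    }
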
@@ -437,7 +438,7 @@ namespace mongo {
// in the original z example on line 3, if the input was "ab", foo() would only
// be called once.
struct JsonGrammar : public grammar< JsonGrammar > {
-public:
+ public:
JsonGrammar( ObjectBuilder &_b ) : b( _b ) {}
template < typename ScannerT >
@@ -471,32 +472,32 @@ public:
str = lexeme_d[ ch_p( '"' )[ chClear( self.b ) ] >>
*( ( ch_p( '\\' ) >>
(
- ch_p( 'b' )[ chE( self.b ) ] |
- ch_p( 'f' )[ chE( self.b ) ] |
- ch_p( 'n' )[ chE( self.b ) ] |
- ch_p( 'r' )[ chE( self.b ) ] |
- ch_p( 't' )[ chE( self.b ) ] |
- ch_p( 'v' )[ chE( self.b ) ] |
- ( ch_p( 'u' ) >> ( repeat_p( 4 )[ xdigit_p ][ chU( self.b ) ] ) ) |
- ( ~ch_p('x') & (~range_p('0','9'))[ ch( self.b ) ] ) // hex and octal aren't supported
+ ch_p( 'b' )[ chE( self.b ) ] |
+ ch_p( 'f' )[ chE( self.b ) ] |
+ ch_p( 'n' )[ chE( self.b ) ] |
+ ch_p( 'r' )[ chE( self.b ) ] |
+ ch_p( 't' )[ chE( self.b ) ] |
+ ch_p( 'v' )[ chE( self.b ) ] |
+ ( ch_p( 'u' ) >> ( repeat_p( 4 )[ xdigit_p ][ chU( self.b ) ] ) ) |
+ ( ~ch_p('x') & (~range_p('0','9'))[ ch( self.b ) ] ) // hex and octal aren't supported
)
) |
( ~range_p( 0x00, 0x1f ) & ~ch_p( '"' ) & ( ~ch_p( '\\' ) )[ ch( self.b ) ] ) ) >> '"' ];
singleQuoteStr = lexeme_d[ ch_p( '\'' )[ chClear( self.b ) ] >>
- *( ( ch_p( '\\' ) >>
- (
- ch_p( 'b' )[ chE( self.b ) ] |
- ch_p( 'f' )[ chE( self.b ) ] |
- ch_p( 'n' )[ chE( self.b ) ] |
- ch_p( 'r' )[ chE( self.b ) ] |
- ch_p( 't' )[ chE( self.b ) ] |
- ch_p( 'v' )[ chE( self.b ) ] |
- ( ch_p( 'u' ) >> ( repeat_p( 4 )[ xdigit_p ][ chU( self.b ) ] ) ) |
- ( ~ch_p('x') & (~range_p('0','9'))[ ch( self.b ) ] ) // hex and octal aren't supported
- )
- ) |
- ( ~range_p( 0x00, 0x1f ) & ~ch_p( '\'' ) & ( ~ch_p( '\\' ) )[ ch( self.b ) ] ) ) >> '\'' ];
+ *( ( ch_p( '\\' ) >>
+ (
+ ch_p( 'b' )[ chE( self.b ) ] |
+ ch_p( 'f' )[ chE( self.b ) ] |
+ ch_p( 'n' )[ chE( self.b ) ] |
+ ch_p( 'r' )[ chE( self.b ) ] |
+ ch_p( 't' )[ chE( self.b ) ] |
+ ch_p( 'v' )[ chE( self.b ) ] |
+ ( ch_p( 'u' ) >> ( repeat_p( 4 )[ xdigit_p ][ chU( self.b ) ] ) ) |
+ ( ~ch_p('x') & (~range_p('0','9'))[ ch( self.b ) ] ) // hex and octal aren't supported
+ )
+ ) |
+ ( ~range_p( 0x00, 0x1f ) & ~ch_p( '\'' ) & ( ~ch_p( '\\' ) )[ ch( self.b ) ] ) ) >> '\'' ];
// real_p accepts numbers with nonsignificant zero prefixes, which
// aren't allowed in JSON. Oh well.
@@ -547,8 +548,8 @@ public:
>> ( *( ch_p( 'i' ) | ch_p( 'g' ) | ch_p( 'm' ) ) )[ regexOptions( self.b ) ] ];
}
rule< ScannerT > object, members, array, elements, value, str, number, integer,
- dbref, dbrefS, dbrefT, oid, oidS, oidT, bindata, date, dateS, dateT,
- regex, regexS, regexT, quotedOid, fieldName, unquotedFieldName, singleQuoteStr;
+ dbref, dbrefS, dbrefT, oid, oidS, oidT, bindata, date, dateS, dateT,
+ regex, regexS, regexT, quotedOid, fieldName, unquotedFieldName, singleQuoteStr;
const rule< ScannerT > &start() const {
return object;
}
@@ -557,7 +558,7 @@ public:
};
BSONObj fromjson( const char *str , int* len) {
- if ( str[0] == '\0' ){
+ if ( str[0] == '\0' ) {
if (len) *len = 0;
return BSONObj();
}
@@ -567,7 +568,8 @@ public:
parse_info<> result = parse( str, parser, space_p );
if (len) {
*len = result.stop - str;
- } else if ( !result.full ) {
+ }
+ else if ( !result.full ) {
int limit = strnlen(result.stop , 10);
if (limit == -1) limit = 10;
msgasserted(10340, "Failure parsing JSON string near: " + string( result.stop, limit ));
diff --git a/db/lasterror.cpp b/db/lasterror.cpp
index 06fd23055c0..ba52111c883 100644
--- a/db/lasterror.cpp
+++ b/db/lasterror.cpp
@@ -34,14 +34,16 @@ namespace mongo {
void raiseError(int code , const char *msg) {
LastError *le = lastError.get();
if ( le == 0 ) {
- /* might be intentional (non-user thread) */
+ /* might be intentional (non-user thread) */
DEV {
static unsigned n;
if( ++n < 4 && !isShell ) log() << "dev: lastError==0 won't report:" << msg << endl;
}
- } else if ( le->disabled ) {
+ }
+ else if ( le->disabled ) {
log() << "lastError disabled, can't report: " << code << ":" << msg << endl;
- } else {
+ }
+ else {
le->raiseError(code, msg);
}
}
@@ -56,8 +58,8 @@ namespace mongo {
if ( msg.empty() ) {
if ( blankErr ) {
- b.appendNull( "err" );
- }
+ b.appendNull( "err" );
+ }
}
else {
b.append( "err", msg );
@@ -72,12 +74,12 @@ namespace mongo {
if ( writebackId.isSet() )
b.append( "writeback" , writebackId );
b.appendNumber( "n", nObjects );
-
+
return ! msg.empty();
}
- LastErrorHolder::~LastErrorHolder(){
- for ( IDMap::iterator i = _ids.begin(); i != _ids.end(); ++i ){
+ LastErrorHolder::~LastErrorHolder() {
+ for ( IDMap::iterator i = _ids.begin(); i != _ids.end(); ++i ) {
delete i->second.lerr;
i->second.lerr = 0;
}
@@ -85,11 +87,11 @@ namespace mongo {
}
- void LastErrorHolder::setID( int id ){
+ void LastErrorHolder::setID( int id ) {
_id.set( id );
}
-
- int LastErrorHolder::getID(){
+
+ int LastErrorHolder::getID() {
return _id.get();
}
@@ -107,24 +109,24 @@ namespace mongo {
return ret;
return 0;
}
-
- LastError * LastErrorHolder::_get( bool create ){
+
+ LastError * LastErrorHolder::_get( bool create ) {
int id = _id.get();
- if ( id == 0 ){
+ if ( id == 0 ) {
LastError * le = _tl.get();
- if ( ! le && create ){
+ if ( ! le && create ) {
le = new LastError();
_tl.reset( le );
}
return le;
}
- scoped_lock lock(_idsmutex);
+ scoped_lock lock(_idsmutex);
map<int,Status>::iterator i = _ids.find( id );
- if ( i == _ids.end() ){
+ if ( i == _ids.end() ) {
if ( ! create )
return 0;
-
+
LastError * le = new LastError();
Status s;
s.time = time(0);
@@ -132,42 +134,42 @@ namespace mongo {
_ids[id] = s;
return le;
}
-
+
Status &status = i->second;
status.time = time(0);
return status.lerr;
}
- void LastErrorHolder::remove( int id ){
+ void LastErrorHolder::remove( int id ) {
scoped_lock lock(_idsmutex);
map<int,Status>::iterator i = _ids.find( id );
if ( i == _ids.end() )
return;
-
+
delete i->second.lerr;
_ids.erase( i );
}
- void LastErrorHolder::release(){
+ void LastErrorHolder::release() {
int id = _id.get();
- if ( id == 0 ){
+ if ( id == 0 ) {
_tl.release();
return;
}
-
+
remove( id );
}
/** ok to call more than once. */
- void LastErrorHolder::initThread() {
+ void LastErrorHolder::initThread() {
if( _tl.get() ) return;
assert( _id.get() == 0 );
_tl.reset( new LastError() );
}
-
- void LastErrorHolder::reset( LastError * le ){
+
+ void LastErrorHolder::reset( LastError * le ) {
int id = _id.get();
- if ( id == 0 ){
+ if ( id == 0 ) {
_tl.reset( le );
return;
}
@@ -177,17 +179,18 @@ namespace mongo {
status.time = time(0);
status.lerr = le;
}
-
+
void prepareErrForNewRequest( Message &m, LastError * err ) {
// a killCursors message shouldn't affect last error
if ( m.operation() == dbKillCursors ) {
err->disabled = true;
- } else {
+ }
+ else {
err->disabled = false;
err->nPrev++;
- }
+ }
}
-
+
LastError * LastErrorHolder::startRequest( Message& m , int clientId ) {
assert( clientId );
setID( clientId );
@@ -201,33 +204,33 @@ namespace mongo {
prepareErrForNewRequest( m, connectionOwned );
}
- void LastErrorHolder::disconnect( int clientId ){
+ void LastErrorHolder::disconnect( int clientId ) {
if ( clientId )
remove(clientId);
}
struct LastErrorHolderTest : public UnitTest {
public:
-
- void test( int i ){
+
+ void test( int i ) {
_tl.set( i );
assert( _tl.get() == i );
}
-
- void tlmaptest(){
+
+ void tlmaptest() {
test( 1 );
test( 12123123 );
test( -123123 );
test( numeric_limits<int>::min() );
test( numeric_limits<int>::max() );
}
-
- void run(){
+
+ void run() {
tlmaptest();
LastError * a = new LastError();
LastError * b = new LastError();
-
+
LastErrorHolder holder;
holder.reset( a );
assert( a == holder.get() );
@@ -237,10 +240,10 @@ namespace mongo {
assert( b == holder.get() );
holder.setID( 0 );
assert( a == holder.get() );
-
+
holder.remove( 1 );
}
-
+
ThreadLocalValue<int> _tl;
} lastErrorHolderTest;
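
LastErrorHolder::_get() above chooses between two stores: a client id of 0 means the error state lives in thread-local storage, while any real connection id goes through a mutex-guarded map. The same split, modeled minimally with C++11 thread_local standing in for boost::thread_specific_ptr:

    #include <map>
    #include <mutex>

    struct MiniLastError { int code; MiniLastError() : code(0) {} };

    class MiniHolder {
        std::mutex m;
        std::map<int, MiniLastError*> ids;
        static thread_local MiniLastError* tl;
    public:
        MiniLastError* get(int id, bool create) {
            if (id == 0) {                       // no client id: thread local
                if (!tl && create)
                    tl = new MiniLastError();
                return tl;
            }
            std::lock_guard<std::mutex> lk(m);   // pooled path: shared map
            std::map<int, MiniLastError*>::iterator it = ids.find(id);
            if (it != ids.end())
                return it->second;
            if (!create)
                return 0;
            return ids[id] = new MiniLastError();
        }
    };
    thread_local MiniLastError* MiniHolder::tl = 0;
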
diff --git a/db/lasterror.h b/db/lasterror.h
index 39152b336a6..c77ec740f03 100644
--- a/db/lasterror.h
+++ b/db/lasterror.h
@@ -33,7 +33,7 @@ namespace mongo {
int nPrev;
bool valid;
bool disabled;
- void writeback( OID& oid ){
+ void writeback( OID& oid ) {
reset( true );
writebackId = oid;
}
@@ -42,13 +42,13 @@ namespace mongo {
code = _code;
msg = _msg;
}
- void recordUpdate( bool _updateObjects , long long _nObjects , OID _upsertedId ){
+ void recordUpdate( bool _updateObjects , long long _nObjects , OID _upsertedId ) {
reset( true );
nObjects = _nObjects;
updatedExisting = _updateObjects ? True : False;
if ( _upsertedId.isSet() )
upsertedId = _upsertedId;
-
+
}
void recordDelete( long long nDeleted ) {
reset( true );
@@ -75,17 +75,18 @@ namespace mongo {
bool appendSelf( BSONObjBuilder &b , bool blankErr = true );
struct Disabled : boost::noncopyable {
- Disabled( LastError * le ){
+ Disabled( LastError * le ) {
_le = le;
- if ( _le ){
+ if ( _le ) {
_prev = _le->disabled;
_le->disabled = true;
- } else {
+ }
+ else {
_prev = false;
}
}
-
- ~Disabled(){
+
+ ~Disabled() {
if ( _le )
_le->disabled = _prev;
}
@@ -93,7 +94,7 @@ namespace mongo {
LastError * _le;
bool _prev;
};
-
+
static LastError noError;
};
@@ -103,9 +104,9 @@ namespace mongo {
~LastErrorHolder();
LastError * get( bool create = false );
- LastError * getSafe(){
+ LastError * getSafe() {
LastError * le = get(false);
- if ( ! le ){
+ if ( ! le ) {
log( LL_ERROR ) << " no LastError! id: " << getID() << endl;
assert( le );
}
@@ -127,11 +128,11 @@ namespace mongo {
void remove( int id );
void release();
-
+
/** when db receives a message/request, call this */
void startRequest( Message& m , LastError * connectionOwned );
LastError * startRequest( Message& m , int clientId );
-
+
void disconnect( int clientId );
// used to disable lastError reporting while processing a killCursors message
@@ -140,7 +141,7 @@ namespace mongo {
private:
ThreadLocalValue<int> _id;
boost::thread_specific_ptr<LastError> _tl;
-
+
struct Status {
time_t time;
LastError *lerr;
@@ -148,7 +149,7 @@ namespace mongo {
typedef map<int,Status> IDMap;
static mongo::mutex _idsmutex;
- IDMap _ids;
+ IDMap _ids;
} lastError;
void raiseError(int code , const char *msg);
diff --git a/db/matcher.cpp b/db/matcher.cpp
index 544ec27f1cf..494063f4bed 100644
--- a/db/matcher.cpp
+++ b/db/matcher.cpp
@@ -30,7 +30,7 @@
#include "pdfile.h"
namespace {
- inline pcrecpp::RE_Options flags2options(const char* flags){
+ inline pcrecpp::RE_Options flags2options(const char* flags) {
pcrecpp::RE_Options options;
options.set_utf8(true);
while ( flags && *flags ) {
@@ -52,7 +52,7 @@ namespace {
namespace mongo {
extern BSONObj staticNull;
-
+
class Where {
public:
Where() {
@@ -64,22 +64,22 @@ namespace mongo {
if ( scope.get() )
scope->execSetup( "_mongo.readOnly = false;" , "make not read only" );
- if ( jsScope ){
+ if ( jsScope ) {
delete jsScope;
jsScope = 0;
}
func = 0;
}
-
+
auto_ptr<Scope> scope;
ScriptingFunction func;
BSONObj *jsScope;
-
+
void setFunc(const char *code) {
massert( 10341 , "scope has to be created first!" , scope.get() );
func = scope->createFunction( code );
}
-
+
};
Matcher::~Matcher() {
@@ -87,23 +87,23 @@ namespace mongo {
where = 0;
}
- ElementMatcher::ElementMatcher( BSONElement _e , int _op, bool _isNot )
- : toMatch( _e ) , compareOp( _op ), isNot( _isNot ), subMatcherOnPrimitives(false){
- if ( _op == BSONObj::opMOD ){
+ ElementMatcher::ElementMatcher( BSONElement _e , int _op, bool _isNot )
+ : toMatch( _e ) , compareOp( _op ), isNot( _isNot ), subMatcherOnPrimitives(false) {
+ if ( _op == BSONObj::opMOD ) {
BSONObj o = _e.embeddedObject();
mod = o["0"].numberInt();
modm = o["1"].numberInt();
-
+
uassert( 10073 , "mod can't be 0" , mod );
}
- else if ( _op == BSONObj::opTYPE ){
+ else if ( _op == BSONObj::opTYPE ) {
type = (BSONType)(_e.numberInt());
}
- else if ( _op == BSONObj::opELEM_MATCH ){
+ else if ( _op == BSONObj::opELEM_MATCH ) {
BSONElement m = _e;
uassert( 12517 , "$elemMatch needs an Object" , m.type() == Object );
BSONObj x = m.embeddedObject();
- if ( x.firstElement().getGtLtOp() == 0 ){
+ if ( x.firstElement().getGtLtOp() == 0 ) {
subMatcher.reset( new Matcher( x ) );
subMatcherOnPrimitives = false;
}
@@ -115,19 +115,20 @@ namespace mongo {
}
}
- ElementMatcher::ElementMatcher( BSONElement _e , int _op , const BSONObj& array, bool _isNot )
+ ElementMatcher::ElementMatcher( BSONElement _e , int _op , const BSONObj& array, bool _isNot )
: toMatch( _e ) , compareOp( _op ), isNot( _isNot ), subMatcherOnPrimitives(false) {
-
+
myset.reset( new set<BSONElement,element_lt>() );
-
+
BSONObjIterator i( array );
while ( i.more() ) {
BSONElement ie = i.next();
- if ( _op == BSONObj::opALL && ie.type() == Object && ie.embeddedObject().firstElement().getGtLtOp() == BSONObj::opELEM_MATCH ){
+ if ( _op == BSONObj::opALL && ie.type() == Object && ie.embeddedObject().firstElement().getGtLtOp() == BSONObj::opELEM_MATCH ) {
shared_ptr<Matcher> s;
s.reset( new Matcher( ie.embeddedObject().firstElement().embeddedObjectUserCheck() ) );
allMatchers.push_back( s );
- } else if ( ie.type() == RegEx ) {
+ }
+ else if ( ie.type() == RegEx ) {
if ( !myregex.get() ) {
myregex.reset( new vector< RegexMatcher >() );
}
@@ -142,19 +143,20 @@ namespace mongo {
string prefix = simpleRegex(rm.regex, rm.flags, &purePrefix);
if (purePrefix)
rm.prefix = prefix;
- } else {
+ }
+ else {
myset->insert(ie);
}
}
-
- if ( allMatchers.size() ){
+
+ if ( allMatchers.size() ) {
uassert( 13020 , "with $all, can't mix $elemMatch and others" , myset->size() == 0 && !myregex.get());
}
-
+
}
-
-
- void Matcher::addRegex(const char *fieldName, const char *regex, const char *flags, bool isNot){
+
+
+ void Matcher::addRegex(const char *fieldName, const char *regex, const char *flags, bool isNot) {
if ( nRegex >= 4 ) {
out() << "ERROR: too many regexes in query" << endl;
@@ -168,106 +170,106 @@ namespace mongo {
rm.isNot = isNot;
nRegex++;
- if (!isNot){ //TODO something smarter
+ if (!isNot) { //TODO something smarter
bool purePrefix;
string prefix = simpleRegex(regex, flags, &purePrefix);
if (purePrefix)
rm.prefix = prefix;
}
- }
+ }
}
-
+
bool Matcher::addOp( const BSONElement &e, const BSONElement &fe, bool isNot, const char *& regex, const char *&flags ) {
const char *fn = fe.fieldName();
int op = fe.getGtLtOp( -1 );
- if ( op == -1 ){
- if ( !isNot && fn[1] == 'r' && fn[2] == 'e' && fn[3] == 'f' && fn[4] == 0 ){
+ if ( op == -1 ) {
+ if ( !isNot && fn[1] == 'r' && fn[2] == 'e' && fn[3] == 'f' && fn[4] == 0 ) {
return false; // { $ref : xxx } - treat as normal object
}
uassert( 10068 , (string)"invalid operator: " + fn , op != -1 );
}
-
- switch ( op ){
- case BSONObj::GT:
- case BSONObj::GTE:
- case BSONObj::LT:
- case BSONObj::LTE:{
- shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
- _builders.push_back( b );
- b->appendAs(fe, e.fieldName());
- addBasic(b->done().firstElement(), op, isNot);
- break;
- }
- case BSONObj::NE:{
- haveNeg = true;
- shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
- _builders.push_back( b );
- b->appendAs(fe, e.fieldName());
- addBasic(b->done().firstElement(), BSONObj::NE, isNot);
- break;
- }
- case BSONObj::opALL:
- all = true;
- case BSONObj::opIN:
- uassert( 13276 , "$in needs an array" , fe.isABSONObj() );
- basics.push_back( ElementMatcher( e , op , fe.embeddedObject(), isNot ) );
- break;
- case BSONObj::NIN:
- uassert( 13277 , "$nin needs an array" , fe.isABSONObj() );
- haveNeg = true;
- basics.push_back( ElementMatcher( e , op , fe.embeddedObject(), isNot ) );
- break;
- case BSONObj::opMOD:
- case BSONObj::opTYPE:
- case BSONObj::opELEM_MATCH: {
- shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
- _builders.push_back( b );
- b->appendAs(fe, e.fieldName());
- // these are types where ElementMatcher has all the info
- basics.push_back( ElementMatcher( b->done().firstElement() , op, isNot ) );
- break;
- }
- case BSONObj::opSIZE:{
- shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
- _builders.push_back( b );
- b->appendAs(fe, e.fieldName());
- addBasic(b->done().firstElement(), BSONObj::opSIZE, isNot);
- haveSize = true;
- break;
- }
- case BSONObj::opEXISTS:{
- shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
- _builders.push_back( b );
- b->appendAs(fe, e.fieldName());
- addBasic(b->done().firstElement(), BSONObj::opEXISTS, isNot);
- break;
- }
- case BSONObj::opREGEX:{
- uassert( 13032, "can't use $not with $regex, use BSON regex type instead", !isNot );
- if ( fe.type() == RegEx ){
- regex = fe.regex();
- flags = fe.regexFlags();
- }
- else {
- regex = fe.valuestrsafe();
- }
- break;
- }
- case BSONObj::opOPTIONS:{
- uassert( 13029, "can't use $not with $options, use BSON regex type instead", !isNot );
- flags = fe.valuestrsafe();
- break;
- }
- case BSONObj::opNEAR:
- case BSONObj::opWITHIN:
- case BSONObj::opMAX_DISTANCE:
- break;
- default:
- uassert( 10069 , (string)"BUG - can't operator for: " + fn , 0 );
- }
+
+ switch ( op ) {
+ case BSONObj::GT:
+ case BSONObj::GTE:
+ case BSONObj::LT:
+ case BSONObj::LTE: {
+ shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
+ _builders.push_back( b );
+ b->appendAs(fe, e.fieldName());
+ addBasic(b->done().firstElement(), op, isNot);
+ break;
+ }
+ case BSONObj::NE: {
+ haveNeg = true;
+ shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
+ _builders.push_back( b );
+ b->appendAs(fe, e.fieldName());
+ addBasic(b->done().firstElement(), BSONObj::NE, isNot);
+ break;
+ }
+ case BSONObj::opALL:
+ all = true;
+ case BSONObj::opIN:
+ uassert( 13276 , "$in needs an array" , fe.isABSONObj() );
+ basics.push_back( ElementMatcher( e , op , fe.embeddedObject(), isNot ) );
+ break;
+ case BSONObj::NIN:
+ uassert( 13277 , "$nin needs an array" , fe.isABSONObj() );
+ haveNeg = true;
+ basics.push_back( ElementMatcher( e , op , fe.embeddedObject(), isNot ) );
+ break;
+ case BSONObj::opMOD:
+ case BSONObj::opTYPE:
+ case BSONObj::opELEM_MATCH: {
+ shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
+ _builders.push_back( b );
+ b->appendAs(fe, e.fieldName());
+ // these are types where ElementMatcher has all the info
+ basics.push_back( ElementMatcher( b->done().firstElement() , op, isNot ) );
+ break;
+ }
+ case BSONObj::opSIZE: {
+ shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
+ _builders.push_back( b );
+ b->appendAs(fe, e.fieldName());
+ addBasic(b->done().firstElement(), BSONObj::opSIZE, isNot);
+ haveSize = true;
+ break;
+ }
+ case BSONObj::opEXISTS: {
+ shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
+ _builders.push_back( b );
+ b->appendAs(fe, e.fieldName());
+ addBasic(b->done().firstElement(), BSONObj::opEXISTS, isNot);
+ break;
+ }
+ case BSONObj::opREGEX: {
+ uassert( 13032, "can't use $not with $regex, use BSON regex type instead", !isNot );
+ if ( fe.type() == RegEx ) {
+ regex = fe.regex();
+ flags = fe.regexFlags();
+ }
+ else {
+ regex = fe.valuestrsafe();
+ }
+ break;
+ }
+ case BSONObj::opOPTIONS: {
+ uassert( 13029, "can't use $not with $options, use BSON regex type instead", !isNot );
+ flags = fe.valuestrsafe();
+ break;
+ }
+ case BSONObj::opNEAR:
+ case BSONObj::opWITHIN:
+ case BSONObj::opMAX_DISTANCE:
+ break;
+ default:
+ uassert( 10069 , (string)"BUG - can't operator for: " + fn , 0 );
+ }
return true;
}
-
+
void Matcher::parseOr( const BSONElement &e, bool subMatcher, list< shared_ptr< Matcher > > &matchers ) {
uassert( 13090, "nested $or/$nor not allowed", !subMatcher );
uassert( 13086, "$or/$nor must be a nonempty array", e.type() == Array && e.embeddedObject().nFields() > 0 );
@@ -286,14 +288,16 @@ namespace mongo {
return false;
if ( ef[ 1 ] == 'o' && ef[ 2 ] == 'r' && ef[ 3 ] == 0 ) {
parseOr( e, subMatcher, _orMatchers );
- } else if ( ef[ 1 ] == 'n' && ef[ 2 ] == 'o' && ef[ 3 ] == 'r' && ef[ 4 ] == 0 ) {
+ }
+ else if ( ef[ 1 ] == 'n' && ef[ 2 ] == 'o' && ef[ 3 ] == 'r' && ef[ 4 ] == 0 ) {
parseOr( e, subMatcher, _norMatchers );
- } else {
+ }
+ else {
return false;
}
return true;
}
-
+
/* _jsobj - the query pattern
*/
Matcher::Matcher(const BSONObj &_jsobj, bool subMatcher) :
@@ -302,7 +306,7 @@ namespace mongo {
BSONObjIterator i(jsobj);
while ( i.more() ) {
BSONElement e = i.next();
-
+
if ( parseOrNor( e, subMatcher ) ) {
continue;
}
@@ -311,7 +315,7 @@ namespace mongo {
// $where: function()...
uassert( 10066 , "$where occurs twice?", where == 0 );
uassert( 10067 , "$where query, but no script engine", globalScriptEngine );
- massert( 13089 , "no current client needed for $where" , haveClient() );
+ massert( 13089 , "no current client needed for $where" , haveClient() );
where = new Where();
where->scope = globalScriptEngine->getPooledScope( cc().ns() );
where->scope->localConnect( cc().database()->name.c_str() );
@@ -324,7 +328,7 @@ namespace mongo {
const char *code = e.valuestr();
where->setFunc(code);
}
-
+
where->scope->execSetup( "_mongo.readOnly = true;" , "make read only" );
continue;
@@ -334,7 +338,7 @@ namespace mongo {
addRegex( e.fieldName(), e.regex(), e.regexFlags() );
continue;
}
-
+
// greater than / less than...
// e.g., e == { a : { $gt : 3 } }
// or
@@ -343,35 +347,36 @@ namespace mongo {
// support {$regex:"a|b", $options:"imx"}
const char* regex = NULL;
const char* flags = "";
-
+
// e.g., fe == { $gt : 3 }
BSONObjIterator j(e.embeddedObject());
bool isOperator = false;
while ( j.more() ) {
BSONElement fe = j.next();
const char *fn = fe.fieldName();
-
+
if ( fn[0] == '$' && fn[1] ) {
isOperator = true;
-
+
if ( fn[1] == 'n' && fn[2] == 'o' && fn[3] == 't' && fn[4] == 0 ) {
haveNeg = true;
switch( fe.type() ) {
- case Object: {
- BSONObjIterator k( fe.embeddedObject() );
- uassert( 13030, "$not cannot be empty", k.more() );
- while( k.more() ) {
- addOp( e, k.next(), true, regex, flags );
- }
- break;
+ case Object: {
+ BSONObjIterator k( fe.embeddedObject() );
+ uassert( 13030, "$not cannot be empty", k.more() );
+ while( k.more() ) {
+ addOp( e, k.next(), true, regex, flags );
}
- case RegEx:
- addRegex( e.fieldName(), fe.regex(), fe.regexFlags(), true );
- break;
- default:
- uassert( 13031, "invalid use of $not", false );
+ break;
+ }
+ case RegEx:
+ addRegex( e.fieldName(), fe.regex(), fe.regexFlags(), true );
+ break;
+ default:
+ uassert( 13031, "invalid use of $not", false );
}
- } else {
+ }
+ else {
if ( !addOp( e, fe, false, regex, flags ) ) {
isOperator = false;
break;
@@ -383,43 +388,43 @@ namespace mongo {
break;
}
}
- if (regex){
+ if (regex) {
addRegex(e.fieldName(), regex, flags);
}
if ( isOperator )
continue;
}
- if ( e.type() == Array ){
+ if ( e.type() == Array ) {
hasArray = true;
}
else if( strcmp(e.fieldName(), "$atomic") == 0 ) {
_atomic = e.trueValue();
continue;
}
-
+
// normal, simple case e.g. { a : "foo" }
addBasic(e, BSONObj::Equality, false);
}
}
-
+
Matcher::Matcher( const Matcher &other, const BSONObj &key ) :
- where(0), constrainIndexKey_( key ), haveSize(), all(), hasArray(0), haveNeg(), _atomic(false), nRegex(0) {
+ where(0), constrainIndexKey_( key ), haveSize(), all(), hasArray(0), haveNeg(), _atomic(false), nRegex(0) {
// do not include fields which would make keyMatch() false
for( vector< ElementMatcher >::const_iterator i = other.basics.begin(); i != other.basics.end(); ++i ) {
if ( key.hasField( i->toMatch.fieldName() ) ) {
switch( i->compareOp ) {
- case BSONObj::opSIZE:
- case BSONObj::opALL:
- case BSONObj::NE:
- case BSONObj::NIN:
- break;
- default: {
- if ( !i->isNot && i->toMatch.type() != Array ) {
- basics.push_back( *i );
- }
+ case BSONObj::opSIZE:
+ case BSONObj::opALL:
+ case BSONObj::NE:
+ case BSONObj::NIN:
+ break;
+ default: {
+ if ( !i->isNot && i->toMatch.type() != Array ) {
+ basics.push_back( *i );
}
}
+ }
}
}
for( int i = 0; i < other.nRegex; ++i ) {
@@ -431,29 +436,29 @@ namespace mongo {
_orMatchers.push_back( shared_ptr< Matcher >( new Matcher( **i, key ) ) );
}
}
-
+
inline bool regexMatches(const RegexMatcher& rm, const BSONElement& e) {
- switch (e.type()){
- case String:
- case Symbol:
- if (rm.prefix.empty())
- return rm.re->PartialMatch(e.valuestr());
- else
- return !strncmp(e.valuestr(), rm.prefix.c_str(), rm.prefix.size());
- case RegEx:
- return !strcmp(rm.regex, e.regex()) && !strcmp(rm.flags, e.regexFlags());
- default:
- return false;
+ switch (e.type()) {
+ case String:
+ case Symbol:
+ if (rm.prefix.empty())
+ return rm.re->PartialMatch(e.valuestr());
+ else
+ return !strncmp(e.valuestr(), rm.prefix.c_str(), rm.prefix.size());
+ case RegEx:
+ return !strcmp(rm.regex, e.regex()) && !strcmp(rm.flags, e.regexFlags());
+ default:
+ return false;
}
}
-
+
inline int Matcher::valuesMatch(const BSONElement& l, const BSONElement& r, int op, const ElementMatcher& bm) {
assert( op != BSONObj::NE && op != BSONObj::NIN );
-
+
if ( op == BSONObj::Equality ) {
return l.valuesEqual(r);
}
-
+
if ( op == BSONObj::opIN ) {
// { $in : [1,2,3] }
int count = bm.myset->count(l);
@@ -481,15 +486,15 @@ namespace mongo {
}
return count == r.number();
}
-
- if ( op == BSONObj::opMOD ){
+
+ if ( op == BSONObj::opMOD ) {
if ( ! l.isNumber() )
return false;
-
+
return l.numberLong() % bm.mod == bm.modm;
}
-
- if ( op == BSONObj::opTYPE ){
+
+ if ( op == BSONObj::opTYPE ) {
return bm.type == l.type();
}
@@ -516,7 +521,7 @@ namespace mongo {
return 0;
return bm.toMatch.boolean() ? -1 : 1;
}
-
+
/* Check if a particular field matches.
fieldName - field to match "a.b" if we are reaching into an embedded object.
@@ -529,8 +534,8 @@ namespace mongo {
{ "a.b" : 3 } means obj.a.b == 3
{ a : { $lt : 3 } } means obj.a < 3
- { a : { $in : [1,2] } } means [1,2].contains(obj.a)
-
+ { a : { $in : [1,2] } } means [1,2].contains(obj.a)
+
return value
-1 mismatch
0 missing element
@@ -539,20 +544,20 @@ namespace mongo {
int Matcher::matchesDotted(const char *fieldName, const BSONElement& toMatch, const BSONObj& obj, int compareOp, const ElementMatcher& em , bool isArr, MatchDetails * details ) {
DEBUGMATCHER( "\t matchesDotted : " << fieldName << " hasDetails: " << ( details ? "yes" : "no" ) );
if ( compareOp == BSONObj::opALL ) {
-
- if ( em.allMatchers.size() ){
+
+ if ( em.allMatchers.size() ) {
BSONElement e = obj.getFieldDotted( fieldName );
uassert( 13021 , "$all/$elemMatch needs to be applied to array" , e.type() == Array );
-
- for ( unsigned i=0; i<em.allMatchers.size(); i++ ){
+
+ for ( unsigned i=0; i<em.allMatchers.size(); i++ ) {
bool found = false;
BSONObjIterator x( e.embeddedObject() );
- while ( x.more() ){
+ while ( x.more() ) {
BSONElement f = x.next();
if ( f.type() != Object )
continue;
- if ( em.allMatchers[i]->matches( f.embeddedObject() ) ){
+ if ( em.allMatchers[i]->matches( f.embeddedObject() ) ) {
found = true;
break;
}
@@ -561,28 +566,28 @@ namespace mongo {
if ( ! found )
return -1;
}
-
+
return 1;
}
-
+
if ( em.myset->size() == 0 && !em.myregex.get() )
return -1; // is this desired?
-
+
BSONElementSet myValues;
obj.getFieldsDotted( fieldName , myValues );
-
+
for( set< BSONElement, element_lt >::const_iterator i = em.myset->begin(); i != em.myset->end(); ++i ) {
// ignore nulls
if ( i->type() == jstNULL )
continue;
-
+
if ( myValues.count( *i ) == 0 )
return -1;
- }
+ }
if ( !em.myregex.get() )
return 1;
-
+
for( vector< RegexMatcher >::const_iterator i = em.myregex->begin(); i != em.myregex->end(); ++i ) {
bool match = false;
for( BSONElementSet::const_iterator j = myValues.begin(); j != myValues.end(); ++j ) {
@@ -594,10 +599,10 @@ namespace mongo {
if ( !match )
return -1;
}
-
+
return 1;
} // end opALL
-
+
if ( compareOp == BSONObj::NE )
return matchesNe( fieldName, toMatch, obj, em , details );
if ( compareOp == BSONObj::NIN ) {
@@ -619,18 +624,19 @@ namespace mongo {
}
return 1;
}
-
+
BSONElement e;
bool indexed = !constrainIndexKey_.isEmpty();
if ( indexed ) {
e = obj.getFieldUsingIndexNames(fieldName, constrainIndexKey_);
- if( e.eoo() ){
+ if( e.eoo() ) {
cout << "obj: " << obj << endl;
cout << "fieldName: " << fieldName << endl;
cout << "constrainIndexKey_: " << constrainIndexKey_ << endl;
assert( !e.eoo() );
}
- } else {
+ }
+ else {
const char *p = strchr(fieldName, '.');
if ( p ) {
@@ -668,7 +674,7 @@ namespace mongo {
if ( details )
details->elemMatchKey = z.fieldName();
return 1;
- }
+ }
else if ( cmp < 0 ) {
found = true;
}
@@ -677,7 +683,7 @@ namespace mongo {
return found ? -1 : retMissing( em );
}
- if( p ) {
+ if( p ) {
return retMissing( em );
}
else {
@@ -687,25 +693,27 @@ namespace mongo {
if ( compareOp == BSONObj::opEXISTS ) {
return ( e.eoo() ^ ( toMatch.boolean() ^ em.isNot ) ) ? 1 : -1;
- } else if ( ( e.type() != Array || indexed || compareOp == BSONObj::opSIZE ) &&
- valuesMatch(e, toMatch, compareOp, em ) ) {
+ }
+ else if ( ( e.type() != Array || indexed || compareOp == BSONObj::opSIZE ) &&
+ valuesMatch(e, toMatch, compareOp, em ) ) {
return 1;
- } else if ( e.type() == Array && compareOp != BSONObj::opSIZE ) {
+ }
+ else if ( e.type() == Array && compareOp != BSONObj::opSIZE ) {
BSONObjIterator ai(e.embeddedObject());
while ( ai.moreWithEOO() ) {
BSONElement z = ai.next();
-
- if ( compareOp == BSONObj::opELEM_MATCH ){
- if ( z.type() == Object ){
- if ( em.subMatcher->matches( z.embeddedObject() ) ){
+
+ if ( compareOp == BSONObj::opELEM_MATCH ) {
+ if ( z.type() == Object ) {
+ if ( em.subMatcher->matches( z.embeddedObject() ) ) {
if ( details )
details->elemMatchKey = z.fieldName();
return 1;
}
}
- else if ( em.subMatcherOnPrimitives ){
- if ( z.type() && em.subMatcher->matches( z.wrap( "" ) ) ){
+ else if ( em.subMatcherOnPrimitives ) {
+ if ( z.type() && em.subMatcher->matches( z.wrap( "" ) ) ) {
if ( details )
details->elemMatchKey = z.fieldName();
return 1;
@@ -721,12 +729,12 @@ namespace mongo {
}
}
-
- if ( compareOp == BSONObj::Equality && e.woCompare( toMatch , false ) == 0 ){
+
+ if ( compareOp == BSONObj::Equality && e.woCompare( toMatch , false ) == 0 ) {
// match an entire array to itself
return 1;
}
-
+
}
else if ( e.eoo() ) {
// 0 indicates "missing element"
@@ -759,7 +767,8 @@ namespace mongo {
if ( ( bm.compareOp == BSONObj::NE ) ^ bm.isNot ) {
return false;
}
- } else {
+ }
+ else {
if ( !bm.isNot ) {
return false;
}
@@ -774,7 +783,8 @@ namespace mongo {
BSONElement e = jsobj.getFieldUsingIndexNames(rm.fieldName, constrainIndexKey_);
if ( !e.eoo() )
s.insert( e );
- } else {
+ }
+ else {
jsobj.getFieldsDotted( rm.fieldName, s );
}
bool match = false;
@@ -784,11 +794,11 @@ namespace mongo {
if ( !match ^ rm.isNot )
return false;
}
-
+
if ( _orMatchers.size() > 0 ) {
bool match = false;
for( list< shared_ptr< Matcher > >::const_iterator i = _orMatchers.begin();
- i != _orMatchers.end(); ++i ) {
+ i != _orMatchers.end(); ++i ) {
// SERVER-205 don't submit details - we don't want to track field
// matched within $or, and at this point we've already loaded the
// whole document
@@ -801,55 +811,56 @@ namespace mongo {
return false;
}
}
-
+
if ( _norMatchers.size() > 0 ) {
for( list< shared_ptr< Matcher > >::const_iterator i = _norMatchers.begin();
- i != _norMatchers.end(); ++i ) {
+ i != _norMatchers.end(); ++i ) {
// SERVER-205 don't submit details - we don't want to track field
// matched within $nor, and at this point we've already loaded the
// whole document
if ( (*i)->matches( jsobj ) ) {
return false;
}
- }
+ }
}
-
+
for( vector< shared_ptr< FieldRangeVector > >::const_iterator i = _orConstraints.begin();
- i != _orConstraints.end(); ++i ) {
+ i != _orConstraints.end(); ++i ) {
if ( (*i)->matches( jsobj ) ) {
return false;
}
}
-
+
if ( where ) {
if ( where->func == 0 ) {
uassert( 10070 , "$where compile error", false);
return false; // didn't compile
}
-
- if ( where->jsScope ){
+
+ if ( where->jsScope ) {
where->scope->init( where->jsScope );
}
where->scope->setThis( const_cast< BSONObj * >( &jsobj ) );
where->scope->setObject( "obj", const_cast< BSONObj & >( jsobj ) );
where->scope->setBoolean( "fullObject" , true ); // this is a hack b/c fullObject used to be relevant
-
+
int err = where->scope->invoke( where->func , BSONObj() , 1000 * 60 , false );
where->scope->setThis( 0 );
if ( err == -3 ) { // INVOKE_ERROR
stringstream ss;
- ss << "error on invocation of $where function:\n"
+ ss << "error on invocation of $where function:\n"
<< where->scope->getError();
uassert( 10071 , ss.str(), false);
return false;
- } else if ( err != 0 ) { // ! INVOKE_SUCCESS
+ }
+ else if ( err != 0 ) { // ! INVOKE_SUCCESS
uassert( 10072 , "unknown error in invocation of $where function", false);
- return false;
+ return false;
}
return where->scope->getBoolean( "return" ) != 0;
}
-
+
return true;
}
@@ -894,9 +905,9 @@ namespace mongo {
}
}
return true;
- }
-
-
+ }
+
+
/*- just for testing -- */
#pragma pack(1)
struct JSObj1 {
@@ -960,7 +971,7 @@ namespace mongo {
assert( !n.matches(j2) );
BSONObj j0 = BSONObj();
-// BSONObj j0((const char *) &js0);
+// BSONObj j0((const char *) &js0);
Matcher p(j0);
assert( p.matches(j1) );
assert( p.matches(j2) );
@@ -973,7 +984,7 @@ namespace mongo {
RXTest() {
}
-
+
void run() {
/*
static const boost::regex e("(\\d{4}[- ]){3}\\d{4}");
@@ -983,7 +994,7 @@ namespace mongo {
*/
int ret = 0;
-
+
pcre_config( PCRE_CONFIG_UTF8 , &ret );
massert( 10342 , "pcre not compiled with utf8 support" , ret );
@@ -1001,7 +1012,7 @@ namespace mongo {
pcre_config( PCRE_CONFIG_UNICODE_PROPERTIES , &ret );
if ( ! ret )
cout << "warning: some regex utf8 things will not work. pcre build doesn't have --enable-unicode-properties" << endl;
-
+
}
} rxtest;
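
A note on the regex paths reshuffled above: when simpleRegex() can prove a pattern is a pure anchored prefix (e.g. /^abc/ with no other metacharacters), the matcher skips PCRE and compares the prefix directly. That fast path amounts to:

    #include <cstring>
    #include <string>

    // 'prefix' is assumed extracted from an anchored, metacharacter-free
    // pattern; matching it then reduces to a bounded string compare.
    bool purePrefixMatch(const std::string& prefix, const char* value) {
        return std::strncmp(value, prefix.c_str(), prefix.size()) == 0;
    }
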
diff --git a/db/matcher.h b/db/matcher.h
index f410c336cc4..d242df64452 100644
--- a/db/matcher.h
+++ b/db/matcher.h
@@ -24,7 +24,7 @@
#include <pcrecpp.h>
namespace mongo {
-
+
class Cursor;
class CoveredIndexMatcher;
class Matcher;
@@ -40,11 +40,9 @@ namespace mongo {
bool isNot;
RegexMatcher() : isNot() {}
};
-
- struct element_lt
- {
- bool operator()(const BSONElement& l, const BSONElement& r) const
- {
+
+ struct element_lt {
+ bool operator()(const BSONElement& l, const BSONElement& r) const {
int x = (int) l.canonicalType() - (int) r.canonicalType();
if ( x < 0 ) return true;
else if ( x > 0 ) return false;
@@ -52,17 +50,17 @@ namespace mongo {
}
};
-
+
class ElementMatcher {
public:
-
+
ElementMatcher() {
}
-
+
ElementMatcher( BSONElement _e , int _op, bool _isNot );
-
+
ElementMatcher( BSONElement _e , int _op , const BSONObj& array, bool _isNot );
-
+
~ElementMatcher() { }
BSONElement toMatch;
@@ -70,7 +68,7 @@ namespace mongo {
bool isNot;
shared_ptr< set<BSONElement,element_lt> > myset;
shared_ptr< vector<RegexMatcher> > myregex;
-
+
// these are for specific operators
int mod;
int modm;
@@ -86,15 +84,15 @@ namespace mongo {
class DiskLoc;
struct MatchDetails {
- MatchDetails(){
+ MatchDetails() {
reset();
}
-
- void reset(){
+
+ void reset() {
loadedObject = false;
elemMatchKey = 0;
}
-
+
string toString() const {
stringstream ss;
ss << "loadedObject: " << loadedObject << " ";
@@ -130,7 +128,7 @@ namespace mongo {
const char *fieldName,
const BSONElement &toMatch, const BSONObj &obj,
const ElementMatcher&bm, MatchDetails * details );
-
+
public:
static int opDirection(int op) {
return op <= BSONObj::LTE ? -1 : 1;
@@ -141,14 +139,14 @@ namespace mongo {
~Matcher();
bool matches(const BSONObj& j, MatchDetails * details = 0 );
-
+
// fast rough check to see if we must load the real doc - we also
// compare field counts against covered index matcher; for $or clauses
// we just compare field counts
bool keyMatch() const { return !all && !haveSize && !hasArray && !haveNeg; }
bool atomic() const { return _atomic; }
-
+
bool hasType( BSONObj::MatchType type ) const;
string toString() const {
@@ -158,18 +156,18 @@ namespace mongo {
void addOrConstraint( const shared_ptr< FieldRangeVector > &frv ) {
_orConstraints.push_back( frv );
}
-
+
void popOrClause() {
_orMatchers.pop_front();
}
-
+
bool sameCriteriaCount( const Matcher &other ) const;
-
+
private:
// Only specify constrainIndexKey if matches() will be called with
// index keys having empty string field names.
Matcher( const Matcher &other, const BSONObj &constrainIndexKey );
-
+
void addBasic(const BSONElement &e, int c, bool isNot) {
// TODO May want to selectively ignore these element types based on op type.
if ( e.type() == MinKey || e.type() == MaxKey )
@@ -179,7 +177,7 @@ namespace mongo {
void addRegex(const char *fieldName, const char *regex, const char *flags, bool isNot = false);
bool addOp( const BSONElement &e, const BSONElement &fe, bool isNot, const char *& regex, const char *&flags );
-
+
int valuesMatch(const BSONElement& l, const BSONElement& r, int op, const ElementMatcher& bm);
bool parseOrNor( const BSONElement &e, bool subMatcher );
@@ -195,7 +193,7 @@ namespace mongo {
bool haveNeg;
/* $atomic - if true, a multi document operation (some removes, updates)
- should be done atomically. in that case, we do not yield -
+ should be done atomically. in that case, we do not yield -
i.e. we stay locked the whole time.
http://www.mongodb.org/display/DOCS/Removing
*/
@@ -212,16 +210,16 @@ namespace mongo {
friend class CoveredIndexMatcher;
};
-
+
// If match succeeds on index key, then attempt to match full document.
class CoveredIndexMatcher : boost::noncopyable {
public:
CoveredIndexMatcher(const BSONObj &pattern, const BSONObj &indexKeyPattern , bool alwaysUseRecord=false );
- bool matches(const BSONObj &o){ return _docMatcher->matches( o ); }
+ bool matches(const BSONObj &o) { return _docMatcher->matches( o ); }
bool matches(const BSONObj &key, const DiskLoc &recLoc , MatchDetails * details = 0 , bool keyUsable = true );
bool matchesCurrent( Cursor * cursor , MatchDetails * details = 0 );
- bool needRecord(){ return _needRecord; }
-
+ bool needRecord() { return _needRecord; }
+
Matcher& docMatcher() { return *_docMatcher; }
// once this is called, shouldn't use this matcher for matching any more
@@ -232,7 +230,7 @@ namespace mongo {
// we may not pop all the clauses we can.
_docMatcher->popOrClause();
}
-
+
CoveredIndexMatcher *nextClauseMatcher( const BSONObj &indexKeyPattern, bool alwaysUseRecord=false ) {
return new CoveredIndexMatcher( _docMatcher, indexKeyPattern, alwaysUseRecord );
}
@@ -246,5 +244,5 @@ namespace mongo {
bool _needRecordReject; // if the key itself isn't good enough to determine a negative match
bool _useRecordOnly;
};
-
+
} // namespace mongo
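
The element_lt comparator reformatted above gives the $in value set a strict weak ordering across mixed BSON types: compare canonical type ranks first, and compare values only on a tie. A self-contained analogue using std::variant, where index() plays the role of canonicalType() (an assumption for illustration, not the real canonical-type table):

    #include <iostream>
    #include <set>
    #include <string>
    #include <variant>

    using Val = std::variant<double, std::string>; // index() stands in for canonicalType()

    struct val_lt {
        bool operator()(const Val& l, const Val& r) const {
            int x = (int)l.index() - (int)r.index(); // compare type rank first
            if (x < 0) return true;
            if (x > 0) return false;
            return l < r;                            // same type: compare values
        }
    };

    int main() {
        std::set<Val, val_lt> s{ Val(2.0), Val(std::string("a")), Val(1.0) };
        for (const Val& v : s)
            std::cout << v.index() << ' '; // 0 0 1: all numbers sort before strings
    }
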
diff --git a/db/matcher_covered.cpp b/db/matcher_covered.cpp
index 155abfdf730..5f88e35120c 100644
--- a/db/matcher_covered.cpp
+++ b/db/matcher_covered.cpp
@@ -33,46 +33,44 @@ namespace mongo {
CoveredIndexMatcher::CoveredIndexMatcher( const BSONObj &jsobj, const BSONObj &indexKeyPattern, bool alwaysUseRecord) :
_docMatcher( new Matcher( jsobj ) ),
- _keyMatcher( *_docMatcher, indexKeyPattern )
- {
+ _keyMatcher( *_docMatcher, indexKeyPattern ) {
init( alwaysUseRecord );
}
-
+
CoveredIndexMatcher::CoveredIndexMatcher( const shared_ptr< Matcher > &docMatcher, const BSONObj &indexKeyPattern , bool alwaysUseRecord ) :
_docMatcher( docMatcher ),
- _keyMatcher( *_docMatcher, indexKeyPattern )
- {
+ _keyMatcher( *_docMatcher, indexKeyPattern ) {
init( alwaysUseRecord );
}
void CoveredIndexMatcher::init( bool alwaysUseRecord ) {
- _needRecord =
- alwaysUseRecord ||
- ! ( _docMatcher->keyMatch() &&
+ _needRecord =
+ alwaysUseRecord ||
+ ! ( _docMatcher->keyMatch() &&
_keyMatcher.sameCriteriaCount( *_docMatcher ) );
-
+
_needRecordReject = _keyMatcher.hasType( BSONObj::opEXISTS );
}
-
- bool CoveredIndexMatcher::matchesCurrent( Cursor * cursor , MatchDetails * details ){
+
+ bool CoveredIndexMatcher::matchesCurrent( Cursor * cursor , MatchDetails * details ) {
// bool keyUsable = ! cursor->isMultiKey() && check for $orish like conditions in matcher SERVER-1264
return matches( cursor->currKey() , cursor->currLoc() , details );
}
-
+
bool CoveredIndexMatcher::matches(const BSONObj &key, const DiskLoc &recLoc , MatchDetails * details , bool keyUsable ) {
if ( details )
details->reset();
-
+
if ( _needRecordReject == false && keyUsable ) {
-
- if ( !_keyMatcher.matches(key, details ) ){
+
+ if ( !_keyMatcher.matches(key, details ) ) {
return false;
}
-
- if ( ! _needRecord ){
+
+ if ( ! _needRecord ) {
return true;
}
-
+
}
if ( details )
@@ -80,6 +78,6 @@ namespace mongo {
return _docMatcher->matches(recLoc.rec() , details );
}
-
+
}
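
matches() in the hunk above is a two-stage test: when the key is usable and no $exists-style negation forces a record check, a key-only reject avoids fetching the document entirely, and a key-only accept suffices when the matcher is fully covered by the index key. A hedged sketch of that control flow, with callables in place of the key and document matchers:

    #include <functional>
    #include <iostream>

    struct CoveredMatch {
        std::function<bool(int)> keyMatch; // cheap: looks only at the index key
        std::function<bool(int)> docMatch; // expensive: "fetches" the document
        bool needRecord;                   // true when the key can't fully prove a match
        bool needRecordReject;             // true when even a reject needs the record ($exists)

        bool matches(int key, int record, bool keyUsable) const {
            if (!needRecordReject && keyUsable) {
                if (!keyMatch(key)) return false; // rejected without loading the record
                if (!needRecord) return true;     // covered: key alone proves the match
            }
            return docMatch(record);              // fall back to the full document
        }
    };

    int main() {
        CoveredMatch m{ [](int k) { return k > 0; },
                        [](int r) { return r % 2 == 0; },
                        /*needRecord=*/false, /*needRecordReject=*/false };
        std::cout << m.matches(-1, 4, true) << ' '  // 0: key reject, record untouched
                  << m.matches( 1, 4, true) << '\n'; // 1: covered accept
    }
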
diff --git a/db/minilex.h b/db/minilex.h
index 169f706f414..677514aa47c 100644
--- a/db/minilex.h
+++ b/db/minilex.h
@@ -22,34 +22,34 @@
namespace mongo {
#if defined(_WIN32)
-
+
} // namespace mongo
#include <hash_map>
using namespace stdext;
namespace mongo {
-
+
typedef const char * MyStr;
struct less_str {
bool operator()(const MyStr & x, const MyStr & y) const {
if ( strcmp(x, y) > 0)
return true;
-
+
return false;
}
};
-
+
typedef hash_map<const char*, int, hash_compare<const char *, less_str> > strhashmap;
-
+
#else
-
+
} // namespace mongo
#include <ext/hash_map>
namespace mongo {
-
+
using namespace __gnu_cxx;
typedef const char * MyStr;
@@ -57,15 +57,15 @@ namespace mongo {
bool operator()(const MyStr & x, const MyStr & y) const {
if ( strcmp(x, y) == 0)
return true;
-
+
return false;
}
};
-
+
typedef hash_map<const char*, int, hash<const char *>, eq_str > strhashmap;
-
+
#endif
-
+
/*
struct MiniLexNotUsed {
strhashmap reserved;
@@ -75,90 +75,90 @@ namespace mongo {
// dm: very dumb about comments and escaped quotes -- but we are faster this way at least,
// albeit returning too much (which is ok for jsobj current usage).
void grabVariables(char *code , strhashmap& vars) { // 'code' modified and must stay in scope*/
- char *p = code;
- char last = 0;
- while ( *p ) {
- if ( starter[*p] ) {
- char *q = p+1;
- while ( *q && ic[*q] ) q++;
- const char *identifier = p;
- bool done = *q == 0;
- *q = 0;
- if ( !reserved.count(identifier) ) {
- // we try to be smart about 'obj' but have to be careful as obj.obj
- // can happen; this is so that nFields is right for simplistic where cases
- // so we can stop scanning in jsobj when we find the field of interest.
- if ( strcmp(identifier,"obj")==0 && p>code && p[-1] != '.' )
- ;
- else
- vars[identifier] = 1;
- }
- if ( done )
- break;
- p = q + 1;
- continue;
- }
-
- if ( *p == '\'' ) {
- p++;
- while ( *p && *p != '\'' ) p++;
- }
- else if ( *p == '"' ) {
- p++;
- while ( *p && *p != '"' ) p++;
- }
- p++;
+ char *p = code;
+ char last = 0;
+ while ( *p ) {
+ if ( starter[*p] ) {
+ char *q = p+1;
+ while ( *q && ic[*q] ) q++;
+ const char *identifier = p;
+ bool done = *q == 0;
+ *q = 0;
+ if ( !reserved.count(identifier) ) {
+ // we try to be smart about 'obj' but have to be careful as obj.obj
+ // can happen; this is so that nFields is right for simplistic where cases
+ // so we can stop scanning in jsobj when we find the field of interest.
+ if ( strcmp(identifier,"obj")==0 && p>code && p[-1] != '.' )
+ ;
+ else
+ vars[identifier] = 1;
}
+ if ( done )
+ break;
+ p = q + 1;
+ continue;
}
- MiniLex() {
- strhashmap atest;
- atest["foo"] = 3;
- assert( atest.count("bar") == 0 );
- assert( atest.count("foo") == 1 );
- assert( atest["foo"] == 3 );
-
- for ( int i = 0; i < 256; i++ ) {
- ic[i] = starter[i] = false;
- }
- for ( int i = 'a'; i <= 'z'; i++ )
- ic[i] = starter[i] = true;
- for ( int i = 'A'; i <= 'Z'; i++ )
- ic[i] = starter[i] = true;
- for ( int i = '0'; i <= '9'; i++ )
- ic[i] = true;
- for ( int i = 128; i < 256; i++ )
- ic[i] = starter[i] = true;
- ic['$'] = starter['$'] = true;
- ic['_'] = starter['_'] = true;
-
- reserved["break"] = true;
- reserved["case"] = true;
- reserved["catch"] = true;
- reserved["continue"] = true;
- reserved["default"] = true;
- reserved["delete"] = true;
- reserved["do"] = true;
- reserved["else"] = true;
- reserved["finally"] = true;
- reserved["for"] = true;
- reserved["function"] = true;
- reserved["if"] = true;
- reserved["in"] = true;
- reserved["instanceof"] = true;
- reserved["new"] = true;
- reserved["return"] = true;
- reserved["switch"] = true;
- reserved["this"] = true;
- reserved["throw"] = true;
- reserved["try"] = true;
- reserved["typeof"] = true;
- reserved["var"] = true;
- reserved["void"] = true;
- reserved["while"] = true;
- reserved["with "] = true;
+ if ( *p == '\'' ) {
+ p++;
+ while ( *p && *p != '\'' ) p++;
}
- };
- */
+ else if ( *p == '"' ) {
+ p++;
+ while ( *p && *p != '"' ) p++;
+ }
+ p++;
+ }
+}
+
+MiniLex() {
+ strhashmap atest;
+ atest["foo"] = 3;
+ assert( atest.count("bar") == 0 );
+ assert( atest.count("foo") == 1 );
+ assert( atest["foo"] == 3 );
+
+ for ( int i = 0; i < 256; i++ ) {
+ ic[i] = starter[i] = false;
+ }
+ for ( int i = 'a'; i <= 'z'; i++ )
+ ic[i] = starter[i] = true;
+ for ( int i = 'A'; i <= 'Z'; i++ )
+ ic[i] = starter[i] = true;
+ for ( int i = '0'; i <= '9'; i++ )
+ ic[i] = true;
+ for ( int i = 128; i < 256; i++ )
+ ic[i] = starter[i] = true;
+ ic['$'] = starter['$'] = true;
+ ic['_'] = starter['_'] = true;
+
+ reserved["break"] = true;
+ reserved["case"] = true;
+ reserved["catch"] = true;
+ reserved["continue"] = true;
+ reserved["default"] = true;
+ reserved["delete"] = true;
+ reserved["do"] = true;
+ reserved["else"] = true;
+ reserved["finally"] = true;
+ reserved["for"] = true;
+ reserved["function"] = true;
+ reserved["if"] = true;
+ reserved["in"] = true;
+ reserved["instanceof"] = true;
+ reserved["new"] = true;
+ reserved["return"] = true;
+ reserved["switch"] = true;
+ reserved["this"] = true;
+ reserved["throw"] = true;
+ reserved["try"] = true;
+ reserved["typeof"] = true;
+ reserved["var"] = true;
+ reserved["void"] = true;
+ reserved["while"] = true;
+ reserved["with "] = true;
+}
+};
+*/
} // namespace mongo
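
The commented-out MiniLex being re-indented above is a table-driven scanner: starter[] marks bytes that may begin an identifier, ic[] marks continuation bytes, quoted strings are skipped wholesale, and reserved words are filtered through a hash set. A compact runnable sketch of the same technique (std::unordered_set replaces the platform-specific hash_map, and the special-casing of 'obj' is omitted):

    #include <iostream>
    #include <string>
    #include <unordered_set>

    int main() {
        bool starter[256] = {}, ic[256] = {};
        for (int c = 'a'; c <= 'z'; ++c) starter[c] = ic[c] = true;
        for (int c = 'A'; c <= 'Z'; ++c) starter[c] = ic[c] = true;
        for (int c = '0'; c <= '9'; ++c) ic[c] = true;
        starter['_'] = ic['_'] = starter['$'] = ic['$'] = true;

        std::unordered_set<std::string> reserved{"var", "return", "if"};
        std::string code = "var x = obj.price + 'if quoted'; return x;";

        std::unordered_set<std::string> vars;
        for (size_t i = 0; i < code.size(); ) {
            unsigned char c = code[i];
            if (c == '\'' || c == '"') {               // skip string literals wholesale
                char q = code[i++];
                while (i < code.size() && code[i] != q) ++i;
                ++i;
            }
            else if (starter[c]) {                     // greedily consume an identifier
                size_t j = i + 1;
                while (j < code.size() && ic[(unsigned char)code[j]]) ++j;
                std::string id = code.substr(i, j - i);
                if (!reserved.count(id)) vars.insert(id);
                i = j;
            }
            else ++i;
        }
        for (const auto& v : vars)
            std::cout << v << '\n'; // x, obj, price (in unspecified order)
    }
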
diff --git a/db/module.cpp b/db/module.cpp
index 1e4f51132ea..6a182f2056f 100644
--- a/db/module.cpp
+++ b/db/module.cpp
@@ -24,29 +24,29 @@ namespace mongo {
std::list<Module*> * Module::_all;
Module::Module( const string& name )
- : _name( name ) , _options( (string)"Module " + name + " options" ){
+ : _name( name ) , _options( (string)"Module " + name + " options" ) {
if ( ! _all )
_all = new list<Module*>();
_all->push_back( this );
}
- Module::~Module(){}
+ Module::~Module() {}
- void Module::addOptions( program_options::options_description& options ){
+ void Module::addOptions( program_options::options_description& options ) {
if ( ! _all ) {
return;
}
- for ( list<Module*>::iterator i=_all->begin(); i!=_all->end(); i++ ){
+ for ( list<Module*>::iterator i=_all->begin(); i!=_all->end(); i++ ) {
Module* m = *i;
options.add( m->_options );
}
}
- void Module::configAll( program_options::variables_map& params ){
+ void Module::configAll( program_options::variables_map& params ) {
if ( ! _all ) {
return;
}
- for ( list<Module*>::iterator i=_all->begin(); i!=_all->end(); i++ ){
+ for ( list<Module*>::iterator i=_all->begin(); i!=_all->end(); i++ ) {
Module* m = *i;
m->config( params );
}
@@ -54,11 +54,11 @@ namespace mongo {
}
- void Module::initAll(){
+ void Module::initAll() {
if ( ! _all ) {
return;
}
- for ( list<Module*>::iterator i=_all->begin(); i!=_all->end(); i++ ){
+ for ( list<Module*>::iterator i=_all->begin(); i!=_all->end(); i++ ) {
Module* m = *i;
m->init();
}
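
Module's constructor above appends this to a lazily allocated static list, so every module instance registers itself during static initialization and addOptions()/configAll()/initAll() can walk the list without any central registry. A minimal sketch of that self-registration pattern (class and member names are illustrative):

    #include <iostream>
    #include <list>
    #include <string>

    class Plugin {
    public:
        explicit Plugin(std::string name) : _name(std::move(name)) {
            if (!_all) _all = new std::list<Plugin*>(); // lazy init dodges static-order issues
            _all->push_back(this);
        }
        virtual ~Plugin() = default;
        virtual void init() = 0;
        static void initAll() {
            if (!_all) return;
            for (Plugin* p : *_all) p->init();
        }
    protected:
        std::string _name;
    private:
        static std::list<Plugin*>* _all;
    };
    std::list<Plugin*>* Plugin::_all = nullptr;

    struct Mms : Plugin {
        Mms() : Plugin("mms") {}
        void init() override { std::cout << _name << " init\n"; }
    } mmsInstance; // a global instance registers itself, as the mms module below does

    int main() { Plugin::initAll(); }
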
diff --git a/db/module.h b/db/module.h
index d4939dd9cde..e90923a2c5a 100644
--- a/db/module.h
+++ b/db/module.h
@@ -34,8 +34,8 @@ namespace mongo {
public:
Module( const string& name );
virtual ~Module();
-
- boost::program_options::options_description_easy_init add_options(){
+
+ boost::program_options::options_description_easy_init add_options() {
return _options.add_options();
}
@@ -54,10 +54,10 @@ namespace mongo {
*/
virtual void shutdown() = 0;
- const string& getName(){ return _name; }
-
+ const string& getName() { return _name; }
+
// --- static things
-
+
static void addOptions( program_options::options_description& options );
static void configAll( program_options::variables_map& params );
static void initAll();
diff --git a/db/modules/mms.cpp b/db/modules/mms.cpp
index 40e9001e51e..b180262feb7 100644
--- a/db/modules/mms.cpp
+++ b/db/modules/mms.cpp
@@ -37,54 +37,54 @@ namespace mongo {
MMS()
: Module( "mms" ) , _baseurl( "" ) ,
_secsToSleep(1) , _token( "" ) , _name( "" ) {
-
+
add_options()
- ( "mms-url" , po::value<string>()->default_value("http://mms.10gen.com/ping") , "url for mongo monitoring server" )
- ( "mms-token" , po::value<string>() , "account token for mongo monitoring server" )
- ( "mms-name" , po::value<string>() , "server name for mongo monitoring server" )
- ( "mms-interval" , po::value<int>()->default_value(30) , "ping interval (in seconds) for mongo monitoring server" )
- ;
- }
-
- ~MMS(){}
-
- void config( program_options::variables_map& params ){
+ ( "mms-url" , po::value<string>()->default_value("http://mms.10gen.com/ping") , "url for mongo monitoring server" )
+ ( "mms-token" , po::value<string>() , "account token for mongo monitoring server" )
+ ( "mms-name" , po::value<string>() , "server name for mongo monitoring server" )
+ ( "mms-interval" , po::value<int>()->default_value(30) , "ping interval (in seconds) for mongo monitoring server" )
+ ;
+ }
+
+ ~MMS() {}
+
+ void config( program_options::variables_map& params ) {
_baseurl = params["mms-url"].as<string>();
- if ( params.count( "mms-token" ) ){
+ if ( params.count( "mms-token" ) ) {
_token = params["mms-token"].as<string>();
}
- if ( params.count( "mms-name" ) ){
+ if ( params.count( "mms-name" ) ) {
_name = params["mms-name"].as<string>();
}
_secsToSleep = params["mms-interval"].as<int>();
}
-
- void run(){
- if ( _token.size() == 0 && _name.size() == 0 ){
+
+ void run() {
+ if ( _token.size() == 0 && _name.size() == 0 ) {
log(1) << "mms not configured" << endl;
return;
}
- if ( _token.size() == 0 ){
+ if ( _token.size() == 0 ) {
log() << "no token for mms - not running" << endl;
return;
}
-
- if ( _name.size() == 0 ){
+
+ if ( _name.size() == 0 ) {
log() << "no name for mms - not running" << endl;
return;
}
-
+
log() << "mms monitor staring... token:" << _token << " name:" << _name << " interval: " << _secsToSleep << endl;
Client::initThread( "mms" );
Client& c = cc();
-
-
+
+
// TODO: using direct client is bad, but easy for now
-
- while ( ! inShutdown() ){
+
+ while ( ! inShutdown() ) {
sleepsecs( _secsToSleep );
-
+
try {
stringstream url;
url << _baseurl << "?"
@@ -92,47 +92,47 @@ namespace mongo {
<< "name=" << _name << "&"
<< "ts=" << time(0)
;
-
+
BSONObjBuilder bb;
// duplicated so the post has everything
bb.append( "token" , _token );
bb.append( "name" , _name );
bb.appendDate( "ts" , jsTime() );
-
+
// any commands
_add( bb , "buildinfo" );
_add( bb , "serverStatus" );
-
+
BSONObj postData = bb.obj();
-
+
log(1) << "mms url: " << url.str() << "\n\t post: " << postData << endl;;
-
+
HttpClient c;
HttpClient::Result r;
int rc = c.post( url.str() , postData.jsonString() , &r );
log(1) << "\t response code: " << rc << endl;
- if ( rc != 200 ){
+ if ( rc != 200 ) {
log() << "mms error response code:" << rc << endl;
log(1) << "mms error body:" << r.getEntireResponse() << endl;
}
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
log() << "mms exception: " << e.what() << endl;
}
}
-
+
c.shutdown();
}
-
- void _add( BSONObjBuilder& postData , const char* cmd ){
+
+ void _add( BSONObjBuilder& postData , const char* cmd ) {
Command * c = Command::findCommand( cmd );
- if ( ! c ){
+ if ( ! c ) {
log() << "MMS can't find command: " << cmd << endl;
postData.append( cmd , "can't find command" );
return;
}
-
- if ( c->locktype() ){
+
+ if ( c->locktype() ) {
log() << "MMS can only use noLocking commands not: " << cmd << endl;
postData.append( cmd , "not noLocking" );
return;
@@ -147,24 +147,24 @@ namespace mongo {
else
postData.append( cmd , sub.obj() );
}
-
- void init(){ go(); }
- void shutdown(){
+ void init() { go(); }
+
+ void shutdown() {
// TODO
}
private:
string _baseurl;
int _secsToSleep;
-
+
string _token;
string _name;
-
+
} /*mms*/ ;
}
-
+
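
The MMS module above declares its settings through boost::program_options: chained add_options() calls with po::value<T>()->default_value(...), after which config() probes params.count(...) and reads params[...].as<T>(). A standalone sketch of that option-handling flow (the option names mirror the diff; the parse_command_line driver is an assumption, since the diff does not show how mongod feeds params):

    #include <boost/program_options.hpp>
    #include <iostream>
    #include <string>

    namespace po = boost::program_options;

    int main(int argc, char** argv) {
        po::options_description opts("mms options");
        opts.add_options()
            ("mms-url", po::value<std::string>()->default_value("http://mms.10gen.com/ping"),
             "url for mongo monitoring server")
            ("mms-token", po::value<std::string>(), "account token")
            ("mms-interval", po::value<int>()->default_value(30), "ping interval (seconds)");

        po::variables_map params;
        po::store(po::parse_command_line(argc, argv, opts), params);
        po::notify(params);

        if (params.count("mms-token"))      // optional value: must check before reading
            std::cout << "token: " << params["mms-token"].as<std::string>() << '\n';
        std::cout << "url: " << params["mms-url"].as<std::string>() // default applies if unset
                  << ", every " << params["mms-interval"].as<int>() << "s\n";
    }
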
diff --git a/db/mongommf.cpp b/db/mongommf.cpp
index 64c64421ee2..1f76561e513 100644
--- a/db/mongommf.cpp
+++ b/db/mongommf.cpp
@@ -16,7 +16,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-/* this module adds some of our layers atop memory mapped files - specifically our handling of private views & such
+/* this module adds some of our layers atop memory mapped files - specifically our handling of private views & such
if you don't care about journaling/durability (temp sort files & such) use MemoryMappedFile class, not this.
*/
@@ -31,15 +31,14 @@ using namespace mongoutils;
namespace mongo {
- void MongoMMF::remapThePrivateView()
- {
+ void MongoMMF::remapThePrivateView() {
assert( cmdLine.dur && !testIntent );
privateViews.remove(_view_private);
- _view_private = remapPrivateView(_view_private);
+ _view_private = remapPrivateView(_view_private);
privateViews.add(_view_private, this);
}
- void* MongoMMF::getView() {
+ void* MongoMMF::getView() {
if( testIntent )
return _view_readonly;
return _view_private;
@@ -58,8 +57,8 @@ namespace mongo {
_views.erase(view);
}
}
-
- PointerToMMF::PointerToMMF() : _m("PointerToMMF") {
+
+ PointerToMMF::PointerToMMF() : _m("PointerToMMF") {
#if defined(SIZE_MAX)
size_t max = SIZE_MAX;
#else
@@ -74,7 +73,7 @@ namespace mongo {
/** underscore version of find is for when you are already locked
@param ofs out return our offset in the view
- @return the MongoMMF to which this pointer belongs
+ @return the MongoMMF to which this pointer belongs
*/
MongoMMF* PointerToMMF::_find(void *p, /*out*/ size_t& ofs) {
//
@@ -88,7 +87,7 @@ namespace mongo {
MongoMMF *mmf = x.second;
if( mmf ) {
size_t o = ((char *)p) - ((char*)x.first);
- if( o < mmf->length() ) {
+ if( o < mmf->length() ) {
ofs = o;
return mmf;
}
@@ -109,7 +108,7 @@ namespace mongo {
PointerToMMF privateViews;
static PointerToMMF ourReadViews; /// _TESTINTENT (testIntent) build use only (other than existence)
- /*static*/ void* MongoMMF::switchToPrivateView(void *readonly_ptr) {
+ /*static*/ void* MongoMMF::switchToPrivateView(void *readonly_ptr) {
assert( cmdLine.dur );
assert( testIntent );
@@ -138,14 +137,14 @@ namespace mongo {
return p;
}
- /* switch to _view_write. normally, this is a bad idea since your changes will not
+ /* switch to _view_write. normally, this is a bad idea since your changes will not
show up in _view_private if there have been changes there; thus the leading underscore
- as a tad of a "warning". but useful when done with some care, such as during
+ as a tad of a "warning". but useful when done with some care, such as during
initialization.
*/
- /*static*/ void* MongoMMF::_switchToWritableView(void *p) {
+ /*static*/ void* MongoMMF::_switchToWritableView(void *p) {
RARELY log() << "todo dur not done switchtowritable" << endl;
- if( debug )
+ if( debug )
return switchToPrivateView(p);
return p;
}
@@ -160,7 +159,7 @@ namespace mongo {
uassert(13520, str::stream() << "MongoMMF only supports filenames in a certain format " << f, ok);
if( suffix == "ns" )
_fileSuffixNo = dur::JEntry::DotNsSuffix;
- else
+ else
_fileSuffixNo = (int) str::toUnsigned(suffix);
_p = RelativePath::fromFullPath(prefix);
@@ -172,11 +171,11 @@ namespace mongo {
return finishOpening();
}
- bool MongoMMF::create(string fname, unsigned long long& len, bool sequentialHint) {
+ bool MongoMMF::create(string fname, unsigned long long& len, bool sequentialHint) {
setPath(fname);
bool preExisting = MemoryMappedFile::exists(fname.c_str());
_view_write = map(fname.c_str(), len, sequentialHint ? SEQUENTIAL : 0);
- if( cmdLine.dur && !testIntent && _view_write && !preExisting ) {
+ if( cmdLine.dur && !testIntent && _view_write && !preExisting ) {
getDur().createdFile(fname, len);
}
return finishOpening();
@@ -185,7 +184,7 @@ namespace mongo {
bool MongoMMF::finishOpening() {
if( _view_write ) {
if( cmdLine.dur ) {
- if( testIntent ) {
+ if( testIntent ) {
_view_private = _view_write;
_view_readonly = MemoryMappedFile::createReadOnlyMap();
ourReadViews.add(_view_readonly, this);
@@ -195,30 +194,30 @@ namespace mongo {
}
privateViews.add(_view_private, this); // note that testIntent builds use this, even though it points to view_write then...
}
- else {
+ else {
_view_private = _view_write;
}
return true;
}
return false;
}
-
+
MongoMMF::MongoMMF() : _willNeedRemap(false) {
- _view_write = _view_private = _view_readonly = 0;
+ _view_write = _view_private = _view_readonly = 0;
}
- MongoMMF::~MongoMMF() {
+ MongoMMF::~MongoMMF() {
close();
}
- namespace dur {
+ namespace dur {
void closingFileNotification();
}
/*virtual*/ void MongoMMF::close() {
{
- if( !testIntent && cmdLine.dur && _view_write/*actually was opened*/ ) {
- if( debug )
+ if( !testIntent && cmdLine.dur && _view_write/*actually was opened*/ ) {
+ if( debug )
log() << "closingFileNotication:" << filename() << endl;
dur::closingFileNotification();
}
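
remapThePrivateView() above exists because a MAP_PRIVATE mapping copy-on-writes its pages: once a page in _view_private is dirtied it stops tracking the file, so after journaled changes reach the write view the private view must be discarded and re-mapped. A POSIX-only sketch of that behavior using raw mmap (MongoMMF's privateViews registry and durability hooks are omitted):

    #include <sys/mman.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <cstdio>
    #include <cstring>

    int main() {
        const char* path = "/tmp/mmf_demo.bin";
        int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
        if (fd < 0 || ftruncate(fd, 4096) != 0) return 1;

        // _view_write analogue: changes here reach the file.
        char* writeView = (char*)mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                                      MAP_SHARED, fd, 0);
        // _view_private analogue: copy-on-write, isolated from the file once dirtied.
        char* privView = (char*)mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                                     MAP_PRIVATE, fd, 0);
        if (writeView == MAP_FAILED || privView == MAP_FAILED) return 1;

        strcpy(privView, "uncommitted");
        printf("write view sees: '%s'\n", writeView); // still empty: COW isolated the page

        strcpy(writeView, "committed");               // the dirty private page won't see this
        // remapPrivateView analogue: drop the stale private pages, map fresh ones.
        munmap(privView, 4096);
        privView = (char*)mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE, fd, 0);
        printf("remapped private view sees: '%s'\n", privView); // "committed"

        munmap(writeView, 4096); munmap(privView, 4096); close(fd);
    }
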
diff --git a/db/mongommf.h b/db/mongommf.h
index cf6841afa02..0d443b7af78 100644
--- a/db/mongommf.h
+++ b/db/mongommf.h
@@ -23,10 +23,10 @@
namespace mongo {
/** MongoMMF adds some layers atop memory mapped files - specifically our handling of private views & such.
- if you don't care about journaling/durability (temp sort files & such) use MemoryMappedFile class,
+ if you don't care about journaling/durability (temp sort files & such) use MemoryMappedFile class,
not this.
*/
- class MongoMMF : private MemoryMappedFile {
+ class MongoMMF : private MemoryMappedFile {
public:
MongoMMF();
virtual ~MongoMMF();
@@ -54,32 +54,32 @@ namespace mongo {
*/
void* getView();
- /* switch to _view_write. normally, this is a bad idea since your changes will not
+ /* switch to _view_write. normally, this is a bad idea since your changes will not
show up in _view_private if there have been changes there; thus the leading underscore
- as a tad of a "warning". but useful when done with some care, such as during
+ as a tad of a "warning". but useful when done with some care, such as during
initialization.
*/
static void* _switchToWritableView(void *private_ptr);
/** for _TESTINTENT build.
- translates the read view pointer into a pointer to the corresponding
+ translates the read view pointer into a pointer to the corresponding
place in the private view.
*/
static void* switchToPrivateView(void *debug_readonly_ptr);
-
+
/** for a filename a/b/c.3
filePath() is "a/b/c"
fileSuffixNo() is 3
if the suffix is "ns", fileSuffixNo -1
*/
- RelativePath relativePath() const {
+ RelativePath relativePath() const {
DEV assert( !_p._p.empty() );
- return _p;
+ return _p;
}
int fileSuffixNo() const { return _fileSuffixNo; }
void* view_write() { return _view_write; }
- /** true if we have written.
+ /** true if we have written.
set in PREPLOGBUFFER, it is NOT set immediately on write intent declaration.
reset to false in REMAPPRIVATEVIEW
*/
@@ -102,9 +102,9 @@ namespace mongo {
bool finishOpening();
};
- /** for durability support we want to be able to map pointers to specific MongoMMF objects.
+ /** for durability support we want to be able to map pointers to specific MongoMMF objects.
*/
- class PointerToMMF : boost::noncopyable {
+ class PointerToMMF : boost::noncopyable {
public:
PointerToMMF();
diff --git a/db/mongomutex.h b/db/mongomutex.h
index 5ac544e6a05..1eab2ee57c1 100644
--- a/db/mongomutex.h
+++ b/db/mongomutex.h
@@ -20,14 +20,14 @@
// note: include concurrency.h, not this.
-namespace mongo {
+namespace mongo {
/** the 'big lock' we use for most operations. a read/write lock.
there is one of these, dbMutex.
generally if you need to declare a mutex use the right primitive class, not this.
- use readlock and writelock classes for scoped locks on this rather than direct
+ use readlock and writelock classes for scoped locks on this rather than direct
manipulation.
*/
class MongoMutex {
@@ -44,20 +44,20 @@ namespace mongo {
bool atLeastReadLocked() const { return _state.get() != 0; }
void assertAtLeastReadLocked() const { assert(atLeastReadLocked()); }
bool isWriteLocked() const { return getState() > 0; }
- void assertWriteLocked() const {
- assert( getState() > 0 );
+ void assertWriteLocked() const {
+ assert( getState() > 0 );
DEV assert( !_releasedEarly.get() );
}
// write lock. use the writelock scoped lock class, not this directly.
- void lock() {
+ void lock() {
if ( _writeLockedAlready() )
return;
_state.set(1);
Client *c = curopWaitingForLock( 1 ); // stats
- _m.lock();
+ _m.lock();
curopGotLock(c);
_minfo.entered();
@@ -68,33 +68,33 @@ namespace mongo {
}
// try write lock
- bool lock_try( int millis ) {
+ bool lock_try( int millis ) {
if ( _writeLockedAlready() )
return true;
Client *c = curopWaitingForLock( 1 );
- bool got = _m.lock_try( millis );
+ bool got = _m.lock_try( millis );
curopGotLock(c);
-
+
if ( got ) {
_minfo.entered();
_state.set(1);
MongoFile::markAllWritable(); // for _DEBUG validation -- a no op for release build
_acquiredWriteLock();
- }
-
+ }
+
return got;
}
- // un write lock
- void unlock() {
+ // un write lock
+ void unlock() {
int s = _state.get();
- if( s > 1 ) {
+ if( s > 1 ) {
_state.set(s-1); // recursive lock case
return;
}
- if( s != 1 ) {
- if( _releasedEarly.get() ) {
+ if( s != 1 ) {
+ if( _releasedEarly.get() ) {
_releasedEarly.set(false);
return;
}
@@ -104,10 +104,10 @@ namespace mongo {
MongoFile::unmarkAllWritable(); // _DEBUG validation
_state.set(0);
_minfo.leaving();
- _m.unlock();
+ _m.unlock();
}
- /* unlock (write lock), and when unlock() is called later,
+ /* unlock (write lock), and when unlock() is called later,
be smart then and don't unlock it again.
*/
void releaseEarly() {
@@ -118,14 +118,14 @@ namespace mongo {
}
// read lock. don't call directly, use readlock.
- void lock_shared() {
+ void lock_shared() {
int s = _state.get();
if( s ) {
- if( s > 0 ) {
+ if( s > 0 ) {
// already in write lock - just be recursive and stay write locked
_state.set(s+1);
}
- else {
+ else {
// already in read lock - recurse
_state.set(s-1);
}
@@ -133,15 +133,15 @@ namespace mongo {
else {
_state.set(-1);
Client *c = curopWaitingForLock( -1 );
- _m.lock_shared();
+ _m.lock_shared();
curopGotLock(c);
}
}
-
+
// try read lock
bool lock_shared_try( int millis ) {
int s = _state.get();
- if ( s ){
+ if ( s ) {
// we already have a lock, so no need to try
lock_shared();
return true;
@@ -156,23 +156,23 @@ namespace mongo {
_state.set(-1);
return got;
}
-
- void unlock_shared() {
+
+ void unlock_shared() {
int s = _state.get();
- if( s > 0 ) {
+ if( s > 0 ) {
assert( s > 1 ); /* we must have done a lock write first to have s > 1 */
_state.set(s-1);
return;
}
- if( s < -1 ) {
+ if( s < -1 ) {
_state.set(s+1);
return;
}
assert( s == -1 );
_state.set(0);
- _m.unlock_shared();
+ _m.unlock_shared();
}
-
+
MutexInfo& info() { return _minfo; }
private:
@@ -185,7 +185,7 @@ namespace mongo {
RWLock _m;
/* > 0 write lock with recurse count
- < 0 read lock
+ < 0 read lock
*/
ThreadLocalValue<int> _state;
@@ -197,7 +197,7 @@ namespace mongo {
private:
/* See the releaseEarly() method.
- we use a separate TLS value for releasedEarly - that is ok as
+ we use a separate TLS value for releasedEarly - that is ok as
our normal/common code path, we never even touch it */
ThreadLocalValue<bool> _releasedEarly;
@@ -214,12 +214,12 @@ namespace mongo {
void releasingWriteLock(); // because it's hard to include dur.h here
}
- inline void MongoMutex::_releasingWriteLock() {
+ inline void MongoMutex::_releasingWriteLock() {
dur::releasingWriteLock();
}
- inline void MongoMutex::_acquiredWriteLock() {
- if( _remapPrivateViewRequested ) {
+ inline void MongoMutex::_acquiredWriteLock() {
+ if( _remapPrivateViewRequested ) {
dur::REMAPPRIVATEVIEW();
dassert( !_remapPrivateViewRequested );
}
@@ -227,7 +227,7 @@ namespace mongo {
/* @return true if was already write locked. increments recursive lock count. */
inline bool MongoMutex::_writeLockedAlready() {
- dassert( haveClient() );
+ dassert( haveClient() );
int s = _state.get();
if( s > 0 ) {
_state.set(s+1);
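
MongoMutex above encodes recursion in a per-thread counter: state > 0 means this thread holds the write lock state times, state < 0 a read lock held -state times, and 0 unlocked, which is why lock_shared() inside a write lock just bumps the write count. A minimal sketch of that scheme, assuming C++17's std::shared_mutex and thread_local in place of RWLock and ThreadLocalValue (the curop bookkeeping is left out; like dbMutex, this only makes sense as a singleton since the counter is per thread, not per lock):

    #include <shared_mutex>
    #include <cassert>

    class RecursiveRW {
        std::shared_mutex _m;
        static thread_local int _state; // >0 write recurse count, <0 read, 0 free
    public:
        void lock() {                                  // write lock
            if (_state > 0) { ++_state; return; }      // already write-locked: recurse
            _m.lock();
            _state = 1;
        }
        void unlock() {
            if (_state > 1) { --_state; return; }      // unwind one recursion level
            assert(_state == 1);
            _state = 0;
            _m.unlock();
        }
        void lock_shared() {
            if (_state > 0) { ++_state; return; }      // write lock satisfies a read request
            if (_state < 0) { --_state; return; }      // recursive read
            _m.lock_shared();
            _state = -1;
        }
        void unlock_shared() {
            if (_state > 0) { assert(_state > 1); --_state; return; } // was a nested write
            if (_state < -1) { ++_state; return; }
            assert(_state == -1);
            _state = 0;
            _m.unlock_shared();
        }
    };
    thread_local int RecursiveRW::_state = 0;

    int main() {
        RecursiveRW m;
        m.lock(); m.lock_shared(); m.unlock_shared(); m.unlock(); // read nested in write
    }
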
diff --git a/db/namespace-inl.h b/db/namespace-inl.h
index 659e33d521b..a777ff881ec 100644
--- a/db/namespace-inl.h
+++ b/db/namespace-inl.h
@@ -20,15 +20,15 @@
#include "namespace.h"
-namespace mongo {
+namespace mongo {
inline Namespace& Namespace::operator=(const char *ns) {
- // we fill the remaining space with all zeroes here. as the full Namespace struct is in
- // the datafiles (the .ns files specifically), that is helpful as then they are deterministic
+ // we fill the remaining space with all zeroes here. as the full Namespace struct is in
+ // the datafiles (the .ns files specifically), that is helpful as then they are deterministic
// in the bytes they have for a given sequence of operations. that makes testing and debugging
// the data files easier.
//
- // if profiling indicates this method is a significant bottleneck, we could have a version we
+ // if profiling indicates this method is a significant bottleneck, we could have a version we
// use for reads which does not fill with zeroes, and keep the zeroing behavior on writes.
//
unsigned len = strlen(ns);
@@ -46,7 +46,7 @@ namespace mongo {
return s;
}
- inline bool Namespace::isExtra() const {
+ inline bool Namespace::isExtra() const {
const char *p = strstr(buf, "$extr");
return p && p[5] && p[6] == 0; //==0 important in case an index uses name "$extra_1" for example
}
@@ -71,10 +71,10 @@ namespace mongo {
}
inline IndexDetails& NamespaceDetails::idx(int idxNo, bool missingExpected ) {
- if( idxNo < NIndexesBase )
+ if( idxNo < NIndexesBase )
return _indexes[idxNo];
Extra *e = extra();
- if ( ! e ){
+ if ( ! e ) {
if ( missingExpected )
throw MsgAssertionException( 13283 , "Missing Extra" );
massert(13282, "missing Extra", e);
@@ -82,7 +82,7 @@ namespace mongo {
int i = idxNo - NIndexesBase;
if( i >= NIndexesExtra ) {
e = e->next(this);
- if ( ! e ){
+ if ( ! e ) {
if ( missingExpected )
throw MsgAssertionException( 13283 , "missing extra" );
massert(13283, "missing Extra", e);
@@ -92,7 +92,7 @@ namespace mongo {
return e->details[i];
}
- inline int NamespaceDetails::idxNo(IndexDetails& idx) {
+ inline int NamespaceDetails::idxNo(IndexDetails& idx) {
IndexIterator i = ii();
while( i.more() ) {
if( &i.next() == &idx )
@@ -105,7 +105,7 @@ namespace mongo {
inline int NamespaceDetails::findIndexByKeyPattern(const BSONObj& keyPattern) {
IndexIterator i = ii();
while( i.more() ) {
- if( i.next().keyPattern() == keyPattern )
+ if( i.next().keyPattern() == keyPattern )
return i.pos()-1;
}
return -1;
@@ -121,7 +121,7 @@ namespace mongo {
return -1;
}
- inline NamespaceDetails::IndexIterator::IndexIterator(NamespaceDetails *_d) {
+ inline NamespaceDetails::IndexIterator::IndexIterator(NamespaceDetails *_d) {
d = _d;
i = 0;
n = d->nIndexes;
diff --git a/db/namespace.cpp b/db/namespace.cpp
index 7972ec91fd2..20678770d89 100644
--- a/db/namespace.cpp
+++ b/db/namespace.cpp
@@ -60,9 +60,9 @@ namespace mongo {
// For capped case, signal that we are doing initial extent allocation.
if ( capped )
cappedLastDelRecLastExtent().setInvalid();
- assert( sizeof(dataFileVersion) == 2 );
- dataFileVersion = 0;
- indexFileVersion = 0;
+ assert( sizeof(dataFileVersion) == 2 );
+ dataFileVersion = 0;
+ indexFileVersion = 0;
multiKeyIndexBits = 0;
reservedA = 0;
extraOffset = 0;
@@ -76,7 +76,7 @@ namespace mongo {
bool NamespaceIndex::exists() const {
return !MMF::exists(path());
}
-
+
boost::filesystem::path NamespaceIndex::path() const {
boost::filesystem::path ret( dir_ );
if ( directoryperdb )
@@ -93,12 +93,12 @@ namespace mongo {
if ( !boost::filesystem::exists( dir ) )
BOOST_CHECK_EXCEPTION( boost::filesystem::create_directory( dir ) );
}
-
- unsigned lenForNewNsFiles = 16 * 1024 * 1024;
-
+
+ unsigned lenForNewNsFiles = 16 * 1024 * 1024;
+
#if defined(_DEBUG)
void NamespaceDetails::dump(const Namespace& k) {
- if( !cmdLine.dur )
+ if( !cmdLine.dur )
cout << "ns offsets which follow will not display correctly with --dur disabled" << endl;
size_t ofs = 1; // 1 is sentinel that the find call below failed
@@ -107,7 +107,7 @@ namespace mongo {
cout << "ns" << hex << setw(8) << ofs << ' ';
cout << k.toString() << '\n';
- if( k.isExtra() ) {
+ if( k.isExtra() ) {
cout << "ns\t extra" << endl;
return;
}
@@ -121,10 +121,10 @@ namespace mongo {
}
#endif
- void NamespaceDetails::onLoad(const Namespace& k) {
+ void NamespaceDetails::onLoad(const Namespace& k) {
//dump(k);
- if( k.isExtra() ) {
+ if( k.isExtra() ) {
/* overflow storage for indexes - so don't treat as a NamespaceDetails object. */
return;
}
@@ -134,7 +134,7 @@ namespace mongo {
if( backgroundIndexBuildInProgress || capped2.cc2_ptr ) {
assertInWriteLock();
NamespaceDetails *d = (NamespaceDetails *) MongoMMF::_switchToWritableView(this);
- if( backgroundIndexBuildInProgress ) {
+ if( backgroundIndexBuildInProgress ) {
log() << "backgroundIndexBuildInProgress was " << backgroundIndexBuildInProgress << " for " << k << ", indicating an abnormal db shutdown" << endl;
d->backgroundIndexBuildInProgress = 0;
}
@@ -142,7 +142,7 @@ namespace mongo {
}
}
- static void namespaceOnLoadCallback(const Namespace& k, NamespaceDetails& v) {
+ static void namespaceOnLoadCallback(const Namespace& k, NamespaceDetails& v) {
v.onLoad(k);
}
@@ -155,38 +155,38 @@ namespace mongo {
we need to be sure to clear any cached info for the database in
local.*.
*/
- /*
+ /*
if ( "local" != database_ ) {
DBInfo i(database_.c_str());
i.dbDropped();
}
- */
+ */
- unsigned long long len = 0;
+ unsigned long long len = 0;
boost::filesystem::path nsPath = path();
string pathString = nsPath.string();
void *p = 0;
if( MMF::exists(nsPath) ) {
if( f.open(pathString, true) ) {
len = f.length();
- if ( len % (1024*1024) != 0 ){
+ if ( len % (1024*1024) != 0 ) {
log() << "bad .ns file: " << pathString << endl;
uassert( 10079 , "bad .ns file length, cannot open database", len % (1024*1024) == 0 );
}
p = f.getView();
}
- }
- else {
- // use lenForNewNsFiles, we are making a new database
- massert( 10343, "bad lenForNewNsFiles", lenForNewNsFiles >= 1024*1024 );
+ }
+ else {
+ // use lenForNewNsFiles, we are making a new database
+ massert( 10343, "bad lenForNewNsFiles", lenForNewNsFiles >= 1024*1024 );
maybeMkdir();
- unsigned long long l = lenForNewNsFiles;
+ unsigned long long l = lenForNewNsFiles;
if( f.create(pathString, l, true) ) {
len = l;
assert( len == lenForNewNsFiles );
p = f.getView();
}
- }
+ }
if ( p == 0 ) {
/** TODO: this shouldn't terminate? */
@@ -199,7 +199,7 @@ namespace mongo {
if( checkNsFilesOnLoad )
ht->iterAll(namespaceOnLoadCallback);
}
-
+
static void namespaceGetNamespacesCallback( const Namespace& k , NamespaceDetails& v , void * extra ) {
list<string> * l = (list<string>*)extra;
if ( ! k.hasDollarSign() )
@@ -208,14 +208,14 @@ namespace mongo {
void NamespaceIndex::getNamespaces( list<string>& tofill , bool onlyCollections ) const {
assert( onlyCollections ); // TODO: need to implement this
// need boost::bind or something to make this less ugly
-
+
if ( ht )
ht->iterAll( namespaceGetNamespacesCallback , (void*)&tofill );
}
void NamespaceDetails::addDeletedRec(DeletedRecord *d, DiskLoc dloc) {
getDur().assertReading(this);
- BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::Extra) <= sizeof(NamespaceDetails) );
+ BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::Extra) <= sizeof(NamespaceDetails) );
{
Record *r = (Record *) getDur().writingPtr(d, sizeof(Record));
@@ -236,12 +236,14 @@ namespace mongo {
;
i.drec()->nextDeleted.writing() = dloc;
}
- } else {
+ }
+ else {
d->nextDeleted = cappedFirstDeletedInCurExtent();
getDur().writingDiskLoc( cappedFirstDeletedInCurExtent() ) = dloc;
// always compact() after this so order doesn't matter
}
- } else {
+ }
+ else {
int b = bucket(d->lengthWithHeaders);
DiskLoc& list = deletedList[b];
DiskLoc oldHead = list;
@@ -272,14 +274,14 @@ namespace mongo {
if ( capped == 0 ) {
if ( left < 24 || left < (lenToAlloc >> 3) ) {
// you get the whole thing.
- //DataFileMgr::grow(loc, regionlen);
+ //DataFileMgr::grow(loc, regionlen);
return loc;
}
}
/* split off some for further use. */
getDur().writingInt(r->lengthWithHeaders) = lenToAlloc;
- //DataFileMgr::grow(loc, lenToAlloc);
+ //DataFileMgr::grow(loc, lenToAlloc);
DiskLoc newDelLoc = loc;
newDelLoc.inc(lenToAlloc);
DeletedRecord *newDel = DataFileMgr::makeDeletedRecord(newDelLoc, left);
@@ -311,7 +313,7 @@ namespace mongo {
int a = cur.a();
if ( a < -1 || a >= 100000 ) {
problem() << "~~ Assertion - cur out of range in _alloc() " << cur.toString() <<
- " a:" << a << " b:" << b << " chain:" << chain << '\n';
+ " a:" << a << " b:" << b << " chain:" << chain << '\n';
sayDbContext();
if ( cur == *prev )
prev->Null();
@@ -347,7 +349,7 @@ namespace mongo {
cur.Null();
}
else {
- /*this defensive check only made sense for the mmap storage engine:
+ /*this defensive check only made sense for the mmap storage engine:
if ( r->nextDeleted.getOfs() == 0 ) {
problem() << "~~ Assertion - bad nextDeleted " << r->nextDeleted.toString() <<
" b:" << b << " chain:" << chain << ", fixing.\n";
@@ -417,7 +419,7 @@ namespace mongo {
out() << '\n';
out() << " magic: " << hex << e.ext()->magic << dec << " extent->ns: " << e.ext()->nsDiagnostic.toString() << '\n';
out() << " fr: " << e.ext()->firstRecord.toString() <<
- " lr: " << e.ext()->lastRecord.toString() << " extent->len: " << e.ext()->length << '\n';
+ " lr: " << e.ext()->lastRecord.toString() << " extent->len: " << e.ext()->length << '\n';
}
assert( len * 5 > lastExtentSize ); // assume it is unusually large record; if not, something is broken
}
@@ -451,7 +453,7 @@ namespace mongo {
assert( i >= 0 && i <= 1 );
Namespace n(ns);
Namespace extra(n.extraName(i).c_str()); // throws userexception if ns name too long
-
+
massert( 10350 , "allocExtra: base ns missing?", d );
massert( 10351 , "allocExtra: extra already exists", ht->get(extra) == 0 );
@@ -471,7 +473,7 @@ namespace mongo {
*getDur().writing(&extraOffset) = ofs;
assert( extra() == e );
}
- else {
+ else {
Extra *hd = extra();
assert( hd->next(this) == 0 );
hd->setNext(ofs);
@@ -485,7 +487,7 @@ namespace mongo {
try {
id = &idx(nIndexes,true);
}
- catch(DBException&) {
+ catch(DBException&) {
allocExtra(thisns, nIndexes);
id = &idx(nIndexes,false);
}
@@ -497,7 +499,7 @@ namespace mongo {
}
// must be called when renaming a NS to fix up extra
- void NamespaceDetails::copyingFrom(const char *thisns, NamespaceDetails *src) {
+ void NamespaceDetails::copyingFrom(const char *thisns, NamespaceDetails *src) {
extraOffset = 0; // we are a copy -- the old value is wrong. fixing it up below.
Extra *se = src->extra();
int n = NIndexesBase;
@@ -511,7 +513,7 @@ namespace mongo {
Extra *nxt = allocExtra(thisns, n);
e->setNext( nxt->ofsFrom(this) );
e = nxt;
- }
+ }
assert( extraOffset );
}
}
@@ -530,30 +532,30 @@ namespace mongo {
}*/
return -1;
}
-
+
long long NamespaceDetails::storageSize( int * numExtents , BSONArrayBuilder * extentInfo ) const {
Extent * e = firstExtent.ext();
assert( e );
-
+
long long total = 0;
int n = 0;
- while ( e ){
+ while ( e ) {
total += e->length;
n++;
-
- if ( extentInfo ){
+
+ if ( extentInfo ) {
extentInfo->append( BSON( "len" << e->length << "loc" << e->myLoc.toBSONObj() ) );
}
-
+
e = e->getNextExtent();
}
-
+
if ( numExtents )
*numExtents = n;
-
+
return total;
}
-
+
NamespaceDetails *NamespaceDetails::writingWithExtra() {
vector< pair< long long, unsigned > > writeRanges;
writeRanges.push_back( make_pair( 0, sizeof( NamespaceDetails ) ) );
@@ -562,7 +564,7 @@ namespace mongo {
}
return reinterpret_cast< NamespaceDetails* >( getDur().writingRangesAtOffsets( this, writeRanges ) );
}
-
+
/* ------------------------------------------------------------------------- */
mongo::mutex NamespaceDetailsTransient::_qcMutex("qc");
@@ -576,14 +578,14 @@ namespace mongo {
_keysComputed = false;
_indexSpecs.clear();
}
-
-/* NamespaceDetailsTransient& NamespaceDetailsTransient::get(const char *ns) {
- shared_ptr< NamespaceDetailsTransient > &t = map_[ ns ];
- if ( t.get() == 0 )
- t.reset( new NamespaceDetailsTransient(ns) );
- return *t;
- }
-*/
+
+ /* NamespaceDetailsTransient& NamespaceDetailsTransient::get(const char *ns) {
+ shared_ptr< NamespaceDetailsTransient > &t = map_[ ns ];
+ if ( t.get() == 0 )
+ t.reset( new NamespaceDetailsTransient(ns) );
+ return *t;
+ }
+ */
void NamespaceDetailsTransient::clearForPrefix(const char *prefix) {
assertInWriteLock();
vector< string > found;
@@ -594,7 +596,7 @@ namespace mongo {
_map[ *i ].reset();
}
}
-
+
void NamespaceDetailsTransient::computeIndexKeys() {
_keysComputed = true;
_indexKeys.clear();
@@ -636,92 +638,92 @@ namespace mongo {
void renameNamespace( const char *from, const char *to ) {
NamespaceIndex *ni = nsindex( from );
- assert( ni );
+ assert( ni );
assert( ni->details( from ) );
assert( ! ni->details( to ) );
-
- // Our namespace and index details will move to a different
- // memory location. The only references to namespace and
- // index details across commands are in cursors and nsd
- // transient (including query cache) so clear these.
- ClientCursor::invalidate( from );
- NamespaceDetailsTransient::clearForPrefix( from );
-
- NamespaceDetails *details = ni->details( from );
- ni->add_ns( to, *details );
+
+ // Our namespace and index details will move to a different
+ // memory location. The only references to namespace and
+ // index details across commands are in cursors and nsd
+ // transient (including query cache) so clear these.
+ ClientCursor::invalidate( from );
+ NamespaceDetailsTransient::clearForPrefix( from );
+
+ NamespaceDetails *details = ni->details( from );
+ ni->add_ns( to, *details );
NamespaceDetails *todetails = ni->details( to );
- try {
+ try {
todetails->copyingFrom(to, details); // fixes extraOffset
}
- catch( DBException& ) {
+ catch( DBException& ) {
// could end up here if .ns is full - if so try to clean up / roll back a little
ni->kill_ns(to);
throw;
}
- ni->kill_ns( from );
- details = todetails;
-
- BSONObj oldSpec;
- char database[MaxDatabaseNameLen];
- nsToDatabase(from, database);
- string s = database;
- s += ".system.namespaces";
- assert( Helpers::findOne( s.c_str(), BSON( "name" << from ), oldSpec ) );
-
- BSONObjBuilder newSpecB;
- BSONObjIterator i( oldSpec.getObjectField( "options" ) );
- while( i.more() ) {
- BSONElement e = i.next();
- if ( strcmp( e.fieldName(), "create" ) != 0 )
- newSpecB.append( e );
- else
- newSpecB << "create" << to;
- }
- BSONObj newSpec = newSpecB.done();
- addNewNamespaceToCatalog( to, newSpec.isEmpty() ? 0 : &newSpec );
-
- deleteObjects( s.c_str(), BSON( "name" << from ), false, false, true );
- // oldSpec variable no longer valid memory
-
- BSONObj oldIndexSpec;
- s = database;
- s += ".system.indexes";
- while( Helpers::findOne( s.c_str(), BSON( "ns" << from ), oldIndexSpec ) ) {
- BSONObjBuilder newIndexSpecB;
- BSONObjIterator i( oldIndexSpec );
- while( i.more() ) {
- BSONElement e = i.next();
- if ( strcmp( e.fieldName(), "ns" ) != 0 )
- newIndexSpecB.append( e );
- else
- newIndexSpecB << "ns" << to;
- }
- BSONObj newIndexSpec = newIndexSpecB.done();
- DiskLoc newIndexSpecLoc = theDataFileMgr.insert( s.c_str(), newIndexSpec.objdata(), newIndexSpec.objsize(), true, BSONElement(), false );
- int indexI = details->findIndexByName( oldIndexSpec.getStringField( "name" ) );
- IndexDetails &indexDetails = details->idx(indexI);
- string oldIndexNs = indexDetails.indexNamespace();
- indexDetails.info = newIndexSpecLoc;
- string newIndexNs = indexDetails.indexNamespace();
-
- BtreeBucket::renameIndexNamespace( oldIndexNs.c_str(), newIndexNs.c_str() );
- deleteObjects( s.c_str(), oldIndexSpec.getOwned(), true, false, true );
- }
- }
-
- bool legalClientSystemNS( const string& ns , bool write ){
+ ni->kill_ns( from );
+ details = todetails;
+
+ BSONObj oldSpec;
+ char database[MaxDatabaseNameLen];
+ nsToDatabase(from, database);
+ string s = database;
+ s += ".system.namespaces";
+ assert( Helpers::findOne( s.c_str(), BSON( "name" << from ), oldSpec ) );
+
+ BSONObjBuilder newSpecB;
+ BSONObjIterator i( oldSpec.getObjectField( "options" ) );
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if ( strcmp( e.fieldName(), "create" ) != 0 )
+ newSpecB.append( e );
+ else
+ newSpecB << "create" << to;
+ }
+ BSONObj newSpec = newSpecB.done();
+ addNewNamespaceToCatalog( to, newSpec.isEmpty() ? 0 : &newSpec );
+
+ deleteObjects( s.c_str(), BSON( "name" << from ), false, false, true );
+ // oldSpec variable no longer valid memory
+
+ BSONObj oldIndexSpec;
+ s = database;
+ s += ".system.indexes";
+ while( Helpers::findOne( s.c_str(), BSON( "ns" << from ), oldIndexSpec ) ) {
+ BSONObjBuilder newIndexSpecB;
+ BSONObjIterator i( oldIndexSpec );
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if ( strcmp( e.fieldName(), "ns" ) != 0 )
+ newIndexSpecB.append( e );
+ else
+ newIndexSpecB << "ns" << to;
+ }
+ BSONObj newIndexSpec = newIndexSpecB.done();
+ DiskLoc newIndexSpecLoc = theDataFileMgr.insert( s.c_str(), newIndexSpec.objdata(), newIndexSpec.objsize(), true, BSONElement(), false );
+ int indexI = details->findIndexByName( oldIndexSpec.getStringField( "name" ) );
+ IndexDetails &indexDetails = details->idx(indexI);
+ string oldIndexNs = indexDetails.indexNamespace();
+ indexDetails.info = newIndexSpecLoc;
+ string newIndexNs = indexDetails.indexNamespace();
+
+ BtreeBucket::renameIndexNamespace( oldIndexNs.c_str(), newIndexNs.c_str() );
+ deleteObjects( s.c_str(), oldIndexSpec.getOwned(), true, false, true );
+ }
+ }
+
+ bool legalClientSystemNS( const string& ns , bool write ) {
if( ns == "local.system.replset" ) return true;
if ( ns.find( ".system.users" ) != string::npos )
return true;
- if ( ns.find( ".system.js" ) != string::npos ){
+ if ( ns.find( ".system.js" ) != string::npos ) {
if ( write )
Scope::storedFuncMod();
return true;
}
-
+
return false;
}
-
+
} // namespace mongo
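
In the non-capped path of addDeletedRec() above, a freed record is pushed onto the head of deletedList[bucket(length)], giving one intrusive singly linked free list per size class. A small sketch of that allocator shape, with vector indices standing in for DiskLocs and an illustrative bucket() rather than mongo's actual size table:

    #include <array>
    #include <iostream>
    #include <vector>

    constexpr int Buckets = 19;
    constexpr int kNone = -1;

    struct DeletedRecord { int length; int nextDeleted; };

    int bucket(int len) {                 // illustrative size classing, not mongo's table
        int b = 0;
        for (int cap = 32; cap < len && b < Buckets - 1; cap *= 2) ++b;
        return b;
    }

    int main() {
        std::vector<DeletedRecord> store;      // "the datafile"
        std::array<int, Buckets> deletedList;  // one free-list head per size class
        deletedList.fill(kNone);

        auto addDeletedRec = [&](int len) {    // push-front onto the matching bucket
            int loc = (int)store.size();
            store.push_back({len, deletedList[bucket(len)]});
            deletedList[bucket(len)] = loc;
        };
        addDeletedRec(100); addDeletedRec(120); addDeletedRec(1000);

        for (int b = 0; b < Buckets; ++b)
            for (int loc = deletedList[b]; loc != kNone; loc = store[loc].nextDeleted)
                std::cout << "bucket " << b << ": len " << store[loc].length << '\n';
    }

Allocation then walks only the lists whose bucket is large enough, which is what _alloc() above is chasing through `cur`/`prev`.
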
diff --git a/db/namespace.h b/db/namespace.h
index 3c5c649980e..33c5ad2d12c 100644
--- a/db/namespace.h
+++ b/db/namespace.h
@@ -27,14 +27,14 @@
namespace mongo {
- /* in the mongo source code, "client" means "database". */
+ /* in the mongo source code, "client" means "database". */
const int MaxDatabaseNameLen = 256; // max str len for the db name, including null char
- /* e.g.
- NamespaceString ns("acme.orders");
- cout << ns.coll; // "orders"
- */
+ /* e.g.
+ NamespaceString ns("acme.orders");
+ cout << ns.coll; // "orders"
+ */
class NamespaceString {
public:
string db;
@@ -45,7 +45,7 @@ namespace mongo {
string ns() const { return db + '.' + coll; }
bool isSystem() const { return strncmp(coll.c_str(), "system.", 7) == 0; }
private:
- void init(const char *ns) {
+ void init(const char *ns) {
const char *p = strchr(ns, '.');
if( p == 0 ) return;
db = string(ns, p - ns);
@@ -54,7 +54,7 @@ namespace mongo {
};
#pragma pack(1)
- /* This helper class is used to make the HashMap below in NamespaceDetails e.g. see line:
+ /* This helper class is used to make the HashMap below in NamespaceDetails e.g. see line:
HashTable<Namespace,NamespaceDetails> *ht;
*/
class Namespace {
@@ -70,14 +70,14 @@ namespace mongo {
string toString() const { return (string) buf; }
operator string() const { return (string) buf; }
- /* NamespaceDetails::Extra was added after fact to allow chaining of data blocks to support more than 10 indexes
- (more than 10 IndexDetails). It's a bit hacky because of this late addition with backward
+ /* NamespaceDetails::Extra was added after fact to allow chaining of data blocks to support more than 10 indexes
+ (more than 10 IndexDetails). It's a bit hacky because of this late addition with backward
file support. */
string extraName(int i) const;
bool isExtra() const; /* ends with $extr... -- when true an extra block not a normal NamespaceDetails block */
/** ( foo.bar ).getSisterNS( "blah" ) == foo.blah
- perhaps this should move to the NamespaceString helper?
+ perhaps this should move to the NamespaceString helper?
*/
string getSisterNS( const char * local ) const;
@@ -94,7 +94,7 @@ namespace mongo {
namespace mongo {
/** @return true if a client can modify this namespace
- things like *.system.users
+ things like *.system.users
@param write used when .system.js
*/
bool legalClientSystemNS( const string& ns , bool write );
@@ -108,7 +108,7 @@ namespace mongo {
extern int bucketSizes[];
#pragma pack(1)
- /* NamespaceDetails : this is the "header" for a collection that has all its details.
+ /* NamespaceDetails : this is the "header" for a collection that has all its details.
It's in the .ns file and this is a memory mapped region (thus the pack pragma above).
*/
class NamespaceDetails {
@@ -118,11 +118,11 @@ namespace mongo {
/*-------- data fields, as present on disk : */
DiskLoc firstExtent;
DiskLoc lastExtent;
- /* NOTE: capped collections v1 override the meaning of deletedList.
+ /* NOTE: capped collections v1 override the meaning of deletedList.
deletedList[0] points to a list of free records (DeletedRecord's) for all extents in
the capped namespace.
- deletedList[1] points to the last record in the prev extent. When the "current extent"
- changes, this value is updated. !deletedList[1].isValid() when this value is not
+ deletedList[1] points to the last record in the prev extent. When the "current extent"
+ changes, this value is updated. !deletedList[1].isValid() when this value is not
yet computed.
*/
DiskLoc deletedList[Buckets];
@@ -140,14 +140,14 @@ namespace mongo {
public:
// ofs 352 (16 byte aligned)
int capped;
- int max; // max # of objects for a capped table. TODO: should this be 64 bit?
+ int max; // max # of objects for a capped table. TODO: should this be 64 bit?
double paddingFactor; // 1.0 = no padding.
// ofs 386 (16)
int flags;
DiskLoc capExtent;
DiskLoc capFirstNewRecord;
- unsigned short dataFileVersion; // NamespaceDetails version. So we can do backward compatibility in the future. See filever.h
- unsigned short indexFileVersion;
+ unsigned short dataFileVersion; // NamespaceDetails version. So we can do backward compatibility in the future. See filever.h
+ unsigned short indexFileVersion;
unsigned long long multiKeyIndexBits;
private:
// ofs 400 (16)
@@ -157,7 +157,7 @@ namespace mongo {
int backgroundIndexBuildInProgress; // 1 if in prog
unsigned reservedB;
// ofs 424 (8)
- struct Capped2 {
+ struct Capped2 {
unsigned long long cc2_ptr; // see capped.cpp
unsigned fileNumber;
} capped2;
@@ -166,32 +166,32 @@ namespace mongo {
explicit NamespaceDetails( const DiskLoc &loc, bool _capped );
- class Extra {
+ class Extra {
long long _next;
- public:
+ public:
IndexDetails details[NIndexesExtra];
- private:
+ private:
unsigned reserved2;
unsigned reserved3;
- Extra(const Extra&) { assert(false); }
- Extra& operator=(const Extra& r) { assert(false); return *this; }
+ Extra(const Extra&) { assert(false); }
+ Extra& operator=(const Extra& r) { assert(false); return *this; }
public:
Extra() { }
- long ofsFrom(NamespaceDetails *d) {
+ long ofsFrom(NamespaceDetails *d) {
return ((char *) this) - ((char *) d);
}
void init() { memset(this, 0, sizeof(Extra)); }
- Extra* next(NamespaceDetails *d) {
+ Extra* next(NamespaceDetails *d) {
if( _next == 0 ) return 0;
return (Extra*) (((char *) d) + _next);
}
void setNext(long ofs) { *getDur().writing(&_next) = ofs; }
- void copy(NamespaceDetails *d, const Extra& e) {
+ void copy(NamespaceDetails *d, const Extra& e) {
memcpy(this, &e, sizeof(Extra));
_next = 0;
}
};
- Extra* extra() {
+ Extra* extra() {
if( extraOffset == 0 ) return 0;
return (Extra *) (((char *) this) + extraOffset);
}
@@ -228,17 +228,17 @@ namespace mongo {
* collection. The collection cannot be completely emptied using this
* function. An assertion will be thrown if that is attempted.
* @param inclusive - Truncate 'end' as well iff true
- */
+ */
void cappedTruncateAfter(const char *ns, DiskLoc end, bool inclusive);
/** Remove all documents from the capped collection */
void emptyCappedCollection(const char *ns);
-
- /* when a background index build is in progress, we don't count the index in nIndexes until
+
+ /* when a background index build is in progress, we don't count the index in nIndexes until
complete, yet need to still use it in _indexRecord() - thus we use this function for that.
*/
int nIndexesBeingBuilt() const { return nIndexes + backgroundIndexBuildInProgress; }
- /* NOTE: be careful with flags. are we manipulating them in read locks? if so,
+ /* NOTE: be careful with flags. are we manipulating them in read locks? if so,
this isn't thread safe. TODO
*/
enum NamespaceFlags {
@@ -248,12 +248,12 @@ namespace mongo {
IndexDetails& idx(int idxNo, bool missingExpected = false );
/** get the IndexDetails for the index currently being built in the background. (there is at most one) */
- IndexDetails& backgroundIdx() {
+ IndexDetails& backgroundIdx() {
DEV assert(backgroundIndexBuildInProgress);
return idx(nIndexes);
}
- class IndexIterator {
+ class IndexIterator {
public:
int pos() { return i; } // note this is the next one to come
bool more() { return i < n; }
@@ -275,13 +275,13 @@ namespace mongo {
for these, we have to do some dedup work on queries.
*/
bool isMultikey(int i) const { return (multiKeyIndexBits & (((unsigned long long) 1) << i)) != 0; }
- void setIndexIsMultikey(int i) {
+ void setIndexIsMultikey(int i) {
dassert( i < NIndexesMax );
unsigned long long x = ((unsigned long long) 1) << i;
if( multiKeyIndexBits & x ) return;
*getDur().writing(&multiKeyIndexBits) |= x;
}
- void clearIndexIsMultikey(int i) {
+ void clearIndexIsMultikey(int i) {
dassert( i < NIndexesMax );
unsigned long long x = ((unsigned long long) 1) << i;
if( (multiKeyIndexBits & x) == 0 ) return;
@@ -289,7 +289,7 @@ namespace mongo {
}
/* add a new index. does not add to system.indexes etc. - just to NamespaceDetails.
- caller must populate returned object.
+ caller must populate returned object.
*/
IndexDetails& addIndex(const char *thisns, bool resetTransient=true);
@@ -314,22 +314,22 @@ namespace mongo {
// @return offset in indexes[]
int findIndexByKeyPattern(const BSONObj& keyPattern);
-
+
void findIndexByType( const string& name , vector<int>& matches ) {
IndexIterator i = ii();
- while ( i.more() ){
+ while ( i.more() ) {
if ( i.next().getSpec().getTypeName() == name )
matches.push_back( i.pos() - 1 );
}
}
- /* @return -1 = not found
+ /* @return -1 = not found
generally id is first index, so not that expensive an operation (assuming present).
*/
int findIdIndex() {
IndexIterator i = ii();
while( i.more() ) {
- if( i.next().isIdIndex() )
+ if( i.next().isIdIndex() )
return i.pos()-1;
}
return -1;
@@ -354,18 +354,18 @@ namespace mongo {
DiskLoc lastRecord( const DiskLoc &startExtent = DiskLoc() ) const;
long long storageSize( int * numExtents = 0 , BSONArrayBuilder * extentInfo = 0 ) const;
- int averageObjectSize(){
+ int averageObjectSize() {
if ( stats.nrecords == 0 )
return 5;
return (int) (stats.datasize / stats.nrecords);
}
-
+
NamespaceDetails *writingWithoutExtra() {
return ( NamespaceDetails* ) getDur().writingPtr( this, sizeof( NamespaceDetails ) );
}
/** Make all linked Extra objects writeable as well */
NamespaceDetails *writingWithExtra();
-
+
private:
DiskLoc _alloc(const char *ns, int len);
void maybeComplain( const char *ns, int len ) const;
@@ -383,8 +383,8 @@ namespace mongo {
void cappedTruncateLastDelUpdate();
BOOST_STATIC_ASSERT( NIndexesMax <= NIndexesBase + NIndexesExtra*2 );
BOOST_STATIC_ASSERT( NIndexesMax <= 64 ); // multiKey bits
- BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::ExtraOld) == 496 );
- BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::Extra) == 496 );
+ BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::ExtraOld) == 496 );
+ BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::Extra) == 496 );
}; // NamespaceDetails
#pragma pack()
@@ -401,7 +401,7 @@ namespace mongo {
todo: cleanup code, need abstractions and separation
*/
class NamespaceDetailsTransient : boost::noncopyable {
- BOOST_STATIC_ASSERT( sizeof(NamespaceDetails) == 496 );
+ BOOST_STATIC_ASSERT( sizeof(NamespaceDetails) == 496 );
/* general ------------------------------------------------------------- */
private:
@@ -409,18 +409,18 @@ namespace mongo {
void reset();
static std::map< string, shared_ptr< NamespaceDetailsTransient > > _map;
public:
- NamespaceDetailsTransient(const char *ns) : _ns(ns), _keysComputed(false), _qcWriteCount(){ }
+ NamespaceDetailsTransient(const char *ns) : _ns(ns), _keysComputed(false), _qcWriteCount() { }
/* _get() is not threadsafe -- see get_inlock() comments */
static NamespaceDetailsTransient& _get(const char *ns);
/* use get_w() when doing write operations */
- static NamespaceDetailsTransient& get_w(const char *ns) {
+ static NamespaceDetailsTransient& get_w(const char *ns) {
DEV assertInWriteLock();
return _get(ns);
}
void addedIndex() { reset(); }
void deletedIndex() { reset(); }
/* Drop cached information on all namespaces beginning with the specified prefix.
- Can be useful as index namespaces share the same start as the regular collection.
+ Can be useful as index namespaces share the same start as the regular collection.
SLOW - sequential scan of all NamespaceDetailsTransient objects */
static void clearForPrefix(const char *prefix);
@@ -446,11 +446,11 @@ namespace mongo {
map<const IndexDetails*,IndexSpec> _indexSpecs;
static mongo::mutex _isMutex;
public:
- const IndexSpec& getIndexSpec( const IndexDetails * details ){
+ const IndexSpec& getIndexSpec( const IndexDetails * details ) {
IndexSpec& spec = _indexSpecs[details];
- if ( ! spec._finishedInit ){
+ if ( ! spec._finishedInit ) {
scoped_lock lk(_isMutex);
- if ( ! spec._finishedInit ){
+ if ( ! spec._finishedInit ) {
spec.reset( details );
assert( spec._finishedInit );
}
@@ -506,7 +506,7 @@ namespace mongo {
public:
NamespaceIndex(const string &dir, const string &database) :
- ht( 0 ), dir_( dir ), database_( database ) {}
+ ht( 0 ), dir_( dir ), database_( database ) {}
/* returns true if new db will be created if we init lazily */
bool exists() const;
@@ -515,13 +515,13 @@ namespace mongo {
void add_ns(const char *ns, DiskLoc& loc, bool capped) {
NamespaceDetails details( loc, capped );
- add_ns( ns, details );
+ add_ns( ns, details );
}
- void add_ns( const char *ns, const NamespaceDetails &details ) {
+ void add_ns( const char *ns, const NamespaceDetails &details ) {
init();
Namespace n(ns);
uassert( 10081 , "too many namespaces/collections", ht->put(n, details));
- }
+ }
/* just for diagnostics */
/*size_t detailsOffset(NamespaceDetails *d) {
@@ -563,7 +563,7 @@ namespace mongo {
private:
void maybeMkdir() const;
-
+
MongoMMF f;
HashTable<Namespace,NamespaceDetails> *ht;
string dir_;
@@ -577,7 +577,7 @@ namespace mongo {
// (Arguments should include db name)
void renameNamespace( const char *from, const char *to );
- // "database.a.b.c" -> "database"
+ // "database.a.b.c" -> "database"
inline void nsToDatabase(const char *ns, char *database) {
const char *p = ns;
char *q = database;
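A minimal sketch of the multikey bookkeeping above, assuming a bare 64-bit mask (MultikeyBits is a stand-in for the real NamespaceDetails field and omits the getDur() durability hook; one flag per index is why NIndexesMax is asserted to be <= 64):

#include <cassert>

struct MultikeyBits {
    unsigned long long bits;                      // bit i set => index i is multikey
    MultikeyBits() : bits(0) {}
    bool isMultikey(int i) const { return (bits & (1ULL << i)) != 0; }
    void set(int i)   { assert( i < 64 ); bits |=  (1ULL << i); }
    void clear(int i) { assert( i < 64 ); bits &= ~(1ULL << i); }
};

int main() {
    MultikeyBits m;
    m.set(3);                                     // index 3 produced 2+ keys for one document
    assert( m.isMultikey(3) && !m.isMultikey(2) );
    m.clear(3);                                   // e.g. after that index is rebuilt
    assert( !m.isMultikey(3) );
    return 0;
}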
diff --git a/db/nonce.cpp b/db/nonce.cpp
index 7e2dc5ab417..5698f73af61 100644
--- a/db/nonce.cpp
+++ b/db/nonce.cpp
@@ -22,18 +22,18 @@
extern int do_md5_test(void);
namespace mongo {
-
+
BOOST_STATIC_ASSERT( sizeof(nonce) == 8 );
- Security::Security() {
- static int n;
- massert( 10352 , "Security is a singleton class", ++n == 1);
- init();
- }
+ Security::Security() {
+ static int n;
+ massert( 10352 , "Security is a singleton class", ++n == 1);
+ init();
+ }
- void Security::init(){
- if( _initialized ) return;
- _initialized = true;
+ void Security::init() {
+ if( _initialized ) return;
+ _initialized = true;
#if defined(__linux__) || defined(__sunos__)
_devrandom = new ifstream("/dev/urandom", ios::binary|ios::in);
@@ -43,20 +43,20 @@ namespace mongo {
#else
srandomdev();
#endif
-
+
#ifndef NDEBUG
if ( do_md5_test() )
- massert( 10354 , "md5 unit test fails", false);
+ massert( 10354 , "md5 unit test fails", false);
#endif
}
-
- nonce Security::getNonce(){
+
+ nonce Security::getNonce() {
static mongo::mutex m("getNonce");
scoped_lock lk(m);
- /* question/todo: /dev/random works on OS X. is it better
- to use that than random() / srandom()?
- */
+ /* question/todo: /dev/random works on OS X. is it better
+ to use that than random() / srandom()?
+ */
nonce n;
#if defined(__linux__) || defined(__sunos__)
@@ -73,8 +73,8 @@ namespace mongo {
return n;
}
unsigned getRandomNumber() { return (unsigned) security.getNonce(); }
-
- bool Security::_initialized;
+
+ bool Security::_initialized;
Security security;
-
+
} // namespace mongo
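getNonce() above serializes reads of a single /dev/urandom stream behind a mutex; a minimal standalone sketch of that pattern (POSIX assumed, std::mutex standing in for mongo::mutex):

#include <fstream>
#include <mutex>

typedef unsigned long long nonce;

nonce getNonce() {
    static std::mutex m;
    static std::ifstream dev("/dev/urandom", std::ios::binary | std::ios::in);
    std::lock_guard<std::mutex> lk(m);            // one reader at a time
    nonce n;
    dev.read(reinterpret_cast<char*>(&n), sizeof(n));
    return n;
}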
diff --git a/db/nonce.h b/db/nonce.h
index 593931f35cf..21592ab7aa7 100644
--- a/db/nonce.h
+++ b/db/nonce.h
@@ -20,23 +20,23 @@
namespace mongo {
typedef unsigned long long nonce;
-
+
struct Security {
Security();
nonce getNonce();
- /** safe during global var initialization */
- nonce getNonceInitSafe() {
- init();
- return getNonce();
- }
- private:
+ /** safe during global var initialization */
+ nonce getNonceInitSafe() {
+ init();
+ return getNonce();
+ }
+ private:
ifstream *_devrandom;
- static bool _initialized;
- void init(); // can call more than once
+ static bool _initialized;
+ void init(); // can call more than once
};
-
+
extern Security security;
-
+
} // namespace mongo
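The getNonceInitSafe()/init() pair above exists because a nonce may be requested while globals are still being constructed; init() is idempotent so both entry points are safe. A sketch of that shape (Lazy is illustrative, not the real Security struct):

struct Lazy {
    static bool _initialized;        // static: has a defined value even before ctors run
    void init() {                    // may be called more than once; only the first does work
        if ( _initialized ) return;
        _initialized = true;
        // ... open /dev/urandom, seed the PRNG, etc.
    }
    unsigned long long getInitSafe() {
        init();                      // force initialization on first use
        return 0;                    // then produce the real nonce
    }
};
bool Lazy::_initialized;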
diff --git a/db/oplog.cpp b/db/oplog.cpp
index 548536d75e5..0c19dc58f21 100644
--- a/db/oplog.cpp
+++ b/db/oplog.cpp
@@ -28,13 +28,13 @@ namespace mongo {
void logOpForSharding( const char * opstr , const char * ns , const BSONObj& obj , BSONObj * patt );
- int __findingStartInitialTimeout = 5; // configurable for testing
+ int __findingStartInitialTimeout = 5; // configurable for testing
// cached copies of these...so don't rename them, drop them, etc.!!!
static NamespaceDetails *localOplogMainDetails = 0;
static Database *localDB = 0;
static NamespaceDetails *rsOplogDetails = 0;
- void oplogCheckCloseDatabase( Database * db ){
+ void oplogCheckCloseDatabase( Database * db ) {
localDB = 0;
localOplogMainDetails = 0;
rsOplogDetails = 0;
@@ -45,10 +45,10 @@ namespace mongo {
uassert(13288, "replSet error write op to db before replSet initialized", str::startsWith(ns, "local.") || *opstr == 'n');
}
- /** write an op to the oplog that is already built.
+ /** write an op to the oplog that is already built.
todo : make _logOpRS() call this so we don't repeat ourselves?
*/
- void _logOpObjRS(const BSONObj& op) {
+ void _logOpObjRS(const BSONObj& op) {
DEV assertInWriteLock();
const OpTime ts = op["ts"]._opTime();
@@ -83,7 +83,7 @@ namespace mongo {
}
}
- /** given a BSON object, create a new one at dst which is the existing (partial) object
+ /** given a BSON object, create a new one at dst which is the existing (partial) object
with a new object element appended at the end with fieldname "o".
@param partial already built object with everything except the o member. e.g. something like:
@@ -117,8 +117,8 @@ namespace mongo {
DEV assertInWriteLock();
// ^- static is safe as we are in write lock
static BufBuilder bufbuilder(8*1024);
-
- if ( strncmp(ns, "local.", 6) == 0 ){
+
+ if ( strncmp(ns, "local.", 6) == 0 ) {
if ( strncmp(ns, "local.slaves", 12) == 0 )
resetSlaveCache();
return;
@@ -127,7 +127,7 @@ namespace mongo {
const OpTime ts = OpTime::now();
long long hashNew;
- if( theReplSet ) {
+ if( theReplSet ) {
massert(13312, "replSet error : logOp() but not primary?", theReplSet->box.getState().primary());
hashNew = (theReplSet->lastH * 131 + ts.asLL()) * 17 + theReplSet->selfId();
}
@@ -215,9 +215,9 @@ namespace mongo {
static void _logOpOld(const char *opstr, const char *ns, const char *logNS, const BSONObj& obj, BSONObj *o2, bool *bb ) {
DEV assertInWriteLock();
static BufBuilder bufbuilder(8*1024);
-
- if ( strncmp(ns, "local.", 6) == 0 ){
- if ( strncmp(ns, "local.slaves", 12) == 0 ){
+
+ if ( strncmp(ns, "local.", 6) == 0 ) {
+ if ( strncmp(ns, "local.slaves", 12) == 0 ) {
resetSlaveCache();
}
return;
@@ -225,7 +225,7 @@ namespace mongo {
const OpTime ts = OpTime::now();
Client::Context context;
-
+
/* we jump through a bunch of hoops here to avoid copying the obj buffer twice --
instead we do a single copy to the destination position in the memory mapped file.
*/
@@ -256,7 +256,8 @@ namespace mongo {
}
Client::Context ctx( logNS , localDB, false );
r = theDataFileMgr.fast_oplog_insert(localOplogMainDetails, logNS, len);
- } else {
+ }
+ else {
Client::Context ctx( logNS, dbpath, 0, false );
assert( nsdetails( logNS ) );
// first we allocate the space, then we fill it below.
@@ -275,17 +276,17 @@ namespace mongo {
}
static void (*_logOp)(const char *opstr, const char *ns, const char *logNS, const BSONObj& obj, BSONObj *o2, bool *bb ) = _logOpOld;
- void newReplUp() {
+ void newReplUp() {
replSettings.master = true;
- _logOp = _logOpRS;
+ _logOp = _logOpRS;
}
- void newRepl() {
+ void newRepl() {
replSettings.master = true;
- _logOp = _logOpUninitialized;
+ _logOp = _logOpUninitialized;
}
void oldRepl() { _logOp = _logOpOld; }
- void logKeepalive() {
+ void logKeepalive() {
_logOp("n", "", 0, BSONObj(), 0, 0);
}
void logOpComment(const BSONObj& obj) {
@@ -306,9 +307,9 @@ namespace mongo {
if ( replSettings.master ) {
_logOp(opstr, ns, 0, obj, patt, b);
}
-
+
logOpForSharding( opstr , ns , obj , patt );
- }
+ }
void createOplog() {
dblock lk;
@@ -320,15 +321,15 @@ namespace mongo {
ns = rsoplog;
Client::Context ctx(ns);
-
+
NamespaceDetails * nsd = nsdetails( ns );
if ( nsd ) {
-
- if ( cmdLine.oplogSize != 0 ){
+
+ if ( cmdLine.oplogSize != 0 ) {
int o = (int)(nsd->storageSize() / ( 1024 * 1024 ) );
int n = (int)(cmdLine.oplogSize / ( 1024 * 1024 ) );
- if ( n != o ){
+ if ( n != o ) {
stringstream ss;
ss << "cmdline oplogsize (" << n << ") different than existing (" << o << ") see: http://dochub.mongodb.org/core/increase-oplog";
log() << ss.str() << endl;
@@ -345,19 +346,19 @@ namespace mongo {
}
return;
}
-
+
/* create an oplog collection, if it doesn't yet exist. */
BSONObjBuilder b;
double sz;
if ( cmdLine.oplogSize != 0 )
sz = (double)cmdLine.oplogSize;
else {
- /* not specified. pick a default size */
+ /* not specified. pick a default size */
sz = 50.0 * 1000 * 1000;
if ( sizeof(int *) >= 8 ) {
#if defined(__APPLE__)
- // typically these are desktops (dev machines), so keep it smallish
- sz = (256-64) * 1000 * 1000;
+ // typically these are desktops (dev machines), so keep it smallish
+ sz = (256-64) * 1000 * 1000;
#else
sz = 990.0 * 1000 * 1000;
boost::intmax_t free = freeSpace(); //-1 if call not supported.
@@ -408,7 +409,7 @@ namespace mongo {
DEV assert( !dbMutex.isWriteLocked() );
Client *c = currentClient.get();
- if( c == 0 ) {
+ if( c == 0 ) {
Client::initThread("pretouchN");
c = &cc();
}
@@ -426,7 +427,7 @@ namespace mongo {
continue;
/* todo : other operations */
- try {
+ try {
BSONObj o = op.getObjectField(which);
BSONElement _id;
if( o.getObjectID(_id) ) {
@@ -439,7 +440,7 @@ namespace mongo {
_dummy_z += result.objsize(); // touch
}
}
- catch( DBException& e ) {
+ catch( DBException& e ) {
log() << "ignoring assertion in pretouchN() " << a << ' ' << b << ' ' << i << ' ' << e.toString() << endl;
}
}
@@ -460,7 +461,7 @@ namespace mongo {
return;
/* todo : other operations */
- try {
+ try {
BSONObj o = op.getObjectField(which);
BSONElement _id;
if( o.getObjectID(_id) ) {
@@ -474,17 +475,17 @@ namespace mongo {
_dummy_z += result.objsize(); // touch
}
}
- catch( DBException& ) {
+ catch( DBException& ) {
log() << "ignoring assertion in pretouchOperation()" << endl;
}
}
- void applyOperation_inlock(const BSONObj& op , bool fromRepl ){
+ void applyOperation_inlock(const BSONObj& op , bool fromRepl ) {
OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;
- if( logLevel >= 6 )
+ if( logLevel >= 6 )
log() << "applying op: " << op << endl;
-
+
assertInWriteLock();
OpDebug debug;
@@ -516,11 +517,11 @@ namespace mongo {
else {
BSONObjBuilder b;
b.append(_id);
-
+
/* erh 10/16/2009 - this is probably not relevant any more since it's auto-created, but not worth removing */
- RARELY ensureHaveIdIndex(ns); // otherwise updates will be slow
+ RARELY ensureHaveIdIndex(ns); // otherwise updates will be slow
- /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
+ /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
then. very few upserts will not be inserts...
*/
updateObjects(ns, o, b.done(), true, false, false , debug );
@@ -529,7 +530,7 @@ namespace mongo {
}
else if ( *opType == 'u' ) {
opCounters->gotUpdate();
-
+
RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
updateObjects(ns, o, op.getObjectField("o2"), /*upsert*/ op.getBoolField("b"), /*multi*/ false, /*logop*/ false , debug );
}
@@ -544,7 +545,7 @@ namespace mongo {
else if ( *opType == 'n' ) {
// no op
}
- else if ( *opType == 'c' ){
+ else if ( *opType == 'c' ) {
opCounters->gotCommand();
BufBuilder bb;
@@ -556,9 +557,9 @@ namespace mongo {
ss << "unknown opType [" << opType << "]";
throw MsgAssertionException( 13141 , ss.str() );
}
-
+
}
-
+
class ApplyOpsCmd : public Command {
public:
virtual bool slaveOk() const { return false; }
@@ -568,17 +569,18 @@ namespace mongo {
help << "examples: { applyOps : [ ] , preCondition : [ { ns : ... , q : ... , res : ... } ] }";
}
virtual bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
-
- if ( cmdObj.firstElement().type() != Array ){
+
+ if ( cmdObj.firstElement().type() != Array ) {
errmsg = "ops has to be an array";
return false;
}
-
+
BSONObj ops = cmdObj.firstElement().Obj();
-
- { // check input
+
+ {
+ // check input
BSONObjIterator i( ops );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
if ( e.type() == Object )
continue;
@@ -587,16 +589,16 @@ namespace mongo {
return false;
}
}
-
- if ( cmdObj["preCondition"].type() == Array ){
+
+ if ( cmdObj["preCondition"].type() == Array ) {
BSONObjIterator i( cmdObj["preCondition"].Obj() );
- while ( i.more() ){
+ while ( i.more() ) {
BSONObj f = i.next().Obj();
-
+
BSONObj realres = db.findOne( f["ns"].String() , f["q"].Obj() );
-
+
Matcher m( f["res"].Obj() );
- if ( ! m.matches( realres ) ){
+ if ( ! m.matches( realres ) ) {
result.append( "got" , realres );
result.append( "whatFailed" , f );
errmsg = "pre-condition failed";
@@ -604,24 +606,24 @@ namespace mongo {
}
}
}
-
+
// apply
int num = 0;
BSONObjIterator i( ops );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
applyOperation_inlock( e.Obj() , false );
num++;
}
-
+
result.append( "applied" , num );
- if ( ! fromRepl ){
+ if ( ! fromRepl ) {
// We want this applied atomically on slaves
// so we re-wrap without the pre-condition for speed
-
+
string tempNS = str::stream() << dbname << ".$cmd";
-
+
logOp( "c" , tempNS.c_str() , cmdObj.firstElement().wrap() );
}
@@ -629,7 +631,7 @@ namespace mongo {
}
DBDirectClient db;
-
+
} applyOpsCmd;
}
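Pulling the run() body above together, a sketch of what a caller sends (the connection, namespace and values are hypothetical; the field names -- applyOps, preCondition, ns/q/res, applied -- are the ones the command reads and writes above):

#include "client/dbclient.h"
using namespace mongo;

bool runApplyOps(DBClientConnection& c) {
    // one oplog-shaped entry to apply
    BSONObj op = BSON( "op" << "i" << "ns" << "test.foo"
                       << "o" << BSON( "_id" << 1 << "x" << 42 ) );
    BSONObj cmd = BSON( "applyOps" << BSON_ARRAY( op ) <<
                        "preCondition" << BSON_ARRAY(
                            BSON( "ns" << "test.foo" <<
                                  "q" << BSON( "_id" << 1 ) <<
                                  "res" << BSON( "x" << 42 ) ) ) );
    BSONObj result;
    bool ok = c.runCommand( "test", cmd, result );
    // on success result carries { applied: <n>, ok: 1 }
    return ok;
}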
diff --git a/db/oplog.h b/db/oplog.h
index a5d27fcb0e4..c7e7e901300 100644
--- a/db/oplog.h
+++ b/db/oplog.h
@@ -16,7 +16,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-/*
+/*
local.oplog.$main is the default
*/
@@ -39,7 +39,7 @@ namespace mongo {
void _logOpObjRS(const BSONObj& op);
/** Write operation to the log (local.oplog.$main)
-
+
@param opstr
"i" insert
"u" update
@@ -48,30 +48,30 @@ namespace mongo {
"n" no-op
"db" declares presence of a database (ns is set to the db name + '.')
- See _logOp() in oplog.cpp for more details.
+ See _logOp() in oplog.cpp for more details.
*/
void logOp(const char *opstr, const char *ns, const BSONObj& obj, BSONObj *patt = 0, bool *b = 0);
void logKeepalive();
- /** puts obj in the oplog as a comment (a no-op). Just for diags.
- convention is
+ /** puts obj in the oplog as a comment (a no-op). Just for diags.
+ convention is
{ msg : "text", ... }
*/
void logOpComment(const BSONObj& obj);
void oplogCheckCloseDatabase( Database * db );
-
- extern int __findingStartInitialTimeout; // configurable for testing
+
+ extern int __findingStartInitialTimeout; // configurable for testing
class FindingStartCursor {
public:
- FindingStartCursor( const QueryPlan & qp ) :
- _qp( qp ),
- _findingStart( true ),
- _findingStartMode(),
- _findingStartTimer( 0 ),
- _findingStartCursor( 0 )
+ FindingStartCursor( const QueryPlan & qp ) :
+ _qp( qp ),
+ _findingStart( true ),
+ _findingStartMode(),
+ _findingStartTimer( 0 ),
+ _findingStartCursor( 0 )
{ init(); }
bool done() const { return !_findingStart; }
shared_ptr<Cursor> cRelease() { return _c; }
@@ -83,54 +83,54 @@ namespace mongo {
return;
}
switch( _findingStartMode ) {
- case Initial: {
- if ( !_matcher->matches( _findingStartCursor->currKey(), _findingStartCursor->currLoc() ) ) {
- _findingStart = false; // found first record out of query range, so scan normally
- _c = _qp.newCursor( _findingStartCursor->currLoc() );
- destroyClientCursor();
- return;
- }
- _findingStartCursor->advance();
- RARELY {
- if ( _findingStartTimer.seconds() >= __findingStartInitialTimeout ) {
- createClientCursor( startLoc( _findingStartCursor->currLoc() ) );
- _findingStartMode = FindExtent;
- return;
- }
- }
+ case Initial: {
+ if ( !_matcher->matches( _findingStartCursor->currKey(), _findingStartCursor->currLoc() ) ) {
+ _findingStart = false; // found first record out of query range, so scan normally
+ _c = _qp.newCursor( _findingStartCursor->currLoc() );
+ destroyClientCursor();
return;
}
- case FindExtent: {
- if ( !_matcher->matches( _findingStartCursor->currKey(), _findingStartCursor->currLoc() ) ) {
- _findingStartMode = InExtent;
- return;
- }
- DiskLoc prev = prevLoc( _findingStartCursor->currLoc() );
- if ( prev.isNull() ) { // hit beginning, so start scanning from here
- createClientCursor();
- _findingStartMode = InExtent;
+ _findingStartCursor->advance();
+ RARELY {
+ if ( _findingStartTimer.seconds() >= __findingStartInitialTimeout ) {
+ createClientCursor( startLoc( _findingStartCursor->currLoc() ) );
+ _findingStartMode = FindExtent;
return;
}
- // There might be a more efficient implementation than creating new cursor & client cursor each time,
- // not worrying about that for now
- createClientCursor( prev );
+ }
+ return;
+ }
+ case FindExtent: {
+ if ( !_matcher->matches( _findingStartCursor->currKey(), _findingStartCursor->currLoc() ) ) {
+ _findingStartMode = InExtent;
return;
}
- case InExtent: {
- if ( _matcher->matches( _findingStartCursor->currKey(), _findingStartCursor->currLoc() ) ) {
- _findingStart = false; // found first record in query range, so scan normally
- _c = _qp.newCursor( _findingStartCursor->currLoc() );
- destroyClientCursor();
- return;
- }
- _findingStartCursor->advance();
+ DiskLoc prev = prevLoc( _findingStartCursor->currLoc() );
+ if ( prev.isNull() ) { // hit beginning, so start scanning from here
+ createClientCursor();
+ _findingStartMode = InExtent;
return;
}
- default: {
- massert( 12600, "invalid _findingStartMode", false );
+ // There might be a more efficient implementation than creating new cursor & client cursor each time,
+ // not worrying about that for now
+ createClientCursor( prev );
+ return;
+ }
+ case InExtent: {
+ if ( _matcher->matches( _findingStartCursor->currKey(), _findingStartCursor->currLoc() ) ) {
+ _findingStart = false; // found first record in query range, so scan normally
+ _c = _qp.newCursor( _findingStartCursor->currLoc() );
+ destroyClientCursor();
+ return;
}
- }
- }
+ _findingStartCursor->advance();
+ return;
+ }
+ default: {
+ massert( 12600, "invalid _findingStartMode", false );
+ }
+ }
+ }
bool prepareToYield() {
if ( _findingStartCursor ) {
return _findingStartCursor->prepareToYield( _yieldData );
@@ -143,7 +143,7 @@ namespace mongo {
_findingStartCursor = 0;
}
}
- }
+ }
private:
enum FindingStartMode { Initial, FindExtent, InExtent };
const QueryPlan &_qp;
@@ -163,7 +163,7 @@ namespace mongo {
// doesn't matter if we start the extent scan with capFirstNewRecord.
return _qp.nsd()->capFirstNewRecord;
}
-
+
// should never have an empty extent in the oplog, so don't worry about that case
DiskLoc prevLoc( const DiskLoc &rec ) {
Extent *e = rec.rec()->myExtent( rec );
@@ -174,7 +174,8 @@ namespace mongo {
e = e->xprev.ext();
if ( e->myLoc != _qp.nsd()->capExtent )
return e->firstRecord;
- } else {
+ }
+ else {
if ( !e->xprev.isNull() ) {
e = e->xprev.ext();
return e->firstRecord;
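Alongside the opstr table above, a sketch of writing the documented comment no-op (the message text is arbitrary; the { msg : "text", ... } convention comes from the logOpComment() comment above):

#include "db/oplog.h"
using namespace mongo;

void noteCheckpoint() {
    // written with opstr "n" (a no-op); appears in the oplog for diagnostics only
    logOpComment( BSON( "msg" << "manual checkpoint marker" ) );
}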
diff --git a/db/oplogreader.h b/db/oplogreader.h
index 204c1c46360..54c90d908b2 100644
--- a/db/oplogreader.h
+++ b/db/oplogreader.h
@@ -8,7 +8,7 @@
namespace mongo {
- /* started abstracting out the querying of the primary/master's oplog
+ /* started abstracting out the querying of the primary/master's oplog
still fairly awkward but a start.
*/
class OplogReader {
@@ -16,9 +16,9 @@ namespace mongo {
auto_ptr<DBClientCursor> cursor;
public:
- OplogReader() {
+ OplogReader() {
}
- ~OplogReader() {
+ ~OplogReader() {
}
void resetCursor() {
@@ -29,11 +29,11 @@ namespace mongo {
_conn.reset();
}
DBClientConnection* conn() { return _conn.get(); }
- BSONObj findOne(const char *ns, const Query& q) {
+ BSONObj findOne(const char *ns, const Query& q) {
return conn()->findOne(ns, q, 0, QueryOption_SlaveOk);
}
- BSONObj getLastOp(const char *ns) {
+ BSONObj getLastOp(const char *ns) {
return findOne(ns, Query().sort(reverseNaturalObj));
}
@@ -41,7 +41,7 @@ namespace mongo {
bool connect(string hostname);
void tailCheck() {
- if( cursor.get() && cursor->isDead() ) {
+ if( cursor.get() && cursor->isDead() ) {
log() << "repl: old cursor isDead, will initiate a new one" << endl;
resetCursor();
}
@@ -49,19 +49,19 @@ namespace mongo {
bool haveCursor() { return cursor.get() != 0; }
- void query(const char *ns, const BSONObj& query) {
+ void query(const char *ns, const BSONObj& query) {
assert( !haveCursor() );
cursor = _conn->query(ns, query, 0, 0, 0, QueryOption_SlaveOk);
}
- void tailingQuery(const char *ns, const BSONObj& query) {
+ void tailingQuery(const char *ns, const BSONObj& query) {
assert( !haveCursor() );
log(2) << "repl: " << ns << ".find(" << query.toString() << ')' << endl;
- cursor = _conn->query( ns, query, 0, 0, 0,
- QueryOption_CursorTailable | QueryOption_SlaveOk | QueryOption_OplogReplay |
- /* TODO: slaveok maybe shouldn't use? */
- QueryOption_AwaitData
- );
+ cursor = _conn->query( ns, query, 0, 0, 0,
+ QueryOption_CursorTailable | QueryOption_SlaveOk | QueryOption_OplogReplay |
+ /* TODO: slaveok maybe shouldn't use? */
+ QueryOption_AwaitData
+ );
}
void tailingQueryGTE(const char *ns, OpTime t) {
@@ -72,34 +72,34 @@ namespace mongo {
tailingQuery(ns, query.done());
}
- bool more() {
+ bool more() {
assert( cursor.get() );
return cursor->more();
}
- bool moreInCurrentBatch() {
+ bool moreInCurrentBatch() {
assert( cursor.get() );
return cursor->moreInCurrentBatch();
}
/* old mongod's can't do the await flag... */
- bool awaitCapable() {
+ bool awaitCapable() {
return cursor->hasResultFlag(ResultFlag_AwaitCapable);
}
- void peek(vector<BSONObj>& v, int n) {
+ void peek(vector<BSONObj>& v, int n) {
if( cursor.get() )
cursor->peek(v,n);
}
BSONObj nextSafe() { return cursor->nextSafe(); }
- BSONObj next() {
+ BSONObj next() {
return cursor->next();
}
- void putBack(BSONObj op) {
+ void putBack(BSONObj op) {
cursor->putBack(op);
}
};
-
+
}
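A sketch of how the reader above is typically driven (the host and namespace strings are hypothetical):

#include "db/oplogreader.h"
using namespace mongo;

void tailOnce(const OpTime& lastApplied) {
    OplogReader r;
    if( !r.connect("primary.example.net:27017") )
        return;                                       // couldn't reach the master
    r.tailingQueryGTE("local.oplog.rs", lastApplied); // tailable, await-data cursor
    while( r.more() ) {
        BSONObj op = r.nextSafe();                    // throws on error results
        // ... apply op ...
    }
    r.tailCheck();                                    // drop a dead cursor so the next pass re-queries
}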
diff --git a/db/pdfile.cpp b/db/pdfile.cpp
index 591a87db3af..d78a4161a2e 100644
--- a/db/pdfile.cpp
+++ b/db/pdfile.cpp
@@ -46,11 +46,11 @@ namespace mongo {
bool inDBRepair = false;
struct doingRepair {
- doingRepair(){
+ doingRepair() {
assert( ! inDBRepair );
inDBRepair = true;
}
- ~doingRepair(){
+ ~doingRepair() {
inDBRepair = false;
}
};
@@ -63,42 +63,42 @@ namespace mongo {
return dbsInProg[db] != 0;
}
- bool BackgroundOperation::inProgForNs(const char *ns) {
+ bool BackgroundOperation::inProgForNs(const char *ns) {
assertInWriteLock();
return nsInProg.count(ns) != 0;
}
- void BackgroundOperation::assertNoBgOpInProgForDb(const char *db) {
+ void BackgroundOperation::assertNoBgOpInProgForDb(const char *db) {
uassert(12586, "cannot perform operation: a background operation is currently running for this database",
- !inProgForDb(db));
+ !inProgForDb(db));
}
- void BackgroundOperation::assertNoBgOpInProgForNs(const char *ns) {
+ void BackgroundOperation::assertNoBgOpInProgForNs(const char *ns) {
uassert(12587, "cannot perform operation: a background operation is currently running for this collection",
- !inProgForNs(ns));
- }
+ !inProgForNs(ns));
+ }
- BackgroundOperation::BackgroundOperation(const char *ns) : _ns(ns) {
+ BackgroundOperation::BackgroundOperation(const char *ns) : _ns(ns) {
assertInWriteLock();
dbsInProg[_ns.db]++;
assert( nsInProg.count(_ns.ns()) == 0 );
nsInProg.insert(_ns.ns());
}
- BackgroundOperation::~BackgroundOperation() {
+ BackgroundOperation::~BackgroundOperation() {
assertInWriteLock();
dbsInProg[_ns.db]--;
nsInProg.erase(_ns.ns());
}
void BackgroundOperation::dump(stringstream& ss) {
- if( nsInProg.size() ) {
+ if( nsInProg.size() ) {
ss << "\n<b>Background Jobs in Progress</b>\n";
for( set<string>::iterator i = nsInProg.begin(); i != nsInProg.end(); i++ )
ss << " " << *i << '\n';
}
- for( map<string,unsigned>::iterator i = dbsInProg.begin(); i != dbsInProg.end(); i++ ) {
- if( i->second )
+ for( map<string,unsigned>::iterator i = dbsInProg.begin(); i != dbsInProg.end(); i++ ) {
+ if( i->second )
ss << "database " << i->first << ": " << i->second << '\n';
}
}
@@ -118,18 +118,18 @@ namespace mongo {
void addNewNamespaceToCatalog(const char *ns, const BSONObj *options = 0);
void ensureIdIndexForNewNs(const char *ns) {
if ( ( strstr( ns, ".system." ) == 0 || legalClientSystemNS( ns , false ) ) &&
- strstr( ns, ".$freelist" ) == 0 ){
+ strstr( ns, ".$freelist" ) == 0 ) {
log( 1 ) << "adding _id index for collection " << ns << endl;
ensureHaveIdIndex( ns );
- }
+ }
}
string getDbContext() {
stringstream ss;
Client * c = currentClient.get();
- if ( c ){
+ if ( c ) {
Client::Context * cx = c->getContext();
- if ( cx ){
+ if ( cx ) {
Database *database = cx->db();
if ( database ) {
ss << database->name << ' ';
@@ -204,7 +204,7 @@ namespace mongo {
size &= 0xffffffffffffff00LL;
}
}
-
+
uassert( 10083 , "invalid size spec", size > 0 );
bool newCapped = false;
@@ -231,9 +231,10 @@ namespace mongo {
// $nExtents is just for testing - always allocate new extents
// rather than reuse existing extents so we have some predictability
// in the extent size used by our tests
- database->suitableFile( (int) size, false )->createExtent( ns, (int) size, newCapped );
+ database->suitableFile( (int) size, false )->createExtent( ns, (int) size, newCapped );
}
- } else if ( int( e.number() ) > 0 ) {
+ }
+ else if ( int( e.number() ) > 0 ) {
// We create '$nExtents' extents, each of size 'size'.
int nExtents = int( e.number() );
assert( size <= 0x7fffffff );
@@ -244,7 +245,8 @@ namespace mongo {
// in the extent size used by our tests
database->suitableFile( (int) size, false )->createExtent( ns, (int) size, newCapped );
}
- } else {
+ }
+ else {
// This is the non test case, where we don't have a $nExtents spec.
while ( size > 0 ) {
int max = MongoDataFile::maxSize() - DataFileHeader::HeaderSize;
@@ -259,15 +261,16 @@ namespace mongo {
bool ensure = false;
if ( options.getField( "autoIndexId" ).type() ) {
- if ( options["autoIndexId"].trueValue() ){
+ if ( options["autoIndexId"].trueValue() ) {
ensure = true;
}
- } else {
+ }
+ else {
if ( !newCapped ) {
ensure=true;
}
}
- if( ensure ) {
+ if( ensure ) {
if( deferIdIndex )
*deferIdIndex = true;
else
@@ -308,20 +311,22 @@ namespace mongo {
int MongoDataFile::maxSize() {
if ( sizeof( int* ) == 4 ) {
return 512 * 1024 * 1024;
- } else if ( cmdLine.smallfiles ) {
+ }
+ else if ( cmdLine.smallfiles ) {
return 0x7ff00000 >> 2;
- } else {
+ }
+ else {
return 0x7ff00000;
}
}
- void MongoDataFile::badOfs2(int ofs) const {
+ void MongoDataFile::badOfs2(int ofs) const {
stringstream ss;
ss << "bad offset:" << ofs << " accessing file: " << mmf.filename() << " - consider repairing database";
uasserted(13441, ss.str());
}
- void MongoDataFile::badOfs(int ofs) const {
+ void MongoDataFile::badOfs(int ofs) const {
stringstream ss;
ss << "bad offset:" << ofs << " accessing file: " << mmf.filename() << " - consider repairing database";
uasserted(13440, ss.str());
@@ -342,11 +347,11 @@ namespace mongo {
size = 1024 * 512 * mult;
log() << "Warning : using small files for _hudsonSmall" << endl;
}
- else if ( cmdLine.smallfiles ){
+ else if ( cmdLine.smallfiles ) {
size = size >> 2;
}
-
-
+
+
return size;
}
@@ -400,18 +405,18 @@ namespace mongo {
size = (int) sz;
}
//header = (DataFileHeader *) _p;
- if( sizeof(char *) == 4 )
+ if( sizeof(char *) == 4 )
uassert( 10084 , "can't map file memory - mongo requires 64 bit build for larger datasets", _mb != 0);
else
uassert( 10085 , "can't map file memory", _mb != 0);
header()->init(fileNo, size);
}
- void MongoDataFile::flush( bool sync ){
+ void MongoDataFile::flush( bool sync ) {
mmf.flush( sync );
}
- void addNewExtentToNamespace(const char *ns, Extent *e, DiskLoc eloc, DiskLoc emptyLoc, bool capped) {
+ void addNewExtentToNamespace(const char *ns, Extent *e, DiskLoc eloc, DiskLoc emptyLoc, bool capped) {
NamespaceIndex *ni = nsindex(ns);
NamespaceDetails *details = ni->details(ns);
if ( details ) {
@@ -466,7 +471,7 @@ namespace mongo {
return e;
}
- Extent* DataFileMgr::allocFromFreeList(const char *ns, int approxSize, bool capped) {
+ Extent* DataFileMgr::allocFromFreeList(const char *ns, int approxSize, bool capped) {
string s = cc().database()->name + ".$freelist";
NamespaceDetails *f = nsdetails(s.c_str());
if( f ) {
@@ -477,7 +482,7 @@ namespace mongo {
if( low > 2048 ) low -= 256;
high = (int) (approxSize * 1.05) + 256;
}
- else {
+ else {
low = (int) (approxSize * 0.8);
high = (int) (approxSize * 1.4);
}
@@ -487,20 +492,20 @@ namespace mongo {
int bestDiff = 0x7fffffff;
{
DiskLoc L = f->firstExtent;
- while( !L.isNull() ) {
+ while( !L.isNull() ) {
Extent * e = L.ext();
- if( e->length >= low && e->length <= high ) {
+ if( e->length >= low && e->length <= high ) {
int diff = abs(e->length - approxSize);
- if( diff < bestDiff ) {
+ if( diff < bestDiff ) {
bestDiff = diff;
best = e;
- if( diff == 0 )
+ if( diff == 0 )
break;
}
}
L = e->xnext;
++n;
-
+
}
}
OCCASIONALLY if( n > 512 ) log() << "warning: newExtent " << n << " scanned\n";
@@ -530,10 +535,10 @@ namespace mongo {
/*---------------------------------------------------------------------*/
- DiskLoc Extent::reuse(const char *nsname) {
+ DiskLoc Extent::reuse(const char *nsname) {
return getDur().writing(this)->_reuse(nsname);
}
- DiskLoc Extent::_reuse(const char *nsname) {
+ DiskLoc Extent::_reuse(const char *nsname) {
log(3) << "reset extent was:" << nsDiagnostic.toString() << " now:" << nsname << '\n';
massert( 10360 , "Extent::reset bad magic value", magic == 0x41424344 );
xnext.Null();
@@ -630,7 +635,7 @@ namespace mongo {
}
return maxExtentSize;
}
-
+
/*---------------------------------------------------------------------*/
shared_ptr<Cursor> DataFileMgr::findAll(const char *ns, const DiskLoc &startLoc) {
@@ -660,12 +665,12 @@ namespace mongo {
d->dumpDeleted(&extents);
}
- if ( d->capped )
+ if ( d->capped )
return shared_ptr<Cursor>( new ForwardCappedCursor( d , startLoc ) );
-
+
if ( !startLoc.isNull() )
- return shared_ptr<Cursor>(new BasicCursor( startLoc ));
-
+ return shared_ptr<Cursor>(new BasicCursor( startLoc ));
+
while ( e->firstRecord.isNull() && !e->xnext.isNull() ) {
/* todo: if extent is empty, free it for reuse elsewhere.
that is a bit complicated have to clean up the freelists.
@@ -686,37 +691,38 @@ namespace mongo {
if ( el.number() >= 0 )
return DataFileMgr::findAll(ns, startLoc);
-
+
// "reverse natural order"
NamespaceDetails *d = nsdetails(ns);
-
+
if ( !d )
return shared_ptr<Cursor>(new BasicCursor(DiskLoc()));
-
+
if ( !d->capped ) {
if ( !startLoc.isNull() )
- return shared_ptr<Cursor>(new ReverseCursor( startLoc ));
+ return shared_ptr<Cursor>(new ReverseCursor( startLoc ));
Extent *e = d->lastExtent.ext();
while ( e->lastRecord.isNull() && !e->xprev.isNull() ) {
OCCASIONALLY out() << " findTableScan: extent empty, skipping ahead" << endl;
e = e->getPrevExtent();
}
return shared_ptr<Cursor>(new ReverseCursor( e->lastRecord ));
- } else {
+ }
+ else {
return shared_ptr<Cursor>( new ReverseCappedCursor( d, startLoc ) );
}
}
- void printFreeList() {
+ void printFreeList() {
string s = cc().database()->name + ".$freelist";
log() << "dump freelist " << s << '\n';
NamespaceDetails *freeExtents = nsdetails(s.c_str());
- if( freeExtents == 0 ) {
+ if( freeExtents == 0 ) {
log() << " freeExtents==0" << endl;
return;
}
DiskLoc a = freeExtents->firstExtent;
- while( !a.isNull() ) {
+ while( !a.isNull() ) {
Extent *e = a.ext();
log() << " " << a.toString() << " len:" << e->length << " prev:" << e->xprev.toString() << '\n';
a = e->xnext;
@@ -735,7 +741,7 @@ namespace mongo {
NamespaceString s(nsToDrop);
assert( s.db == cc().database()->name );
if( s.isSystem() ) {
- if( s.coll == "system.profile" )
+ if( s.coll == "system.profile" )
uassert( 10087 , "turn off profiling before dropping system.profile collection", cc().database()->profile == 0 );
else
uasserted( 12502, "can't drop system ns" );
@@ -746,24 +752,24 @@ namespace mongo {
BSONObj cond = BSON( "name" << nsToDrop ); // { name: "colltodropname" }
string system_namespaces = cc().database()->name + ".system.namespaces";
/*int n = */ deleteObjects(system_namespaces.c_str(), cond, false, false, true);
- // no check of return code as this ns won't exist for some of the new storage engines
+ // no check of return code as this ns won't exist for some of the new storage engines
}
// free extents
if( !d->firstExtent.isNull() ) {
string s = cc().database()->name + ".$freelist";
NamespaceDetails *freeExtents = nsdetails(s.c_str());
- if( freeExtents == 0 ) {
+ if( freeExtents == 0 ) {
string err;
_userCreateNS(s.c_str(), BSONObj(), err, 0);
freeExtents = nsdetails(s.c_str());
massert( 10361 , "can't create .$freelist", freeExtents);
}
- if( freeExtents->firstExtent.isNull() ) {
+ if( freeExtents->firstExtent.isNull() ) {
freeExtents->firstExtent.writing() = d->firstExtent;
freeExtents->lastExtent.writing() = d->lastExtent;
}
- else {
+ else {
DiskLoc a = freeExtents->firstExtent;
assert( a.ext()->xprev.isNull() );
getDur().writingDiskLoc( a.ext()->xprev ) = d->lastExtent;
@@ -787,7 +793,7 @@ namespace mongo {
BackgroundOperation::assertNoBgOpInProgForNs(name.c_str());
if ( d->nIndexes != 0 ) {
- try {
+ try {
assert( dropIndexes(d, name.c_str(), "*", errmsg, result, true) );
}
catch( DBException& e ) {
@@ -802,9 +808,9 @@ namespace mongo {
result.append("ns", name.c_str());
ClientCursor::invalidate(name.c_str());
Top::global.collectionDropped( name );
- dropNS(name);
+ dropNS(name);
}
-
+
int nUnindexes = 0;
/* unindex all keys in index for this record. */
@@ -845,15 +851,14 @@ namespace mongo {
_unindexRecord(d->idx(i), obj, dl, !noWarn);
if( d->backgroundIndexBuildInProgress ) {
// always pass nowarn here, as this one may be missing for valid reasons as we are concurrently building it
- _unindexRecord(d->idx(n), obj, dl, false);
+ _unindexRecord(d->idx(n), obj, dl, false);
}
}
- /* deletes a record, just the pdfile portion -- no index cleanup, no cursor cleanup, etc.
+ /* deletes a record, just the pdfile portion -- no index cleanup, no cursor cleanup, etc.
caller must check if capped
*/
- void DataFileMgr::_deleteRecord(NamespaceDetails *d, const char *ns, Record *todelete, const DiskLoc& dl)
- {
+ void DataFileMgr::_deleteRecord(NamespaceDetails *d, const char *ns, Record *todelete, const DiskLoc& dl) {
/* remove ourself from the record next/prev chain */
{
if ( todelete->prevOfs != DiskLoc::NullOfs )
@@ -906,8 +911,7 @@ namespace mongo {
}
}
- void DataFileMgr::deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK, bool noWarn)
- {
+ void DataFileMgr::deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK, bool noWarn) {
dassert( todelete == dl.rec() );
NamespaceDetails* d = nsdetails(ns);
@@ -934,8 +938,7 @@ namespace mongo {
NamespaceDetails *d,
NamespaceDetailsTransient *nsdt,
Record *toupdate, const DiskLoc& dl,
- const char *_buf, int _len, OpDebug& debug, bool god)
- {
+ const char *_buf, int _len, OpDebug& debug, bool god) {
StringBuilder& ss = debug.str;
dassert( toupdate == dl.rec() );
@@ -945,7 +948,7 @@ namespace mongo {
DEV assert( objNew.objdata() == _buf );
if( !objNew.hasElement("_id") && objOld.hasElement("_id") ) {
- /* add back the old _id value if the update removes it. Note this implementation is slow
+ /* add back the old _id value if the update removes it. Note this implementation is slow
(copies entire object multiple times), but this shouldn't happen often, so going for simple
code, not speed.
*/
@@ -957,7 +960,7 @@ namespace mongo {
objNew = b.obj();
}
- /* duplicate key check. we descend the btree twice - once for this check, and once for the actual inserts, further
+ /* duplicate key check. we descend the btree twice - once for this check, and once for the actual inserts, further
below. that is suboptimal, but it's pretty complicated to do it the other way without rollbacks...
*/
vector<IndexChanges> changes;
@@ -1002,8 +1005,8 @@ namespace mongo {
try {
/* we did the dupCheck() above. so we don't have to worry about it here. */
idx.head.btree()->bt_insert(
- idx.head,
- dl, *changes[x].added[i], ordering, /*dupsAllowed*/true, idx);
+ idx.head,
+ dl, *changes[x].added[i], ordering, /*dupsAllowed*/true, idx);
}
catch (AssertionException& e) {
ss << " exception update index ";
@@ -1015,7 +1018,7 @@ namespace mongo {
ss << '\n' << keyUpdates << " key updates ";
}
- // update in place
+ // update in place
int sz = objNew.objsize();
memcpy(getDur().writingPtr(toupdate->data, sz), objNew.objdata(), sz);
return dl;
@@ -1027,18 +1030,18 @@ namespace mongo {
int y = (int) (lastExtentLen < 4000000 ? lastExtentLen * 4.0 : lastExtentLen * 1.2);
int sz = y > x ? y : x;
- if ( sz < lastExtentLen ){
+ if ( sz < lastExtentLen ) {
// this means there was an int overflow
// so we should turn it into maxSize
sz = Extent::maxSize();
}
- else if ( sz > Extent::maxSize() ){
+ else if ( sz > Extent::maxSize() ) {
sz = Extent::maxSize();
}
-
+
sz = ((int)sz) & 0xffffff00;
assert( sz > len );
-
+
return sz;
}
@@ -1051,7 +1054,7 @@ namespace mongo {
Ordering ordering = Ordering::make(order);
int n = 0;
for ( BSONObjSetDefaultOrder::iterator i=keys.begin(); i != keys.end(); i++ ) {
- if( ++n == 2 ) {
+ if( ++n == 2 ) {
d->setIndexIsMultikey(idxNo);
}
assert( !recordLoc.isNull() );
@@ -1060,7 +1063,7 @@ namespace mongo {
*i, ordering, dupsAllowed, idx);
}
catch (AssertionException& e) {
- if( e.getCode() == 10287 && idxNo == d->nIndexes ) {
+ if( e.getCode() == 10287 && idxNo == d->nIndexes ) {
DEV log() << "info: caught key already in index on bg indexing (ok)" << endl;
continue;
}
@@ -1073,8 +1076,7 @@ namespace mongo {
}
}
- void testSorting()
- {
+ void testSorting() {
BSONObjBuilder b;
b.appendNull("");
BSONObj x = b.obj();
@@ -1088,9 +1090,9 @@ namespace mongo {
sorter.add(x, DiskLoc(3,77));
sorter.sort();
-
+
auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
- while( i->more() ) {
+ while( i->more() ) {
BSONObjExternalSorter::Data d = i->next();
/*cout << d.second.toString() << endl;
cout << d.first.objsize() << endl;
@@ -1112,7 +1114,7 @@ namespace mongo {
BSONObj order = idx.keyPattern();
getDur().writingDiskLoc(idx.head).Null();
-
+
if ( logLevel > 1 ) printMemInfo( "before index start" );
/* get and sort all the keys ----- */
@@ -1136,11 +1138,11 @@ namespace mongo {
sorter.add(*i, loc);
nkeys++;
}
-
+
c->advance();
n++;
pm.hit();
- if ( logLevel > 1 && n % 10000 == 0 ){
+ if ( logLevel > 1 && n % 10000 == 0 ) {
printMemInfo( "\t iterating objects" );
}
@@ -1150,37 +1152,37 @@ namespace mongo {
if ( logLevel > 1 ) printMemInfo( "before final sort" );
sorter.sort();
if ( logLevel > 1 ) printMemInfo( "after final sort" );
-
+
log(t.seconds() > 5 ? 0 : 1) << "\t external sort used : " << sorter.numFiles() << " files " << " in " << t.seconds() << " secs" << endl;
list<DiskLoc> dupsToDrop;
- /* build index --- */
+ /* build index --- */
{
BtreeBuilder btBuilder(dupsAllowed, idx);
BSONObj keyLast;
auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
assert( pm == op->setMessage( "index: (2/3) btree bottom up" , nkeys , 10 ) );
- while( i->more() ) {
+ while( i->more() ) {
RARELY killCurrentOp.checkForInterrupt();
BSONObjExternalSorter::Data d = i->next();
- try {
+ try {
btBuilder.addKey(d.first, d.second);
}
- catch( AssertionException& e ) {
- if ( dupsAllowed ){
+ catch( AssertionException& e ) {
+ if ( dupsAllowed ) {
// unknown exception??
throw;
}
-
+
if( e.interrupted() )
throw;
if ( ! dropDups )
throw;
- /* we could queue these on disk, but normally there are very few dups, so instead we
+ /* we could queue these on disk, but normally there are very few dups, so instead we
keep in ram and have a limit.
*/
dupsToDrop.push_back(d.second);
@@ -1192,9 +1194,9 @@ namespace mongo {
op->setMessage( "index: (3/3) btree-middle" );
log(t.seconds() > 10 ? 0 : 1 ) << "\t done building bottom layer, going to commit" << endl;
btBuilder.commit();
- wassert( btBuilder.getn() == nkeys || dropDups );
+ wassert( btBuilder.getn() == nkeys || dropDups );
}
-
+
log(1) << "\t fastBuildIndex dupsToDrop:" << dupsToDrop.size() << endl;
for( list<DiskLoc>::iterator i = dupsToDrop.begin(); i != dupsToDrop.end(); i++ )
@@ -1203,7 +1205,7 @@ namespace mongo {
return n;
}
- class BackgroundIndexBuildJob : public BackgroundOperation {
+ class BackgroundIndexBuildJob : public BackgroundOperation {
unsigned long long addExistingToIndex(const char *ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
bool dupsAllowed = !idx.unique();
@@ -1221,10 +1223,11 @@ namespace mongo {
while ( cc->ok() ) {
BSONObj js = cc->current();
- try {
+ try {
_indexRecord(d, idxNo, js, cc->currLoc(), dupsAllowed);
cc->advance();
- } catch( AssertionException& e ) {
+ }
+ catch( AssertionException& e ) {
if( e.interrupted() )
throw;
@@ -1235,7 +1238,7 @@ namespace mongo {
theDataFileMgr.deleteRecord( ns, toDelete.rec(), toDelete, false, true );
if( ClientCursor::find(id, false) == 0 ) {
cc.release();
- if( !ok ) {
+ if( !ok ) {
/* we were already at the end. normal. */
}
else {
@@ -1243,7 +1246,8 @@ namespace mongo {
}
break;
}
- } else {
+ }
+ else {
log() << "background addExistingToIndex exception " << e.what() << endl;
throw;
}
@@ -1261,7 +1265,7 @@ namespace mongo {
return n;
}
- /* we do set a flag in the namespace for quick checking, but this is our authoritative info -
+ /* we do set a flag in the namespace for quick checking, but this is our authoritative info -
that way on a crash/restart, we don't think we are still building one. */
set<NamespaceDetails*> bgJobsInProgress;
@@ -1282,16 +1286,16 @@ namespace mongo {
public:
BackgroundIndexBuildJob(const char *ns) : BackgroundOperation(ns) { }
- unsigned long long go(string ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
+ unsigned long long go(string ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
unsigned long long n = 0;
prep(ns.c_str(), d);
assert( idxNo == d->nIndexes );
- try {
+ try {
idx.head = BtreeBucket::addBucket(idx);
n = addExistingToIndex(ns.c_str(), d, idx, idxNo);
}
- catch(...) {
+ catch(...) {
if( cc().database() && nsdetails(ns.c_str()) == d ) {
assert( idxNo == d->nIndexes );
done(ns.c_str(), d);
@@ -1308,10 +1312,10 @@ namespace mongo {
};
// throws DBException
- static void buildAnIndex(string ns, NamespaceDetails *d, IndexDetails& idx, int idxNo, bool background) {
+ static void buildAnIndex(string ns, NamespaceDetails *d, IndexDetails& idx, int idxNo, bool background) {
tlog() << "building new index on " << idx.keyPattern() << " for " << ns << ( background ? " background" : "" ) << endl;
Timer t;
- unsigned long long n;
+ unsigned long long n;
if( background ) {
log(2) << "buildAnIndex: background=true\n";
@@ -1319,13 +1323,13 @@ namespace mongo {
assert( !BackgroundOperation::inProgForNs(ns.c_str()) ); // should have been checked earlier, better not be...
if( inDBRepair || !background ) {
- n = fastBuildIndex(ns.c_str(), d, idx, idxNo);
- assert( !idx.head.isNull() );
- }
- else {
+ n = fastBuildIndex(ns.c_str(), d, idx, idxNo);
+ assert( !idx.head.isNull() );
+ }
+ else {
BackgroundIndexBuildJob j(ns.c_str());
n = j.go(ns, d, idx, idxNo);
- }
+ }
tlog() << "done for " << n << " records " << t.millis() / 1000.0 << "secs" << endl;
}
@@ -1333,20 +1337,20 @@ namespace mongo {
static void indexRecord(NamespaceDetails *d, BSONObj obj, DiskLoc loc) {
int n = d->nIndexesBeingBuilt();
for ( int i = 0; i < n; i++ ) {
- try {
+ try {
bool unique = d->idx(i).unique();
_indexRecord(d, i, obj, loc, /*dupsAllowed*/!unique);
}
- catch( DBException& ) {
+ catch( DBException& ) {
/* try to roll back previously added index entries
note <= i (not < i) is important here as the index we were just attempted
may be multikey and require some cleanup.
*/
- for( int j = 0; j <= i; j++ ) {
+ for( int j = 0; j <= i; j++ ) {
try {
_unindexRecord(d->idx(j), obj, loc, false);
}
- catch(...) {
+ catch(...) {
log(3) << "unindex fails on rollback after unique failure\n";
}
}
@@ -1385,7 +1389,7 @@ namespace mongo {
}
#pragma pack(1)
- struct IDToInsert_ {
+ struct IDToInsert_ {
char type;
char _id[4];
OID oid;
@@ -1399,13 +1403,13 @@ namespace mongo {
IDToInsert() : BSONElement( ( char * )( &idToInsert_ ) ) {}
} idToInsert;
#pragma pack()
-
+
void DataFileMgr::insertAndLog( const char *ns, const BSONObj &o, bool god ) {
BSONObj tmp = o;
insertWithObjMod( ns, tmp, god );
logOp( "i", ns, tmp );
}
-
+
DiskLoc DataFileMgr::insertWithObjMod(const char *ns, BSONObj &o, bool god) {
DiskLoc loc = insert( ns, o.objdata(), o.objsize(), god );
if ( !loc.isNull() )
@@ -1422,7 +1426,7 @@ namespace mongo {
// We are now doing two btree scans for all unique indexes (one here, and one when we've
// written the record to the collection. This could be made more efficient inserting
// dummy data here, keeping pointers to the btree nodes holding the dummy data and then
- // updating the dummy data with the DiskLoc of the real record.
+ // updating the dummy data with the DiskLoc of the real record.
void checkNoIndexConflicts( NamespaceDetails *d, const BSONObj &obj ) {
for ( int idxNo = 0; idxNo < d->nIndexes; idxNo++ ) {
if( d->idx(idxNo).unique() ) {
@@ -1432,13 +1436,13 @@ namespace mongo {
BSONObj order = idx.keyPattern();
for ( BSONObjSetDefaultOrder::iterator i=keys.begin(); i != keys.end(); i++ ) {
uassert( 12582, "duplicate key insert for unique index of capped collection",
- idx.head.btree()->findSingle(idx, idx.head, *i ).isNull() );
+ idx.head.btree()->findSingle(idx, idx.head, *i ).isNull() );
}
}
- }
+ }
}
- /* note: if god==true, you may pass in obuf of NULL and then populate the returned DiskLoc
+ /* note: if god==true, you may pass in obuf of NULL and then populate the returned DiskLoc
after the call -- that will prevent a double buffer copy in some cases (btree.cpp).
*/
DiskLoc DataFileMgr::insert(const char *ns, const void *obuf, int len, bool god, const BSONElement &writeId, bool mayAddIndex) {
@@ -1489,17 +1493,17 @@ namespace mongo {
if( !prepareToBuildIndex(io, god, tabletoidxns, tableToIndex, fixedIndexObject ) )
return DiskLoc();
- if ( ! fixedIndexObject.isEmpty() ){
+ if ( ! fixedIndexObject.isEmpty() ) {
obuf = fixedIndexObject.objdata();
len = fixedIndexObject.objsize();
}
-
+
}
const BSONElement *newId = &writeId;
int addID = 0;
if( !god ) {
- /* Check if we have an _id field. If we don't, we'll add it.
+ /* Check if we have an _id field. If we don't, we'll add it.
Note that btree buckets which we insert aren't BSONObj's, but in that case god==true.
*/
BSONObj io((const char *) obuf);
@@ -1514,7 +1518,7 @@ namespace mongo {
}
len += newId->size();
}
-
+
BSONElementManipulator::lookForTimestamps( io );
}
@@ -1527,13 +1531,13 @@ namespace mongo {
*getDur().writing(&d->paddingFactor) = 1.0;
lenWHdr = len + Record::HeaderSize;
}
-
+
// If the collection is capped, check if the new object will violate a unique index
// constraint before allocating space.
if ( d->nIndexes && d->capped && !god ) {
checkNoIndexConflicts( d, BSONObj( reinterpret_cast<const char *>( obuf ) ) );
}
-
+
DiskLoc loc = d->alloc(ns, lenWHdr, extentLoc);
if ( loc.isNull() ) {
// out of space
@@ -1541,9 +1545,9 @@ namespace mongo {
log(1) << "allocating new extent for " << ns << " padding:" << d->paddingFactor << " lenWHdr: " << lenWHdr << endl;
cc().database()->allocExtent(ns, Extent::followupSize(lenWHdr, d->lastExtentSize), false);
loc = d->alloc(ns, lenWHdr, extentLoc);
- if ( loc.isNull() ){
+ if ( loc.isNull() ) {
log() << "WARNING: alloc() failed after allocating new extent. lenWHdr: " << lenWHdr << " last extent size:" << d->lastExtentSize << "; trying again\n";
- for ( int zzz=0; zzz<10 && lenWHdr > d->lastExtentSize; zzz++ ){
+ for ( int zzz=0; zzz<10 && lenWHdr > d->lastExtentSize; zzz++ ) {
log() << "try #" << zzz << endl;
cc().database()->allocExtent(ns, Extent::followupSize(len, d->lastExtentSize), false);
loc = d->alloc(ns, lenWHdr, extentLoc);
@@ -1563,7 +1567,7 @@ namespace mongo {
{
assert( r->lengthWithHeaders >= lenWHdr );
r = (Record*) getDur().writingPtr(r, lenWHdr);
- if( addID ) {
+ if( addID ) {
/* a little effort was made here to avoid a double copy when we add an ID */
((int&)*r->data) = *((int*) obuf) + newId->size();
memcpy(r->data+4, newId->rawdata(), newId->size());
@@ -1601,14 +1605,14 @@ namespace mongo {
// we don't bother clearing those stats for the god tables - also god is true when adding a btree bucket
if ( !god )
NamespaceDetailsTransient::get_w( ns ).notifyOfWriteOp();
-
+
if ( tableToIndex ) {
uassert( 13143 , "can't create index on system.indexes" , tabletoidxns.find( ".system.indexes" ) == string::npos );
BSONObj info = loc.obj();
bool background = info["background"].trueValue();
- if( background && cc().isSyncThread() ) {
- /* don't do background indexing on slaves. there are nuances. this could be added later
+ if( background && cc().isSyncThread() ) {
+ /* don't do background indexing on slaves. there are nuances. this could be added later
but requires more code.
*/
log() << "info: indexing in foreground on this replica; was a background index build on the primary" << endl;
@@ -1620,7 +1624,8 @@ namespace mongo {
getDur().writingDiskLoc(idx.info) = loc;
try {
buildAnIndex(tabletoidxns, tableToIndex, idx, idxNo, background);
- } catch( DBException& e ) {
+ }
+ catch( DBException& e ) {
// save our error msg string as an exception or dropIndexes will overwrite our message
LastError *le = lastError.get();
int savecode = 0;
@@ -1642,7 +1647,7 @@ namespace mongo {
if( !ok ) {
log() << "failed to drop index after a unique key error building it: " << errmsg << ' ' << tabletoidxns << ' ' << name << endl;
}
-
+
assert( le && !saveerrmsg.empty() );
raiseError(savecode,saveerrmsg.c_str());
throw;
@@ -1651,11 +1656,11 @@ namespace mongo {
/* add this record to our indexes */
if ( d->nIndexes ) {
- try {
+ try {
BSONObj obj(r->data);
indexRecord(d, obj, loc);
- }
- catch( AssertionException& e ) {
+ }
+ catch( AssertionException& e ) {
// should be a dup key error on _id index
if( tableToIndex || d->capped ) {
massert( 12583, "unexpected index insertion failure on capped collection", !d->capped );
@@ -1664,7 +1669,7 @@ namespace mongo {
uassert_nothrow(s.c_str());
error() << s << endl;
}
- else {
+ else {
// normal case -- we can roll back
_deleteRecord(d, ns, r, loc);
throw;
@@ -1672,7 +1677,7 @@ namespace mongo {
}
}
- // out() << " inserted at loc:" << hex << loc.getOfs() << " lenwhdr:" << hex << lenWHdr << dec << ' ' << ns << endl;
+ // out() << " inserted at loc:" << hex << loc.getOfs() << " lenwhdr:" << hex << lenWHdr << dec << ' ' << ns << endl;
return loc;
}
@@ -1728,7 +1733,7 @@ namespace mongo {
namespace mongo {
- void dropAllDatabasesExceptLocal() {
+ void dropAllDatabasesExceptLocal() {
writelock lk("");
vector<string> n;
@@ -1762,13 +1767,14 @@ namespace mongo {
void boostRenameWrapper( const Path &from, const Path &to ) {
try {
boost::filesystem::rename( from, to );
- } catch ( const boost::filesystem::filesystem_error & ) {
+ }
+ catch ( const boost::filesystem::filesystem_error & ) {
// boost rename doesn't work across partitions
boost::filesystem::copy_file( from, to);
boost::filesystem::remove( from );
}
}
-
+
// back up original database files to 'temp' dir
void _renameForBackup( const char *database, const Path &reservedPath ) {
Path newPath( reservedPath );
@@ -1826,7 +1832,8 @@ namespace mongo {
ss << prefix << "_repairDatabase_" << i++;
reservedPath = repairPath / ss.str();
BOOST_CHECK_EXCEPTION( exists = boost::filesystem::exists( reservedPath ) );
- } while ( exists );
+ }
+ while ( exists );
return reservedPath;
}
@@ -1878,7 +1885,7 @@ namespace mongo {
stringstream ss;
ss << "localhost:" << cmdLine.port;
string localhost = ss.str();
-
+
problem() << "repairDatabase " << dbName << endl;
assert( cc().database()->name == dbName );
assert( cc().database()->path == dbpath );
@@ -1901,14 +1908,15 @@ namespace mongo {
"backup" : "$tmp" );
BOOST_CHECK_EXCEPTION( boost::filesystem::create_directory( reservedPath ) );
string reservedPathString = reservedPath.native_directory_string();
-
+
bool res;
- { // clone to temp location, which effectively does repair
+ {
+ // clone to temp location, which effectively does repair
Client::Context ctx( dbName, reservedPathString );
assert( ctx.justCreated() );
-
- res = cloneFrom(localhost.c_str(), errmsg, dbName,
- /*logForReplication=*/false, /*slaveok*/false, /*replauth*/false, /*snapshot*/false);
+
+ res = cloneFrom(localhost.c_str(), errmsg, dbName,
+ /*logForReplication=*/false, /*slaveok*/false, /*replauth*/false, /*snapshot*/false);
Database::closeDatabase( dbName, reservedPathString.c_str() );
}
@@ -1924,7 +1932,8 @@ namespace mongo {
if ( backupOriginalFiles ) {
_renameForBackup( dbName, reservedPath );
- } else {
+ }
+ else {
_deleteDataFiles( dbName );
BOOST_CHECK_EXCEPTION( boost::filesystem::create_directory( Path( dbpath ) / dbName ) );
}
@@ -1960,7 +1969,7 @@ namespace mongo {
q = p / ss.str();
BOOST_CHECK_EXCEPTION( ok = fo.apply(q) );
if ( ok ) {
- if ( extra != 10 ){
+ if ( extra != 10 ) {
log(1) << fo.op() << " file " << q.string() << endl;
log() << " _applyOpToDataFiles() warning: extra == " << extra << endl;
}
@@ -1972,20 +1981,20 @@ namespace mongo {
}
NamespaceDetails* nsdetails_notinline(const char *ns) { return nsdetails(ns); }
-
- bool DatabaseHolder::closeAll( const string& path , BSONObjBuilder& result , bool force ){
+
+ bool DatabaseHolder::closeAll( const string& path , BSONObjBuilder& result , bool force ) {
log() << "DatabaseHolder::closeAll path:" << path << endl;
dbMutex.assertWriteLocked();
-
+
map<string,Database*>& m = _paths[path];
_size -= m.size();
-
+
set< string > dbs;
for ( map<string,Database*>::iterator i = m.begin(); i != m.end(); i++ ) {
wassert( i->second->path == path );
dbs.insert( i->first );
}
-
+
currentClient.get()->getContext()->clear();
BSONObjBuilder bb( result.subarrayStart( "dbs" ) );
@@ -2014,16 +2023,16 @@ namespace mongo {
return true;
}
- bool isValidNS( const StringData& ns ){
+ bool isValidNS( const StringData& ns ) {
// TODO: should check for invalid characters
const char * x = strchr( ns.data() , '.' );
if ( ! x )
return false;
-
+
x++;
return *x > 0;
}
-
+
} // namespace mongo
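The extent-growth arithmetic in followupSize() above, extracted as a sketch (initialSize() is a stand-in here; the 0x7ff00000 cap mirrors MongoDataFile::maxSize() for a 64-bit, non-smallfiles build):

#include <cassert>

int maxExtentSize()      { return 0x7ff00000; }    // stand-in for Extent::maxSize()
int initialSize(int len) { return len * 2; }       // stand-in for the real initial policy

int followupSize(int len, int lastExtentLen) {
    int x = initialSize(len);
    // quadruple small extents; grow large ones by 20%
    int y = (int)(lastExtentLen < 4000000 ? lastExtentLen * 4.0 : lastExtentLen * 1.2);
    int sz = y > x ? y : x;
    if ( sz < lastExtentLen )          // y overflowed int: fall back to the cap
        sz = maxExtentSize();
    else if ( sz > maxExtentSize() )
        sz = maxExtentSize();
    sz = sz & 0xffffff00;              // round down to a 256-byte boundary
    assert( sz > len );
    return sz;
}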
diff --git a/db/pdfile.h b/db/pdfile.h
index ed8406c0ee7..29bd311d6b3 100644
--- a/db/pdfile.h
+++ b/db/pdfile.h
@@ -46,9 +46,9 @@ namespace mongo {
/* low level - only drops this ns */
void dropNS(const string& dropNs);
-
+
/* deletes this ns, indexes and cursors */
- void dropCollection( const string &name, string &errmsg, BSONObjBuilder &result );
+ void dropCollection( const string &name, string &errmsg, BSONObjBuilder &result );
bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForReplication, bool *deferIdIndex = 0);
shared_ptr<Cursor> findTableScan(const char *ns, const BSONObj& order, const DiskLoc &startLoc=DiskLoc());
@@ -66,7 +66,7 @@ namespace mongo {
MongoDataFile(int fn) : _mb(0), fileNo(fn) { }
void open(const char *filename, int requestedDataSize = 0, bool preallocateOnly = false);
- /* allocate a new extent from this datafile.
+ /* allocate a new extent from this datafile.
@param capped - true if capped collection
@param loops is our recursion check variable - you want to pass in zero
*/
@@ -78,10 +78,10 @@ namespace mongo {
/* return max size an extent may be */
static int maxSize();
-
+
/** fsync */
void flush( bool sync );
-
+
private:
void badOfs(int) const;
void badOfs2(int) const;
@@ -91,7 +91,7 @@ namespace mongo {
Extent* _getExtent(DiskLoc loc);
Record* recordAt(DiskLoc dl);
Record* makeRecord(DiskLoc dl, int size);
- void grow(DiskLoc dl, int size);
+ void grow(DiskLoc dl, int size);
char* p() { return (char *) _mb; }
DataFileHeader* header() { return (DataFileHeader*) _mb; }
@@ -118,7 +118,7 @@ namespace mongo {
Record *toupdate, const DiskLoc& dl,
const char *buf, int len, OpDebug& debug, bool god=false);
- // The object o may be updated if modified on insert.
+ // The object o may be updated if modified on insert.
void insertAndLog( const char *ns, const BSONObj &o, bool god = false );
        /** @param obj both an in and out param -- insert can sometimes modify an object (such as add _id). */
@@ -202,7 +202,7 @@ namespace mongo {
DiskLoc getNext(const DiskLoc& myLoc);
DiskLoc getPrev(const DiskLoc& myLoc);
- struct NP {
+ struct NP {
int nextOfs;
int prevOfs;
};
@@ -221,10 +221,10 @@ namespace mongo {
DiskLoc myLoc;
DiskLoc xnext, xprev; /* next/prev extent for this namespace */
- /* which namespace this extent is for. this is just for troubleshooting really
+ /* which namespace this extent is for. this is just for troubleshooting really
           and won't even be correct if the collection is renamed!
*/
- Namespace nsDiagnostic;
+ Namespace nsDiagnostic;
int length; /* size of the extent, including these fields */
DiskLoc firstRecord;
@@ -267,24 +267,24 @@ namespace mongo {
Extent* getNextExtent() { return xnext.isNull() ? 0 : DataFileMgr::getExtent(xnext); }
Extent* getPrevExtent() { return xprev.isNull() ? 0 : DataFileMgr::getExtent(xprev); }
-
+
static int maxSize();
/**
         * @param len length of record we need
* @param lastRecord size of last extent which is a factor in next extent size
*/
static int followupSize(int len, int lastExtentLen);
-
+
/**
         * @param len length of record we need
*/
static int initialSize(int len);
- struct FL {
+ struct FL {
DiskLoc firstRecord;
DiskLoc lastRecord;
};
- /** often we want to update just the firstRecord and lastRecord fields.
+ /** often we want to update just the firstRecord and lastRecord fields.
this helper is for that -- for use with getDur().writing() method
*/
FL* fl() { return (FL*) &firstRecord; }
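// A sketch of how the FL overlay above pairs with the journaling API:
// firstRecord and lastRecord are adjacent, so fl() exposes exactly those 16
// bytes and getDur().writing() can declare intent-to-write on them alone
// rather than on the whole Extent. The function and variable names here are
// hypothetical; getDur()/writing() are as used elsewhere in this tree.
inline void setFirstAndLast( Extent* e, const DiskLoc& newFirst, const DiskLoc& newLast ) {
    Extent::FL* fl = getDur().writing( e->fl() );   // journal just the two DiskLocs
    fl->firstRecord = newFirst;
    fl->lastRecord  = newLast;
}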
@@ -335,7 +335,7 @@ namespace mongo {
h->unusedLength = fileLength - HeaderSize - 16;
}
}
-
+
bool isEmpty() const {
return uninitialized() || ( unusedLength == fileLength - HeaderSize - 16 );
}
@@ -367,8 +367,8 @@ namespace mongo {
return (Record*) (p()+ofs);
}
- inline Record* MongoDataFile::makeRecord(DiskLoc dl, int size) {
- int ofs = dl.getOfs();
+ inline Record* MongoDataFile::makeRecord(DiskLoc dl, int size) {
+ int ofs = dl.getOfs();
if( ofs < DataFileHeader::HeaderSize ) badOfs(ofs); // will uassert - external call to keep out of the normal code path
return (Record*) (p()+ofs);
}
@@ -460,15 +460,15 @@ namespace mongo {
return cc().database()->getFile(dl.a())->recordAt(dl);
}
- BOOST_STATIC_ASSERT( 16 == sizeof(DeletedRecord) );
+ BOOST_STATIC_ASSERT( 16 == sizeof(DeletedRecord) );
- inline DeletedRecord* DataFileMgr::makeDeletedRecord(const DiskLoc& dl, int len) {
+ inline DeletedRecord* DataFileMgr::makeDeletedRecord(const DiskLoc& dl, int len) {
assert( dl.a() != -1 );
return (DeletedRecord*) cc().database()->getFile(dl.a())->makeRecord(dl, sizeof(DeletedRecord));
}
-
+
void ensureHaveIdIndex(const char *ns);
-
+
bool dropIndexes( NamespaceDetails *d, const char *ns, const char *name, string &errmsg, BSONObjBuilder &anObjBuilder, bool maydeleteIdIndex );
@@ -476,9 +476,9 @@ namespace mongo {
     * @return true if ns is 'normal'. $ is used for collections holding index data, which do not contain BSON objects in their records.
* special case for the local.oplog.$main ns -- naming it as such was a mistake.
*/
- inline bool isANormalNSName( const char* ns ){
+ inline bool isANormalNSName( const char* ns ) {
if ( strchr( ns , '$' ) == 0 )
- return true;
+ return true;
return strcmp( ns, "local.oplog.$main" ) == 0;
}
diff --git a/db/projection.cpp b/db/projection.cpp
index 4b1fa821f88..3dcfef73d08 100644
--- a/db/projection.cpp
+++ b/db/projection.cpp
@@ -21,30 +21,31 @@
namespace mongo {
- void Projection::init( const BSONObj& o ){
+ void Projection::init( const BSONObj& o ) {
massert( 10371 , "can only add to Projection once", _source.isEmpty());
_source = o;
BSONObjIterator i( o );
int true_false = -1;
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
if ( ! e.isNumber() )
_hasNonSimple = true;
- if (e.type() == Object){
+ if (e.type() == Object) {
BSONObj obj = e.embeddedObject();
BSONElement e2 = obj.firstElement();
- if ( strcmp(e2.fieldName(), "$slice") == 0 ){
- if (e2.isNumber()){
+ if ( strcmp(e2.fieldName(), "$slice") == 0 ) {
+ if (e2.isNumber()) {
int i = e2.numberInt();
if (i < 0)
add(e.fieldName(), i, -i); // limit is now positive
else
add(e.fieldName(), 0, i);
- } else if (e2.type() == Array) {
+ }
+ else if (e2.type() == Array) {
BSONObj arr = e2.embeddedObject();
uassert(13099, "$slice array wrong size", arr.nFields() == 2 );
@@ -54,43 +55,47 @@ namespace mongo {
uassert(13100, "$slice limit must be positive", limit > 0 );
add(e.fieldName(), skip, limit);
- } else {
+ }
+ else {
uassert(13098, "$slice only supports numbers and [skip, limit] arrays", false);
}
- } else {
+ }
+ else {
uassert(13097, string("Unsupported projection option: ") + obj.firstElement().fieldName(), false);
}
- } else if (!strcmp(e.fieldName(), "_id") && !e.trueValue()){
+ }
+ else if (!strcmp(e.fieldName(), "_id") && !e.trueValue()) {
_includeID = false;
- } else {
+ }
+ else {
add (e.fieldName(), e.trueValue());
// validate input
- if (true_false == -1){
+ if (true_false == -1) {
true_false = e.trueValue();
_include = !e.trueValue();
}
- else{
- uassert( 10053 , "You cannot currently mix including and excluding fields. Contact us if this is an issue." ,
+ else {
+ uassert( 10053 , "You cannot currently mix including and excluding fields. Contact us if this is an issue." ,
(bool)true_false == e.trueValue() );
}
}
}
}
- void Projection::add(const string& field, bool include){
- if (field.empty()){ // this is the field the user referred to
+ void Projection::add(const string& field, bool include) {
+ if (field.empty()) { // this is the field the user referred to
_include = include;
- }
+ }
else {
_include = !include;
const size_t dot = field.find('.');
const string subfield = field.substr(0,dot);
- const string rest = (dot == string::npos ? "" : field.substr(dot+1,string::npos));
+ const string rest = (dot == string::npos ? "" : field.substr(dot+1,string::npos));
boost::shared_ptr<Projection>& fm = _fields[subfield];
if (!fm)
@@ -100,13 +105,14 @@ namespace mongo {
}
}
- void Projection::add(const string& field, int skip, int limit){
+ void Projection::add(const string& field, int skip, int limit) {
_special = true; // can't include or exclude whole object
- if (field.empty()){ // this is the field the user referred to
+ if (field.empty()) { // this is the field the user referred to
_skip = skip;
_limit = limit;
- } else {
+ }
+ else {
const size_t dot = field.find('.');
const string subfield = field.substr(0,dot);
const string rest = (dot == string::npos ? "" : field.substr(dot+1,string::npos));
@@ -118,12 +124,12 @@ namespace mongo {
fm->add(rest, skip, limit);
}
}
-
+
void Projection::transform( const BSONObj& in , BSONObjBuilder& b ) const {
BSONObjIterator i(in);
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
- if ( mongoutils::str::equals( "_id" , e.fieldName() ) ){
+ if ( mongoutils::str::equals( "_id" , e.fieldName() ) ) {
if ( _includeID )
b.append( e );
}
@@ -138,77 +144,77 @@ namespace mongo {
transform( in , b );
return b.obj();
}
-
+
    // b will be the value part of an array-typed BSONElement
void Projection::appendArray( BSONObjBuilder& b , const BSONObj& a , bool nested) const {
int skip = nested ? 0 : _skip;
int limit = nested ? -1 : _limit;
- if (skip < 0){
+ if (skip < 0) {
skip = max(0, skip + a.nFields());
}
int i=0;
BSONObjIterator it(a);
- while (it.more()){
+ while (it.more()) {
BSONElement e = it.next();
- if (skip){
+ if (skip) {
skip--;
continue;
}
- if (limit != -1 && (limit-- == 0)){
+ if (limit != -1 && (limit-- == 0)) {
break;
}
- switch(e.type()){
- case Array:{
- BSONObjBuilder subb;
- appendArray(subb , e.embeddedObject(), true);
- b.appendArray(b.numStr(i++), subb.obj());
- break;
- }
- case Object:{
- BSONObjBuilder subb;
- BSONObjIterator jt(e.embeddedObject());
- while (jt.more()){
- append(subb , jt.next());
- }
- b.append(b.numStr(i++), subb.obj());
- break;
+ switch(e.type()) {
+ case Array: {
+ BSONObjBuilder subb;
+ appendArray(subb , e.embeddedObject(), true);
+ b.appendArray(b.numStr(i++), subb.obj());
+ break;
+ }
+ case Object: {
+ BSONObjBuilder subb;
+ BSONObjIterator jt(e.embeddedObject());
+ while (jt.more()) {
+ append(subb , jt.next());
}
- default:
- if (_include)
- b.appendAs(e, b.numStr(i++));
+ b.append(b.numStr(i++), subb.obj());
+ break;
+ }
+ default:
+ if (_include)
+ b.appendAs(e, b.numStr(i++));
}
}
}
void Projection::append( BSONObjBuilder& b , const BSONElement& e ) const {
FieldMap::const_iterator field = _fields.find( e.fieldName() );
-
- if (field == _fields.end()){
+
+ if (field == _fields.end()) {
if (_include)
b.append(e);
- }
+ }
else {
Projection& subfm = *field->second;
-
- if ((subfm._fields.empty() && !subfm._special) || !(e.type()==Object || e.type()==Array) ){
+
+ if ((subfm._fields.empty() && !subfm._special) || !(e.type()==Object || e.type()==Array) ) {
if (subfm._include)
b.append(e);
}
- else if (e.type() == Object){
+ else if (e.type() == Object) {
BSONObjBuilder subb;
BSONObjIterator it(e.embeddedObject());
- while (it.more()){
+ while (it.more()) {
subfm.append(subb, it.next());
}
b.append(e.fieldName(), subb.obj());
- }
+ }
else { //Array
BSONObjBuilder subb;
subfm.appendArray(subb, e.embeddedObject());
@@ -218,36 +224,36 @@ namespace mongo {
}
Projection::KeyOnly* Projection::checkKey( const BSONObj& keyPattern ) const {
- if ( _include ){
+ if ( _include ) {
// if we default to including then we can't
// use an index because we don't know what we're missing
return 0;
}
-
+
if ( _hasNonSimple )
return 0;
-
+
if ( _includeID && keyPattern["_id"].eoo() )
return 0;
        // at this point we know it's all { x : 1 } style
-
+
auto_ptr<KeyOnly> p( new KeyOnly() );
int got = 0;
BSONObjIterator i( keyPattern );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement k = i.next();
- if ( _source[k.fieldName()].type() ){
+ if ( _source[k.fieldName()].type() ) {
- if ( strchr( k.fieldName() , '.' ) ){
+ if ( strchr( k.fieldName() , '.' ) ) {
// TODO we currently don't support dotted fields
// SERVER-2104
return 0;
}
- if ( ! _includeID && mongoutils::str::equals( k.fieldName() , "_id" ) ){
+ if ( ! _includeID && mongoutils::str::equals( k.fieldName() , "_id" ) ) {
p->addNo();
}
else {
@@ -255,7 +261,7 @@ namespace mongo {
got++;
}
}
- else if ( mongoutils::str::equals( "_id" , k.fieldName() ) && _includeID ){
+ else if ( mongoutils::str::equals( "_id" , k.fieldName() ) && _includeID ) {
p->addYes( "_id" );
}
else {
@@ -263,7 +269,7 @@ namespace mongo {
}
}
-
+
int need = _source.nFields();
if ( ! _includeID )
need--;
@@ -281,15 +287,15 @@ namespace mongo {
BSONObjIterator i(key);
unsigned n=0;
- while ( i.more() ){
+ while ( i.more() ) {
assert( n < _include.size() );
BSONElement e = i.next();
- if ( _include[n] ){
+ if ( _include[n] ) {
b.appendAs( e , _names[n] );
}
n++;
}
-
+
return b.obj();
}
}
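// Spec shapes accepted by Projection::init() above, as a quick reference
// (shell-style BSON; a sketch, not an exhaustive list):
//   { "x" : 1, "a.y" : 1 }                include x and a.y (_id kept by default)
//   { "x" : 0 }                           exclude x, keep everything else
//   { "_id" : 0, "x" : 1 }                _id alone may be excluded among includes
//   { "arr" : { "$slice" : 5 } }          first 5 array elements
//   { "arr" : { "$slice" : -5 } }         last 5 (negative limit -> skip from end)
//   { "arr" : { "$slice" : [ 20, 10 ] } } skip 20, then return 10
// Any other mix of includes and excludes fails uassert 10053. The same $slice
// spec built with the BSON macros used throughout this tree:
BSONObj sliceExample( const BSONObj& doc ) {   // 'doc': hypothetical, has an "arr" array
    Projection p;
    p.init( BSON( "arr" << BSON( "$slice" << BSON_ARRAY( 20 << 10 ) ) ) );
    return p.transform( doc );
}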
diff --git a/db/projection.h b/db/projection.h
index 427562b9747..fd3b85629c5 100644
--- a/db/projection.h
+++ b/db/projection.h
@@ -32,25 +32,25 @@ namespace mongo {
class KeyOnly {
public:
-
- KeyOnly() : _stringSize(0){}
+
+ KeyOnly() : _stringSize(0) {}
BSONObj hydrate( const BSONObj& key ) const;
-
+
void addNo() { _add( false , "" ); }
void addYes( const string& name ) { _add( true , name ); }
private:
-
- void _add( bool b , const string& name ) {
- _include.push_back( b );
- _names.push_back( name );
- _stringSize += name.size();
+
+ void _add( bool b , const string& name ) {
+ _include.push_back( b );
+ _names.push_back( name );
+ _stringSize += name.size();
}
vector<bool> _include; // one entry per field in key. true iff should be in output
        vector<string> _names; // name of each field, since the key itself doesn't store names
-
+
int _stringSize;
};
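// A sketch of the KeyOnly flow: checkKey() (declared below) returns one of
// these when the projection can be answered from the index key alone; each
// result is then rebuilt from the key via hydrate(), with no record fetch.
// 'proj' (an initialized Projection) and 'c' (a BtreeCursor*) are hypothetical.
BSONObj coveredRead( const Projection& proj, BtreeCursor* c ) {
    auto_ptr<Projection::KeyOnly> keyOnly( proj.checkKey( c->indexKeyPattern() ) );
    if ( keyOnly.get() )
        return keyOnly->hydrate( c->currKey() );   // covered: index-only read
    return c->current();                           // otherwise fetch the record
}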
@@ -60,31 +60,31 @@ namespace mongo {
_includeID(true) ,
_skip(0) ,
_limit(-1) ,
- _hasNonSimple(false){
+ _hasNonSimple(false) {
}
-
+
/**
* called once per lifetime
* e.g. { "x" : 1 , "a.y" : 1 }
*/
void init( const BSONObj& spec );
-
+
/**
* @return the spec init was called with
*/
BSONObj getSpec() const { return _source; }
-
+
/**
* transforms in according to spec
*/
BSONObj transform( const BSONObj& in ) const;
-
+
/**
* transforms in according to spec
*/
void transform( const BSONObj& in , BSONObjBuilder& b ) const;
-
+
/**
* @return if the keyPattern has all the information needed to return then
@@ -93,7 +93,7 @@ namespace mongo {
* which has to be handled above this (arrays, geo)
*/
KeyOnly* checkKey( const BSONObj& keyPattern ) const;
-
+
private:
/**
diff --git a/db/query.cpp b/db/query.cpp
index 21a862d83e4..9060428a808 100644
--- a/db/query.cpp
+++ b/db/query.cpp
@@ -67,7 +67,7 @@ namespace mongo {
_cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , c_ , qp().ns() ) );
}
return _cc->prepareToYield( _yieldData );
- }
+ }
virtual void recoverFromYield() {
if ( !ClientCursor::recoverFromYield( _yieldData ) ) {
_cc.reset();
@@ -84,9 +84,9 @@ namespace mongo {
setComplete();
return;
}
-
+
DiskLoc rloc = c_->currLoc();
-
+
if ( matcher()->matches(c_->currKey(), rloc ) ) {
if ( !c_->getsetdup(rloc) )
++count_;
@@ -96,7 +96,7 @@ namespace mongo {
_nscanned = c_->nscanned();
if ( count_ > bestCount_ )
bestCount_ = count_;
-
+
if ( count_ > 0 ) {
if ( justOne_ )
setComplete();
@@ -119,7 +119,7 @@ namespace mongo {
ClientCursor::CleanupPointer _cc;
ClientCursor::YieldData _yieldData;
};
-
+
/* ns: namespace, e.g. <database>.<collection>
pattern: the "where" clause / criteria
justOne: stop after 1 match
@@ -128,13 +128,13 @@ namespace mongo {
long long deleteObjects(const char *ns, BSONObj pattern, bool justOneOrig, bool logop, bool god, RemoveSaver * rs ) {
if( !god ) {
if ( strstr(ns, ".system.") ) {
- /* note a delete from system.indexes would corrupt the db
- if done here, as there are pointers into those objects in
+ /* note a delete from system.indexes would corrupt the db
+ if done here, as there are pointers into those objects in
NamespaceDetails.
*/
uassert(12050, "cannot delete from system namespace", legalClientSystemNS( ns , true ) );
}
- if ( strchr( ns , '$' ) ){
+ if ( strchr( ns , '$' ) ) {
log() << "cannot delete from collection with reserved $ in name: " << ns << endl;
uassert( 10100 , "cannot delete from collection with reserved $ in name", strchr(ns, '$') == 0 );
}
@@ -150,21 +150,21 @@ namespace mongo {
int best = 0;
shared_ptr< MultiCursor::CursorOp > opPtr( new DeleteOp( justOneOrig, best ) );
shared_ptr< MultiCursor > creal( new MultiCursor( ns, pattern, BSONObj(), opPtr, !god ) );
-
+
if( !creal->ok() )
return nDeleted;
-
+
shared_ptr< Cursor > cPtr = creal;
auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout, cPtr, ns) );
cc->setDoingDeletes( true );
-
+
CursorId id = cc->cursorid();
-
+
bool justOne = justOneOrig;
bool canYield = !god && !creal->matcher()->docMatcher().atomic();
do {
- if ( canYield && ! cc->yieldSometimes() ){
+ if ( canYield && ! cc->yieldSometimes() ) {
cc.release(); // has already been deleted elsewhere
// TODO should we assert or something?
break;
@@ -172,33 +172,33 @@ namespace mongo {
if ( !cc->ok() ) {
break; // if we yielded, could have hit the end
}
-
+
// this way we can avoid calling updateLocation() every time (expensive)
// as well as some other nuances handled
cc->setDoingDeletes( true );
-
+
DiskLoc rloc = cc->currLoc();
BSONObj key = cc->currKey();
- // NOTE Calling advance() may change the matcher, so it's important
+ // NOTE Calling advance() may change the matcher, so it's important
// to try to match first.
bool match = creal->matcher()->matches( key , rloc );
-
+
if ( ! cc->advance() )
justOne = true;
-
+
if ( ! match )
continue;
-
+
assert( !cc->c()->getsetdup(rloc) ); // can't be a dup, we deleted it!
-
+
if ( !justOne ) {
/* NOTE: this is SLOW. this is not good, noteLocation() was designed to be called across getMore
                   blocks. here we might call it millions of times, which would be bad.
*/
cc->c()->noteLocation();
}
-
+
if ( logop ) {
BSONElement e;
if( BSONObj( rloc.rec() ).getObjectID( e ) ) {
@@ -206,7 +206,8 @@ namespace mongo {
b.append( e );
bool replJustOne = true;
logOp( "d", ns, b.done(), 0, &replJustOne );
- } else {
+ }
+ else {
problem() << "deleted object without id, not logging" << endl;
}
}
@@ -220,13 +221,14 @@ namespace mongo {
break;
}
cc->c()->checkLocation();
-
- } while ( cc->ok() );
- if ( cc.get() && ClientCursor::find( id , false ) == 0 ){
+ }
+ while ( cc->ok() );
+
+ if ( cc.get() && ClientCursor::find( id , false ) == 0 ) {
cc.release();
}
-
+
return nDeleted;
}
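// The match-then-advance ordering used in the loop above, as a minimal
// skeleton: advance() may swap the matcher (e.g. a MultiCursor moving to the
// next $or clause), so the current document must be tested first. Names below
// are placeholders.
void removeRecord( const DiskLoc& loc );   // hypothetical per-document action

void deleteMatches( Cursor* cursor ) {
    while ( cursor->ok() ) {
        DiskLoc loc = cursor->currLoc();
        BSONObj key = cursor->currKey();
        bool match = cursor->matcher()->matches( key, loc );  // test the current doc...
        cursor->advance();                                    // ...before the matcher can change
        if ( match )
            removeRecord( loc );
    }
}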
@@ -276,9 +278,9 @@ namespace mongo {
exhaust = false;
ClientCursor::Pointer p(cursorid);
ClientCursor *cc = p.c();
-
+
int bufSize = 512;
- if ( cc ){
+ if ( cc ) {
bufSize += sizeof( QueryResult );
bufSize += MaxBytesToReturnToClientAtOnce;
}
@@ -286,11 +288,11 @@ namespace mongo {
BufBuilder b( bufSize );
b.skip(sizeof(QueryResult));
-
+
int resultFlags = ResultFlag_AwaitCapable;
int start = 0;
int n = 0;
-
+
if ( !cc ) {
log() << "getMore: cursorid not found " << ns << " " << cursorid << endl;
cursorid = 0;
@@ -306,7 +308,7 @@ namespace mongo {
StringBuilder& ss = curop.debug().str;
ss << " getMore: " << cc->query().toString() << " ";
}
-
+
start = cc->pos();
Cursor *c = cc->c();
c->checkLocation();
@@ -319,8 +321,8 @@ namespace mongo {
while ( 1 ) {
if ( !c->ok() ) {
if ( c->tailable() ) {
- /* when a tailable cursor hits "EOF", ok() goes false, and current() is null. however
- advance() can still be retries as a reactivation attempt. when there is new data, it will
+ /* when a tailable cursor hits "EOF", ok() goes false, and current() is null. however
+                           advance() can still be retried as a reactivation attempt. when there is new data, it will
return true. that's what we are doing here.
*/
if ( c->advance() )
@@ -355,8 +357,8 @@ namespace mongo {
else {
last = c->currLoc();
n++;
-
- if ( keyFieldsOnly ){
+
+ if ( keyFieldsOnly ) {
fillQueryResultFromObj(b, 0, keyFieldsOnly->hydrate( c->currKey() ) );
}
else {
@@ -365,7 +367,7 @@ namespace mongo {
fillQueryResultFromObj(b, cc->fields.get(), js, ( cc->pq.get() && cc->pq->showDiskLoc() ? &last : 0));
}
- if ( ( ntoreturn && n >= ntoreturn ) || b.len() > MaxBytesToReturnToClientAtOnce ){
+ if ( ( ntoreturn && n >= ntoreturn ) || b.len() > MaxBytesToReturnToClientAtOnce ) {
c->advance();
cc->incPos( n );
break;
@@ -374,7 +376,7 @@ namespace mongo {
}
c->advance();
}
-
+
if ( cc ) {
cc->updateLocation();
cc->mayUpgradeStorage();
@@ -401,9 +403,9 @@ namespace mongo {
_ns(ns), _capped(false), _count(), _myCount(),
_skip( spec["skip"].numberLong() ),
_limit( spec["limit"].numberLong() ),
- _bc(){
+ _bc() {
}
-
+
virtual void _init() {
_c = qp().newCursor();
_capped = _c->capped();
@@ -418,20 +420,20 @@ namespace mongo {
assert( _c.get() );
return _c->nscanned();
}
-
+
virtual bool prepareToYield() {
if ( ! _cc ) {
_cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , _c , _ns.c_str() ) );
}
return _cc->prepareToYield( _yieldData );
}
-
+
virtual void recoverFromYield() {
if ( !ClientCursor::recoverFromYield( _yieldData ) ) {
_c.reset();
_cc.reset();
- if ( _capped ){
+ if ( _capped ) {
msgassertedNoTrace( 13337, str::stream() << "capped cursor overrun during count: " << _ns );
}
else {
@@ -439,7 +441,7 @@ namespace mongo {
}
}
}
-
+
virtual void next() {
if ( ! _c || !_c->ok() ) {
setComplete();
@@ -455,20 +457,21 @@ namespace mongo {
return;
}
_gotOne();
- } else {
+ }
+ else {
if ( ! _firstMatch.woEqual( _bc->currKeyNode().key ) ) {
setComplete();
return;
}
_gotOne();
}
- }
+ }
else {
if ( !matcher()->matches(_c->currKey(), _c->currLoc() ) ) {
}
else if( !_c->getsetdup(_c->currLoc()) ) {
_gotOne();
- }
+ }
}
_c->advance();
}
@@ -484,14 +487,14 @@ namespace mongo {
return ( _myCount > _limit / 2 ) || ( complete() && !stopRequested() );
}
private:
-
- void _gotOne(){
- if ( _skip ){
+
+ void _gotOne() {
+ if ( _skip ) {
_skip--;
return;
}
-
- if ( _limit > 0 && _count >= _limit ){
+
+ if ( _limit > 0 && _count >= _limit ) {
setStop();
return;
}
@@ -518,7 +521,7 @@ namespace mongo {
/* { count: "collectionname"[, query: <query>] }
       returns -1 if the ns does not exist.
- */
+ */
long long runCount( const char *ns, const BSONObj &cmd, string &err ) {
Client::Context cx(ns);
NamespaceDetails *d = nsdetails( ns );
@@ -529,7 +532,7 @@ namespace mongo {
BSONObj query = cmd.getObjectField("query");
// count of all objects
- if ( query.isEmpty() ){
+ if ( query.isEmpty() ) {
return applySkipLimit( d->stats.nrecords , cmd );
}
MultiPlanScanner mps( ns, query, BSONObj(), 0, true, BSONObj(), BSONObj(), false, true );
@@ -543,7 +546,7 @@ namespace mongo {
}
return res->count();
}
-
+
class ExplainBuilder {
// Note: by default we filter out allPlans and oldPlan in the shell's
// explain() function. If you add any recursive structures, make sure to
@@ -560,7 +563,7 @@ namespace mongo {
b << "cursor" << c->toString() << "indexBounds" << c->prettyIndexBounds();
b.done();
}
- void noteScan( Cursor *c, long long nscanned, long long nscannedObjects, int n, bool scanAndOrder,
+ void noteScan( Cursor *c, long long nscanned, long long nscannedObjects, int n, bool scanAndOrder,
int millis, bool hint, int nYields , int nChunkSkips , bool indexOnly ) {
if ( _i == 1 ) {
_c.reset( new BSONArrayBuilder() );
@@ -568,7 +571,8 @@ namespace mongo {
}
if ( _i == 0 ) {
_b.reset( new BSONObjBuilder() );
- } else {
+ }
+ else {
_b.reset( new BSONObjBuilder( _c->subobjStart() ) );
}
*_b << "cursor" << c->toString();
@@ -580,7 +584,7 @@ namespace mongo {
*_b << "scanAndOrder" << true;
*_b << "millis" << millis;
-
+
*_b << "nYields" << nYields;
*_b << "nChunkSkips" << nChunkSkips;
*_b << "isMultiKey" << c->isMultiKey();
@@ -597,7 +601,7 @@ namespace mongo {
_a.reset( 0 );
++_i;
}
- BSONObj finishWithSuffix( long long nscanned, long long nscannedObjects, int n, int millis, const BSONObj &suffix ) {
+ BSONObj finishWithSuffix( long long nscanned, long long nscannedObjects, int n, int millis, const BSONObj &suffix ) {
if ( _i > 1 ) {
BSONObjBuilder b;
b << "clauses" << _c->arr();
@@ -607,9 +611,10 @@ namespace mongo {
b << "millis" << millis;
b.appendElements( suffix );
return b.obj();
- } else {
+ }
+ else {
_b->appendElements( suffix );
- return _b->obj();
+ return _b->obj();
}
}
private:
@@ -618,11 +623,11 @@ namespace mongo {
auto_ptr< BSONArrayBuilder > _c;
int _i;
};
-
+
// Implements database 'query' requests using the query optimizer's QueryOp interface
class UserQueryOp : public QueryOp {
public:
-
+
UserQueryOp( const ParsedQuery& pq, Message &response, ExplainBuilder &eb, CurOp &curop ) :
_buf( 32768 ) , // TODO be smarter here
_pq( pq ) ,
@@ -632,7 +637,7 @@ namespace mongo {
_oldN(0),
_nYields(),
_nChunkSkips(),
- _chunkManager( shardingState.needShardChunkManager(pq.ns()) ?
+ _chunkManager( shardingState.needShardChunkManager(pq.ns()) ?
shardingState.getShardChunkManager(pq.ns()) : ShardChunkManagerPtr() ),
_inMemSort(false),
_capped(false),
@@ -643,22 +648,23 @@ namespace mongo {
_eb( eb ),
_curop( curop )
{}
-
+
virtual void _init() {
// only need to put the QueryResult fields there if we're building the first buffer in the message.
if ( _response.empty() ) {
_buf.skip( sizeof( QueryResult ) );
}
-
+
if ( _oplogReplay ) {
_findingStartCursor.reset( new FindingStartCursor( qp() ) );
_capped = true;
- } else {
+ }
+ else {
_c = qp().newCursor( DiskLoc() , _pq.getNumToReturn() + _pq.getSkip() );
_capped = _c->capped();
-
+
// setup check for if we can only use index to extract
- if ( _c->modifiedKeys() == false && _c->isMultiKey() == false && _pq.getFields() ){
+ if ( _c->modifiedKeys() == false && _c->isMultiKey() == false && _pq.getFields() ) {
_keyFieldsOnly.reset( _pq.getFields()->checkKey( _c->indexKeyPattern() ) );
}
}
@@ -667,36 +673,37 @@ namespace mongo {
_inMemSort = true;
_so.reset( new ScanAndOrder( _pq.getSkip() , _pq.getNumToReturn() , _pq.getOrder() ) );
}
-
+
if ( _pq.isExplain() ) {
_eb.noteCursor( _c.get() );
}
}
-
+
virtual bool prepareToYield() {
if ( _findingStartCursor.get() ) {
return _findingStartCursor->prepareToYield();
- } else {
+ }
+ else {
if ( ! _cc ) {
_cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , _c , _pq.ns() ) );
}
return _cc->prepareToYield( _yieldData );
}
}
-
+
virtual void recoverFromYield() {
_nYields++;
-
+
if ( _findingStartCursor.get() ) {
_findingStartCursor->recoverFromYield();
- }
+ }
else if ( ! ClientCursor::recoverFromYield( _yieldData ) ) {
_c.reset();
_cc.reset();
_so.reset();
- if ( _capped ){
+ if ( _capped ) {
msgassertedNoTrace( 13338, str::stream() << "capped cursor overrun during query: " << _pq.ns() );
}
else {
@@ -705,7 +712,7 @@ namespace mongo {
}
}
-
+
virtual long long nscanned() {
if ( _findingStartCursor.get() ) {
return 0; // should only be one query plan, so value doesn't really matter.
@@ -713,31 +720,32 @@ namespace mongo {
assert( _c.get() );
return _c->nscanned();
}
-
+
virtual void next() {
if ( _findingStartCursor.get() ) {
if ( _findingStartCursor->done() ) {
_c = _findingStartCursor->cRelease();
_findingStartCursor.reset( 0 );
- } else {
+ }
+ else {
_findingStartCursor->next();
}
_capped = true;
return;
}
-
+
if ( !_c || !_c->ok() ) {
finish( false );
return;
}
bool mayCreateCursor1 = _pq.wantMore() && ! _inMemSort && _pq.getNumToReturn() != 1 && useCursors;
-
- if( 0 ) {
+
+ if( 0 ) {
cout << "SCANNING this: " << this << " key: " << _c->currKey() << " obj: " << _c->current() << endl;
}
-
- if ( _pq.getMaxScan() && _nscanned >= _pq.getMaxScan() ){
+
+ if ( _pq.getMaxScan() && _nscanned >= _pq.getMaxScan() ) {
finish( true ); //?
return;
}
@@ -751,23 +759,23 @@ namespace mongo {
else {
_nscannedObjects++;
DiskLoc cl = _c->currLoc();
- if ( _chunkManager && ! _chunkManager->belongsToMe( cl.obj() ) ){
+ if ( _chunkManager && ! _chunkManager->belongsToMe( cl.obj() ) ) {
_nChunkSkips++;
// log() << "TEMP skipping un-owned chunk: " << _c->current() << endl;
}
- else if( _c->getsetdup(cl) ) {
+ else if( _c->getsetdup(cl) ) {
// dup
}
else {
// got a match.
-
+
if ( _inMemSort ) {
// note: no cursors for non-indexed, ordered results. results must be fairly small.
_so->add( _pq.returnKey() ? _c->currKey() : _c->current(), _pq.showDiskLoc() ? &cl : 0 );
}
else if ( _ntoskip > 0 ) {
_ntoskip--;
- }
+ }
else {
if ( _pq.isExplain() ) {
_n++;
@@ -779,19 +787,19 @@ namespace mongo {
}
else {
- if ( _pq.returnKey() ){
+ if ( _pq.returnKey() ) {
BSONObjBuilder bb( _buf );
bb.appendKeys( _c->indexKeyPattern() , _c->currKey() );
bb.done();
}
- else if ( _keyFieldsOnly ){
+ else if ( _keyFieldsOnly ) {
fillQueryResultFromObj( _buf , 0 , _keyFieldsOnly->hydrate( _c->currKey() ) );
}
else {
BSONObj js = _c->current();
assert( js.isValid() );
- if ( _oplogReplay ){
+ if ( _oplogReplay ) {
BSONElement e = js["ts"];
if ( e.type() == Date || e.type() == Timestamp )
_slaveReadTill = e._opTime();
@@ -800,13 +808,13 @@ namespace mongo {
fillQueryResultFromObj( _buf , _pq.getFields() , js , (_pq.showDiskLoc() ? &cl : 0));
}
_n++;
- if ( ! _c->supportGetMore() ){
- if ( _pq.enough( n() ) || _buf.len() >= MaxBytesToReturnToClientAtOnce ){
+ if ( ! _c->supportGetMore() ) {
+ if ( _pq.enough( n() ) || _buf.len() >= MaxBytesToReturnToClientAtOnce ) {
finish( true );
return;
}
}
- else if ( _pq.enoughForFirstBatch( n() , _buf.len() ) ){
+ else if ( _pq.enoughForFirstBatch( n() , _buf.len() ) ) {
/* if only 1 requested, no cursor saved for efficiency...we assume it is findOne() */
if ( mayCreateCursor1 ) {
_wouldSaveClientCursor = true;
@@ -822,15 +830,15 @@ namespace mongo {
}
}
}
- _c->advance();
+ _c->advance();
}
// this plan won, so set data for response broadly
void finish( bool stop ) {
-
+
if ( _pq.isExplain() ) {
_n = _inMemSort ? _so->size() : _n;
- }
+ }
else if ( _inMemSort ) {
if( _so.get() )
_so->fill( _buf, _pq.getFields() , _n );
@@ -838,20 +846,20 @@ namespace mongo {
if ( _c.get() ) {
_nscanned = _c->nscanned();
-
+
if ( _pq.hasOption( QueryOption_CursorTailable ) && _pq.getNumToReturn() != 1 )
_c->setTailable();
-
+
// If the tailing request succeeded.
if ( _c->tailable() )
_saveClientCursor = true;
}
if ( _pq.isExplain() ) {
- _eb.noteScan( _c.get(), _nscanned, _nscannedObjects, _n, scanAndOrderRequired(),
- _curop.elapsedMillis(), useHints && !_pq.getHint().eoo(), _nYields ,
+ _eb.noteScan( _c.get(), _nscanned, _nscannedObjects, _n, scanAndOrderRequired(),
+ _curop.elapsedMillis(), useHints && !_pq.getHint().eoo(), _nYields ,
_nChunkSkips, _keyFieldsOnly.get() > 0 );
- }
+ }
else {
if ( _buf.len() ) {
_response.appendData( _buf.buf(), _buf.len() );
@@ -861,13 +869,13 @@ namespace mongo {
if ( stop ) {
setStop();
- }
+ }
else {
setComplete();
}
}
-
+
void finishExplain( const BSONObj &suffix ) {
BSONObj obj = _eb.finishWithSuffix( totalNscanned(), nscannedObjects(), n(), _curop.elapsedMillis(), suffix);
fillQueryResultFromObj(_buf, 0, obj);
@@ -876,11 +884,11 @@ namespace mongo {
_response.appendData( _buf.buf(), _buf.len() );
_buf.decouple();
}
-
+
virtual bool mayRecordPlan() const {
return ( _pq.getNumToReturn() != 1 ) && ( ( _n > _pq.getNumToReturn() / 2 ) || ( complete() && !stopRequested() ) );
}
-
+
virtual QueryOp *_createChild() const {
if ( _pq.isExplain() ) {
_eb.ensureStartScan();
@@ -900,8 +908,8 @@ namespace mongo {
long long nscannedObjects() const { return _nscannedObjects + _oldNscannedObjects; }
bool saveClientCursor() const { return _saveClientCursor; }
bool wouldSaveClientCursor() const { return _wouldSaveClientCursor; }
-
- void finishForOplogReplay( ClientCursor * cc ){
+
+ void finishForOplogReplay( ClientCursor * cc ) {
if ( _oplogReplay && ! _slaveReadTill.isNull() )
cc->slaveReadTill( _slaveReadTill );
@@ -918,17 +926,17 @@ namespace mongo {
long long _oldNscannedObjects;
int _n; // found so far
int _oldN;
-
+
int _nYields;
int _nChunkSkips;
-
+
MatchDetails _details;
ShardChunkManagerPtr _chunkManager;
-
+
bool _inMemSort;
auto_ptr< ScanAndOrder > _so;
-
+
shared_ptr<Cursor> _c;
ClientCursor::CleanupPointer _cc;
ClientCursor::YieldData _yieldData;
@@ -938,13 +946,13 @@ namespace mongo {
bool _wouldSaveClientCursor;
bool _oplogReplay;
auto_ptr< FindingStartCursor > _findingStartCursor;
-
+
Message &_response;
ExplainBuilder &_eb;
CurOp &_curop;
OpTime _slaveReadTill;
};
-
+
    /* run a query -- includes checking for and running a Command.
@return points to ns if exhaust mode. 0=normal mode
*/
@@ -956,19 +964,19 @@ namespace mongo {
BSONObj jsobj = q.query;
int queryOptions = q.queryOptions;
const char *ns = q.ns;
-
+
if( logLevel >= 2 )
log() << "query: " << ns << jsobj << endl;
-
+
ss << ns;
{
- // only say ntoreturn if nonzero.
+ // only say ntoreturn if nonzero.
int n = pq.getNumToReturn();
- if( n )
+ if( n )
ss << " ntoreturn:" << n;
}
curop.setQuery(jsobj);
-
+
if ( pq.couldBeCommand() ) {
BufBuilder bb;
bb.skip(sizeof(QueryResult));
@@ -989,12 +997,12 @@ namespace mongo {
qr->nReturned = 1;
result.setData( qr.release(), true );
}
- else {
+ else {
uasserted(13530, "bad or malformed command request?");
}
return 0;
}
-
+
/* --- regular query --- */
int n = 0;
@@ -1014,7 +1022,7 @@ namespace mongo {
out() << query.toString() << endl;
uassert( 10110 , "bad query object", false);
}
-
+
/* --- read lock --- */
mongolock lk(false);
@@ -1029,17 +1037,18 @@ namespace mongo {
const BSONObj nat1 = BSON( "$natural" << 1 );
if ( order.isEmpty() ) {
order = nat1;
- } else {
+ }
+ else {
uassert( 13052, "only {$natural:1} order allowed for tailable cursor", order == nat1 );
}
}
-
+
BSONObj snapshotHint; // put here to keep the data in scope
- if( snapshot ) {
+ if( snapshot ) {
NamespaceDetails *d = nsdetails(ns);
- if ( d ){
+ if ( d ) {
int i = d->findIdIndex();
- if( i < 0 ) {
+ if( i < 0 ) {
if ( strstr( ns , ".system." ) == 0 )
log() << "warning: no _id index on $snapshot query, ns:" << ns << endl;
}
@@ -1055,7 +1064,7 @@ namespace mongo {
}
}
}
-
+
if ( ! (explain || pq.showDiskLoc()) && isSimpleIdQuery( query ) && !pq.hasOption( QueryOption_CursorTailable ) ) {
bool nsFound = false;
bool indexFound = false;
@@ -1063,12 +1072,12 @@ namespace mongo {
BSONObj resObject;
Client& c = cc();
bool found = Helpers::findById( c, ns , query , resObject , &nsFound , &indexFound );
- if ( nsFound == false || indexFound == true ){
+ if ( nsFound == false || indexFound == true ) {
BufBuilder bb(sizeof(QueryResult)+resObject.objsize()+32);
bb.skip(sizeof(QueryResult));
-
+
ss << " idhack ";
- if ( found ){
+ if ( found ) {
n = 1;
fillQueryResultFromObj( bb , pq.getFields() , resObject );
}
@@ -1081,16 +1090,16 @@ namespace mongo {
qr->setOperation(opReply);
qr->cursorId = 0;
qr->startingFrom = 0;
- qr->nReturned = n;
+ qr->nReturned = n;
result.setData( qr.release(), true );
return false;
- }
+ }
}
-
+
// regular, not QO bypass query
-
+
BSONObj oldPlan;
- if ( explain && ! pq.hasIndexSpecifier() ){
+ if ( explain && ! pq.hasIndexSpecifier() ) {
MultiPlanScanner mps( ns, query, order );
if ( mps.usingPrerecordedPlan() )
oldPlan = mps.oldExplain();
@@ -1128,7 +1137,8 @@ namespace mongo {
// this MultiCursor will use a dumb NoOp to advance(), so no need to specify mayYield
shared_ptr< Cursor > multi( new MultiCursor( mps, cursor, dqo.matcher(), dqo ) );
cc = new ClientCursor(queryOptions, multi, ns, jsobj.getOwned());
- } else {
+ }
+ else {
cursor->setMatcher( dqo.matcher() );
cc = new ClientCursor( queryOptions, cursor, ns, jsobj.getOwned() );
}
@@ -1169,6 +1179,6 @@ namespace mongo {
}
ss << " nreturned:" << n;
return exhaust;
- }
-
+ }
+
} // namespace mongo
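// The "idhack" gate from runQuery above, pulled out as a sketch: a plain
// { _id : <value> } query with none of the options that require a real cursor
// bypasses the query optimizer and becomes a point lookup via
// Helpers::findById. (isIdHackEligible is a hypothetical name for the inline
// condition.)
static bool isIdHackEligible( const ParsedQuery& pq, const BSONObj& query ) {
    return ! pq.isExplain()
        && ! pq.showDiskLoc()
        && isSimpleIdQuery( query )
        && ! pq.hasOption( QueryOption_CursorTailable );
}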
diff --git a/db/query.h b/db/query.h
index 6a66ec2dc65..5de7cedbb38 100644
--- a/db/query.h
+++ b/db/query.h
@@ -38,29 +38,29 @@
a series of JSObjects
dbDelete:
string collection;
- int flags=0; // 1=DeleteSingle
+ int flags=0; // 1=DeleteSingle
JSObject query;
dbUpdate:
string collection;
- int flags; // 1=upsert
+ int flags; // 1=upsert
JSObject query;
- JSObject objectToUpdate;
+ JSObject objectToUpdate;
objectToUpdate may include { $inc: <field> } or { $set: ... }, see struct Mod.
dbQuery:
string collection;
- int nToSkip;
- int nToReturn; // how many you want back as the beginning of the cursor data (0=no limit)
+ int nToSkip;
+ int nToReturn; // how many you want back as the beginning of the cursor data (0=no limit)
// greater than zero is simply a hint on how many objects to send back per "cursor batch".
// a negative number indicates a hard limit.
JSObject query;
- [JSObject fieldsToReturn]
+ [JSObject fieldsToReturn]
dbGetMore:
- string collection; // redundant, might use for security.
+ string collection; // redundant, might use for security.
int nToReturn;
int64 cursorID;
dbKillCursors=2007:
int n;
- int64 cursorIDs[n];
+ int64 cursorIDs[n];
Note that on Update, there is only one object, which is different
from insert where you can pass a list of objects to insert in the db.
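// The dbQuery fields above, restated as a rough fixed-layout sketch. The real
// OP_QUERY encoding is variable length (the cstring and BSON payloads follow
// the standard message header), so this struct is illustrative only:
struct DbQuerySketch {
    // char    collection[];   // cstring: full "<db>.<collection>" name
    int32_t nToSkip;           // skip this many matches before returning
    int32_t nToReturn;         // 0 = no limit; >0 batch-size hint; <0 hard limit
    // BSONObj query;          // the criteria
    // BSONObj fieldsToReturn; // optional projection
};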
@@ -78,7 +78,7 @@ namespace mongo {
struct GetMoreWaitException { };
QueryResult* processGetMore(const char *ns, int ntoreturn, long long cursorid , CurOp& op, int pass, bool& exhaust);
-
+
struct UpdateResult {
bool existing; // if existing objects were modified
bool mod; // was this a $ mod
@@ -86,25 +86,25 @@ namespace mongo {
OID upserted; // if something was upserted, the new _id of the object
UpdateResult( bool e, bool m, unsigned long long n , const BSONObj& upsertedObject = BSONObj() )
- : existing(e) , mod(m), num(n){
+ : existing(e) , mod(m), num(n) {
upserted.clear();
BSONElement id = upsertedObject["_id"];
- if ( ! e && n == 1 && id.type() == jstOID ){
+ if ( ! e && n == 1 && id.type() == jstOID ) {
upserted = id.OID();
}
}
-
+
};
class RemoveSaver;
-
+
/* returns true if an existing object was updated, false if no existing object was found.
multi - update multiple objects - mostly useful with things like $set
god - allow access to system namespaces
*/
UpdateResult updateObjects(const char *ns, const BSONObj& updateobj, BSONObj pattern, bool upsert, bool multi , bool logop , OpDebug& debug );
- UpdateResult _updateObjects(bool god, const char *ns, const BSONObj& updateobj, BSONObj pattern,
+ UpdateResult _updateObjects(bool god, const char *ns, const BSONObj& updateobj, BSONObj pattern,
bool upsert, bool multi , bool logop , OpDebug& debug , RemoveSaver * rs = 0 );
// If justOne is true, deletedId is set to the id of the deleted object.
@@ -113,7 +113,7 @@ namespace mongo {
long long runCount(const char *ns, const BSONObj& cmd, string& err);
const char * runQuery(Message& m, QueryMessage& q, CurOp& curop, Message &result);
-
+
/* This is for languages whose "objects" are not well ordered (JSON is well ordered).
[ { a : ... } , { b : ... } ] -> { a : ..., b : ... }
*/
@@ -145,17 +145,17 @@ namespace mongo {
class ParsedQuery {
public:
ParsedQuery( QueryMessage& qm )
- : _ns( qm.ns ) , _ntoskip( qm.ntoskip ) , _ntoreturn( qm.ntoreturn ) , _options( qm.queryOptions ){
+ : _ns( qm.ns ) , _ntoskip( qm.ntoskip ) , _ntoreturn( qm.ntoreturn ) , _options( qm.queryOptions ) {
init( qm.query );
initFields( qm.fields );
}
ParsedQuery( const char* ns , int ntoskip , int ntoreturn , int queryoptions , const BSONObj& query , const BSONObj& fields )
- : _ns( ns ) , _ntoskip( ntoskip ) , _ntoreturn( ntoreturn ) , _options( queryoptions ){
+ : _ns( ns ) , _ntoskip( ntoskip ) , _ntoreturn( ntoreturn ) , _options( queryoptions ) {
init( query );
initFields( fields );
}
-
- ~ParsedQuery(){}
+
+ ~ParsedQuery() {}
const char * ns() const { return _ns; }
bool isLocalDB() const { return strncmp(_ns, "local.", 6) == 0; }
@@ -170,7 +170,7 @@ namespace mongo {
int getOptions() const { return _options; }
bool hasOption( int x ) const { return x & _options; }
-
+
bool isExplain() const { return _explain; }
bool isSnapshot() const { return _snapshot; }
bool returnKey() const { return _returnKey; }
@@ -181,7 +181,7 @@ namespace mongo {
const BSONObj& getOrder() const { return _order; }
const BSONElement& getHint() const { return _hint; }
int getMaxScan() const { return _maxScan; }
-
+
bool couldBeCommand() const {
/* we assume you are using findOne() for running a cmd... */
return _ntoreturn == 1 && strstr( _ns , ".$cmd" );
@@ -194,7 +194,7 @@ namespace mongo {
/* if ntoreturn is zero, we return up to 101 objects. on the subsequent getmore, there
           is only a size limit. The idea is that on a find() where one doesn't use many results,
we don't return much, but once getmore kicks in, we start pushing significant quantities.
-
+
The n limit (vs. size) is important when someone fetches only one small field from big
objects, which causes massive scanning server-side.
*/
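// The first-batch rule above in code form -- a sketch of enoughForFirstBatch().
// The 101-document threshold is from the comment; the byte caps are
// illustrative (MaxBytesToReturnToClientAtOnce is the server's overall cap):
bool enoughForFirstBatch_sketch( int ntoreturn, int n, int len ) {
    if ( ntoreturn == 0 )                         // client gave no limit
        return n >= 101 || len > 1024 * 1024;     // ~101 docs or ~1MB, whichever first
    return n >= ntoreturn || len > MaxBytesToReturnToClientAtOnce;
}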
@@ -209,14 +209,14 @@ namespace mongo {
return false;
return n >= _ntoreturn;
}
-
+
private:
- void init( const BSONObj& q ){
+ void init( const BSONObj& q ) {
_reset();
uassert( 10105 , "bad skip value in query", _ntoskip >= 0);
-
- if ( _ntoreturn < 0 ){
- /* _ntoreturn greater than zero is simply a hint on how many objects to send back per
+
+ if ( _ntoreturn < 0 ) {
+ /* _ntoreturn greater than zero is simply a hint on how many objects to send back per
"cursor batch".
A negative number indicates a hard limit.
*/
@@ -224,12 +224,12 @@ namespace mongo {
_ntoreturn = -_ntoreturn;
}
-
+
BSONElement e = q["query"];
if ( ! e.isABSONObj() )
e = q["$query"];
-
- if ( e.isABSONObj() ){
+
+ if ( e.isABSONObj() ) {
_filter = e.embeddedObject();
_initTop( q );
}
@@ -238,7 +238,7 @@ namespace mongo {
}
}
- void _reset(){
+ void _reset() {
_wantMore = true;
_explain = false;
_snapshot = false;
@@ -247,19 +247,21 @@ namespace mongo {
_maxScan = 0;
}
- void _initTop( const BSONObj& top ){
+ void _initTop( const BSONObj& top ) {
BSONObjIterator i( top );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
const char * name = e.fieldName();
if ( strcmp( "$orderby" , name ) == 0 ||
- strcmp( "orderby" , name ) == 0 ){
+ strcmp( "orderby" , name ) == 0 ) {
if ( e.type() == Object ) {
_order = e.embeddedObject();
- } else if ( e.type() == Array ) {
+ }
+ else if ( e.type() == Array ) {
_order = transformOrderFromArrayFormat( _order );
- } else {
+ }
+ else {
uassert(13513, "sort must be an object or array", 0);
}
}
@@ -279,25 +281,25 @@ namespace mongo {
_maxScan = e.numberInt();
else if ( strcmp( "$showDiskLoc" , name ) == 0 )
_showDiskLoc = e.trueValue();
-
+
}
- if ( _snapshot ){
+ if ( _snapshot ) {
uassert( 12001 , "E12001 can't sort with $snapshot", _order.isEmpty() );
uassert( 12002 , "E12002 can't use hint with $snapshot", _hint.eoo() );
}
-
+
}
- void initFields( const BSONObj& fields ){
+ void initFields( const BSONObj& fields ) {
if ( fields.isEmpty() )
return;
_fields.reset( new Projection() );
_fields->init( fields );
}
- ParsedQuery( const ParsedQuery& other ){
+ ParsedQuery( const ParsedQuery& other ) {
assert(0);
}
@@ -305,10 +307,10 @@ namespace mongo {
int _ntoskip;
int _ntoreturn;
int _options;
-
+
BSONObj _filter;
shared_ptr< Projection > _fields;
-
+
bool _wantMore;
bool _explain;
@@ -321,7 +323,7 @@ namespace mongo {
BSONObj _order;
int _maxScan;
};
-
+
} // namespace mongo
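// The ntoreturn normalization in ParsedQuery::init() above, by example
// (a sketch using the BSON() macro as elsewhere in this tree):
void ntoreturnExample() {
    ParsedQuery pq( "test.foo", /*ntoskip*/ 0, /*ntoreturn*/ -5, /*queryoptions*/ 0,
                    BSON( "x" << 1 ), BSONObj() );
    assert( pq.getNumToReturn() == 5 );   // the negative input became a positive hard limit...
    assert( ! pq.wantMore() );            // ...with wantMore off, so no getMore follows
    // (ntoreturn 5 would instead be a soft batch-size hint, leaving wantMore true.)
}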
diff --git a/db/queryoptimizer.cpp b/db/queryoptimizer.cpp
index fc78e202621..0b9dce766fc 100644
--- a/db/queryoptimizer.cpp
+++ b/db/queryoptimizer.cpp
@@ -31,14 +31,14 @@
namespace mongo {
- void checkTableScanAllowed( const char * ns ){
+ void checkTableScanAllowed( const char * ns ) {
if ( ! cmdLine.noTableScan )
return;
-
+
if ( strstr( ns , ".system." ) ||
- strstr( ns , "local." ) )
+ strstr( ns , "local." ) )
return;
-
+
if ( ! nsdetails( ns ) )
return;
@@ -50,8 +50,8 @@ namespace mongo {
return e.number();
return 1;
}
-
- QueryPlan::QueryPlan(
+
+ QueryPlan::QueryPlan(
NamespaceDetails *d, int idxNo,
const FieldRangeSet &fbs, const FieldRangeSet &originalFrs, const BSONObj &originalQuery, const BSONObj &order, const BSONObj &startKey, const BSONObj &endKey , string special ) :
_d(d), _idxNo(idxNo),
@@ -67,8 +67,8 @@ namespace mongo {
_unhelpful( false ),
_special( special ),
_type(0),
- _startOrEndSpec( !startKey.isEmpty() || !endKey.isEmpty() ){
-
+ _startOrEndSpec( !startKey.isEmpty() || !endKey.isEmpty() ) {
+
if ( !_fbs.matchPossible() ) {
_unhelpful = true;
_scanAndOrderRequired = false;
@@ -77,7 +77,7 @@ namespace mongo {
if( _idxNo >= 0 ) {
_index = &d->idx(_idxNo);
- }
+ }
else {
// full table scan case
if ( _order.isEmpty() || !strcmp( _order.firstElement().fieldName(), "$natural" ) )
@@ -85,7 +85,7 @@ namespace mongo {
return;
}
- if ( _special.size() ){
+ if ( _special.size() ) {
_optimal = true;
_type = _index->getSpec().getType();
massert( 13040 , (string)"no type for special: " + _special , _type );
@@ -123,7 +123,7 @@ namespace mongo {
else if ( _direction != d )
break;
}
- doneCheckOrder:
+doneCheckOrder:
if ( _scanAndOrderRequired )
_direction = 0;
BSONObjIterator i( idxKey );
@@ -142,7 +142,8 @@ namespace mongo {
++optimalIndexedQueryCount;
if ( !fb.equality() )
stillOptimalIndexedQueryCount = false;
- } else {
+ }
+ else {
if ( fb.nontrivial() )
optimalIndexedQueryCount = -1;
}
@@ -154,12 +155,12 @@ namespace mongo {
orderFieldsUnindexed.erase( e.fieldName() );
}
if ( !_scanAndOrderRequired &&
- ( optimalIndexedQueryCount == fbs.nNontrivialRanges() ) )
+ ( optimalIndexedQueryCount == fbs.nNontrivialRanges() ) )
_optimal = true;
if ( exactIndexedQueryCount == fbs.nNontrivialRanges() &&
- orderFieldsUnindexed.size() == 0 &&
- exactIndexedQueryCount == _index->keyPattern().nFields() &&
- exactIndexedQueryCount == _originalQuery.nFields() ) {
+ orderFieldsUnindexed.size() == 0 &&
+ exactIndexedQueryCount == _index->keyPattern().nFields() &&
+ exactIndexedQueryCount == _originalQuery.nFields() ) {
_exactKeyMatch = true;
}
_frv.reset( new FieldRangeVector( fbs, idxKey, _direction ) );
@@ -177,41 +178,43 @@ namespace mongo {
}
if ( ( _scanAndOrderRequired || _order.isEmpty() ) &&
- !fbs.range( idxKey.firstElement().fieldName() ).nontrivial() ) {
+ !fbs.range( idxKey.firstElement().fieldName() ).nontrivial() ) {
_unhelpful = true;
}
}
-
+
shared_ptr<Cursor> QueryPlan::newCursor( const DiskLoc &startLoc , int numWanted ) const {
if ( _type ) {
- // hopefully safe to use original query in these contexts - don't think we can mix type with $or clause separation yet
+ // hopefully safe to use original query in these contexts - don't think we can mix type with $or clause separation yet
return _type->newCursor( _originalQuery , _order , numWanted );
}
-
- if ( !_fbs.matchPossible() ){
+
+ if ( !_fbs.matchPossible() ) {
if ( _fbs.nNontrivialRanges() )
checkTableScanAllowed( _fbs.ns() );
return shared_ptr<Cursor>( new BasicCursor( DiskLoc() ) );
}
- if ( !_index ){
+ if ( !_index ) {
if ( _fbs.nNontrivialRanges() )
checkTableScanAllowed( _fbs.ns() );
return findTableScan( _fbs.ns(), _order, startLoc );
}
massert( 10363 , "newCursor() with start location not implemented for indexed plans", startLoc.isNull() );
-
+
if ( _startOrEndSpec ) {
// we are sure to spec _endKeyInclusive
return shared_ptr<Cursor>( new BtreeCursor( _d, _idxNo, *_index, _startKey, _endKey, _endKeyInclusive, _direction >= 0 ? 1 : -1 ) );
- } else if ( _index->getSpec().getType() ) {
- return shared_ptr<Cursor>( new BtreeCursor( _d, _idxNo, *_index, _frv->startKey(), _frv->endKey(), true, _direction >= 0 ? 1 : -1 ) );
- } else {
+ }
+ else if ( _index->getSpec().getType() ) {
+ return shared_ptr<Cursor>( new BtreeCursor( _d, _idxNo, *_index, _frv->startKey(), _frv->endKey(), true, _direction >= 0 ? 1 : -1 ) );
+ }
+ else {
return shared_ptr<Cursor>( new BtreeCursor( _d, _idxNo, *_index, _frv, _direction >= 0 ? 1 : -1 ) );
}
}
-
+
shared_ptr<Cursor> QueryPlan::newReverseCursor() const {
if ( !_fbs.matchPossible() )
return shared_ptr<Cursor>( new BasicCursor( DiskLoc() ) );
@@ -224,48 +227,48 @@ namespace mongo {
massert( 10364 , "newReverseCursor() not implemented for indexed plans", false );
return shared_ptr<Cursor>();
}
-
+
BSONObj QueryPlan::indexKey() const {
if ( !_index )
return BSON( "$natural" << 1 );
return _index->keyPattern();
}
-
+
void QueryPlan::registerSelf( long long nScanned ) const {
if ( _fbs.matchPossible() ) {
scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
- NamespaceDetailsTransient::get_inlock( ns() ).registerIndexForPattern( _fbs.pattern( _order ), indexKey(), nScanned );
+ NamespaceDetailsTransient::get_inlock( ns() ).registerIndexForPattern( _fbs.pattern( _order ), indexKey(), nScanned );
}
}
-
- bool QueryPlan::isMultiKey() const {
+
+ bool QueryPlan::isMultiKey() const {
if ( _idxNo < 0 )
return false;
- return _d->isMultikey( _idxNo );
+ return _d->isMultikey( _idxNo );
}
QueryPlanSet::QueryPlanSet( const char *ns, auto_ptr< FieldRangeSet > frs, auto_ptr< FieldRangeSet > originalFrs, const BSONObj &originalQuery, const BSONObj &order, const BSONElement *hint, bool honorRecordedPlan, const BSONObj &min, const BSONObj &max, bool bestGuessOnly, bool mayYield ) :
- _ns(ns),
- _originalQuery( originalQuery ),
- _fbs( frs ),
- _originalFrs( originalFrs ),
- _mayRecordPlan( true ),
- _usingPrerecordedPlan( false ),
- _hint( BSONObj() ),
- _order( order.getOwned() ),
- _oldNScanned( 0 ),
- _honorRecordedPlan( honorRecordedPlan ),
- _min( min.getOwned() ),
- _max( max.getOwned() ),
- _bestGuessOnly( bestGuessOnly ),
- _mayYield( mayYield ),
- _yieldSometimesTracker( 256, 20 ){
+ _ns(ns),
+ _originalQuery( originalQuery ),
+ _fbs( frs ),
+ _originalFrs( originalFrs ),
+ _mayRecordPlan( true ),
+ _usingPrerecordedPlan( false ),
+ _hint( BSONObj() ),
+ _order( order.getOwned() ),
+ _oldNScanned( 0 ),
+ _honorRecordedPlan( honorRecordedPlan ),
+ _min( min.getOwned() ),
+ _max( max.getOwned() ),
+ _bestGuessOnly( bestGuessOnly ),
+ _mayYield( mayYield ),
+ _yieldSometimesTracker( 256, 20 ) {
if ( hint && !hint->eoo() ) {
_hint = hint->wrap();
}
init();
}
-
+
bool QueryPlanSet::modifiedKeys() const {
for( PlanSet::const_iterator i = _plans.begin(); i != _plans.end(); ++i )
if ( (*i)->isMultiKey() )
@@ -291,7 +294,7 @@ namespace mongo {
NamespaceDetails *d = nsdetails(_ns);
_plans.push_back( QueryPlanPtr( new QueryPlan( d, d->idxNo(id), *_fbs, *_originalFrs, _originalQuery, _order, _min, _max ) ) );
}
-
+
// returns an IndexDetails * for a hint, 0 if hint is $natural.
// hint must not be eoo()
IndexDetails *parseHint( const BSONElement &hint, NamespaceDetails *d ) {
@@ -306,7 +309,7 @@ namespace mongo {
}
}
}
- else if( hint.type() == Object ) {
+ else if( hint.type() == Object ) {
BSONObj hintobj = hint.embeddedObject();
uassert( 10112 , "bad hint", !hintobj.isEmpty() );
if ( !strcmp( hintobj.firstElement().fieldName(), "$natural" ) ) {
@@ -319,17 +322,17 @@ namespace mongo {
return &ii;
}
}
- }
+ }
uassert( 10113 , "bad hint", false );
return 0;
}
-
+
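// The three hint spellings parseHint() above accepts, as BSON (a sketch;
// 'd' is a hypothetical NamespaceDetails*):
IndexDetails* hintExamples( NamespaceDetails* d ) {
    BSONObj byName    = BSON( "$hint" << "x_1" );                   // by index name
    BSONObj byPattern = BSON( "$hint" << BSON( "x" << 1 ) );        // by key pattern
    BSONObj natural   = BSON( "$hint" << BSON( "$natural" << 1 ) ); // force a table scan
    // parseHint( natural.firstElement(), d ) returns 0, i.e. "no index, scan".
    return parseHint( byPattern.firstElement(), d );
}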
void QueryPlanSet::init() {
DEBUGQO( "QueryPlanSet::init " << ns << "\t" << _originalQuery );
_plans.clear();
_mayRecordPlan = true;
_usingPrerecordedPlan = false;
-
+
const char *ns = _fbs->ns();
NamespaceDetails *d = nsdetails( ns );
if ( !d || !_fbs->matchPossible() ) {
@@ -337,21 +340,22 @@ namespace mongo {
_plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_fbs, *_originalFrs, _originalQuery, _order ) ) );
return;
}
-
+
BSONElement hint = _hint.firstElement();
if ( !hint.eoo() ) {
_mayRecordPlan = false;
IndexDetails *id = parseHint( hint, d );
if ( id ) {
addHint( *id );
- } else {
+ }
+ else {
massert( 10366 , "natural order cannot be specified with $min/$max", _min.isEmpty() && _max.isEmpty() );
// Table scan plan
- _plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_fbs, *_originalFrs, _originalQuery, _order ) ) );
+ _plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_fbs, *_originalFrs, _originalQuery, _order ) ) );
}
return;
}
-
+
if ( !_min.isEmpty() || !_max.isEmpty() ) {
string errmsg;
BSONObj keyPattern;
@@ -361,9 +365,9 @@ namespace mongo {
return;
}
- if ( isSimpleIdQuery( _originalQuery ) ){
+ if ( isSimpleIdQuery( _originalQuery ) ) {
int idx = d->findIdIndex();
- if ( idx >= 0 ){
+ if ( idx >= 0 ) {
_usingPrerecordedPlan = true;
_mayRecordPlan = false;
_plans.push_back( QueryPlanPtr( new QueryPlan( d , idx , *_fbs , *_fbs , _originalQuery, _order ) ) );
@@ -371,24 +375,24 @@ namespace mongo {
}
}
- if ( _originalQuery.isEmpty() && _order.isEmpty() ){
+ if ( _originalQuery.isEmpty() && _order.isEmpty() ) {
_plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_fbs, *_originalFrs, _originalQuery, _order ) ) );
return;
}
DEBUGQO( "\t special : " << _fbs->getSpecial() );
- if ( _fbs->getSpecial().size() ){
+ if ( _fbs->getSpecial().size() ) {
_special = _fbs->getSpecial();
NamespaceDetails::IndexIterator i = d->ii();
while( i.more() ) {
int j = i.pos();
IndexDetails& ii = i.next();
const IndexSpec& spec = ii.getSpec();
- if ( spec.getTypeName() == _special && spec.suitability( _originalQuery , _order ) ){
+ if ( spec.getTypeName() == _special && spec.suitability( _originalQuery , _order ) ) {
_usingPrerecordedPlan = true;
_mayRecordPlan = false;
- _plans.push_back( QueryPlanPtr( new QueryPlan( d , j , *_fbs , *_fbs , _originalQuery, _order ,
- BSONObj() , BSONObj() , _special ) ) );
+ _plans.push_back( QueryPlanPtr( new QueryPlan( d , j , *_fbs , *_fbs , _originalQuery, _order ,
+ BSONObj() , BSONObj() , _special ) ) );
return;
}
}
@@ -425,10 +429,10 @@ namespace mongo {
}
}
}
-
+
addOtherPlans( false );
}
-
+
void QueryPlanSet::addOtherPlans( bool checkFirst ) {
const char *ns = _fbs->ns();
NamespaceDetails *d = nsdetails( ns );
@@ -437,12 +441,12 @@ namespace mongo {
// If table scan is optimal or natural order requested or tailable cursor requested
if ( !_fbs->matchPossible() || ( _fbs->nNontrivialRanges() == 0 && _order.isEmpty() ) ||
- ( !_order.isEmpty() && !strcmp( _order.firstElement().fieldName(), "$natural" ) ) ) {
+ ( !_order.isEmpty() && !strcmp( _order.firstElement().fieldName(), "$natural" ) ) ) {
// Table scan plan
addPlan( QueryPlanPtr( new QueryPlan( d, -1, *_fbs, *_originalFrs, _originalQuery, _order ) ), checkFirst );
return;
}
-
+
bool normalQuery = _hint.isEmpty() && _min.isEmpty() && _max.isEmpty();
PlanSet plans;
@@ -450,7 +454,7 @@ namespace mongo {
IndexDetails& id = d->idx(i);
const IndexSpec& spec = id.getSpec();
IndexSuitability suitability = HELPFUL;
- if ( normalQuery ){
+ if ( normalQuery ) {
suitability = spec.suitability( _fbs->simplifiedQuery() , _order );
if ( suitability == USELESS )
continue;
@@ -460,7 +464,8 @@ namespace mongo {
if ( p->optimal() ) {
addPlan( p, checkFirst );
return;
- } else if ( !p->unhelpful() ) {
+ }
+ else if ( !p->unhelpful() ) {
plans.push_back( p );
}
}
@@ -470,7 +475,7 @@ namespace mongo {
// Table scan plan
addPlan( QueryPlanPtr( new QueryPlan( d, -1, *_fbs, *_originalFrs, _originalQuery, _order ) ), checkFirst );
}
-
+
shared_ptr< QueryOp > QueryPlanSet::runOp( QueryOp &op ) {
if ( _usingPrerecordedPlan ) {
Runner r( *this, op );
@@ -487,7 +492,7 @@ namespace mongo {
Runner r( *this, op );
return r.run();
}
-
+
BSONObj QueryPlanSet::explain() const {
vector< BSONObj > arr;
for( PlanSet::const_iterator i = _plans.begin(); i != _plans.end(); ++i ) {
@@ -504,18 +509,18 @@ namespace mongo {
QueryPlanSet::QueryPlanPtr QueryPlanSet::getBestGuess() const {
assert( _plans.size() );
- if ( _plans[ 0 ]->scanAndOrderRequired() ){
- for ( unsigned i=1; i<_plans.size(); i++ ){
+ if ( _plans[ 0 ]->scanAndOrderRequired() ) {
+ for ( unsigned i=1; i<_plans.size(); i++ ) {
if ( ! _plans[i]->scanAndOrderRequired() )
return _plans[i];
}
-
+
stringstream ss;
ss << "best guess plan requested, but scan and order required:";
ss << " query: " << _fbs->simplifiedQuery();
ss << " order: " << _order;
ss << " choices: ";
- for ( unsigned i=0; i<_plans.size(); i++ ){
+ for ( unsigned i=0; i<_plans.size(); i++ ) {
ss << _plans[i]->indexKey() << " ";
}
@@ -524,12 +529,12 @@ namespace mongo {
}
return _plans[0];
}
-
+
QueryPlanSet::Runner::Runner( QueryPlanSet &plans, QueryOp &op ) :
_op( op ),
_plans( plans ) {
}
-
+
void QueryPlanSet::Runner::mayYield( const vector< shared_ptr< QueryOp > > &ops ) {
if ( _plans._mayYield ) {
if ( _plans._yieldSometimesTracker.ping() ) {
@@ -543,12 +548,12 @@ namespace mongo {
ClientCursor::staticYield( micros , _plans._ns );
for( vector< shared_ptr< QueryOp > >::const_iterator i = ops.begin(); i != ops.end(); ++i ) {
recoverFromYield( **i );
- }
+ }
}
}
- }
+ }
}
-
+
struct OpHolder {
OpHolder( const shared_ptr< QueryOp > &op ) : _op( op ), _offset() {}
shared_ptr< QueryOp > _op;
@@ -557,18 +562,19 @@ namespace mongo {
return _op->nscanned() + _offset > other._op->nscanned() + other._offset;
}
};
-
+
shared_ptr< QueryOp > QueryPlanSet::Runner::run() {
massert( 10369 , "no plans", _plans._plans.size() > 0 );
-
+
vector< shared_ptr< QueryOp > > ops;
if ( _plans._bestGuessOnly ) {
shared_ptr< QueryOp > op( _op.createChild() );
op->setQueryPlan( _plans.getBestGuess().get() );
- ops.push_back( op );
- } else {
+ ops.push_back( op );
+ }
+ else {
if ( _plans._plans.size() > 1 )
- log(1) << " running multiple plans" << endl;
+ log(1) << " running multiple plans" << endl;
for( PlanSet::iterator i = _plans._plans.begin(); i != _plans._plans.end(); ++i ) {
shared_ptr< QueryOp > op( _op.createChild() );
op->setQueryPlan( i->get() );
@@ -581,14 +587,14 @@ namespace mongo {
if ( (*i)->complete() )
return *i;
}
-
+
std::priority_queue< OpHolder > queue;
for( vector< shared_ptr< QueryOp > >::iterator i = ops.begin(); i != ops.end(); ++i ) {
if ( !(*i)->error() ) {
queue.push( *i );
}
}
-
+
while( !queue.empty() ) {
mayYield( ops );
OpHolder holder = queue.top();
@@ -618,14 +624,14 @@ namespace mongo {
if ( op->complete() )
return op;
queue.push( op );
- }
+ }
_plans._mayRecordPlan = true;
_plans._usingPrerecordedPlan = false;
- }
+ }
}
return ops[ 0 ];
}
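// The plan race above in self-contained miniature: candidate ops advance one
// unit of work at a time, always resuming the one with the fewest documents
// scanned (OpHolder's operator< inverts the comparison so the priority_queue
// pops the cheapest), until some op completes. Types here are stand-ins for
// QueryOp/OpHolder.
#include <queue>
#include <vector>

struct PlanSketch { int scanned; int needed; };
struct ByScanned {  // greater-than, so the *least* scanned sits on top
    bool operator()( const PlanSketch* a, const PlanSketch* b ) const {
        return a->scanned > b->scanned;
    }
};

PlanSketch* racePlans( std::vector<PlanSketch>& plans ) {
    std::priority_queue< PlanSketch*, std::vector<PlanSketch*>, ByScanned > q;
    for ( size_t i = 0; i < plans.size(); ++i )
        q.push( &plans[i] );
    while ( !q.empty() ) {
        PlanSketch* p = q.top(); q.pop();
        ++p->scanned;                    // one step, like op->next()
        if ( p->scanned >= p->needed )   // "complete()"
            return p;
        q.push( p );                     // otherwise re-queue and let the race continue
    }
    return 0;
}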
-
+
#define GUARD_OP_EXCEPTION( op, expression ) \
try { \
expression; \
@@ -639,8 +645,8 @@ namespace mongo {
catch ( ... ) { \
op.setException( ExceptionInfo( "Caught unknown exception" , 0 ) ); \
}
-
-
+
+
void QueryPlanSet::Runner::initOp( QueryOp &op ) {
GUARD_OP_EXCEPTION( op, op.init() );
}
@@ -651,39 +657,39 @@ namespace mongo {
bool QueryPlanSet::Runner::prepareToYield( QueryOp &op ) {
GUARD_OP_EXCEPTION( op,
- if ( op.error() ) {
- return true;
- } else {
- return op.prepareToYield();
- } );
+ if ( op.error() ) {
+ return true;
+ }
+ else {
+ return op.prepareToYield();
+ } );
return true;
}
void QueryPlanSet::Runner::recoverFromYield( QueryOp &op ) {
GUARD_OP_EXCEPTION( op, if ( !op.error() ) { op.recoverFromYield(); } );
}
-
-
+
+
MultiPlanScanner::MultiPlanScanner( const char *ns,
- const BSONObj &query,
- const BSONObj &order,
- const BSONElement *hint,
- bool honorRecordedPlan,
- const BSONObj &min,
- const BSONObj &max,
- bool bestGuessOnly,
- bool mayYield ) :
- _ns( ns ),
- _or( !query.getField( "$or" ).eoo() ),
- _query( query.getOwned() ),
- _fros( ns, _query ),
- _i(),
- _honorRecordedPlan( honorRecordedPlan ),
- _bestGuessOnly( bestGuessOnly ),
- _hint( ( hint && !hint->eoo() ) ? hint->wrap() : BSONObj() ),
- _mayYield( mayYield ),
- _tableScanned()
- {
+ const BSONObj &query,
+ const BSONObj &order,
+ const BSONElement *hint,
+ bool honorRecordedPlan,
+ const BSONObj &min,
+ const BSONObj &max,
+ bool bestGuessOnly,
+ bool mayYield ) :
+ _ns( ns ),
+ _or( !query.getField( "$or" ).eoo() ),
+ _query( query.getOwned() ),
+ _fros( ns, _query ),
+ _i(),
+ _honorRecordedPlan( honorRecordedPlan ),
+ _bestGuessOnly( bestGuessOnly ),
+ _hint( ( hint && !hint->eoo() ) ? hint->wrap() : BSONObj() ),
+ _mayYield( mayYield ),
+ _tableScanned() {
if ( !order.isEmpty() || !min.isEmpty() || !max.isEmpty() || !_fros.getSpecial().empty() ) {
_or = false;
}
@@ -695,7 +701,8 @@ namespace mongo {
auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns, _query ) );
auto_ptr< FieldRangeSet > oldFrs( new FieldRangeSet( *frs ) );
_currentQps.reset( new QueryPlanSet( ns, frs, oldFrs, _query, order, hint, honorRecordedPlan, min, max, _bestGuessOnly, _mayYield ) );
- } else {
+ }
+ else {
BSONElement e = _query.getField( "$or" );
massert( 13268, "invalid $or spec", e.type() == Array && e.embeddedObject().nFields() > 0 );
}
@@ -719,7 +726,7 @@ namespace mongo {
_fros.popOrClause( ret->qp().indexed() ? ret->qp().indexKey() : BSONObj() );
return ret;
}
-
+
shared_ptr< QueryOp > MultiPlanScanner::runOp( QueryOp &op ) {
shared_ptr< QueryOp > ret = runOpOnce( op );
while( !ret->stopRequested() && mayRunMore() ) {
@@ -727,7 +734,7 @@ namespace mongo {
}
return ret;
}
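MultiPlanScanner::runOp() consumes one $or clause per iteration until the op requests a stop or no clauses remain. A toy model of that driver loop (strings stand in for clauses and winning plans):

#include <deque>
#include <iostream>
#include <string>

struct ClauseResult { std::string winner; bool stopRequested; };

ClauseResult runOnce(std::deque<std::string> &clauses) {
    std::string c = clauses.front();
    clauses.pop_front();               // popOrClause() analogue
    return {c + ":best-index", false}; // pretend a plan won this clause
}

int main() {
    std::deque<std::string> clauses = {"{a:1}", "{b:2}", "{c:3}"};
    ClauseResult r{"", false};
    while (!clauses.empty() && !r.stopRequested) { // mayRunMore() analogue
        r = runOnce(clauses);
        std::cout << "clause winner: " << r.winner << "\n";
    }
}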
-
+
bool MultiPlanScanner::uselessOr( const BSONElement &hint ) const {
NamespaceDetails *nsd = nsdetails( _ns );
if ( !nsd ) {
@@ -747,7 +754,8 @@ namespace mongo {
if ( id->getSpec().suitability( *i, BSONObj() ) == USELESS ) {
return true;
}
- } else {
+ }
+ else {
bool useful = false;
NamespaceDetails::IndexIterator j = nsd->ii();
while( j.more() ) {
@@ -759,12 +767,12 @@ namespace mongo {
}
if ( !useful ) {
return true;
- }
+ }
}
}
return false;
}
-
+
bool indexWorks( const BSONObj &idxPattern, const BSONObj &sampleKey, int direction, int firstSignificantField ) {
BSONObjIterator p( idxPattern );
BSONObjIterator k( sampleKey );
@@ -795,19 +803,19 @@ namespace mongo {
int idxDirection = e.number() >= 0 ? 1 : -1;
int direction = idxDirection * baseDirection;
switch( direction ) {
- case 1:
- b.appendMaxKey( e.fieldName() );
- break;
- case -1:
- b.appendMinKey( e.fieldName() );
- break;
- default:
- assert( false );
+ case 1:
+ b.appendMaxKey( e.fieldName() );
+ break;
+ case -1:
+ b.appendMinKey( e.fieldName() );
+ break;
+ default:
+ assert( false );
}
}
- return b.obj();
+ return b.obj();
}
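extremeKeyForIndex builds the endpoint key by combining each index field's sign with the requested direction and appending the matching MaxKey/MinKey sentinel. An illustrative standalone version (strings stand in for the BSON sentinels):

#include <iostream>
#include <string>
#include <utility>
#include <vector>

std::vector<std::pair<std::string, const char*>>
extremeKey(const std::vector<std::pair<std::string, int>> &idx, int baseDirection) {
    std::vector<std::pair<std::string, const char*>> key;
    for (const auto &f : idx) {
        int direction = (f.second >= 0 ? 1 : -1) * baseDirection;
        key.push_back({f.first, direction == 1 ? "MaxKey" : "MinKey"});
    }
    return key;
}

int main() {
    // Index {a:1, b:-1}, direction +1 -> {a: MaxKey, b: MinKey}
    for (const auto &kv : extremeKey({{"a", 1}, {"b", -1}}, 1))
        std::cout << kv.first << ": " << kv.second << "\n";
}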
-
+
pair< int, int > keyAudit( const BSONObj &min, const BSONObj &max ) {
int direction = 0;
int firstSignificantField = 0;
@@ -836,18 +844,19 @@ namespace mongo {
pair< int, int > flexibleKeyAudit( const BSONObj &min, const BSONObj &max ) {
if ( min.isEmpty() || max.isEmpty() ) {
return make_pair( 1, -1 );
- } else {
+ }
+ else {
return keyAudit( min, max );
}
}
-
+
// NOTE min, max, and keyPattern will be updated to be consistent with the selected index.
IndexDetails *indexDetailsForRange( const char *ns, string &errmsg, BSONObj &min, BSONObj &max, BSONObj &keyPattern ) {
if ( min.isEmpty() && max.isEmpty() ) {
errmsg = "one of min or max must be specified";
return 0;
}
-
+
Client::Context ctx( ns );
IndexDetails *id = 0;
NamespaceDetails *d = nsdetails( ns );
@@ -855,7 +864,7 @@ namespace mongo {
errmsg = "ns not found";
return 0;
}
-
+
pair< int, int > ret = flexibleKeyAudit( min, max );
if ( ret == make_pair( -1, -1 ) ) {
errmsg = "min and max keys do not share pattern";
@@ -866,15 +875,16 @@ namespace mongo {
while( i.more() ) {
IndexDetails& ii = i.next();
if ( indexWorks( ii.keyPattern(), min.isEmpty() ? max : min, ret.first, ret.second ) ) {
- if ( ii.getSpec().getType() == 0 ){
+ if ( ii.getSpec().getType() == 0 ) {
id = &ii;
keyPattern = ii.keyPattern();
break;
}
}
}
-
- } else {
+
+ }
+ else {
if ( !indexWorks( keyPattern, min.isEmpty() ? max : min, ret.first, ret.second ) ) {
errmsg = "requested keyPattern does not match specified keys";
return 0;
@@ -887,30 +897,31 @@ namespace mongo {
break;
}
if ( keyPattern.nFields() == 1 && ii.keyPattern().nFields() == 1 &&
- IndexDetails::isIdIndexPattern( keyPattern ) &&
- ii.isIdIndex() ){
+ IndexDetails::isIdIndexPattern( keyPattern ) &&
+ ii.isIdIndex() ) {
id = &ii;
break;
}
-
+
}
}
if ( min.isEmpty() ) {
min = extremeKeyForIndex( keyPattern, -1 );
- } else if ( max.isEmpty() ) {
+ }
+ else if ( max.isEmpty() ) {
max = extremeKeyForIndex( keyPattern, 1 );
}
-
+
if ( !id ) {
errmsg = (string)"no index found for specified keyPattern: " + keyPattern.toString();
return 0;
}
-
+
min = min.extractFieldsUnDotted( keyPattern );
max = max.extractFieldsUnDotted( keyPattern );
return id;
}
-
+
} // namespace mongo
diff --git a/db/queryoptimizer.h b/db/queryoptimizer.h
index 9a5e6667496..cf3180a5bc8 100644
--- a/db/queryoptimizer.h
+++ b/db/queryoptimizer.h
@@ -25,14 +25,14 @@
#include "../util/message.h"
namespace mongo {
-
+
class IndexDetails;
class IndexType;
class QueryPlan : boost::noncopyable {
public:
- QueryPlan(NamespaceDetails *d,
+ QueryPlan(NamespaceDetails *d,
int idxNo, // -1 = no index
const FieldRangeSet &fbs,
const FieldRangeSet &originalFrs,
@@ -50,7 +50,7 @@ namespace mongo {
query expression to match by itself without ever checking the main object.
*/
bool exactKeyMatch() const { return _exactKeyMatch; }
- /* If true, the startKey and endKey are unhelpful and the index order doesn't match the
+ /* If true, the startKey and endKey are unhelpful and the index order doesn't match the
requested sort order */
bool unhelpful() const { return _unhelpful; }
int direction() const { return _direction; }
@@ -101,16 +101,17 @@ namespace mongo {
// Used when handing off from one QueryOp type to another
QueryOp( const QueryOp &other ) :
- _complete(), _stopRequested(), _qp(), _error(), _matcher( other._matcher ),
- _orConstraint( other._orConstraint ) {}
-
+ _complete(), _stopRequested(), _qp(), _error(), _matcher( other._matcher ),
+ _orConstraint( other._orConstraint ) {}
+
virtual ~QueryOp() {}
-
+
/** this gets called after a query plan is set */
- void init() {
+ void init() {
if ( _oldMatcher.get() ) {
_matcher.reset( _oldMatcher->nextClauseMatcher( qp().indexKey() ) );
- } else {
+ }
+ else {
_matcher.reset( new CoveredIndexMatcher( qp().originalQuery(), qp().indexKey(), alwaysUseRecord() ) );
}
_init();
@@ -118,12 +119,12 @@ namespace mongo {
virtual void next() = 0;
virtual bool mayRecordPlan() const = 0;
-
+
virtual bool prepareToYield() { massert( 13335, "yield not supported", false ); return false; }
virtual void recoverFromYield() { massert( 13336, "yield not supported", false ); }
-
+
virtual long long nscanned() = 0;
-
+
/** @return a copy of the inheriting class, which will be run with its own
query plan. If multiple plan sets are required for an $or query,
the QueryOp of the winning plan from a given set will be cloned
@@ -159,11 +160,11 @@ namespace mongo {
void setStop() { setComplete(); _stopRequested = true; }
virtual void _init() = 0;
-
+
virtual QueryOp *_createChild() const = 0;
-
+
virtual bool alwaysUseRecord() const { return false; }
-
+
private:
bool _complete;
bool _stopRequested;
@@ -174,7 +175,7 @@ namespace mongo {
shared_ptr< CoveredIndexMatcher > _oldMatcher;
shared_ptr< FieldRangeVector > _orConstraint;
};
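The QueryOp contract above boils down to: the runner calls init() once a plan is chosen, then next() repeatedly until the op flags completion. A compilable toy subclass showing that lifecycle (names are illustrative, not the real classes):

#include <iostream>

class ToyOp {
public:
    virtual ~ToyOp() {}
    void init() { _init(); }  // called once the plan is set
    virtual void next() = 0;  // advance one document
    bool complete() const { return _complete; }
protected:
    void setComplete() { _complete = true; }
    virtual void _init() = 0;
private:
    bool _complete = false;
};

class CountToFive : public ToyOp {
public:
    void next() override { if (++_n >= 5) setComplete(); }
    int count() const { return _n; }
private:
    void _init() override { _n = 0; }
    int _n = 0;
};

int main() {
    CountToFive op;
    op.init();
    while (!op.complete()) op.next();
    std::cout << "scanned " << op.count() << " docs\n"; // scanned 5 docs
}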
-
+
// Set of candidate query plans for a particular query. Used for running
// a QueryOp on these plans.
class QueryPlanSet {
@@ -184,16 +185,16 @@ namespace mongo {
typedef vector< QueryPlanPtr > PlanSet;
QueryPlanSet( const char *ns,
- auto_ptr< FieldRangeSet > frs,
- auto_ptr< FieldRangeSet > originalFrs,
- const BSONObj &originalQuery,
- const BSONObj &order,
- const BSONElement *hint = 0,
- bool honorRecordedPlan = true,
- const BSONObj &min = BSONObj(),
- const BSONObj &max = BSONObj(),
- bool bestGuessOnly = false,
- bool mayYield = false);
+ auto_ptr< FieldRangeSet > frs,
+ auto_ptr< FieldRangeSet > originalFrs,
+ const BSONObj &originalQuery,
+ const BSONObj &order,
+ const BSONElement *hint = 0,
+ bool honorRecordedPlan = true,
+ const BSONObj &min = BSONObj(),
+ const BSONObj &max = BSONObj(),
+ bool bestGuessOnly = false,
+ bool mayYield = false);
int nPlans() const { return _plans.size(); }
shared_ptr< QueryOp > runOp( QueryOp &op );
template< class T >
@@ -275,24 +276,24 @@ namespace mongo {
class MultiPlanScanner {
public:
MultiPlanScanner( const char *ns,
- const BSONObj &query,
- const BSONObj &order,
- const BSONElement *hint = 0,
- bool honorRecordedPlan = true,
- const BSONObj &min = BSONObj(),
- const BSONObj &max = BSONObj(),
- bool bestGuessOnly = false,
- bool mayYield = false);
+ const BSONObj &query,
+ const BSONObj &order,
+ const BSONElement *hint = 0,
+ bool honorRecordedPlan = true,
+ const BSONObj &min = BSONObj(),
+ const BSONObj &max = BSONObj(),
+ bool bestGuessOnly = false,
+ bool mayYield = false);
shared_ptr< QueryOp > runOp( QueryOp &op );
template< class T >
shared_ptr< T > runOp( T &op ) {
return dynamic_pointer_cast< T >( runOp( static_cast< QueryOp& >( op ) ) );
- }
+ }
shared_ptr< QueryOp > runOpOnce( QueryOp &op );
template< class T >
shared_ptr< T > runOpOnce( T &op ) {
return dynamic_pointer_cast< T >( runOpOnce( static_cast< QueryOp& >( op ) ) );
- }
+ }
bool mayRunMore() const { return _or ? ( !_tableScanned && !_fros.orFinished() ) : _i == 0; }
BSONObj oldExplain() const { assertNotOr(); return _currentQps->explain(); }
// just report this when only one query op
@@ -321,21 +322,22 @@ namespace mongo {
bool _mayYield;
bool _tableScanned;
};
-
+
class MultiCursor : public Cursor {
public:
class CursorOp : public QueryOp {
public:
CursorOp() {}
CursorOp( const QueryOp &other ) : QueryOp( other ) {}
- virtual shared_ptr< Cursor > newCursor() const = 0;
+ virtual shared_ptr< Cursor > newCursor() const = 0;
};
// takes ownership of 'op'
MultiCursor( const char *ns, const BSONObj &pattern, const BSONObj &order, shared_ptr< CursorOp > op = shared_ptr< CursorOp >(), bool mayYield = false )
- : _mps( new MultiPlanScanner( ns, pattern, order, 0, true, BSONObj(), BSONObj(), !op.get(), mayYield ) ), _nscanned() {
+ : _mps( new MultiPlanScanner( ns, pattern, order, 0, true, BSONObj(), BSONObj(), !op.get(), mayYield ) ), _nscanned() {
if ( op.get() ) {
_op = op;
- } else {
+ }
+ else {
_op.reset( new NoOp() );
}
if ( _mps->mayRunMore() ) {
@@ -343,13 +345,14 @@ namespace mongo {
if ( !ok() ) {
advance();
}
- } else {
+ }
+ else {
_c.reset( new BasicCursor( DiskLoc() ) );
}
}
// used to hand off a query to a getMore()
MultiCursor( auto_ptr< MultiPlanScanner > mps, const shared_ptr< Cursor > &c, const shared_ptr< CoveredIndexMatcher > &matcher, const QueryOp &op )
- : _op( new NoOp( op ) ), _c( c ), _mps( mps ), _matcher( matcher ), _nscanned( -1 ) {
+ : _op( new NoOp( op ) ), _c( c ), _mps( mps ), _matcher( matcher ), _nscanned( -1 ) {
_mps->setBestGuessOnly();
_mps->mayYield( false ); // with a NoOp, there's no need to yield in QueryPlanSet
if ( !ok() ) {
@@ -375,7 +378,7 @@ namespace mongo {
}
virtual void checkLocation() {
_c->checkLocation();
- }
+ }
virtual bool supportGetMore() { return true; }
virtual bool supportYields() { return _c->supportYields(); }
@@ -383,11 +386,11 @@ namespace mongo {
// indexes, but update appears to already handle this with seenObjects
// so we don't have to do anything special here.
virtual bool getsetdup(DiskLoc loc) {
- return _c->getsetdup( loc );
+ return _c->getsetdup( loc );
}
-
+
virtual bool modifiedKeys() const { return _mps->modifiedKeys(); }
-
+
virtual bool isMultiKey() const { return _mps->hasMultiKey(); }
virtual CoveredIndexMatcher *matcher() const { return _matcher.get(); }
@@ -424,11 +427,11 @@ namespace mongo {
shared_ptr< CoveredIndexMatcher > _matcher;
long long _nscanned;
};
-
+
// NOTE min, max, and keyPattern will be updated to be consistent with the selected index.
IndexDetails *indexDetailsForRange( const char *ns, string &errmsg, BSONObj &min, BSONObj &max, BSONObj &keyPattern );
- inline bool isSimpleIdQuery( const BSONObj& query ){
+ inline bool isSimpleIdQuery( const BSONObj& query ) {
BSONObjIterator i(query);
if( !i.more() ) return false;
BSONElement e = i.next();
@@ -436,12 +439,13 @@ namespace mongo {
if( strcmp("_id", e.fieldName()) != 0 ) return false;
return e.isSimpleType(); // e.g. not something like { _id : { $gt : ...
}
-
+
// matcher() will always work on the returned cursor
inline shared_ptr< Cursor > bestGuessCursor( const char *ns, const BSONObj &query, const BSONObj &sort ) {
if( !query.getField( "$or" ).eoo() ) {
return shared_ptr< Cursor >( new MultiCursor( ns, query, sort ) );
- } else {
+ }
+ else {
auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns, query ) );
auto_ptr< FieldRangeSet > origFrs( new FieldRangeSet( *frs ) );
shared_ptr< Cursor > ret = QueryPlanSet( ns, frs, origFrs, query, sort ).getBestGuess()->newCursor();
@@ -452,5 +456,5 @@ namespace mongo {
return ret;
}
}
-
+
} // namespace mongo
diff --git a/db/queryutil.cpp b/db/queryutil.cpp
index 4ffbc5e6e42..1cd750b0e60 100644
--- a/db/queryutil.cpp
+++ b/db/queryutil.cpp
@@ -27,108 +27,115 @@
namespace mongo {
extern BSONObj staticNull;
-
+
/** returns a string that when used as a matcher, would match a super set of regex()
returns "" for complex regular expressions
used to optimize queries in some simple regex cases that start with '^'
if purePrefix != NULL, sets it to whether the regex can be converted to a range query
*/
- string simpleRegex(const char* regex, const char* flags, bool* purePrefix){
+ string simpleRegex(const char* regex, const char* flags, bool* purePrefix) {
string r = "";
if (purePrefix) *purePrefix = false;
bool multilineOK;
- if ( regex[0] == '\\' && regex[1] == 'A'){
+ if ( regex[0] == '\\' && regex[1] == 'A') {
multilineOK = true;
regex += 2;
- } else if (regex[0] == '^') {
+ }
+ else if (regex[0] == '^') {
multilineOK = false;
regex += 1;
- } else {
+ }
+ else {
return r;
}
bool extended = false;
- while (*flags){
- switch (*(flags++)){
- case 'm': // multiline
- if (multilineOK)
- continue;
- else
- return r;
- case 'x': // extended
- extended = true;
- break;
- default:
- return r; // cant use index
+ while (*flags) {
+ switch (*(flags++)) {
+ case 'm': // multiline
+ if (multilineOK)
+ continue;
+ else
+ return r;
+ case 'x': // extended
+ extended = true;
+ break;
+ default:
+ return r; // can't use index
}
}
stringstream ss;
- while(*regex){
+ while(*regex) {
char c = *(regex++);
- if ( c == '*' || c == '?' ){
+ if ( c == '*' || c == '?' ) {
// These are the only two symbols that make the last char optional
r = ss.str();
r = r.substr( 0 , r.size() - 1 );
return r; //breaking here fails with /^a?/
- } else if (c == '\\'){
+ }
+ else if (c == '\\') {
// slash followed by non-alphanumeric represents the following char
c = *(regex++);
if ((c >= 'A' && c <= 'Z') ||
- (c >= 'a' && c <= 'z') ||
- (c >= '0' && c <= '0') ||
- (c == '\0'))
- {
+ (c >= 'a' && c <= 'z') ||
+ (c >= '0' && c <= '9') ||
+ (c == '\0')) {
r = ss.str();
break;
- } else {
+ }
+ else {
ss << c;
}
- } else if (strchr("^$.[|()+{", c)){
+ }
+ else if (strchr("^$.[|()+{", c)) {
// list of "metacharacters" from man pcrepattern
r = ss.str();
break;
- } else if (extended && c == '#'){
+ }
+ else if (extended && c == '#') {
// comment
r = ss.str();
break;
- } else if (extended && isspace(c)){
+ }
+ else if (extended && isspace(c)) {
continue;
- } else {
+ }
+ else {
// self-matching char
ss << c;
}
}
- if ( r.empty() && *regex == 0 ){
+ if ( r.empty() && *regex == 0 ) {
r = ss.str();
if (purePrefix) *purePrefix = !r.empty();
}
return r;
}
- inline string simpleRegex(const BSONElement& e){
- switch(e.type()){
- case RegEx:
- return simpleRegex(e.regex(), e.regexFlags());
- case Object:{
- BSONObj o = e.embeddedObject();
- return simpleRegex(o["$regex"].valuestrsafe(), o["$options"].valuestrsafe());
- }
- default: assert(false); return ""; //return squashes compiler warning
+ inline string simpleRegex(const BSONElement& e) {
+ switch(e.type()) {
+ case RegEx:
+ return simpleRegex(e.regex(), e.regexFlags());
+ case Object: {
+ BSONObj o = e.embeddedObject();
+ return simpleRegex(o["$regex"].valuestrsafe(), o["$options"].valuestrsafe());
+ }
+ default: assert(false); return ""; //return squashes compiler warning
}
}
string simpleRegexEnd( string regex ) {
++regex[ regex.length() - 1 ];
return regex;
- }
-
-
+ }
+
+
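simpleRegex and simpleRegexEnd together implement the classic prefix-range trick: an anchored pure-prefix regex /^P/ matches exactly the keys in the half-open range [P, P'), where P' is P with its last character incremented. A standalone demonstration (the last-byte-overflow edge case is ignored, as it is in simpleRegexEnd):

#include <iostream>
#include <set>
#include <string>

std::string rangeEnd(std::string prefix) {
    ++prefix[prefix.size() - 1]; // "foo" -> "fop", same trick as simpleRegexEnd
    return prefix;
}

int main() {
    std::set<std::string> index = {"fob", "foo", "food", "fool", "fop", "zoo"};
    std::string lo = "foo", hi = rangeEnd(lo);
    // Everything matching /^foo/ lies in the half-open range [lo, hi):
    for (auto i = index.lower_bound(lo); i != index.lower_bound(hi); ++i)
        std::cout << *i << "\n"; // foo, food, fool
}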
FieldRange::FieldRange( const BSONElement &e, bool isNot, bool optimize ) {
// NOTE with $not, we could potentially form a complementary set of intervals.
if ( !isNot && !e.eoo() && e.type() != RegEx && e.getGtLtOp() == BSONObj::opIN ) {
@@ -140,7 +147,8 @@ namespace mongo {
BSONElement ie = i.next();
if ( ie.type() == RegEx ) {
regexes.push_back( FieldRange( ie, false, optimize ) );
- } else {
+ }
+ else {
vals.insert( ie );
}
}
@@ -150,22 +158,22 @@ namespace mongo {
for( vector< FieldRange >::const_iterator i = regexes.begin(); i != regexes.end(); ++i )
*this |= *i;
-
+
return;
}
-
- if ( e.type() == Array && e.getGtLtOp() == BSONObj::Equality ){
-
+
+ if ( e.type() == Array && e.getGtLtOp() == BSONObj::Equality ) {
+
_intervals.push_back( FieldInterval(e) );
-
+
const BSONElement& temp = e.embeddedObject().firstElement();
- if ( ! temp.eoo() ){
+ if ( ! temp.eoo() ) {
if ( temp < e )
_intervals.insert( _intervals.begin() , temp );
else
_intervals.push_back( FieldInterval(temp) );
}
-
+
return;
}
@@ -184,9 +192,8 @@ namespace mongo {
return;
int op = e.getGtLtOp();
if ( e.type() == RegEx
- || (e.type() == Object && !e.embeddedObject()["$regex"].eoo())
- )
- {
+ || (e.type() == Object && !e.embeddedObject()["$regex"].eoo())
+ ) {
uassert( 13454, "invalid regular expression operator", op == BSONObj::Equality || op == BSONObj::opREGEX );
if ( !isNot ) { // no optimization for negated regex - we could consider creating 2 intervals comprising all nonmatching prefixes
const string r = simpleRegex(e);
@@ -194,7 +201,8 @@ namespace mongo {
lower = addObj( BSON( "" << r ) ).firstElement();
upper = addObj( BSON( "" << simpleRegexEnd( r ) ) ).firstElement();
upperInclusive = false;
- } else {
+ }
+ else {
BSONObjBuilder b1(32), b2(32);
b1.appendMinForType( "" , String );
lower = addObj( b1.obj() ).firstElement();
@@ -205,10 +213,11 @@ namespace mongo {
}
// regex matches self - regex type > string type
- if (e.type() == RegEx){
+ if (e.type() == RegEx) {
BSONElement re = addObj( BSON( "" << e ) ).firstElement();
_intervals.push_back( FieldInterval(re) );
- } else {
+ }
+ else {
BSONObj orig = e.embeddedObject();
BSONObjBuilder b;
b.appendRegex("", orig["$regex"].valuestrsafe(), orig["$options"].valuestrsafe());
@@ -221,32 +230,32 @@ namespace mongo {
}
if ( isNot ) {
switch( op ) {
- case BSONObj::Equality:
- return;
+ case BSONObj::Equality:
+ return;
// op = BSONObj::NE;
// break;
- case BSONObj::opALL:
- case BSONObj::opMOD: // NOTE for mod and type, we could consider having 1-2 intervals comprising the complementary types (multiple intervals already possible with $in)
- case BSONObj::opTYPE:
- // no bound calculation
- return;
- case BSONObj::NE:
- op = BSONObj::Equality;
- break;
- case BSONObj::LT:
- op = BSONObj::GTE;
- break;
- case BSONObj::LTE:
- op = BSONObj::GT;
- break;
- case BSONObj::GT:
- op = BSONObj::LTE;
- break;
- case BSONObj::GTE:
- op = BSONObj::LT;
- break;
- default: // otherwise doesn't matter
- break;
+ case BSONObj::opALL:
+ case BSONObj::opMOD: // NOTE for mod and type, we could consider having 1-2 intervals comprising the complementary types (multiple intervals already possible with $in)
+ case BSONObj::opTYPE:
+ // no bound calculation
+ return;
+ case BSONObj::NE:
+ op = BSONObj::Equality;
+ break;
+ case BSONObj::LT:
+ op = BSONObj::GTE;
+ break;
+ case BSONObj::LTE:
+ op = BSONObj::GT;
+ break;
+ case BSONObj::GT:
+ op = BSONObj::LTE;
+ break;
+ case BSONObj::GTE:
+ op = BSONObj::LT;
+ break;
+ default: // otherwise doesn't matter
+ break;
}
}
switch( op ) {
@@ -280,9 +289,9 @@ namespace mongo {
massert( 10370 , "$all requires array", e.type() == Array );
BSONObjIterator i( e.embeddedObject() );
bool bound = false;
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement x = i.next();
- if ( x.type() == Object && x.embeddedObject().firstElement().getGtLtOp() == BSONObj::opELEM_MATCH ){
+ if ( x.type() == Object && x.embeddedObject().firstElement().getGtLtOp() == BSONObj::opELEM_MATCH ) {
// taken care of elsewhere
}
else if ( x.type() != RegEx ) {
@@ -317,7 +326,7 @@ namespace mongo {
BSONObjBuilder b;
b.appendMaxForType( "" , NumberDouble );
upper = addObj( b.obj() ).firstElement();
- }
+ }
break;
}
case BSONObj::opTYPE: {
@@ -332,7 +341,7 @@ namespace mongo {
b.appendMaxForType( "" , t );
upper = addObj( b.obj() ).firstElement();
}
-
+
break;
}
case BSONObj::opREGEX:
@@ -350,14 +359,14 @@ namespace mongo {
default:
break;
}
-
- if ( optimize ){
- if ( lower.type() != MinKey && upper.type() == MaxKey && lower.isSimpleType() ){ // TODO: get rid of isSimpleType
+
+ if ( optimize ) {
+ if ( lower.type() != MinKey && upper.type() == MaxKey && lower.isSimpleType() ) { // TODO: get rid of isSimpleType
BSONObjBuilder b;
b.appendMaxForType( lower.fieldName() , lower.type() );
upper = addObj( b.obj() ).firstElement();
}
- else if ( lower.type() == MinKey && upper.type() != MaxKey && upper.isSimpleType() ){ // TODO: get rid of isSimpleType
+ else if ( lower.type() == MinKey && upper.type() != MaxKey && upper.isSimpleType() ) { // TODO: get rid of isSimpleType
BSONObjBuilder b;
b.appendMinForType( upper.fieldName() , upper.type() );
lower = addObj( b.obj() ).firstElement();
@@ -373,7 +382,7 @@ namespace mongo {
if ( _special.size() == 0 && other._special.size() )
_special = other._special;
}
-
+
// as called, these functions find the max/min of a bound in the
// opposite direction, so inclusive bounds are considered less
// superlative
@@ -396,7 +405,7 @@ namespace mongo {
result._upper = minFieldBound( one._upper, two._upper );
return result.strictValid();
}
-
+
const FieldRange &FieldRange::operator&=( const FieldRange &other ) {
vector< FieldInterval > newIntervals;
vector< FieldInterval >::const_iterator i = _intervals.begin();
@@ -408,31 +417,34 @@ namespace mongo {
}
if ( i->_upper == minFieldBound( i->_upper, j->_upper ) ) {
++i;
- } else {
+ }
+ else {
++j;
}
}
finishOperation( newIntervals, other );
return *this;
}
-
+
void handleInterval( const FieldInterval &lower, FieldBound &low, FieldBound &high, vector< FieldInterval > &newIntervals ) {
if ( low._bound.eoo() ) {
low = lower._lower; high = lower._upper;
- } else {
+ }
+ else {
int cmp = high._bound.woCompare( lower._lower._bound, false );
if ( ( cmp < 0 ) || ( cmp == 0 && !high._inclusive && !lower._lower._inclusive ) ) {
FieldInterval tmp;
tmp._lower = low;
tmp._upper = high;
newIntervals.push_back( tmp );
- low = lower._lower; high = lower._upper;
- } else {
+ low = lower._lower; high = lower._upper;
+ }
+ else {
high = lower._upper;
}
- }
+ }
}
-
+
const FieldRange &FieldRange::operator|=( const FieldRange &other ) {
vector< FieldInterval > newIntervals;
FieldBound low;
@@ -444,27 +456,28 @@ namespace mongo {
if ( ( cmp == 0 && i->_lower._inclusive ) || cmp < 0 ) {
handleInterval( *i, low, high, newIntervals );
++i;
- } else {
+ }
+ else {
handleInterval( *j, low, high, newIntervals );
++j;
- }
+ }
}
while( i != _intervals.end() ) {
handleInterval( *i, low, high, newIntervals );
- ++i;
+ ++i;
}
while( j != other._intervals.end() ) {
handleInterval( *j, low, high, newIntervals );
- ++j;
+ ++j;
}
FieldInterval tmp;
tmp._lower = low;
tmp._upper = high;
- newIntervals.push_back( tmp );
+ newIntervals.push_back( tmp );
finishOperation( newIntervals, other );
- return *this;
+ return *this;
}
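operator|= above is a sweep that merges two sorted interval lists, extending the current run while intervals overlap and emitting it at each gap. A simplified integer version of the same union sweep (inclusive endpoints, no inclusivity flags):

#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>

using Interval = std::pair<int, int>;

std::vector<Interval> unite(std::vector<Interval> a, const std::vector<Interval> &b) {
    a.insert(a.end(), b.begin(), b.end());
    std::sort(a.begin(), a.end());
    std::vector<Interval> out;
    for (const Interval &iv : a) {
        if (!out.empty() && iv.first <= out.back().second)
            out.back().second = std::max(out.back().second, iv.second); // extend run
        else
            out.push_back(iv); // gap found - start a new run
    }
    return out;
}

int main() {
    for (const Interval &iv : unite({{1, 3}, {7, 9}}, {{2, 5}}))
        std::cout << "[" << iv.first << "," << iv.second << "] "; // [1,5] [7,9]
}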
-
+
const FieldRange &FieldRange::operator-=( const FieldRange &other ) {
vector< FieldInterval > newIntervals;
vector< FieldInterval >::iterator i = _intervals.begin();
@@ -472,47 +485,53 @@ namespace mongo {
while( i != _intervals.end() && j != other._intervals.end() ) {
int cmp = i->_lower._bound.woCompare( j->_lower._bound, false );
if ( cmp < 0 ||
- ( cmp == 0 && i->_lower._inclusive && !j->_lower._inclusive ) ) {
+ ( cmp == 0 && i->_lower._inclusive && !j->_lower._inclusive ) ) {
int cmp2 = i->_upper._bound.woCompare( j->_lower._bound, false );
if ( cmp2 < 0 ) {
newIntervals.push_back( *i );
++i;
- } else if ( cmp2 == 0 ) {
+ }
+ else if ( cmp2 == 0 ) {
newIntervals.push_back( *i );
if ( newIntervals.back()._upper._inclusive && j->_lower._inclusive ) {
newIntervals.back()._upper._inclusive = false;
}
++i;
- } else {
+ }
+ else {
newIntervals.push_back( *i );
newIntervals.back()._upper = j->_lower;
newIntervals.back()._upper.flipInclusive();
int cmp3 = i->_upper._bound.woCompare( j->_upper._bound, false );
if ( cmp3 < 0 ||
- ( cmp3 == 0 && ( !i->_upper._inclusive || j->_upper._inclusive ) ) ) {
+ ( cmp3 == 0 && ( !i->_upper._inclusive || j->_upper._inclusive ) ) ) {
++i;
- } else {
+ }
+ else {
i->_lower = j->_upper;
i->_lower.flipInclusive();
++j;
}
}
- } else {
+ }
+ else {
int cmp2 = i->_lower._bound.woCompare( j->_upper._bound, false );
if ( cmp2 > 0 ||
- ( cmp2 == 0 && ( !i->_lower._inclusive || !j->_upper._inclusive ) ) ) {
+ ( cmp2 == 0 && ( !i->_lower._inclusive || !j->_upper._inclusive ) ) ) {
++j;
- } else {
+ }
+ else {
int cmp3 = i->_upper._bound.woCompare( j->_upper._bound, false );
if ( cmp3 < 0 ||
- ( cmp3 == 0 && ( !i->_upper._inclusive || j->_upper._inclusive ) ) ) {
+ ( cmp3 == 0 && ( !i->_upper._inclusive || j->_upper._inclusive ) ) ) {
++i;
- } else {
+ }
+ else {
i->_lower = j->_upper;
- i->_lower.flipInclusive();
+ i->_lower.flipInclusive();
++j;
}
- }
+ }
}
}
while( i != _intervals.end() ) {
@@ -520,24 +539,24 @@ namespace mongo {
++i;
}
finishOperation( newIntervals, other );
- return *this;
+ return *this;
}
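operator-= walks both interval lists once, clipping each interval of *this against the overlapping intervals of other and keeping the leftover pieces. A toy subtraction over inclusive integer intervals (assumes the subtrahend list is sorted and non-overlapping, as the real invariants guarantee):

#include <iostream>
#include <utility>
#include <vector>

using Interval = std::pair<int, int>;

std::vector<Interval> subtract(const std::vector<Interval> &a,
                               const std::vector<Interval> &b) {
    std::vector<Interval> out;
    for (Interval cur : a) {
        for (const Interval &cut : b) {
            if (cut.second < cur.first || cut.first > cur.second) continue;
            if (cut.first > cur.first)
                out.push_back({cur.first, cut.first - 1}); // piece left of the cut
            cur.first = cut.second + 1;                    // keep scanning the rest
            if (cur.first > cur.second) break;
        }
        if (cur.first <= cur.second) out.push_back(cur);
    }
    return out;
}

int main() {
    // [1,10] minus {[3,4],[8,12]} -> [1,2] [5,7]
    for (const Interval &iv : subtract({{1, 10}}, {{3, 4}, {8, 12}}))
        std::cout << "[" << iv.first << "," << iv.second << "] ";
}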
-
+
// TODO write a proper implementation that doesn't do a full copy
bool FieldRange::operator<=( const FieldRange &other ) {
FieldRange temp = *this;
temp -= other;
return temp.empty();
}
-
+
BSONObj FieldRange::addObj( const BSONObj &o ) {
_objData.push_back( o );
return o;
}
-
+
string FieldRangeSet::getSpecial() const {
string s = "";
- for ( map<string,FieldRange>::iterator i=_ranges.begin(); i!=_ranges.end(); i++ ){
+ for ( map<string,FieldRange>::iterator i=_ranges.begin(); i!=_ranges.end(); i++ ) {
if ( i->second.getSpecial().size() == 0 )
continue;
uassert( 13033 , "can't have 2 special fields" , s.size() == 0 );
@@ -563,34 +582,35 @@ namespace mongo {
}
if ( op2 == BSONObj::opELEM_MATCH ) {
BSONObjIterator k( g.embeddedObjectUserCheck() );
- while ( k.more() ){
+ while ( k.more() ) {
BSONElement h = k.next();
StringBuilder buf(32);
buf << fieldName << "." << h.fieldName();
string fullname = buf.str();
-
+
int op3 = getGtLtOp( h );
- if ( op3 == BSONObj::Equality ){
+ if ( op3 == BSONObj::Equality ) {
_ranges[ fullname ] &= FieldRange( h , isNot , optimize );
}
else {
BSONObjIterator l( h.embeddedObject() );
- while ( l.more() ){
+ while ( l.more() ) {
_ranges[ fullname ] &= FieldRange( l.next() , isNot , optimize );
}
}
- }
- } else {
+ }
+ }
+ else {
_ranges[ fieldName ] &= FieldRange( f , isNot , optimize );
- }
+ }
}
-
+
void FieldRangeSet::processQueryField( const BSONElement &e, bool optimize ) {
bool equality = ( getGtLtOp( e ) == BSONObj::Equality );
if ( equality && e.type() == Object ) {
equality = ( strcmp( e.embeddedObject().firstElement().fieldName(), "$not" ) != 0 );
}
-
+
if ( equality || ( e.type() == Object && !e.embeddedObject()[ "$regex" ].eoo() ) ) {
_ranges[ e.fieldName() ] &= FieldRange( e , false , optimize );
}
@@ -600,65 +620,66 @@ namespace mongo {
BSONElement f = j.next();
if ( strcmp( f.fieldName(), "$not" ) == 0 ) {
switch( f.type() ) {
- case Object: {
- BSONObjIterator k( f.embeddedObject() );
- while( k.more() ) {
- BSONElement g = k.next();
- uassert( 13034, "invalid use of $not", g.getGtLtOp() != BSONObj::Equality );
- processOpElement( e.fieldName(), g, true, optimize );
- }
- break;
+ case Object: {
+ BSONObjIterator k( f.embeddedObject() );
+ while( k.more() ) {
+ BSONElement g = k.next();
+ uassert( 13034, "invalid use of $not", g.getGtLtOp() != BSONObj::Equality );
+ processOpElement( e.fieldName(), g, true, optimize );
}
- case RegEx:
- processOpElement( e.fieldName(), f, true, optimize );
- break;
- default:
- uassert( 13041, "invalid use of $not", false );
+ break;
}
- } else {
+ case RegEx:
+ processOpElement( e.fieldName(), f, true, optimize );
+ break;
+ default:
+ uassert( 13041, "invalid use of $not", false );
+ }
+ }
+ else {
processOpElement( e.fieldName(), f, false, optimize );
}
- }
- }
+ }
+ }
}
-
+
FieldRangeSet::FieldRangeSet( const char *ns, const BSONObj &query , bool optimize )
: _ns( ns ), _queries( 1, query.getOwned() ) {
- BSONObjIterator i( _queries[ 0 ] );
-
- while( i.more() ) {
- BSONElement e = i.next();
- // e could be x:1 or x:{$gt:1}
-
- if ( strcmp( e.fieldName(), "$where" ) == 0 ) {
- continue;
- }
-
- if ( strcmp( e.fieldName(), "$or" ) == 0 ) {
- continue;
- }
-
- if ( strcmp( e.fieldName(), "$nor" ) == 0 ) {
- continue;
- }
-
- processQueryField( e, optimize );
- }
+ BSONObjIterator i( _queries[ 0 ] );
+
+ while( i.more() ) {
+ BSONElement e = i.next();
+ // e could be x:1 or x:{$gt:1}
+
+ if ( strcmp( e.fieldName(), "$where" ) == 0 ) {
+ continue;
+ }
+
+ if ( strcmp( e.fieldName(), "$or" ) == 0 ) {
+ continue;
+ }
+
+ if ( strcmp( e.fieldName(), "$nor" ) == 0 ) {
+ continue;
+ }
+
+ processQueryField( e, optimize );
}
+ }
FieldRangeOrSet::FieldRangeOrSet( const char *ns, const BSONObj &query , bool optimize )
: _baseSet( ns, query, optimize ), _orFound() {
BSONObjIterator i( _baseSet._queries[ 0 ] );
-
+
while( i.more() ) {
BSONElement e = i.next();
- if ( strcmp( e.fieldName(), "$or" ) == 0 ) {
- massert( 13262, "$or requires nonempty array", e.type() == Array && e.embeddedObject().nFields() > 0 );
- BSONObjIterator j( e.embeddedObject() );
- while( j.more() ) {
- BSONElement f = j.next();
- massert( 13263, "$or array must contain objects", f.type() == Object );
+ if ( strcmp( e.fieldName(), "$or" ) == 0 ) {
+ massert( 13262, "$or requires nonempty array", e.type() == Array && e.embeddedObject().nFields() > 0 );
+ BSONObjIterator j( e.embeddedObject() );
+ while( j.more() ) {
+ BSONElement f = j.next();
+ massert( 13263, "$or array must contain objects", f.type() == Object );
_orSets.push_back( FieldRangeSet( ns, f.embeddedObject(), optimize ) );
massert( 13291, "$or may not contain 'special' query", _orSets.back().getSpecial().empty() );
_originalOrSets.push_back( _orSets.back() );
@@ -668,7 +689,7 @@ namespace mongo {
}
}
}
-
+
void FieldRangeOrSet::popOrClause( const BSONObj &indexSpec ) {
massert( 13274, "no or clause to pop", !orFinished() );
auto_ptr< FieldRangeSet > holder;
@@ -686,14 +707,15 @@ namespace mongo {
if( !i->matchPossible() ) {
i = _orSets.erase( i );
j = _originalOrSets.erase( j );
- } else {
+ }
+ else {
++i;
++j;
}
}
_oldOrSets.push_front( _orSets.front() );
_orSets.pop_front();
- _originalOrSets.pop_front();
+ _originalOrSets.pop_front();
}
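popOrClause retires the clause just scanned and subtracts the ranges its index covered from the remaining clauses, so successive $or clauses never return the same document twice. A heavily simplified model on one integer-valued field (assumes any overlap is a prefix of the later clause, which the real range algebra does not require):

#include <deque>
#include <iostream>
#include <utility>

using Range = std::pair<int, int>; // inclusive [lo, hi] on one indexed field

int main() {
    std::deque<Range> clauses = {{1, 10}, {5, 20}, {30, 40}};
    while (!clauses.empty()) {
        Range done = clauses.front();
        clauses.pop_front();
        std::cout << "scan [" << done.first << "," << done.second << "]\n";
        // shrink later clauses that overlap what we already returned
        for (Range &r : clauses)
            if (r.first <= done.second && r.second >= done.first)
                r.first = done.second + 1;
    }
}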
FieldRange *FieldRangeSet::trivialRange_ = 0;
@@ -702,7 +724,7 @@ namespace mongo {
trivialRange_ = new FieldRange();
return *trivialRange_;
}
-
+
BSONObj FieldRangeSet::simplifiedQuery( const BSONObj &_fields ) const {
BSONObj fields = _fields;
if ( fields.isEmpty() ) {
@@ -734,14 +756,15 @@ namespace mongo {
}
return b.obj();
}
-
+
QueryPattern FieldRangeSet::pattern( const BSONObj &sort ) const {
QueryPattern qp;
for( map< string, FieldRange >::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
assert( !i->second.empty() );
if ( i->second.equality() ) {
qp._fieldTypes[ i->first ] = QueryPattern::Equality;
- } else if ( i->second.nontrivial() ) {
+ }
+ else if ( i->second.nontrivial() ) {
bool upper = i->second.max().type() != MaxKey;
bool lower = i->second.min().type() != MinKey;
if ( upper && lower )
@@ -749,18 +772,18 @@ namespace mongo {
else if ( upper )
qp._fieldTypes[ i->first ] = QueryPattern::UpperBound;
else if ( lower )
- qp._fieldTypes[ i->first ] = QueryPattern::LowerBound;
+ qp._fieldTypes[ i->first ] = QueryPattern::LowerBound;
}
}
qp.setSort( sort );
return qp;
}
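pattern() reduces a query to the shape the plan cache keys on: each field collapses to an equality/upper/lower/both-bounds category plus the sort spec, so queries differing only in constants can reuse the same recorded plan. In miniature:

#include <iostream>
#include <map>
#include <string>

enum Bound { Equality, LowerBound, UpperBound, UpperAndLowerBound };

int main() {
    // {age:{$gt:21}, name:"x"} and {age:{$gt:99}, name:"y"} normalize to the
    // same pattern, so the optimizer may reuse the recorded winning plan.
    std::map<std::string, Bound> q1 = {{"age", LowerBound}, {"name", Equality}};
    std::map<std::string, Bound> q2 = {{"age", LowerBound}, {"name", Equality}};
    std::cout << (q1 == q2 ? "same pattern -> reuse plan" : "different") << "\n";
}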
-
+
// TODO get rid of this
BoundList FieldRangeSet::indexBounds( const BSONObj &keyPattern, int direction ) const {
typedef vector< pair< shared_ptr< BSONObjBuilder >, shared_ptr< BSONObjBuilder > > > BoundBuilders;
BoundBuilders builders;
- builders.push_back( make_pair( shared_ptr< BSONObjBuilder >( new BSONObjBuilder() ), shared_ptr< BSONObjBuilder >( new BSONObjBuilder() ) ) );
+ builders.push_back( make_pair( shared_ptr< BSONObjBuilder >( new BSONObjBuilder() ), shared_ptr< BSONObjBuilder >( new BSONObjBuilder() ) ) );
BSONObjIterator i( keyPattern );
bool ineq = false; // until ineq is true, we are just dealing with equality and $in bounds
while( i.more() ) {
@@ -774,7 +797,8 @@ namespace mongo {
j->first->appendAs( fr.min(), "" );
j->second->appendAs( fr.min(), "" );
}
- } else {
+ }
+ else {
if ( !fr.inQuery() ) {
ineq = true;
}
@@ -794,7 +818,8 @@ namespace mongo {
newBuilders.back().first->appendAs( j->_lower._bound, "" );
newBuilders.back().second->appendAs( j->_upper._bound, "" );
}
- } else {
+ }
+ else {
for( vector< FieldInterval >::const_reverse_iterator j = intervals.rbegin(); j != intervals.rend(); ++j ) {
uassert( 13304, "combinatorial limit of $in partitioning of result set exceeded", newBuilders.size() < maxCombinations );
newBuilders.push_back( make_pair( shared_ptr< BSONObjBuilder >( new BSONObjBuilder() ), shared_ptr< BSONObjBuilder >( new BSONObjBuilder() ) ) );
@@ -807,7 +832,8 @@ namespace mongo {
}
builders = newBuilders;
}
- } else {
+ }
+ else {
for( BoundBuilders::const_iterator j = builders.begin(); j != builders.end(); ++j ) {
j->first->appendAs( forward ? fr.min() : fr.max(), "" );
j->second->appendAs( forward ? fr.max() : fr.min(), "" );
@@ -818,8 +844,8 @@ namespace mongo {
for( BoundBuilders::const_iterator i = builders.begin(); i != builders.end(); ++i )
ret.push_back( make_pair( i->first->obj(), i->second->obj() ) );
return ret;
- }
-
+ }
+
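indexBounds expands $in lists into a cartesian product of bound builders, which is why the code guards with a combinatorial limit (maxCombinations). A toy reproduction of that multiplicative growth:

#include <iostream>
#include <string>
#include <vector>

int main() {
    std::vector<std::vector<std::string>> inLists = {
        {"1", "2"},      // a: {$in:[1,2]}
        {"x", "y", "z"}, // b: {$in:[x,y,z]}
    };
    std::vector<std::string> bounds = {""};
    for (const auto &choices : inLists) {
        std::vector<std::string> next;
        for (const std::string &prefix : bounds)
            for (const std::string &v : choices)
                next.push_back(prefix + "(" + v + ")");
        bounds = next; // grows multiplicatively - hence the combinatorial cap
    }
    for (const std::string &b : bounds) std::cout << b << "\n"; // 6 point ranges
}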
FieldRangeSet *FieldRangeSet::subset( const BSONObj &fields ) const {
FieldRangeSet *ret = new FieldRangeSet( _ns, BSONObj() );
BSONObjIterator i( fields );
@@ -832,13 +858,13 @@ namespace mongo {
ret->_queries = _queries;
return ret;
}
-
+
bool FieldRangeVector::matchesElement( const BSONElement &e, int i, bool forward ) const {
bool eq;
int l = matchingLowElement( e, i, forward, eq );
- return ( l % 2 == 0 ); // if we're inside an interval
+ return ( l % 2 == 0 ); // if we're inside an interval
}
-
+
// binary search for interval containing the specified element
// an even return value indicates that the element is contained within a valid interval
int FieldRangeVector::matchingLowElement( const BSONElement &e, int i, bool forward, bool &lowEquality ) const {
@@ -853,7 +879,8 @@ namespace mongo {
if ( m % 2 == 0 ) {
toCmp = interval._lower._bound;
toCmpInclusive = interval._lower._inclusive;
- } else {
+ }
+ else {
toCmp = interval._upper._bound;
toCmpInclusive = interval._upper._inclusive;
}
@@ -863,9 +890,11 @@ namespace mongo {
}
if ( cmp < 0 ) {
l = m;
- } else if ( cmp > 0 ) {
+ }
+ else if ( cmp > 0 ) {
h = m;
- } else {
+ }
+ else {
if ( m % 2 == 0 ) {
lowEquality = true;
}
@@ -883,7 +912,7 @@ namespace mongo {
assert( l + 1 == h );
return l;
}
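matchingLowElement does a binary search over the flattened endpoint list lower0, upper0, lower1, upper1, ...; landing on an even slot means the element sits inside interval slot/2. A minimal integer model (the _inclusive boundary bookkeeping is elided):

#include <algorithm>
#include <iostream>
#include <vector>

// endpoints = lower0, upper0, lower1, upper1, ... sorted ascending
int matchingLow(const std::vector<int> &endpoints, int v) {
    int idx = int(std::upper_bound(endpoints.begin(), endpoints.end(), v)
                  - endpoints.begin()) - 1;
    return idx; // even = inside interval idx/2, odd or -1 = in a gap
}

int main() {
    std::vector<int> eps = {1, 3, 7, 9}; // intervals [1,3] and [7,9]
    for (int v : {0, 2, 5, 8, 10})
        std::cout << v << (matchingLow(eps, v) % 2 == 0 ? " inside" : " outside")
                  << "\n"; // 0, 5, 10 outside; 2, 8 inside
}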
-
+
bool FieldRangeVector::matches( const BSONObj &obj ) const {
if ( !_indexSpec.get() ) {
_indexSpec.reset( new IndexSpec( _keyPattern ) );
@@ -914,7 +943,7 @@ namespace mongo {
}
return false;
}
-
+
// TODO optimize more
int FieldRangeVector::Iterator::advance( const BSONObj &curr ) {
BSONObjIterator j( curr );
@@ -942,14 +971,16 @@ namespace mongo {
int diff = (int)_v._ranges[ i ].intervals().size() - _i[ i ];
if ( diff > 1 ) {
latestNonEndpoint = i;
- } else if ( diff == 1 ) {
+ }
+ else if ( diff == 1 ) {
int x = _v._ranges[ i ].intervals()[ _i[ i ] ]._upper._bound.woCompare( jj, false );
if ( x != 0 ) {
latestNonEndpoint = i;
}
}
continue;
- } else { // not in a valid range for this field - determine if and how to advance
+ }
+ else { // not in a valid range for this field - determine if and how to advance
// check if we're after the last interval for this field
if ( l == (int)_v._ranges[ i ].intervals().size() * 2 - 1 ) {
if ( latestNonEndpoint == -1 ) {
@@ -958,13 +989,13 @@ namespace mongo {
setZero( latestNonEndpoint + 1 );
// skip to curr / latestNonEndpoint + 1 / superlative
_after = true;
- return latestNonEndpoint + 1;
+ return latestNonEndpoint + 1;
}
_i[ i ] = ( l + 1 ) / 2;
if ( lowEquality ) {
// skip to curr / i + 1 / superlative
_after = true;
- return i + 1;
+ return i + 1;
}
// skip to curr / i / nextbounds
_cmp[ i ] = &_v._ranges[ i ].intervals()[ _i[ i ] ]._lower._bound;
@@ -974,7 +1005,7 @@ namespace mongo {
_inc[ j ] = _v._ranges[ j ].intervals().front()._lower._inclusive;
}
_after = false;
- return i;
+ return i;
}
}
bool first = true;
@@ -1011,7 +1042,7 @@ namespace mongo {
setZero( i + 1 );
// skip to curr / i + 1 / superlative
_after = true;
- return i + 1;
+ return i + 1;
}
// if we're less than the lower bound, advance
if ( x > 0 ) {
@@ -1025,7 +1056,8 @@ namespace mongo {
}
_after = false;
return i;
- } else {
+ }
+ else {
break;
}
}
@@ -1038,7 +1070,8 @@ namespace mongo {
if ( diff > 1 || ( !eq && diff == 1 ) ) {
// check if we're not at the end of valid values for this field
latestNonEndpoint = i;
- } else if ( diff == 0 ) { // check if we're past the last interval for this field
+ }
+ else if ( diff == 0 ) { // check if we're past the last interval for this field
if ( latestNonEndpoint == -1 ) {
return -2;
}
@@ -1049,18 +1082,18 @@ namespace mongo {
return latestNonEndpoint + 1;
}
}
- return -1;
+ return -1;
}
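Iterator::advance decides, field by field, whether the current btree key still lies inside the valid intervals and, if not, which field the scan should skip ahead on. A greatly reduced model that only finds the first out-of-range field (the actual skip-target computation is elided):

#include <iostream>
#include <utility>
#include <vector>

using Interval = std::pair<int, int>; // inclusive [lo, hi]

int firstBadField(const std::vector<std::vector<Interval>> &ranges,
                  const std::vector<int> &key) {
    for (int i = 0; i < (int)key.size(); ++i) {
        bool ok = false;
        for (const Interval &iv : ranges[i])
            if (key[i] >= iv.first && key[i] <= iv.second) { ok = true; break; }
        if (!ok) return i; // advance the scan at this field
    }
    return -1; // key matches every field - yield it
}

int main() {
    std::vector<std::vector<Interval>> ranges = {{{1, 3}}, {{10, 20}, {30, 40}}};
    std::cout << firstBadField(ranges, {2, 25}) << "\n"; // 1 (25 outside b ranges)
    std::cout << firstBadField(ranges, {2, 35}) << "\n"; // -1 (matches)
}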
-
+
void FieldRangeVector::Iterator::prepDive() {
for( int j = 0; j < (int)_i.size(); ++j ) {
_cmp[ j ] = &_v._ranges[ j ].intervals().front()._lower._bound;
_inc[ j ] = _v._ranges[ j ].intervals().front()._lower._inclusive;
- }
+ }
}
-
+
struct SimpleRegexUnitTest : UnitTest {
- void run(){
+ void run() {
{
BSONObjBuilder b;
b.appendRegex("r", "^foo");
@@ -1119,34 +1152,34 @@ namespace mongo {
} simple_regex_unittest;
- long long applySkipLimit( long long num , const BSONObj& cmd ){
+ long long applySkipLimit( long long num , const BSONObj& cmd ) {
BSONElement s = cmd["skip"];
BSONElement l = cmd["limit"];
-
- if ( s.isNumber() ){
+
+ if ( s.isNumber() ) {
num = num - s.numberLong();
if ( num < 0 ) {
num = 0;
}
}
-
- if ( l.isNumber() ){
+
+ if ( l.isNumber() ) {
long long limit = l.numberLong();
- if ( limit < num ){
+ if ( limit < num ) {
num = limit;
}
}
- return num;
+ return num;
}
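applySkipLimit is just two clamps: subtract skip with a floor of zero, then cap at limit when one is given. Restated as a standalone function with worked values:

#include <algorithm>
#include <iostream>

long long applySkipLimit(long long num, long long skip, long long limit) {
    num = std::max(0LL, num - skip);
    if (limit >= 0 && limit < num) num = limit; // limit < 0 means "no limit" here
    return num;
}

int main() {
    std::cout << applySkipLimit(100, 30, 50) << "\n"; // 50 (capped by limit)
    std::cout << applySkipLimit(100, 80, 50) << "\n"; // 20 (limit not reached)
    std::cout << applySkipLimit(10, 20, -1) << "\n";  // 0  (skip past the end)
}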
- string debugString( Message& m ){
+ string debugString( Message& m ) {
stringstream ss;
ss << "op: " << opToString( m.operation() ) << " len: " << m.size();
- if ( m.operation() >= 2000 && m.operation() < 2100 ){
+ if ( m.operation() >= 2000 && m.operation() < 2100 ) {
DbMessage d(m);
ss << " ns: " << d.getns();
- switch ( m.operation() ){
+ switch ( m.operation() ) {
case dbUpdate: {
int flags = d.pullInt();
BSONObj q = d.nextJsObj();
@@ -1166,10 +1199,10 @@ namespace mongo {
default:
ss << " CANNOT HANDLE YET";
}
-
-
+
+
}
return ss.str();
- }
+ }
} // namespace mongo
diff --git a/db/queryutil.h b/db/queryutil.h
index cd29e37f021..953a94976b0 100644
--- a/db/queryutil.h
+++ b/db/queryutil.h
@@ -26,7 +26,7 @@ namespace mongo {
bool _inclusive;
bool operator==( const FieldBound &other ) const {
return _bound.woCompare( other._bound ) == 0 &&
- _inclusive == other._inclusive;
+ _inclusive == other._inclusive;
}
void flipInclusive() { _inclusive = !_inclusive; }
};
@@ -77,7 +77,7 @@ namespace mongo {
if ( equality() ) {
return true;
}
- for( vector< FieldInterval >::const_iterator i = _intervals.begin(); i != _intervals.end(); ++i ) {
+ for( vector< FieldInterval >::const_iterator i = _intervals.begin(); i != _intervals.end(); ++i ) {
if ( !i->equality() ) {
return false;
}
@@ -86,14 +86,14 @@ namespace mongo {
}
bool nontrivial() const {
return
- ! empty() &&
+ ! empty() &&
( _intervals.size() != 1 ||
minKey.firstElement().woCompare( min(), false ) != 0 ||
maxKey.firstElement().woCompare( max(), false ) != 0 );
}
bool empty() const { return _intervals.empty(); }
void makeEmpty() { _intervals.clear(); }
- const vector< FieldInterval > &intervals() const { return _intervals; }
+ const vector< FieldInterval > &intervals() const { return _intervals; }
string getSpecial() const { return _special; }
void setExclusiveBounds() {
for( vector< FieldInterval >::iterator i = _intervals.begin(); i != _intervals.end(); ++i ) {
@@ -121,7 +121,7 @@ namespace mongo {
vector< BSONObj > _objData;
string _special;
};
-
+
// implements query pattern matching, used to determine if a query is
// similar to an earlier query and should use the same plan
class QueryPattern {
@@ -192,8 +192,8 @@ namespace mongo {
// the specified direction of traversal. For example, given a simple index {i:1}
// and direction +1, one valid BoundList is: (1, 2); (4, 6). The same BoundList
// would be valid for index {i:-1} with direction -1.
- typedef vector< pair< BSONObj, BSONObj > > BoundList;
-
+ typedef vector< pair< BSONObj, BSONObj > > BoundList;
+
// ranges of field values that may be determined from the query -- used to
// determine index limits
class FieldRangeSet {
@@ -209,13 +209,13 @@ namespace mongo {
map< string, FieldRange >::const_iterator f = _ranges.find( fieldName );
if ( f == _ranges.end() )
return trivialRange();
- return f->second;
+ return f->second;
}
FieldRange &range( const char *fieldName ) {
map< string, FieldRange >::iterator f = _ranges.find( fieldName );
if ( f == _ranges.end() )
return trivialRange();
- return f->second;
+ return f->second;
}
int nNontrivialRanges() const {
int count = 0;
@@ -236,7 +236,7 @@ namespace mongo {
}
QueryPattern pattern( const BSONObj &sort = BSONObj() ) const;
string getSpecial() const;
- // Btree scanning for a multidimentional key range will yield a
+ // Btree scanning for a multidimensional key range will yield a
// multidimensional box. The idea here is that if an 'other'
// multidimensional box contains the current box we don't have to scan
// the current box. If the 'other' box contains the current box in
@@ -258,22 +258,25 @@ namespace mongo {
if ( cmp == 0 ) {
if ( i->second <= j->second ) {
// nothing
- } else {
+ }
+ else {
++nUnincluded;
unincludedKey = i->first;
}
++i;
++j;
- } else if ( cmp < 0 ) {
+ }
+ else if ( cmp < 0 ) {
++i;
- } else {
+ }
+ else {
// other has a bound we don't, nothing can be done
return *this;
}
}
if ( j != other._ranges.end() ) {
// other has a bound we don't, nothing can be done
- return *this;
+ return *this;
}
if ( nUnincluded > 1 ) {
return *this;
@@ -296,23 +299,25 @@ namespace mongo {
i->second &= j->second;
++i;
++j;
- } else if ( cmp < 0 ) {
+ }
+ else if ( cmp < 0 ) {
++i;
- } else {
+ }
+ else {
_ranges[ j->first ] = j->second;
++j;
}
}
while( j != other._ranges.end() ) {
_ranges[ j->first ] = j->second;
- ++j;
+ ++j;
}
appendQueries( other );
return *this;
}
// TODO get rid of this
BoundList indexBounds( const BSONObj &keyPattern, int direction ) const;
-
+
/**
* @return - A new FieldRangeSet based on this FieldRangeSet, but with only
* a subset of the fields.
@@ -323,8 +328,8 @@ namespace mongo {
private:
void appendQueries( const FieldRangeSet &other ) {
for( vector< BSONObj >::const_iterator i = other._queries.begin(); i != other._queries.end(); ++i ) {
- _queries.push_back( *i );
- }
+ _queries.push_back( *i );
+ }
}
void makeEmpty() {
for( map< string, FieldRange >::iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
@@ -342,7 +347,7 @@ namespace mongo {
};
class IndexSpec;
-
+
/**
* This class manages the ranges of valid element values for each field in
* an ordered list of signed fields corresponding to an index specification.
@@ -355,8 +360,7 @@ namespace mongo {
* @direction The direction of index traversal
*/
FieldRangeVector( const FieldRangeSet &frs, const BSONObj &keyPattern, int direction )
- :_keyPattern( keyPattern ), _direction( direction >= 0 ? 1 : -1 )
- {
+ :_keyPattern( keyPattern ), _direction( direction >= 0 ? 1 : -1 ) {
_queries = frs._queries;
BSONObjIterator i( _keyPattern );
while( i.more() ) {
@@ -365,7 +369,8 @@ namespace mongo {
bool forward = ( ( number >= 0 ? 1 : -1 ) * ( direction >= 0 ? 1 : -1 ) > 0 );
if ( forward ) {
_ranges.push_back( frs.range( e.fieldName() ) );
- } else {
+ }
+ else {
_ranges.push_back( FieldRange() );
frs.range( e.fieldName() ).reverse( _ranges.back() );
}
@@ -379,14 +384,14 @@ namespace mongo {
ret *= i->intervals().size();
}
return ret;
- }
+ }
BSONObj startKey() const {
BSONObjBuilder b;
for( vector< FieldRange >::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
const FieldInterval &fi = i->intervals().front();
b.appendAs( fi._lower._bound, "" );
}
- return b.obj();
+ return b.obj();
}
BSONObj endKey() const {
BSONObjBuilder b;
@@ -394,7 +399,7 @@ namespace mongo {
const FieldInterval &fi = i->intervals().back();
b.appendAs( fi._upper._bound, "" );
}
- return b.obj();
+ return b.obj();
}
BSONObj obj() const {
BSONObjBuilder b;
@@ -402,7 +407,7 @@ namespace mongo {
for( int i = 0; i < (int)_ranges.size(); ++i ) {
BSONArrayBuilder a( b.subarrayStart( k.next().fieldName() ) );
for( vector< FieldInterval >::const_iterator j = _ranges[ i ].intervals().begin();
- j != _ranges[ i ].intervals().end(); ++j ) {
+ j != _ranges[ i ].intervals().end(); ++j ) {
a << BSONArray( BSON_ARRAY( j->_lower._bound << j->_upper._bound ).clientReadable() );
}
a.done();
@@ -440,7 +445,8 @@ namespace mongo {
for( unsigned j = i + 1; j < _i.size(); ++j ) {
_i[ j ] = 0;
}
- } else {
+ }
+ else {
_i[ 0 ] = _v._ranges[ 0 ].intervals().size();
}
return ok();
@@ -482,7 +488,7 @@ namespace mongo {
const FieldInterval &fi = _v._ranges[ i ].intervals()[ _i[ i ] ];
b.appendAs( fi._upper._bound, "" );
}
- return b.obj();
+ return b.obj();
}
// check
private:
@@ -502,7 +508,7 @@ namespace mongo {
// This IndexSpec is lazily constructed directly from _keyPattern if needed.
mutable shared_ptr< IndexSpec > _indexSpec;
};
-
+
// generates FieldRangeSet objects, accounting for $or clauses
class FieldRangeOrSet {
public:
@@ -522,7 +528,7 @@ namespace mongo {
void popOrClause( const BSONObj &indexSpec = BSONObj() );
FieldRangeSet *topFrs() const {
FieldRangeSet *ret = new FieldRangeSet( _baseSet );
- if (_orSets.size()){
+ if (_orSets.size()) {
*ret &= _orSets.front();
}
return ret;
@@ -532,10 +538,10 @@ namespace mongo {
// used instead of more precise bounds, they should
FieldRangeSet *topFrsOriginal() const {
FieldRangeSet *ret = new FieldRangeSet( _baseSet );
- if (_originalOrSets.size()){
+ if (_originalOrSets.size()) {
*ret &= _originalOrSets.front();
}
- return ret;
+ return ret;
}
void allClausesSimplified( vector< BSONObj > &ret ) const {
for( list< FieldRangeSet >::const_iterator i = _orSets.begin(); i != _orSets.end(); ++i ) {
@@ -554,7 +560,7 @@ namespace mongo {
list< FieldRangeSet > _oldOrSets; // make sure memory is owned
bool _orFound;
};
-
+
/** returns a string that when used as a matcher, would match a super set of regex()
returns "" for complex regular expressions
used to optimize queries in some simple regex cases that start with '^'
diff --git a/db/repl.cpp b/db/repl.cpp
index 14334a86be8..8a3ff31d565 100644
--- a/db/repl.cpp
+++ b/db/repl.cpp
@@ -49,13 +49,13 @@
#include "repl/rs.h"
namespace mongo {
-
+
// our config from command line etc.
ReplSettings replSettings;
/* if 1 sync() is running */
volatile int syncing = 0;
- static volatile int relinquishSyncingSome = 0;
+ static volatile int relinquishSyncingSome = 0;
/* if true replace our peer in a replication pair -- don't worry about if his
local.oplog.$main is empty.
@@ -68,9 +68,9 @@ namespace mongo {
const char *replAllDead = 0;
time_t lastForcedResync = 0;
-
+
IdTracker &idTracker = *( new IdTracker() );
-
+
} // namespace mongo
#include "replpair.h"
@@ -159,8 +159,8 @@ namespace mongo {
break;
{
dbtemprelease t;
- relinquishSyncingSome = 1;
- sleepmillis(1);
+ relinquishSyncingSome = 1;
+ sleepmillis(1);
}
}
if ( syncing ) {
@@ -206,7 +206,7 @@ namespace mongo {
return true;
}
} cmdForceDead;
-
+
/* operator requested resynchronization of replication (on the slave). { resync : 1 } */
class CmdResync : public Command {
public:
@@ -221,7 +221,7 @@ namespace mongo {
void help(stringstream&h) const { h << "resync (from scratch) an out of date replica slave.\nhttp://www.mongodb.org/display/DOCS/Master+Slave"; }
CmdResync() : Command("resync") { }
virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- if( cmdLine.usingReplSets() ) {
+ if( cmdLine.usingReplSets() ) {
errmsg = "resync command not currently supported with replica sets. See RS102 info in the mongodb documentations";
result.append("info", "http://www.mongodb.org/display/DOCS/Resyncing+a+Very+Stale+Replica+Set+Member");
return false;
@@ -231,18 +231,18 @@ namespace mongo {
if ( !waitForSyncToFinish( errmsg ) )
return false;
replAllDead = "resync forced";
- }
+ }
if ( !replAllDead ) {
errmsg = "not dead, no need to resync";
return false;
}
if ( !waitForSyncToFinish( errmsg ) )
return false;
-
+
ReplSource::forceResyncDead( "client" );
result.append( "info", "triggered resync for all sources" );
- return true;
- }
+ return true;
+ }
bool waitForSyncToFinish( string &errmsg ) const {
// Wait for slave thread to finish syncing, so sources will be
// reloaded with new saved state on next pass.
@@ -252,7 +252,7 @@ namespace mongo {
break;
{
dbtemprelease t;
- relinquishSyncingSome = 1;
+ relinquishSyncingSome = 1;
sleepmillis(1);
}
}
@@ -263,31 +263,31 @@ namespace mongo {
return true;
}
} cmdResync;
-
- bool anyReplEnabled(){
+
+ bool anyReplEnabled() {
return replPair || replSettings.slave || replSettings.master;
}
bool replAuthenticate(DBClientBase *conn);
-
- void appendReplicationInfo( BSONObjBuilder& result , bool authed , int level ){
+
+ void appendReplicationInfo( BSONObjBuilder& result , bool authed , int level ) {
if ( replSet ) {
- if( theReplSet == 0 ) {
+ if( theReplSet == 0 ) {
result.append("ismaster", false);
result.append("secondary", false);
result.append("info", ReplSet::startupStatusMsg);
result.append( "isreplicaset" , true );
return;
}
-
+
theReplSet->fillIsMaster(result);
return;
}
-
+
if ( replAllDead ) {
result.append("ismaster", 0);
- if( authed ) {
+ if( authed ) {
if ( replPair )
result.append("remote", replPair->remote);
}
@@ -306,25 +306,25 @@ namespace mongo {
result.appendBool("ismaster", _isMaster() );
}
- if ( level && replSet ){
+ if ( level && replSet ) {
result.append( "info" , "is replica set" );
}
- else if ( level ){
+ else if ( level ) {
BSONObjBuilder sources( result.subarrayStart( "sources" ) );
-
+
readlock lk( "local.sources" );
Client::Context ctx( "local.sources" );
shared_ptr<Cursor> c = findTableScan("local.sources", BSONObj());
int n = 0;
- while ( c->ok() ){
+ while ( c->ok() ) {
BSONObj s = c->current();
-
+
BSONObjBuilder bb;
bb.append( s["host"] );
string sourcename = s["source"].valuestr();
if ( sourcename != "main" )
bb.append( s["source"] );
-
+
{
BSONElement e = s["syncedTo"];
BSONObjBuilder t( bb.subobjStart( "syncedTo" ) );
@@ -332,8 +332,8 @@ namespace mongo {
t.append( "inc" , e.timestampInc() );
t.done();
}
-
- if ( level > 1 ){
+
+ if ( level > 1 ) {
dbtemprelease unlock;
// note: there is no so-style timeout on this connection; perhaps we should have one.
ScopedDbConnection conn( s["host"].valuestr() );
@@ -352,7 +352,7 @@ namespace mongo {
sources.append( BSONObjBuilder::numStr( n++ ) , bb.obj() );
c->advance();
}
-
+
sources.done();
}
}
@@ -370,11 +370,11 @@ namespace mongo {
virtual LockType locktype() const { return NONE; }
CmdIsMaster() : Command("isMaster", true, "ismaster") { }
virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
- /* currently request to arbiter is (somewhat arbitrarily) an ismaster request that is not
- authenticated.
- we allow unauthenticated ismaster but we aren't as verbose informationally if
- one is not authenticated for admin db to be safe.
- */
+ /* currently a request to an arbiter is (somewhat arbitrarily) an ismaster request that is
+ not authenticated.
+ we allow unauthenticated ismaster, but to be safe we are less informationally
+ verbose when the caller is not authenticated for the admin db.
+ */
bool authed = cc().getAuthenticationInfo()->isAuthorizedReads("admin");
appendReplicationInfo( result , authed );
@@ -396,7 +396,7 @@ namespace mongo {
return true;
}
} cmdisinitialsynccomplete;
-
+
/* negotiate who is master
-1=not set (probably means we just booted)
@@ -496,7 +496,7 @@ namespace mongo {
return true;
}
} cmdnegotiatemaster;
-
+
int ReplPair::negotiate(DBClientConnection *conn, string method) {
BSONObjBuilder b;
b.append("negotiatemaster",1);
@@ -505,7 +505,7 @@ namespace mongo {
b.append("your_port", remotePort);
BSONObj cmd = b.done();
BSONObj res = conn->findOne("admin.$cmd", cmd);
- if ( ! res["ok"].trueValue() ){
+ if ( ! res["ok"].trueValue() ) {
string message = method + " negotiate failed";
problem() << message << ": " << res.toString() << '\n';
setMasterLocked(State_Confused, message.c_str());
@@ -517,7 +517,8 @@ namespace mongo {
// choose who is master.
if ( x != State_Slave && x != State_Master && x != State_Negotiating ) {
problem() << method << " negotiate: bad you_are value " << res.toString() << endl;
- } else if ( x != State_Negotiating ) {
+ }
+ else if ( x != State_Negotiating ) {
string message = method + " negotiation";
setMasterLocked(x, message.c_str());
}
@@ -556,8 +557,8 @@ namespace mongo {
break;
addDbNextPass.insert( e.fieldName() );
}
- }
-
+ }
+
dbsObj = o.getObjectField("incompleteCloneDbs");
if ( !dbsObj.isEmpty() ) {
BSONObjIterator i(dbsObj);
@@ -567,7 +568,7 @@ namespace mongo {
break;
incompleteCloneDbs.insert( e.fieldName() );
}
- }
+ }
_lastSavedLocalTs = OpTime( o.getField( "localLogTs" ).date() );
}
@@ -583,7 +584,7 @@ namespace mongo {
b.appendTimestamp("syncedTo", syncedTo.asDate());
b.appendTimestamp("localLogTs", _lastSavedLocalTs.asDate());
-
+
BSONObjBuilder dbsNextPassBuilder;
int n = 0;
for ( set<string>::iterator i = addDbNextPass.begin(); i != addDbNextPass.end(); i++ ) {
@@ -698,11 +699,12 @@ namespace mongo {
else {
try {
massert( 10384 , "--only requires use of --source", cmdLine.only.empty());
- } catch ( ... ) {
+ }
+ catch ( ... ) {
dbexit( EXIT_BADOPTIONS );
}
}
-
+
if ( replPair ) {
const string &remote = replPair->remote;
// --pairwith host specified.
@@ -744,9 +746,9 @@ namespace mongo {
tmp.syncedTo = OpTime();
tmp.replacing = true;
}
- }
+ }
if ( ( !replPair && tmp.syncedTo.isNull() ) ||
- ( replPair && replSettings.fastsync ) ) {
+ ( replPair && replSettings.fastsync ) ) {
DBDirectClient c;
if ( c.exists( "local.oplog.$main" ) ) {
BSONObj op = c.findOne( "local.oplog.$main", QUERY( "op" << NE << "n" ).sort( BSON( "$natural" << -1 ) ) );
@@ -780,7 +782,7 @@ namespace mongo {
}
return false;
}
-
+
void ReplSource::forceResyncDead( const char *requester ) {
if ( !replAllDead )
return;
@@ -789,9 +791,9 @@ namespace mongo {
for( SourceVector::iterator i = sources.begin(); i != sources.end(); ++i ) {
(*i)->forceResync( requester );
}
- replAllDead = 0;
+ replAllDead = 0;
}
-
+
void ReplSource::forceResync( const char *requester ) {
BSONObj info;
{
@@ -814,7 +816,7 @@ namespace mongo {
}
}
}
- }
+ }
syncedTo = OpTime();
addDbNextPass.clear();
save();
@@ -826,7 +828,7 @@ namespace mongo {
dropDatabase(db);
return db;
}
-
+
/* grab initial copy of a database from the master */
bool ReplSource::resync(string db) {
string dummyNs = resyncDrop( db.c_str(), "internal" );
@@ -855,7 +857,7 @@ namespace mongo {
log() << "sync: caught user assertion " << e << " while applying op: " << op << endl;;
}
catch ( DBException& e ) {
- log() << "sync: caught db exception " << e << " while applying op: " << op << endl;;
+ log() << "sync: caught db exception " << e << " while applying op: " << op << endl;;
}
}
@@ -902,7 +904,8 @@ namespace mongo {
assert( countdown >= 0 );
if( countdown > 0 ) {
countdown--; // was pretouched on a prev pass
- } else {
+ }
+ else {
const int m = 4;
if( tp.get() == 0 ) {
int nthr = min(8, cmdLine.pretouch);
@@ -943,7 +946,7 @@ namespace mongo {
log() << "replAllDead, throwing SyncException: " << replAllDead << endl;
throw SyncException();
}
-
+
Client::Context ctx( ns );
ctx.getClient()->curop()->reset();
@@ -952,14 +955,14 @@ namespace mongo {
if( logLevel >= 6 )
log(6) << "ns: " << ns << ", justCreated: " << ctx.justCreated() << ", empty: " << empty << ", incompleteClone: " << incompleteClone << endl;
-
+
            // always apply admin commands
// this is a bit hacky -- the semantics of replication/commands aren't well specified
if ( strcmp( clientName, "admin" ) == 0 && *op.getStringField( "op" ) == 'c' ) {
applyOperation( op );
return;
}
-
+
if ( ctx.justCreated() || empty || incompleteClone ) {
// we must add to incomplete list now that setClient has been called
incompleteCloneDbs.insert( clientName );
@@ -970,7 +973,8 @@ namespace mongo {
clone 100 databases in one pass.)
*/
addDbNextPass.insert( clientName );
- } else {
+ }
+ else {
if ( incompleteClone ) {
log() << "An earlier initial clone of '" << clientName << "' did not complete, now resyncing." << endl;
}
@@ -982,21 +986,25 @@ namespace mongo {
incompleteCloneDbs.erase( clientName );
}
save();
- } else {
+ }
+ else {
bool mod;
if ( replPair && replPair->state == ReplPair::State_Master ) {
BSONObj id = idForOp( op, mod );
if ( !idTracker.haveId( ns, id ) ) {
- applyOperation( op );
- } else if ( idTracker.haveModId( ns, id ) ) {
+ applyOperation( op );
+ }
+ else if ( idTracker.haveModId( ns, id ) ) {
log( 6 ) << "skipping operation matching mod id object " << op << endl;
BSONObj existing;
if ( Helpers::findOne( ns, id, existing ) )
logOp( "i", ns, existing );
- } else {
+ }
+ else {
log( 6 ) << "skipping operation matching changed id object " << op << endl;
}
- } else {
+ }
+ else {
applyOperation( op );
}
addDbNextPass.erase( clientName );
@@ -1008,33 +1016,33 @@ namespace mongo {
const char *opType = op.getStringField( "op" );
BSONObj o = op.getObjectField( "o" );
switch( opType[ 0 ] ) {
- case 'i': {
- BSONObjBuilder idBuilder;
- BSONElement id;
- if ( !o.getObjectID( id ) )
- return BSONObj();
- idBuilder.append( id );
- return idBuilder.obj();
- }
- case 'u': {
- BSONObj o2 = op.getObjectField( "o2" );
- if ( strcmp( o2.firstElement().fieldName(), "_id" ) != 0 )
- return BSONObj();
- if ( o.firstElement().fieldName()[ 0 ] == '$' )
- mod = true;
- return o2;
- }
- case 'd': {
- if ( opType[ 1 ] != '\0' )
- return BSONObj(); // skip "db" op type
- return o;
- }
- default:
- break;
- }
+ case 'i': {
+ BSONObjBuilder idBuilder;
+ BSONElement id;
+ if ( !o.getObjectID( id ) )
+ return BSONObj();
+ idBuilder.append( id );
+ return idBuilder.obj();
+ }
+ case 'u': {
+ BSONObj o2 = op.getObjectField( "o2" );
+ if ( strcmp( o2.firstElement().fieldName(), "_id" ) != 0 )
+ return BSONObj();
+ if ( o.firstElement().fieldName()[ 0 ] == '$' )
+ mod = true;
+ return o2;
+ }
+ case 'd': {
+ if ( opType[ 1 ] != '\0' )
+ return BSONObj(); // skip "db" op type
+ return o;
+ }
+ default:
+ break;
+ }
return BSONObj();
}
-
+
void ReplSource::updateSetsWithOp( const BSONObj &op, bool mayUnlock ) {
if ( mayUnlock ) {
idTracker.mayUpgradeStorage();
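
The idForOp switch a few lines up maps each oplog entry type to the _id it affects: inserts and deletes key off the document itself, updates key off the o2 match criteria (which must be an {_id: ...} spec) and also flag $-modifier updates. A toy sketch that collapses the BSON details into strings:

    #include <iostream>
    #include <string>

    // Toy stand-in for an oplog entry; the real code works on BSON.
    struct Op {
        std::string opType;   // "i" insert, "u" update, "d" delete, "db", "c", "n"...
        std::string id;       // _id of the affected document ("" if none)
        bool modifierUpdate;  // update doc starts with a $-operator
    };

    // Mirrors the switch above: 'i'/'d' key off the doc, 'u' keys off the
    // match criteria and sets mod, and everything else (commands, no-ops,
    // the "db" op type) yields an empty id, meaning "not tracked".
    std::string idForOp(const Op& op, bool& mod) {
        mod = false;
        switch (op.opType[0]) {
        case 'i': return op.id;
        case 'u': mod = op.modifierUpdate; return op.id;
        case 'd': return op.opType.size() > 1 ? "" : op.id; // skip "db" op type
        default:  return "";
        }
    }

    int main() {
        bool mod;
        Op upd{"u", "42", true};
        std::cout << idForOp(upd, mod) << " mod=" << mod << '\n'; // 42 mod=1
    }
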
@@ -1049,42 +1057,42 @@ namespace mongo {
if ( mod )
idTracker.haveModId( ns, id, true );
idTracker.haveId( ns, id, true );
- }
+ }
}
-
+
void ReplSource::syncToTailOfRemoteLog() {
string _ns = ns();
BSONObjBuilder b;
if ( !only.empty() ) {
b.appendRegex("ns", string("^") + only);
- }
+ }
BSONObj last = oplogReader.findOne( _ns.c_str(), Query( b.done() ).sort( BSON( "$natural" << -1 ) ) );
if ( !last.isEmpty() ) {
BSONElement ts = last.getField( "ts" );
massert( 10386 , "non Date ts found: " + last.toString(), ts.type() == Date || ts.type() == Timestamp );
syncedTo = OpTime( ts.date() );
- }
+ }
}
-
+
OpTime ReplSource::nextLastSavedLocalTs() const {
Client::Context ctx( "local.oplog.$main" );
shared_ptr<Cursor> c = findTableScan( "local.oplog.$main", BSON( "$natural" << -1 ) );
if ( c->ok() )
- return OpTime( c->current().getField( "ts" ).date() );
+ return OpTime( c->current().getField( "ts" ).date() );
return OpTime();
}
-
+
void ReplSource::setLastSavedLocalTs( const OpTime &nextLocalTs ) {
_lastSavedLocalTs = nextLocalTs;
log( 3 ) << "updated _lastSavedLocalTs to: " << _lastSavedLocalTs << endl;
}
-
+
void ReplSource::resetSlave() {
log() << "**********************************************************\n";
log() << "Sending forcedead command to slave to stop its replication\n";
log() << "Host: " << hostName << " paired: " << paired << endl;
massert( 10387 , "request to kill slave replication failed",
- oplogReader.conn()->simpleCommand( "admin", 0, "forcedead" ) );
+ oplogReader.conn()->simpleCommand( "admin", 0, "forcedead" ) );
syncToTailOfRemoteLog();
{
dblock lk;
@@ -1093,7 +1101,7 @@ namespace mongo {
oplogReader.resetCursor();
}
}
-
+
bool ReplSource::updateSetsWithLocalOps( OpTime &localLogTail, bool mayUnlock ) {
Client::Context ctx( "local.oplog.$main" );
shared_ptr<Cursor> localLog = findTableScan( "local.oplog.$main", BSON( "$natural" << -1 ) );
@@ -1119,16 +1127,16 @@ namespace mongo {
dbtemprelease t;
resetSlave();
massert( 10388 , "local master log filled, forcing slave resync", false );
- }
+ }
if ( !newTail.isNull() )
localLogTail = newTail;
return true;
}
-
+
extern unsigned replApplyBatchSize;
/* slave: pull some data from the master's oplog
- note: not yet in db mutex at this point.
+ note: not yet in db mutex at this point.
@return -1 error
0 ok, don't sleep
1 ok, sleep
@@ -1148,7 +1156,7 @@ namespace mongo {
OpTime localLogTail = _lastSavedLocalTs;
bool initial = syncedTo.isNull();
-
+
if ( !oplogReader.haveCursor() || initial ) {
if ( initial ) {
// Important to grab last oplog timestamp before listing databases.
@@ -1174,13 +1182,13 @@ namespace mongo {
dblock lk;
save();
}
-
+
BSONObjBuilder q;
q.appendDate("$gte", syncedTo.asDate());
BSONObjBuilder query;
query.append("ts", q.done());
if ( !only.empty() ) {
- // note we may here skip a LOT of data table scanning, a lot of work for the master.
+            // note we may skip a LOT of data table scanning here - a lot of work saved for the master.
query.appendRegex("ns", string("^") + only); // maybe append "\\." here?
}
BSONObj queryObj = query.done();
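
The builders above assemble, in effect, the query { ts: { $gte: syncedTo }, ns: /^only/ }. The prefix regex is what the inline comment worries about: without an escaped dot, --only foo would also match a database named foobar. An equivalent plain-C++ predicate over a toy oplog, for intuition:

    #include <iostream>
    #include <string>
    #include <vector>

    struct OplogEntry { long long ts; std::string ns; };

    // Equivalent predicate to the query built above:
    //   { ts: { $gte: syncedTo }, ns: /^only/ }
    // The ns test is a bare prefix match, hence the "\\." question above.
    bool wanted(const OplogEntry& e, long long syncedTo, const std::string& only) {
        if (e.ts < syncedTo) return false;
        if (!only.empty() && e.ns.compare(0, only.size(), only) != 0) return false;
        return true;
    }

    int main() {
        std::vector<OplogEntry> log = {{5, "test.foo"}, {9, "prod.bar"}, {12, "prod.baz"}};
        for (const auto& e : log)
            if (wanted(e, 8, "prod"))
                std::cout << e.ns << " @ " << e.ts << '\n'; // prod.bar, prod.baz
    }
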
@@ -1217,7 +1225,8 @@ namespace mongo {
if( oplogReader.awaitCapable() )
okResultCode = 0; // don't sleep
- } else {
+ }
+ else {
log() << "repl: " << ns << " oplog is empty\n";
}
{
@@ -1229,11 +1238,11 @@ namespace mongo {
setLastSavedLocalTs( nextLastSaved );
}
}
- save();
+ save();
}
return okResultCode;
}
-
+
OpTime nextOpTime;
{
BSONObj op = oplogReader.next();
@@ -1256,27 +1265,27 @@ namespace mongo {
massert( 10391 , "repl: bad object read from remote oplog", false);
}
}
-
+
if ( replPair && replPair->state == ReplPair::State_Master ) {
-
+
OpTime next( ts.date() );
if ( !tailing && !initial && next != syncedTo ) {
log() << "remote slave log filled, forcing slave resync" << endl;
resetSlave();
return 1;
- }
-
+ }
+
dblock lk;
updateSetsWithLocalOps( localLogTail, true );
}
-
+
nextOpTime = OpTime( ts.date() );
log(2) << "repl: first op time received: " << nextOpTime.toString() << '\n';
if ( initial ) {
log(1) << "repl: initial run\n";
}
if( tailing ) {
- if( !( syncedTo < nextOpTime ) ) {
+ if( !( syncedTo < nextOpTime ) ) {
log() << "repl ASSERTION failed : syncedTo < nextOpTime" << endl;
log() << "repl syncTo: " << syncedTo.toStringLong() << endl;
log() << "repl nextOpTime: " << nextOpTime.toStringLong() << endl;
@@ -1309,7 +1318,7 @@ namespace mongo {
// apply operations
{
int n = 0;
- time_t saveLast = time(0);
+ time_t saveLast = time(0);
while ( 1 ) {
/* from a.s.:
I think the idea here is that we can establish a sync point between the local op log and the remote log with the following steps:
@@ -1337,7 +1346,8 @@ namespace mongo {
if ( getInitialSyncCompleted() ) { // if initial sync hasn't completed, break out of loop so we can set to completed or clone more dbs
continue;
}
- } else {
+ }
+ else {
setLastSavedLocalTs( nextLastSaved );
}
}
@@ -1353,17 +1363,17 @@ namespace mongo {
else {
}
- OCCASIONALLY if( n > 0 && ( n > 100000 || time(0) - saveLast > 60 ) ) {
- // periodically note our progress, in case we are doing a lot of work and crash
- dblock lk;
+ OCCASIONALLY if( n > 0 && ( n > 100000 || time(0) - saveLast > 60 ) ) {
+ // periodically note our progress, in case we are doing a lot of work and crash
+ dblock lk;
syncedTo = nextOpTime;
// can't update local log ts since there are pending operations from our peer
- save();
+ save();
log() << "repl: checkpoint applied " << n << " operations" << endl;
log() << "repl: syncedTo: " << syncedTo.toStringLong() << endl;
- saveLast = time(0);
- n = 0;
- }
+ saveLast = time(0);
+ n = 0;
+ }
BSONObj op = oplogReader.next();
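
The OCCASIONALLY block above persists progress so a crash mid-batch doesn't force a full re-sync: after more than 100000 applied ops or 60 seconds, syncedTo is checkpointed and the counters reset. A sketch of that cadence (note the real OCCASIONALLY macro only samples the test every so often; this version tests on every op for simplicity):

    #include <ctime>
    #include <iostream>

    // Persist progress when enough work or enough wall time has accumulated,
    // then reset the counters - the checkpoint cadence used above.
    struct Checkpointer {
        long   n    = 0;
        time_t last = time(0);
        template <class SaveFn>
        void noteOp(SaveFn save) {
            ++n;
            if (n > 0 && (n > 100000 || time(0) - last > 60)) {
                save();            // in repl.cpp: take dblock, set syncedTo, save()
                last = time(0);
                n = 0;
            }
        }
    };

    int main() {
        Checkpointer cp;
        for (int i = 0; i < 250000; i++)
            cp.noteOp([] { std::cout << "checkpoint\n"; }); // fires twice
    }
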
@@ -1372,8 +1382,8 @@ namespace mongo {
scoped_ptr<writelock> lk( justOne ? 0 : new writelock() );
while( 1 ) {
- BSONElement ts = op.getField("ts");
- if( !( ts.type() == Date || ts.type() == Timestamp ) ) {
+ BSONElement ts = op.getField("ts");
+ if( !( ts.type() == Date || ts.type() == Timestamp ) ) {
log() << "sync error: problem querying remote oplog record" << endl;
log() << "op: " << op.toString() << endl;
log() << "halting replication" << endl;
@@ -1423,13 +1433,13 @@ namespace mongo {
return okResultCode;
}
- BSONObj userReplQuery = fromjson("{\"user\":\"repl\"}");
-
- bool replAuthenticate(DBClientBase *conn) {
- if( ! cc().isAdmin() ){
- log() << "replauthenticate: requires admin permissions, failing\n";
- return false;
- }
+ BSONObj userReplQuery = fromjson("{\"user\":\"repl\"}");
+
+ bool replAuthenticate(DBClientBase *conn) {
+ if( ! cc().isAdmin() ) {
+ log() << "replauthenticate: requires admin permissions, failing\n";
+ return false;
+ }
string u;
string p;
@@ -1443,8 +1453,8 @@ namespace mongo {
dblock lk;
Client::Context ctxt("local.");
if( !Helpers::findOne("local.system.users", userReplQuery, user) ||
- // try the first user in local
- !Helpers::getSingleton("local.system.users", user) ) {
+ // try the first user in local
+ !Helpers::getSingleton("local.system.users", user) ) {
log() << "replauthenticate: no user in local.system.users to use for authentication\n";
return noauth;
}
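
The lookup above is easy to misread; the intent, per the inline comment, is: prefer a user literally named "repl" in local.system.users, otherwise fall back to the first user there, and with no users at all succeed only when auth is disabled. A standalone sketch of that intended policy (storage modeled as a std::map of user to credential, names hypothetical):

    #include <iostream>
    #include <map>
    #include <string>

    // Sketch of the intended credential lookup: prefer "repl", else the
    // first user; with no users, authentication succeeds only if auth is off.
    bool pickReplUser(const std::map<std::string, std::string>& users,
                      bool noauth, std::string& u, std::string& p) {
        std::map<std::string, std::string>::const_iterator it = users.find("repl");
        if (it == users.end() && !users.empty())
            it = users.begin();             // "try the first user in local"
        if (it == users.end())
            return noauth;                  // nothing to authenticate with
        u = it->first;
        p = it->second;
        return true;
    }

    int main() {
        std::map<std::string, std::string> users;
        users["admin"] = "pwhash";          // hypothetical user/credential
        std::string u, p;
        std::cout << std::boolalpha << pickReplUser(users, false, u, p)
                  << " user=" << u << '\n'; // true user=admin
    }
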
@@ -1454,28 +1464,28 @@ namespace mongo {
massert( 10392 , "bad user object? [1]", !u.empty());
massert( 10393 , "bad user object? [2]", !p.empty());
}
-
- string err;
- if( !conn->auth("local", u.c_str(), p.c_str(), err, false) ) {
- log() << "replauthenticate: can't authenticate to master server, user:" << u << endl;
- return false;
- }
- return true;
- }
+
+ string err;
+ if( !conn->auth("local", u.c_str(), p.c_str(), err, false) ) {
+ log() << "replauthenticate: can't authenticate to master server, user:" << u << endl;
+ return false;
+ }
+ return true;
+ }
bool replHandshake(DBClientConnection *conn) {
-
+
BSONObj me;
{
dblock l;
- if ( ! Helpers::getSingleton( "local.me" , me ) ){
+ if ( ! Helpers::getSingleton( "local.me" , me ) ) {
BSONObjBuilder b;
b.appendOID( "_id" , 0 , true );
me = b.obj();
Helpers::putSingleton( "local.me" , me );
}
}
-
+
BSONObjBuilder cmd;
cmd.appendAs( me["_id"] , "handshake" );
@@ -1491,9 +1501,9 @@ namespace mongo {
_conn = auto_ptr<DBClientConnection>(new DBClientConnection( false, 0, replPair ? 20 : 0 /* tcp timeout */));
string errmsg;
ReplInfo r("trying to connect to sync source");
- if ( !_conn->connect(hostName.c_str(), errmsg) ||
- !replAuthenticate(_conn.get()) ||
- !replHandshake(_conn.get()) ) {
+ if ( !_conn->connect(hostName.c_str(), errmsg) ||
+ !replAuthenticate(_conn.get()) ||
+ !replHandshake(_conn.get()) ) {
resetConnection();
log() << "repl: " << errmsg << endl;
return false;
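
The connect path above is deliberately all-or-nothing: TCP connect, then authenticate, then handshake, and any failure tears the connection back down so the next attempt starts clean. A generic sketch of that chain with standard C++ callables:

    #include <functional>
    #include <iostream>
    #include <vector>

    // Run stages in order; on the first failure, run the reset action and
    // report failure - the pattern used by OplogReader::connect above.
    bool connectChain(const std::vector<std::function<bool()>>& stages,
                      const std::function<void()>& reset) {
        for (const auto& stage : stages) {
            if (!stage()) { reset(); return false; }
        }
        return true;
    }

    int main() {
        bool ok = connectChain(
            { [] { std::cout << "tcp connect\n"; return true;  },
              [] { std::cout << "auth\n";        return true;  },
              [] { std::cout << "handshake\n";   return false; } },
            [] { std::cout << "resetConnection()\n"; });
        std::cout << (ok ? "connected" : "failed") << '\n';
    }
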
@@ -1501,7 +1511,7 @@ namespace mongo {
}
return true;
}
-
+
/* note: not yet in mutex at this point.
returns >= 0 if ok. return -1 if you want to reconnect.
return value of zero indicates no sleep necessary before next call
@@ -1527,14 +1537,14 @@ namespace mongo {
}
if ( !oplogReader.connect(hostName) ) {
- log(4) << "repl: can't connect to sync source" << endl;
+ log(4) << "repl: can't connect to sync source" << endl;
if ( replPair && paired ) {
assert( startsWith(hostName.c_str(), replPair->remoteHost.c_str()) );
replPair->arbitrate();
}
return -1;
}
-
+
if ( paired ) {
int remote = replPair->negotiate(oplogReader.conn(), "direct");
int nMasters = ( remote == ReplPair::State_Master ) + ( replPair->state == ReplPair::State_Master );
@@ -1545,17 +1555,17 @@ namespace mongo {
}
/*
- // get current mtime at the server.
- BSONObj o = conn->findOne("admin.$cmd", opTimeQuery);
- BSONElement e = o.getField("optime");
- if( e.eoo() ) {
- log() << "repl: failed to get cur optime from master" << endl;
- log() << " " << o.toString() << endl;
- return false;
- }
- uassert( 10124 , e.type() == Date );
- OpTime serverCurTime;
- serverCurTime.asDate() = e.date();
+ // get current mtime at the server.
+ BSONObj o = conn->findOne("admin.$cmd", opTimeQuery);
+ BSONElement e = o.getField("optime");
+ if( e.eoo() ) {
+ log() << "repl: failed to get cur optime from master" << endl;
+ log() << " " << o.toString() << endl;
+ return false;
+ }
+ uassert( 10124 , e.type() == Date );
+ OpTime serverCurTime;
+ serverCurTime.asDate() = e.date();
*/
return sync_pullOpLog(nApplied);
}
@@ -1568,7 +1578,7 @@ namespace mongo {
_ reuse that cursor when we can
*/
- /* returns: # of seconds to sleep before next pass
+ /* returns: # of seconds to sleep before next pass
0 = no sleep recommended
1 = special sentinel indicating adaptive sleep recommended
*/
@@ -1595,7 +1605,7 @@ namespace mongo {
try {
res = s->sync(nApplied);
bool moreToSync = s->haveMoreDbsToSync();
- if( res < 0 ) {
+ if( res < 0 ) {
sleepAdvice = 3;
}
else if( moreToSync ) {
@@ -1604,7 +1614,7 @@ namespace mongo {
else if ( s->sleepAdvice() ) {
sleepAdvice = s->sleepAdvice();
}
- else
+ else
sleepAdvice = res;
if ( res >= 0 && !moreToSync /*&& !s->syncedTo.isNull()*/ ) {
pairSync->setInitialSyncCompletedLocking();
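
The advice values in _replMain above are worth spelling out: a sync error backs off a fixed 3 seconds, more databases pending means go again immediately, an explicit advice from the source wins, and otherwise the sync result (0, or the adaptive-sleep sentinel 1) passes straight through. A distilled sketch of that priority order:

    #include <iostream>

    // Same precedence as the if/else ladder above.
    int sleepAdviceFor(int res, bool moreToSync, int sourceAdvice) {
        if (res < 0)      return 3;            // error: back off 3s
        if (moreToSync)   return 0;            // more dbs queued: no sleep
        if (sourceAdvice) return sourceAdvice; // source-specified wait
        return res;                            // 0 or the sentinel 1
    }

    int main() {
        std::cout << sleepAdviceFor(-1, false, 0) << '\n'; // 3
        std::cout << sleepAdviceFor(1,  true,  0) << '\n'; // 0
        std::cout << sleepAdviceFor(1,  false, 7) << '\n'; // 7
    }
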
@@ -1630,9 +1640,9 @@ namespace mongo {
}
catch ( const std::exception &e ) {
log() << "repl: std::exception " << e.what() << endl;
- replInfo = "replMain caught std::exception";
+ replInfo = "replMain caught std::exception";
}
- catch ( ... ) {
+ catch ( ... ) {
log() << "unexpected exception during replication. replication will halt" << endl;
replAllDead = "caught unexpected exception during replication";
}
@@ -1658,15 +1668,16 @@ namespace mongo {
try {
int nApplied = 0;
s = _replMain(sources, nApplied);
- if( s == 1 ) {
+ if( s == 1 ) {
if( nApplied == 0 ) s = 2;
- else if( nApplied > 100 ) {
+ else if( nApplied > 100 ) {
                    // sleep very little - just enough that we aren't truly hammering the master
sleepmillis(75);
s = 0;
}
}
- } catch (...) {
+ }
+ catch (...) {
out() << "caught exception in _replMain" << endl;
s = 4;
}
@@ -1676,10 +1687,10 @@ namespace mongo {
syncing--;
}
- if( relinquishSyncingSome ) {
- relinquishSyncingSome = 0;
- s = 1; // sleep before going back in to syncing=1
- }
+ if( relinquishSyncingSome ) {
+ relinquishSyncingSome = 0;
+ s = 1; // sleep before going back in to syncing=1
+ }
if ( s ) {
stringstream ss;
@@ -1702,21 +1713,21 @@ namespace mongo {
while( 1 ) {
sleepsecs( toSleep );
- /* write a keep-alive like entry to the log. this will make things like
+ /* write a keep-alive like entry to the log. this will make things like
printReplicationStatus() and printSlaveReplicationStatus() stay up-to-date
even when things are idle.
*/
{
writelocktry lk("",1);
- if ( lk.got() ){
+ if ( lk.got() ) {
toSleep = 10;
-
- cc().getAuthenticationInfo()->authorize("admin");
-
- try {
+
+ cc().getAuthenticationInfo()->authorize("admin");
+
+ try {
logKeepalive();
}
- catch(...) {
+ catch(...) {
log() << "caught exception in replMasterThread()" << endl;
}
}
@@ -1732,11 +1743,11 @@ namespace mongo {
sleepsecs(1);
Client::initThread("replslave");
cc().iAmSyncThread();
-
+
{
dblock lk;
cc().getAuthenticationInfo()->authorize("admin");
-
+
BSONObj obj;
if ( Helpers::getSingleton("local.pair.startup", obj) ) {
// should be: {replacepeer:1}
@@ -1772,7 +1783,7 @@ namespace mongo {
void startReplication() {
/* if we are going to be a replica set, we aren't doing other forms of replication. */
if( !cmdLine._replSet.empty() ) {
- if( replSettings.slave || replSettings.master || replPair ) {
+ if( replSettings.slave || replSettings.master || replPair ) {
log() << "***" << endl;
log() << "ERROR: can't use --slave or --master replication options with --replSet" << endl;
log() << "***" << endl;
@@ -1814,7 +1825,7 @@ namespace mongo {
createOplog();
boost::thread t(replMasterThread);
}
-
+
while( replSettings.fastsync ) // don't allow writes until we've set up from log
sleepmillis( 50 );
}
@@ -1848,23 +1859,23 @@ namespace mongo {
}
tp.join();
}
-
+
class ReplApplyBatchSizeValidator : public ParameterValidator {
public:
- ReplApplyBatchSizeValidator() : ParameterValidator( "replApplyBatchSize" ){}
+ ReplApplyBatchSizeValidator() : ParameterValidator( "replApplyBatchSize" ) {}
- virtual bool isValid( BSONElement e , string& errmsg ){
+ virtual bool isValid( BSONElement e , string& errmsg ) {
int b = e.numberInt();
- if( b < 1 || b > 1024 ) {
+ if( b < 1 || b > 1024 ) {
errmsg = "replApplyBatchSize has to be >= 1 and < 1024";
return false;
}
-
- if ( replSettings.slavedelay != 0 && b > 1 ){
+
+ if ( replSettings.slavedelay != 0 && b > 1 ) {
errmsg = "can't use a batch size > 1 with slavedelay";
return false;
}
- if ( ! replSettings.slave ){
+ if ( ! replSettings.slave ) {
errmsg = "can't set replApplyBatchSize on a non-slave machine";
return false;
}
diff --git a/db/repl.h b/db/repl.h
index fe0cb1d628d..45036fa30d7 100644
--- a/db/repl.h
+++ b/db/repl.h
@@ -40,16 +40,16 @@
namespace mongo {
- /* replication slave? (possibly with slave or repl pair nonmaster)
+ /* replication slave? (possibly with slave or repl pair nonmaster)
--slave cmd line setting -> SimpleSlave
- */
- typedef enum { NotSlave=0, SimpleSlave, ReplPairSlave } SlaveTypes;
+ */
+ typedef enum { NotSlave=0, SimpleSlave, ReplPairSlave } SlaveTypes;
class ReplSettings {
public:
SlaveTypes slave;
- /* true means we are master and doing replication. if we are not writing to oplog (no --master or repl pairing),
+ /* true means we are master and doing replication. if we are not writing to oplog (no --master or repl pairing),
this won't be true.
*/
bool master;
@@ -57,9 +57,9 @@ namespace mongo {
int opIdMem;
bool fastsync;
-
+
bool autoresync;
-
+
int slavedelay;
ReplSettings()
@@ -69,14 +69,14 @@ namespace mongo {
};
extern ReplSettings replSettings;
-
- bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication,
- bool slaveOk, bool useReplAuth, bool snapshot);
+
+ bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication,
+ bool slaveOk, bool useReplAuth, bool snapshot);
/* A replication exception */
class SyncException : public DBException {
public:
- SyncException() : DBException( "sync exception" , 10001 ){}
+ SyncException() : DBException( "sync exception" , 10001 ) {}
};
/* A Source is a source from which we can pull (replicate) data.
@@ -112,7 +112,7 @@ namespace mongo {
set<string> incompleteCloneDbs;
ReplSource();
-
+
// returns the dummy ns used to do the drop
string resyncDrop( const char *db, const char *requester );
// returns possibly unowned id spec for the operation.
@@ -130,7 +130,7 @@ namespace mongo {
bool updateSetsWithLocalOps( OpTime &localLogTail, bool mayUnlock );
string ns() const { return string( "local.oplog.$" ) + sourceName(); }
unsigned _sleepAdviceTime;
-
+
public:
OplogReader oplogReader;
@@ -147,8 +147,8 @@ namespace mongo {
/* This is for repl pairs.
_lastSavedLocalTs is the most recent point in the local log that we know is consistent
- with the remote log ( ie say the local op log has entries ABCDE and the remote op log
- has ABCXY, then _lastSavedLocalTs won't be greater than C until we have reconciled
+ with the remote log ( ie say the local op log has entries ABCDE and the remote op log
+ has ABCXY, then _lastSavedLocalTs won't be greater than C until we have reconciled
the DE-XY difference.)
*/
OpTime _lastSavedLocalTs;
@@ -172,15 +172,15 @@ namespace mongo {
return hostName == r.hostName && sourceName() == r.sourceName();
}
string toString() const { return sourceName() + "@" + hostName; }
-
- bool haveMoreDbsToSync() const { return !addDbNextPass.empty(); }
+
+ bool haveMoreDbsToSync() const { return !addDbNextPass.empty(); }
int sleepAdvice() const {
if ( !_sleepAdviceTime )
return 0;
int wait = _sleepAdviceTime - unsigned( time( 0 ) );
return wait > 0 ? wait : 0;
}
-
+
static bool throttledForceResyncDead( const char *requester );
static void forceResyncDead( const char *requester );
void forceResync( const char *requester );
@@ -201,7 +201,8 @@ namespace mongo {
if ( imp_[ ns ].insert( id.getOwned() ).second ) {
size_ += id.objsize() + sizeof( BSONObj );
}
- } else {
+ }
+ else {
if ( imp_[ ns ].erase( id ) == 1 ) {
size_ -= id.objsize() + sizeof( BSONObj );
}
@@ -237,7 +238,7 @@ namespace mongo {
// rename _id to id since there may be duplicates
b.appendAs( id.firstElement(), "id" );
return b.obj();
- }
+ }
DbSet impl_;
};
@@ -245,14 +246,14 @@ namespace mongo {
// All functions must be called with db mutex held
// Kind of sloppy class structure, for now just want to keep the in mem
// version speedy.
- // see http://www.mongodb.org/display/DOCS/Pairing+Internals
+ // see http://www.mongodb.org/display/DOCS/Pairing+Internals
class IdTracker {
public:
IdTracker() :
- dbIds_( "local.temp.replIds" ),
- dbModIds_( "local.temp.replModIds" ),
- inMem_( true ),
- maxMem_( replSettings.opIdMem ) {
+ dbIds_( "local.temp.replIds" ),
+ dbModIds_( "local.temp.replModIds" ),
+ inMem_( true ),
+ maxMem_( replSettings.opIdMem ) {
}
void reset( int maxMem = replSettings.opIdMem ) {
memIds_.reset();
@@ -310,7 +311,7 @@ namespace mongo {
void upgrade( MemIds &a, DbIds &b ) {
for( MemIds::IdSets::const_iterator i = a.imp_.begin(); i != a.imp_.end(); ++i ) {
for( BSONObjSetDefaultOrder::const_iterator j = i->second.begin(); j != i->second.end(); ++j ) {
- set( b, i->first.c_str(), *j, true );
+ set( b, i->first.c_str(), *j, true );
RARELY {
dbtemprelease t;
}
@@ -324,9 +325,9 @@ namespace mongo {
bool inMem_;
int maxMem_;
};
-
+
bool anyReplEnabled();
void appendReplicationInfo( BSONObjBuilder& result , bool authed , int level = 0 );
-
-
+
+
} // namespace mongo
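
The IdTracker declared above keeps per-namespace id sets in memory until the replSettings.opIdMem budget is exhausted, then "upgrades" them to database-backed sets, which stay in use until reset(). A compact sketch of that spill policy (a single flat set, with sizes approximated by string length):

    #include <iostream>
    #include <set>
    #include <string>

    // Ids live in memory until the tracked size passes maxMem, then all of
    // them migrate to a db-backed set (modeled by another std::set here).
    class Tracker {
        std::set<std::string> mem_, db_;
        size_t size_;
        size_t maxMem_;
        bool inMem_;
    public:
        explicit Tracker(size_t maxMem) : size_(0), maxMem_(maxMem), inMem_(true) {}
        void haveId(const std::string& id) {
            if (inMem_) {
                if (mem_.insert(id).second) size_ += id.size();
            } else {
                db_.insert(id);          // stand-in for the on-disk set
            }
        }
        void mayUpgradeStorage() {       // analogous to mayUpgradeStorage() above
            if (inMem_ && size_ > maxMem_) {
                db_.insert(mem_.begin(), mem_.end());
                mem_.clear();
                inMem_ = false;
            }
        }
        bool usingMemory() const { return inMem_; }
    };

    int main() {
        Tracker t(10);
        t.haveId("aaaaaa"); t.haveId("bbbbbb");
        t.mayUpgradeStorage();
        std::cout << std::boolalpha << t.usingMemory() << '\n'; // false: spilled
    }
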
diff --git a/db/repl/connections.h b/db/repl/connections.h
index 428b8302491..7e7bfe5f381 100644
--- a/db/repl/connections.h
+++ b/db/repl/connections.h
@@ -1,4 +1,4 @@
-// @file
+// @file
/*
* Copyright (C) 2010 10gen Inc.
@@ -22,10 +22,10 @@
#include "../../client/dbclient.h"
#include "../security_key.h"
-namespace mongo {
+namespace mongo {
- /** here we keep a single connection (with reconnect) for a set of hosts,
- one each, and allow one user at a time per host. if in use already for that
+ /** here we keep a single connection (with reconnect) for a set of hosts,
+ one each, and allow one user at a time per host. if in use already for that
host, we block. so this is an easy way to keep a 1-deep pool of connections
that many threads can share.
@@ -40,24 +40,24 @@ namespace mongo {
throws exception on connect error (but fine to try again later with a new
scopedconn object for same host).
*/
- class ScopedConn {
+ class ScopedConn {
public:
/** throws assertions if connect failure etc. */
ScopedConn(string hostport);
~ScopedConn();
/* If we were to run a query and not exhaust the cursor, future use of the connection would be problematic.
- So here what we do is wrapper known safe methods and not allow cursor-style queries at all. This makes
+           So here what we do is wrap known safe methods and not allow cursor-style queries at all.  This makes
ScopedConn limited in functionality but very safe. More non-cursor wrappers can be added here if needed.
*/
bool runCommand(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0) {
return conn()->runCommand(dbname, cmd, info, options);
}
- unsigned long long count(const string &ns) {
- return conn()->count(ns);
+ unsigned long long count(const string &ns) {
+ return conn()->count(ns);
}
- BSONObj findOne(const string &ns, const Query& q, const BSONObj *fieldsToReturn = 0, int queryOptions = 0) {
+ BSONObj findOne(const string &ns, const Query& q, const BSONObj *fieldsToReturn = 0, int queryOptions = 0) {
return conn()->findOne(ns, q, fieldsToReturn, queryOptions);
}
void setTimeout(double to) {
@@ -67,10 +67,10 @@ namespace mongo {
private:
auto_ptr<scoped_lock> connLock;
static mongo::mutex mapMutex;
- struct X {
+ struct X {
mongo::mutex z;
DBClientConnection cc;
- X() : z("X"), cc(/*reconnect*/ true, 0, /*timeout*/ 10.0) {
+ X() : z("X"), cc(/*reconnect*/ true, 0, /*timeout*/ 10.0) {
cc._logLevel = 2;
}
} *x;
@@ -90,7 +90,7 @@ namespace mongo {
connLock.reset( new scoped_lock(x->z) );
}
}
- if( !first ) {
+ if( !first ) {
connLock.reset( new scoped_lock(x->z) );
return;
}
@@ -108,12 +108,12 @@ namespace mongo {
}
}
- inline ScopedConn::~ScopedConn() {
+ inline ScopedConn::~ScopedConn() {
        // connLock releases...
}
- /*inline DBClientConnection* ScopedConn::operator->() {
- return &x->cc;
+ /*inline DBClientConnection* ScopedConn::operator->() {
+ return &x->cc;
}*/
}
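
connections.h above describes a "1-deep pool": one connection object per host, one user at a time per host, blocking otherwise, with a global mutex guarding only the host table itself. A standalone sketch of that structure with standard mutexes (the connection is modeled as a string):

    #include <iostream>
    #include <map>
    #include <memory>
    #include <mutex>
    #include <string>

    // One slot per host: the per-slot mutex serializes users of that host's
    // single connection, like the per-X mutex z in ScopedConn above.
    struct Slot {
        std::mutex  inUse;
        std::string conn;
    };

    std::mutex mapMutex;                                 // guards the table only
    std::map<std::string, std::shared_ptr<Slot>> slots;

    // RAII guard analogous to ScopedConn: construction blocks until the
    // host's single connection is free; destruction releases it.
    class ScopedSlot {
        std::shared_ptr<Slot> s_;
        std::unique_lock<std::mutex> lk_;
    public:
        explicit ScopedSlot(const std::string& hostport) {
            {
                std::lock_guard<std::mutex> g(mapMutex);
                std::shared_ptr<Slot>& p = slots[hostport];
                if (!p) { p.reset(new Slot); p->conn = "connection to " + hostport; }
                s_ = p;
            }
            lk_ = std::unique_lock<std::mutex>(s_->inUse); // blocks if host busy
        }
        const std::string& conn() const { return s_->conn; }
    };

    int main() {
        ScopedSlot c("db1.example.net:27017");           // hypothetical host
        std::cout << c.conn() << '\n';
    }
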
diff --git a/db/repl/consensus.cpp b/db/repl/consensus.cpp
index 479526c49bf..54bccf015bd 100644
--- a/db/repl/consensus.cpp
+++ b/db/repl/consensus.cpp
@@ -19,9 +19,9 @@
#include "rs.h"
#include "multicmd.h"
-namespace mongo {
+namespace mongo {
- class CmdReplSetFresh : public ReplSetCommand {
+ class CmdReplSetFresh : public ReplSetCommand {
public:
CmdReplSetFresh() : ReplSetCommand("replSetFresh") { }
private:
@@ -29,23 +29,23 @@ namespace mongo {
if( !check(errmsg, result) )
return false;
- if( cmdObj["set"].String() != theReplSet->name() ) {
+ if( cmdObj["set"].String() != theReplSet->name() ) {
errmsg = "wrong repl set name";
return false;
}
string who = cmdObj["who"].String();
int cfgver = cmdObj["cfgver"].Int();
- OpTime opTime(cmdObj["opTime"].Date());
+ OpTime opTime(cmdObj["opTime"].Date());
bool weAreFresher = false;
- if( theReplSet->config().version > cfgver ) {
+ if( theReplSet->config().version > cfgver ) {
log() << "replSet member " << who << " is not yet aware its cfg version " << cfgver << " is stale" << rsLog;
- result.append("info", "config version stale");
+ result.append("info", "config version stale");
+ weAreFresher = true;
+ }
+ else if( opTime < theReplSet->lastOpTimeWritten ) {
weAreFresher = true;
}
- else if( opTime < theReplSet->lastOpTimeWritten ) {
- weAreFresher = true;
- }
result.appendDate("opTime", theReplSet->lastOpTimeWritten.asDate());
result.append("fresher", weAreFresher);
return true;
@@ -66,10 +66,10 @@ namespace mongo {
}
} cmdReplSetElect;
- int Consensus::totalVotes() const {
+ int Consensus::totalVotes() const {
static int complain = 0;
int vTot = rs._self->config().votes;
- for( Member *m = rs.head(); m; m=m->next() )
+ for( Member *m = rs.head(); m; m=m->next() )
vTot += m->config().votes;
if( vTot % 2 == 0 && vTot && complain++ == 0 )
log() << "replSet " /*buildbot! warning */ "total number of votes is even - add arbiter or give one member an extra vote" << rsLog;
@@ -78,7 +78,7 @@ namespace mongo {
bool Consensus::aMajoritySeemsToBeUp() const {
int vUp = rs._self->config().votes;
- for( Member *m = rs.head(); m; m=m->next() )
+ for( Member *m = rs.head(); m; m=m->next() )
vUp += m->hbinfo().up() ? m->config().votes : 0;
return vUp * 2 > totalVotes();
}
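
aMajoritySeemsToBeUp above is the heart of election safety: the votes of reachable members must be a strict majority of all votes, which is also why totalVotes warns on an even vote count (a 2-2 partition can never elect anyone). A self-contained version with a worked case:

    #include <iostream>
    #include <vector>

    struct M { int votes; bool up; };

    // Sum the votes of members that look up; a strict majority is required,
    // so ties lose - exactly the vUp * 2 > totalVotes() test above.
    bool aMajoritySeemsToBeUp(const std::vector<M>& members) {
        int vTot = 0, vUp = 0;
        for (const M& m : members) {
            vTot += m.votes;
            if (m.up) vUp += m.votes;
        }
        return vUp * 2 > vTot;
    }

    int main() {
        // 3 voting members, one down: 2 of 3 votes up -> majority holds.
        std::vector<M> set = {{1, true}, {1, true}, {1, false}};
        std::cout << std::boolalpha << aMajoritySeemsToBeUp(set) << '\n'; // true
    }
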
@@ -98,13 +98,13 @@ namespace mongo {
const time_t LeaseTime = 30;
- unsigned Consensus::yea(unsigned memberId) /* throws VoteException */ {
+ unsigned Consensus::yea(unsigned memberId) { /* throws VoteException */
Atomic<LastYea>::tran t(ly);
LastYea &ly = t.ref();
time_t now = time(0);
if( ly.when + LeaseTime >= now && ly.who != memberId ) {
log(1) << "replSet not voting yea for " << memberId <<
- " voted for " << ly.who << ' ' << now-ly.when << " secs ago" << rsLog;
+ " voted for " << ly.who << ' ' << now-ly.when << " secs ago" << rsLog;
throw VoteException();
}
ly.when = now;
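
The yea lease above keeps a node from voting for two different candidates within 30 seconds, which bounds how quickly conflicting elections could both collect the same vote; re-voting for the same member simply renews the lease. A sketch with the same shape (VoteException modeled as a runtime_error):

    #include <ctime>
    #include <iostream>
    #include <stdexcept>

    const time_t LeaseTime = 30;

    struct LastYea { time_t when = 0; unsigned who = 0xffffffff; };

    // Refuse to vote for a *different* member while the lease is live.
    unsigned yea(LastYea& ly, unsigned memberId, unsigned myVotes) {
        time_t now = time(0);
        if (ly.when + LeaseTime >= now && ly.who != memberId)
            throw std::runtime_error("already voted for another member recently");
        ly.when = now;
        ly.who = memberId;
        return myVotes;
    }

    int main() {
        LastYea ly;
        std::cout << yea(ly, 3, 1) << '\n';      // vote yea for member 3
        try { yea(ly, 5, 1); }                   // refused inside the lease window
        catch (const std::exception& e) { std::cout << e.what() << '\n'; }
    }
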
@@ -112,7 +112,7 @@ namespace mongo {
return rs._self->config().votes;
}
- /* we vote for ourself at start of election. once it fails, we can cancel the lease we had in
+ /* we vote for ourself at start of election. once it fails, we can cancel the lease we had in
place instead of leaving it for a long time.
*/
void Consensus::electionFailed(unsigned meid) {
@@ -124,7 +124,7 @@ namespace mongo {
}
/* todo: threading **************** !!!!!!!!!!!!!!!! */
- void Consensus::electCmdReceived(BSONObj cmd, BSONObjBuilder* _b) {
+ void Consensus::electCmdReceived(BSONObj cmd, BSONObjBuilder* _b) {
BSONObjBuilder& b = *_b;
DEV log() << "replSet received elect msg " << cmd.toString() << rsLog;
else log(2) << "replSet received elect msg " << cmd.toString() << rsLog;
@@ -138,14 +138,14 @@ namespace mongo {
const Member* hopeful = rs.findById(whoid);
int vote = 0;
- if( set != rs.name() ) {
+ if( set != rs.name() ) {
log() << "replSet error received an elect request for '" << set << "' but our set name is '" << rs.name() << "'" << rsLog;
}
- else if( myver < cfgver ) {
+ else if( myver < cfgver ) {
// we are stale. don't vote
}
- else if( myver > cfgver ) {
+ else if( myver > cfgver ) {
// they are stale!
log() << "replSet info got stale version # during election" << rsLog;
vote = -10000;
@@ -157,7 +157,7 @@ namespace mongo {
else if( primary && primary->hbinfo().opTime > hopeful->hbinfo().opTime ) {
// other members might be aware of more up-to-date nodes
log() << hopeful->fullName() << " is trying to elect itself but " <<
- primary->fullName() << " is already primary and more up-to-date" << rsLog;
+ primary->fullName() << " is already primary and more up-to-date" << rsLog;
vote = -10000;
}
else {
@@ -166,7 +166,7 @@ namespace mongo {
rs.relinquish();
log() << "replSet info voting yea for " << whoid << rsLog;
}
- catch(VoteException&) {
+ catch(VoteException&) {
log() << "replSet voting no already voted for another" << rsLog;
}
}
@@ -182,10 +182,10 @@ namespace mongo {
L.push_back( Target(m->fullName()) );
}
- /* config version is returned as it is ok to use this unlocked. BUT, if unlocked, you would need
+ /* config version is returned as it is ok to use this unlocked. BUT, if unlocked, you would need
to check later that the config didn't change. */
void ReplSetImpl::getTargets(list<Target>& L, int& configVersion) {
- if( lockedByMe() ) {
+ if( lockedByMe() ) {
_getTargets(L, configVersion);
return;
}
@@ -200,19 +200,19 @@ namespace mongo {
bool Consensus::weAreFreshest(bool& allUp, int& nTies) {
const OpTime ord = theReplSet->lastOpTimeWritten;
nTies = 0;
- assert( !ord.isNull() );
+ assert( !ord.isNull() );
BSONObj cmd = BSON(
- "replSetFresh" << 1 <<
- "set" << rs.name() <<
- "opTime" << Date_t(ord.asDate()) <<
- "who" << rs._self->fullName() <<
- "cfgver" << rs._cfg->version );
+ "replSetFresh" << 1 <<
+ "set" << rs.name() <<
+ "opTime" << Date_t(ord.asDate()) <<
+ "who" << rs._self->fullName() <<
+ "cfgver" << rs._cfg->version );
list<Target> L;
int ver;
- /* the following queries arbiters, even though they are never fresh. wonder if that makes sense.
- it doesn't, but it could, if they "know" what freshness it one day. so consider removing
- arbiters from getTargets() here. although getTargets is used elsewhere for elections; there
- arbiters are certainly targets - so a "includeArbs" bool would be necessary if we want to make
+ /* the following queries arbiters, even though they are never fresh. wonder if that makes sense.
+ it doesn't, but it could, if they "know" what freshness it one day. so consider removing
+ arbiters from getTargets() here. although getTargets is used elsewhere for elections; there
+ arbiters are certainly targets - so a "includeArbs" bool would be necessary if we want to make
not fetching them herein happen.
*/
rs.getTargets(L, ver);
@@ -234,25 +234,25 @@ namespace mongo {
allUp = false;
}
}
- log(1) << "replSet dev we are freshest of up nodes, nok:" << nok << " nTies:" << nTies << rsLog;
+ log(1) << "replSet dev we are freshest of up nodes, nok:" << nok << " nTies:" << nTies << rsLog;
assert( ord <= theReplSet->lastOpTimeWritten ); // <= as this may change while we are working...
return true;
}
extern time_t started;
- void Consensus::multiCommand(BSONObj cmd, list<Target>& L) {
+ void Consensus::multiCommand(BSONObj cmd, list<Target>& L) {
assert( !rs.lockedByMe() );
mongo::multiCommand(cmd, L);
}
void Consensus::_electSelf() {
- if( time(0) < steppedDown )
+ if( time(0) < steppedDown )
return;
{
const OpTime ord = theReplSet->lastOpTimeWritten;
- if( ord == 0 ) {
+ if( ord == 0 ) {
log() << "replSet info not trying to elect self, do not yet have a complete set of data from any point in time" << rsLog;
return;
}
@@ -260,16 +260,16 @@ namespace mongo {
bool allUp;
int nTies;
- if( !weAreFreshest(allUp, nTies) ) {
+ if( !weAreFreshest(allUp, nTies) ) {
log() << "replSet info not electing self, we are not freshest" << rsLog;
return;
}
rs.sethbmsg("",9);
- if( !allUp && time(0) - started < 60 * 5 ) {
- /* the idea here is that if a bunch of nodes bounce all at once, we don't want to drop data
- if we don't have to -- we'd rather be offline and wait a little longer instead
+ if( !allUp && time(0) - started < 60 * 5 ) {
+ /* the idea here is that if a bunch of nodes bounce all at once, we don't want to drop data
+ if we don't have to -- we'd rather be offline and wait a little longer instead
todo: make this configurable.
*/
rs.sethbmsg("not electing self, not all members up and we have been up less than 5 minutes");
@@ -282,9 +282,10 @@ namespace mongo {
/* tie? we then randomly sleep to try to not collide on our voting. */
/* todo: smarter. */
if( me.id() == 0 || sleptLast ) {
- // would be fine for one node not to sleep
+ // would be fine for one node not to sleep
// todo: biggest / highest priority nodes should be the ones that get to not sleep
- } else {
+ }
+ else {
assert( !rs.lockedByMe() ); // bad to go to sleep locked
unsigned ms = ((unsigned) rand()) % 1000 + 50;
DEV log() << "replSet tie " << nTies << " sleeping a little " << ms << "ms" << rsLog;
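
On a tie, the code above sleeps a random 50-1049 ms before retrying, so colliding candidates are unlikely to collide again; one node (id 0, or one that slept last round) is allowed to skip the sleep. A sketch using std::this_thread in place of mongo's own sleepmillis:

    #include <chrono>
    #include <cstdlib>
    #include <iostream>
    #include <thread>

    // Randomized tie-breaking backoff, same range as the code above.
    void tieBackoff(bool mayGoImmediately) {
        if (mayGoImmediately) return;             // fine for one node not to sleep
        unsigned ms = ((unsigned) rand()) % 1000 + 50;
        std::this_thread::sleep_for(std::chrono::milliseconds(ms));
    }

    int main() {
        srand(42);
        tieBackoff(false);
        std::cout << "retrying election\n";
    }
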
@@ -303,13 +304,13 @@ namespace mongo {
log() << "replSet info electSelf " << meid << rsLog;
BSONObj electCmd = BSON(
- "replSetElect" << 1 <<
- "set" << rs.name() <<
- "who" << me.fullName() <<
- "whoid" << me.hbinfo().id() <<
- "cfgver" << rs._cfg->version <<
- "round" << OID::gen() /* this is just for diagnostics */
- );
+ "replSetElect" << 1 <<
+ "set" << rs.name() <<
+ "who" << me.fullName() <<
+ "whoid" << me.hbinfo().id() <<
+ "cfgver" << rs._cfg->version <<
+ "round" << OID::gen() /* this is just for diagnostics */
+ );
int configVersion;
list<Target> L;
@@ -332,7 +333,7 @@ namespace mongo {
// defensive; should never happen as we have timeouts on connection and operation for our conn
log() << "replSet too much time passed during our election, ignoring result" << rsLog;
}
- else if( configVersion != rs.config().version ) {
+ else if( configVersion != rs.config().version ) {
log() << "replSet config version changed during our election, ignoring result" << rsLog;
}
else {
@@ -340,9 +341,10 @@ namespace mongo {
log(1) << "replSet election succeeded, assuming primary role" << rsLog;
success = true;
rs.assumePrimary();
- }
+ }
}
- } catch( std::exception& ) {
+ }
+ catch( std::exception& ) {
if( !success ) electionFailed(meid);
throw;
}
@@ -353,19 +355,19 @@ namespace mongo {
assert( !rs.lockedByMe() );
assert( !rs.myConfig().arbiterOnly );
assert( rs.myConfig().slaveDelay == 0 );
- try {
- _electSelf();
- }
- catch(RetryAfterSleepException&) {
+ try {
+ _electSelf();
+ }
+ catch(RetryAfterSleepException&) {
throw;
}
- catch(VoteException& ) {
+ catch(VoteException& ) {
log() << "replSet not trying to elect self as responded yea to someone else recently" << rsLog;
}
- catch(DBException& e) {
+ catch(DBException& e) {
log() << "replSet warning caught unexpected exception in electSelf() " << e.toString() << rsLog;
}
- catch(...) {
+ catch(...) {
log() << "replSet warning caught unexpected exception in electSelf()" << rsLog;
}
}
diff --git a/db/repl/health.cpp b/db/repl/health.cpp
index 19a322d23a7..8464edf258f 100644
--- a/db/repl/health.cpp
+++ b/db/repl/health.cpp
@@ -34,11 +34,11 @@
namespace mongo {
/* decls for connections.h */
- ScopedConn::M& ScopedConn::_map = *(new ScopedConn::M());
+ ScopedConn::M& ScopedConn::_map = *(new ScopedConn::M());
mutex ScopedConn::mapMutex("ScopedConn::mapMutex");
}
-namespace mongo {
+namespace mongo {
using namespace mongoutils::html;
using namespace bson;
@@ -47,7 +47,7 @@ namespace mongo {
Tee *rsLog = &_rsLog;
extern bool replSetBlind;
- string ago(time_t t) {
+ string ago(time_t t) {
if( t == 0 ) return "";
time_t x = time(0) - t;
@@ -60,14 +60,14 @@ namespace mongo {
s.precision(2);
s << x / 60.0 << " mins";
}
- else {
+ else {
s.precision(2);
s << x / 3600.0 << " hrs";
}
return s.str();
}
- void Member::summarizeMember(stringstream& s) const {
+ void Member::summarizeMember(stringstream& s) const {
s << tr();
{
stringstream u;
@@ -92,11 +92,11 @@ namespace mongo {
}
s << td(config().votes);
s << td(config().priority);
- {
+ {
string stateText = state().toString();
if( _config.hidden )
stateText += " (hidden)";
- if( ok || stateText.empty() )
+ if( ok || stateText.empty() )
s << td(stateText); // text blank if we've never connected
else
s << td( grey(str::stream() << "(was " << state().toString() << ')', true) );
@@ -107,12 +107,13 @@ namespace mongo {
s << td( a(q.str(), "", never ? "?" : hbinfo().opTime.toString()) );
if( hbinfo().skew > INT_MIN ) {
s << td( grey(str::stream() << hbinfo().skew,!ok) );
- } else
+ }
+ else
s << td("");
s << _tr();
}
-
- string ReplSetImpl::stateAsHtml(MemberState s) {
+
+ string ReplSetImpl::stateAsHtml(MemberState s) {
if( s.s == MemberState::RS_STARTUP ) return a("", "serving still starting up, or still trying to initiate the set", "STARTUP");
if( s.s == MemberState::RS_PRIMARY ) return a("", "this server thinks it is primary", "PRIMARY");
if( s.s == MemberState::RS_SECONDARY ) return a("", "this server thinks it is a secondary (slave mode)", "SECONDARY");
@@ -125,7 +126,7 @@ namespace mongo {
return "";
}
- string MemberState::toString() const {
+ string MemberState::toString() const {
if( s == MemberState::RS_STARTUP ) return "STARTUP";
if( s == MemberState::RS_PRIMARY ) return "PRIMARY";
if( s == MemberState::RS_SECONDARY ) return "SECONDARY";
@@ -146,9 +147,9 @@ namespace mongo {
set<string> skip;
be e = op["ts"];
- if( e.type() == Date || e.type() == Timestamp ) {
+ if( e.type() == Date || e.type() == Timestamp ) {
OpTime ot = e._opTime();
- ss << td( time_t_to_String_short( ot.getSecs() ) );
+ ss << td( time_t_to_String_short( ot.getSecs() ) );
ss << td( ot.toString() );
skip.insert("ts");
}
@@ -158,7 +159,8 @@ namespace mongo {
if( e.type() == NumberLong ) {
ss << "<td>" << hex << e.Long() << "</td>\n";
skip.insert("h");
- } else
+ }
+ else
ss << td("?");
ss << td(op["op"].valuestrsafe());
@@ -167,7 +169,7 @@ namespace mongo {
skip.insert("ns");
ss << "<td>";
- for( bo::iterator i(op); i.more(); ) {
+ for( bo::iterator i(op); i.more(); ) {
be e = i.next();
if( skip.count(e.fieldName()) ) continue;
ss << e.toString() << ' ';
@@ -175,9 +177,9 @@ namespace mongo {
ss << "</td></tr>\n";
}
- void ReplSetImpl::_getOplogDiagsAsHtml(unsigned server_id, stringstream& ss) const {
+ void ReplSetImpl::_getOplogDiagsAsHtml(unsigned server_id, stringstream& ss) const {
const Member *m = findById(server_id);
- if( m == 0 ) {
+ if( m == 0 ) {
ss << "Error : can't find a member with id: " << server_id << '\n';
return;
}
@@ -191,25 +193,25 @@ namespace mongo {
DBClientConnection conn(false, 0, /*timeout*/ 20);
{
string errmsg;
- if( !conn.connect(m->fullName(), errmsg) ) {
+ if( !conn.connect(m->fullName(), errmsg) ) {
ss << "couldn't connect to " << m->fullName() << ' ' << errmsg;
return;
}
}
auto_ptr<DBClientCursor> c = conn.query(rsoplog, Query().sort("$natural",1), 20, 0, &fields);
- if( c.get() == 0 ) {
+ if( c.get() == 0 ) {
ss << "couldn't query " << rsoplog;
return;
}
static const char *h[] = {"ts","optime", "h","op","ns","rest",0};
ss << "<style type=\"text/css\" media=\"screen\">"
- "table { font-size:75% }\n"
+ "table { font-size:75% }\n"
// "th { background-color:#bbb; color:#000 }\n"
// "td,th { padding:.25em }\n"
- "</style>\n";
-
+ "</style>\n";
+
ss << table(h, true);
//ss << "<pre>\n";
int n = 0;
@@ -219,17 +221,17 @@ namespace mongo {
while( c->more() ) {
bo o = c->next();
otLast = o["ts"]._opTime();
- if( otFirst.isNull() )
+ if( otFirst.isNull() )
otFirst = otLast;
say(ss, o);
- n++;
+ n++;
}
if( n == 0 ) {
ss << rsoplog << " is empty\n";
}
- else {
+ else {
auto_ptr<DBClientCursor> c = conn.query(rsoplog, Query().sort("$natural",-1), 20, 0, &fields);
- if( c.get() == 0 ) {
+ if( c.get() == 0 ) {
ss << "couldn't query [2] " << rsoplog;
return;
}
@@ -238,7 +240,7 @@ namespace mongo {
otEnd = o["ts"]._opTime();
while( 1 ) {
stringstream z;
- if( o["ts"]._opTime() == otLast )
+ if( o["ts"]._opTime() == otLast )
break;
say(z, o);
x = z.str() + x;
@@ -261,30 +263,31 @@ namespace mongo {
ss.precision(3);
if( h < 72 )
ss << h << " hours";
- else
+ else
ss << h / 24.0 << " days";
ss << "</p>\n";
}
}
- void ReplSetImpl::_summarizeAsHtml(stringstream& s) const {
+ void ReplSetImpl::_summarizeAsHtml(stringstream& s) const {
s << table(0, false);
s << tr("Set name:", _name);
s << tr("Majority up:", elect.aMajoritySeemsToBeUp()?"yes":"no" );
s << _table();
- const char *h[] = {"Member",
- "<a title=\"member id in the replset config\">id</a>",
- "Up",
- "<a title=\"length of time we have been continuously connected to the other member with no reconnects (for self, shows uptime)\">cctime</a>",
- "<a title=\"when this server last received a heartbeat response - includes error code responses\">Last heartbeat</a>",
- "Votes", "Priority", "State", "Messages",
- "<a title=\"how up to date this server is. this value polled every few seconds so actually lag is typically much lower than value shown here.\">optime</a>",
- "<a title=\"Clock skew in seconds relative to this server. Informational; server clock variances will make the diagnostics hard to read, but otherwise are benign..\">skew</a>",
- 0};
+ const char *h[] = {"Member",
+ "<a title=\"member id in the replset config\">id</a>",
+ "Up",
+ "<a title=\"length of time we have been continuously connected to the other member with no reconnects (for self, shows uptime)\">cctime</a>",
+ "<a title=\"when this server last received a heartbeat response - includes error code responses\">Last heartbeat</a>",
+ "Votes", "Priority", "State", "Messages",
+ "<a title=\"how up to date this server is. this value polled every few seconds so actually lag is typically much lower than value shown here.\">optime</a>",
+ "<a title=\"Clock skew in seconds relative to this server. Informational; server clock variances will make the diagnostics hard to read, but otherwise are benign..\">skew</a>",
+ 0
+ };
s << table(h);
- /* this is to sort the member rows by their ordinal _id, so they show up in the same
+ /* this is to sort the member rows by their ordinal _id, so they show up in the same
order on all the different web ui's; that is less confusing for the operator. */
map<int,string> mp;
@@ -293,13 +296,13 @@ namespace mongo {
readlocktry lk("local.replset.minvalid", 300);
if( lk.got() ) {
BSONObj mv;
- if( Helpers::getSingleton("local.replset.minvalid", mv) ) {
+ if( Helpers::getSingleton("local.replset.minvalid", mv) ) {
myMinValid = "minvalid:" + mv["ts"]._opTime().toString();
}
}
else myMinValid = ".";
}
- catch(...) {
+ catch(...) {
myMinValid = "exception fetching minvalid";
}
@@ -307,26 +310,26 @@ namespace mongo {
stringstream s;
/* self row */
s << tr() << td(_self->fullName() + " (me)") <<
- td(_self->id()) <<
- td("1") << //up
- td(ago(started)) <<
- td("") << // last heartbeat
- td(ToString(_self->config().votes)) <<
- td(ToString(_self->config().priority)) <<
- td( stateAsHtml(box.getState()) + (_self->config().hidden?" (hidden)":"") );
+ td(_self->id()) <<
+ td("1") << //up
+ td(ago(started)) <<
+ td("") << // last heartbeat
+ td(ToString(_self->config().votes)) <<
+ td(ToString(_self->config().priority)) <<
+ td( stateAsHtml(box.getState()) + (_self->config().hidden?" (hidden)":"") );
s << td( _hbmsg );
stringstream q;
q << "/_replSetOplog?_id=" << _self->id();
s << td( a(q.str(), myMinValid, theReplSet->lastOpTimeWritten.toString()) );
s << td(""); // skew
s << _tr();
- mp[_self->hbinfo().id()] = s.str();
+ mp[_self->hbinfo().id()] = s.str();
}
Member *m = head();
while( m ) {
- stringstream s;
+ stringstream s;
m->summarizeMember(s);
- mp[m->hbinfo().id()] = s.str();
+ mp[m->hbinfo().id()] = s.str();
m = m->next();
}
@@ -340,15 +343,15 @@ namespace mongo {
_rsLog.toHTML( s );
}
- const Member* ReplSetImpl::findById(unsigned id) const {
+ const Member* ReplSetImpl::findById(unsigned id) const {
if( id == _self->id() ) return _self;
for( Member *m = head(); m; m = m->next() )
- if( m->id() == id )
+ if( m->id() == id )
return m;
return 0;
}
- void ReplSetImpl::_summarizeStatus(BSONObjBuilder& b) const {
+ void ReplSetImpl::_summarizeStatus(BSONObjBuilder& b) const {
vector<BSONObj> v;
// add self
@@ -380,8 +383,9 @@ namespace mongo {
bb.append("state", (int) m->state().s);
if( h == 0 ) {
// if we can't connect the state info is from the past and could be confusing to show
- bb.append("stateStr", "(not reachable/healthy)");
- } else {
+ bb.append("stateStr", "(not reachable/healthy)");
+ }
+ else {
bb.append("stateStr", m->state().toString());
}
bb.append("uptime", (unsigned) (m->hbinfo().upSince ? (time(0)-m->hbinfo().upSince) : 0));
@@ -403,8 +407,8 @@ namespace mongo {
b.append("blind",true); // to avoid confusion if set...normally never set except for testing.
}
- static struct Test : public UnitTest {
- void run() {
+ static struct Test : public UnitTest {
+ void run() {
HealthOptions a,b;
assert( a == b );
assert( a.isDefault() );
diff --git a/db/repl/health.h b/db/repl/health.h
index 645a3b5e5bb..a32db00d8cc 100644
--- a/db/repl/health.h
+++ b/db/repl/health.h
@@ -23,8 +23,8 @@ namespace mongo {
/* throws */
bool requestHeartbeat(string setname, string fromHost, string memberFullName, BSONObj& result, int myConfigVersion, int& theirConfigVersion, bool checkEmpty = false);
- struct HealthOptions {
- HealthOptions() {
+ struct HealthOptions {
+ HealthOptions() {
heartbeatSleepMillis = 2000;
heartbeatTimeoutMillis = 10000;
heartbeatConnRetries = 2;
@@ -42,8 +42,8 @@ namespace mongo {
uassert(13113, "bad replset heartbeat option", heartbeatTimeoutMillis >= 10);
}
- bool operator==(const HealthOptions& r) const {
- return heartbeatSleepMillis==r.heartbeatSleepMillis && heartbeatTimeoutMillis==r.heartbeatTimeoutMillis && heartbeatConnRetries==heartbeatConnRetries;
+ bool operator==(const HealthOptions& r) const {
+            return heartbeatSleepMillis==r.heartbeatSleepMillis && heartbeatTimeoutMillis==r.heartbeatTimeoutMillis && heartbeatConnRetries==r.heartbeatConnRetries;
}
};
diff --git a/db/repl/heartbeat.cpp b/db/repl/heartbeat.cpp
index 80216da214d..39724665579 100644
--- a/db/repl/heartbeat.cpp
+++ b/db/repl/heartbeat.cpp
@@ -31,7 +31,7 @@
#include "../../util/unittest.h"
#include "../instance.h"
-namespace mongo {
+namespace mongo {
using namespace bson;
@@ -42,7 +42,7 @@ namespace mongo {
long long HeartbeatInfo::timeDown() const {
if( up() ) return 0;
- if( downSince == 0 )
+ if( downSince == 0 )
return 0; // still waiting on first heartbeat
return jsTime() - downSince;
}
@@ -53,10 +53,10 @@ namespace mongo {
virtual bool adminOnly() const { return false; }
CmdReplSetHeartbeat() : ReplSetCommand("replSetHeartbeat") { }
virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- if( replSetBlind )
+ if( replSetBlind )
return false;
- /* we don't call ReplSetCommand::check() here because heartbeat
+ /* we don't call ReplSetCommand::check() here because heartbeat
checks many things that are pre-initialization. */
if( !replSet ) {
errmsg = "not running with --replSet";
@@ -66,11 +66,11 @@ namespace mongo {
/* we want to keep heartbeat connections open when relinquishing primary. tag them here. */
{
MessagingPort *mp = cc().port();
- if( mp )
+ if( mp )
mp->tag |= 1;
}
- if( cmdObj["pv"].Int() != 1 ) {
+ if( cmdObj["pv"].Int() != 1 ) {
errmsg = "incompatible replset protocol version";
return false;
}
@@ -86,7 +86,7 @@ namespace mongo {
}
result.append("rs", true);
- if( cmdObj["checkEmpty"].trueValue() ) {
+ if( cmdObj["checkEmpty"].trueValue() ) {
result.append("hasData", replHasDatabases());
}
if( theReplSet == 0 ) {
@@ -98,7 +98,7 @@ namespace mongo {
return false;
}
- if( theReplSet->name() != cmdObj.getStringField("replSetHeartbeat") ) {
+ if( theReplSet->name() != cmdObj.getStringField("replSetHeartbeat") ) {
errmsg = "repl set names do not match (2)";
result.append("mismatch", true);
return false;
@@ -118,8 +118,8 @@ namespace mongo {
} cmdReplSetHeartbeat;
/* throws dbexception */
- bool requestHeartbeat(string setName, string from, string memberFullName, BSONObj& result, int myCfgVersion, int& theirCfgVersion, bool checkEmpty) {
- if( replSetBlind ) {
+ bool requestHeartbeat(string setName, string from, string memberFullName, BSONObj& result, int myCfgVersion, int& theirCfgVersion, bool checkEmpty) {
+ if( replSetBlind ) {
//sleepmillis( rand() );
return false;
}
@@ -145,7 +145,7 @@ namespace mongo {
ReplSetHealthPollTask(const HostAndPort& hh, const HeartbeatInfo& mm) : h(hh), m(mm) { }
string name() const { return "ReplSetHealthPollTask"; }
- void doWork() {
+ void doWork() {
if ( !theReplSet ) {
log(2) << "theReplSet not initialized yet, skipping health poll this round" << rsLog;
return;
@@ -153,7 +153,7 @@ namespace mongo {
HeartbeatInfo mem = m;
HeartbeatInfo old = mem;
- try {
+ try {
BSONObj info;
int theirConfigVersion = -10000;
@@ -165,9 +165,9 @@ namespace mongo {
if ( info["time"].isNumber() ) {
long long t = info["time"].numberLong();
- if( t > after )
+ if( t > after )
mem.skew = (int) (t - after);
- else if( t < before )
+ else if( t < before )
mem.skew = (int) (t - before); // negative
}
else {
@@ -195,12 +195,12 @@ namespace mongo {
be cfg = info["config"];
if( cfg.ok() ) {
// received a new config
- boost::function<void()> f =
+ boost::function<void()> f =
boost::bind(&Manager::msgReceivedNewConfig, theReplSet->mgr, cfg.Obj().copy());
theReplSet->mgr->send(f);
}
}
- else {
+ else {
down(mem, info.getStringField("errmsg"));
}
}
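
The skew estimate two hunks up brackets the heartbeat with local timestamps: a remote clock reading outside [before, after] must be skewed by at least its distance to the nearest endpoint, while anything inside the window is indistinguishable from zero skew. Distilled:

    #include <iostream>

    // If the remote time falls outside our [before, after] round-trip
    // window, report the minimum possible skew; otherwise none detectable.
    int estimateSkew(long long before, long long after, long long remote) {
        if (remote > after)  return (int)(remote - after);
        if (remote < before) return (int)(remote - before); // negative
        return 0;
    }

    int main() {
        std::cout << estimateSkew(1000, 1040, 1100) << '\n'; // 60: remote ahead
        std::cout << estimateSkew(1000, 1040, 900)  << '\n'; // -100: remote behind
    }
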
@@ -217,8 +217,8 @@ namespace mongo {
static time_t last = 0;
time_t now = time(0);
bool changed = mem.changed(old);
- if( changed ) {
- if( old.hbstate != mem.hbstate )
+ if( changed ) {
+ if( old.hbstate != mem.hbstate )
log() << "replSet member " << h.toString() << ' ' << mem.hbstate.toString() << rsLog;
}
if( changed || now-last>4 ) {
@@ -239,12 +239,12 @@ namespace mongo {
}
};
- void ReplSetImpl::endOldHealthTasks() {
+ void ReplSetImpl::endOldHealthTasks() {
unsigned sz = healthTasks.size();
for( set<ReplSetHealthPollTask*>::iterator i = healthTasks.begin(); i != healthTasks.end(); i++ )
(*i)->halt();
healthTasks.clear();
- if( sz )
+ if( sz )
DEV log() << "replSet debug: cleared old tasks " << sz << endl;
}
@@ -256,8 +256,8 @@ namespace mongo {
void startSyncThread();
- /** called during repl set startup. caller expects it to return fairly quickly.
- note ReplSet object is only created once we get a config - so this won't run
+ /** called during repl set startup. caller expects it to return fairly quickly.
+ note ReplSet object is only created once we get a config - so this won't run
until the initiation.
*/
void ReplSetImpl::startThreads() {
diff --git a/db/repl/manager.cpp b/db/repl/manager.cpp
index c1a7c858d18..ed39c3107d7 100644
--- a/db/repl/manager.cpp
+++ b/db/repl/manager.cpp
@@ -1,4 +1,4 @@
-/* @file manager.cpp
+/* @file manager.cpp
*/
/**
@@ -23,20 +23,20 @@
namespace mongo {
- enum {
+ enum {
NOPRIMARY = -2,
SELFPRIMARY = -1
};
/* check members OTHER THAN US to see if they think they are primary */
- const Member * Manager::findOtherPrimary(bool& two) {
+ const Member * Manager::findOtherPrimary(bool& two) {
two = false;
Member *m = rs->head();
Member *p = 0;
while( m ) {
DEV assert( m != rs->_self );
if( m->state().primary() && m->hbinfo().up() ) {
- if( p ) {
+ if( p ) {
two = true;
return 0;
}
@@ -44,36 +44,36 @@ namespace mongo {
}
m = m->next();
}
- if( p )
+ if( p )
noteARemoteIsPrimary(p);
return p;
}
- Manager::Manager(ReplSetImpl *_rs) :
- task::Server("rs Manager"), rs(_rs), busyWithElectSelf(false), _primary(NOPRIMARY)
- {
+ Manager::Manager(ReplSetImpl *_rs) :
+ task::Server("rs Manager"), rs(_rs), busyWithElectSelf(false), _primary(NOPRIMARY) {
}
-
- Manager::~Manager() {
- /* we don't destroy the replset object we sit in; however, the destructor could have thrown on init.
- the log message below is just a reminder to come back one day and review this code more, and to
- make it cleaner.
+
+ Manager::~Manager() {
+ /* we don't destroy the replset object we sit in; however, the destructor could have thrown on init.
+ the log message below is just a reminder to come back one day and review this code more, and to
+ make it cleaner.
*/
log() << "info: ~Manager called" << rsLog;
rs->mgr = 0;
}
- void Manager::starting() {
+ void Manager::starting() {
Client::initThread("rs Manager");
}
- void Manager::noteARemoteIsPrimary(const Member *m) {
+ void Manager::noteARemoteIsPrimary(const Member *m) {
if( rs->box.getPrimary() == m )
return;
rs->_self->lhb() = "";
if( rs->iAmArbiterOnly() ) {
rs->box.set(MemberState::RS_ARBITER, m);
- } else {
+ }
+ else {
rs->box.noteRemoteIsPrimary(m);
}
}
@@ -90,9 +90,8 @@ namespace mongo {
const Member *p = rs->box.getPrimary();
if( p && p != rs->_self ) {
- if( !p->hbinfo().up() ||
- !p->hbinfo().hbstate.primary() )
- {
+ if( !p->hbinfo().up() ||
+ !p->hbinfo().hbstate.primary() ) {
p = 0;
rs->box.setOtherPrimary(0);
}
@@ -111,29 +110,29 @@ namespace mongo {
if( p2 ) {
/* someone else thinks they are primary. */
- if( p == p2 ) {
+ if( p == p2 ) {
// we thought the same; all set.
return;
}
if( p == 0 ) {
- noteARemoteIsPrimary(p2);
+ noteARemoteIsPrimary(p2);
return;
}
// todo xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
if( p != rs->_self ) {
// switch primary from oldremotep->newremotep2
- noteARemoteIsPrimary(p2);
+ noteARemoteIsPrimary(p2);
return;
}
/* we thought we were primary, yet now someone else thinks they are. */
if( !rs->elect.aMajoritySeemsToBeUp() ) {
/* we can't see a majority. so the other node is probably the right choice. */
- noteARemoteIsPrimary(p2);
+ noteARemoteIsPrimary(p2);
return;
}
- /* ignore for now, keep thinking we are master.
- this could just be timing (we poll every couple seconds) or could indicate
- a problem? if it happens consistently for a duration of time we should
+ /* ignore for now, keep thinking we are master.
+ this could just be timing (we poll every couple seconds) or could indicate
+ a problem? if it happens consistently for a duration of time we should
alert the sysadmin.
*/
return;
@@ -141,17 +140,17 @@ namespace mongo {
/* didn't find anyone who wants to be primary */
- if( p ) {
+ if( p ) {
/* we are already primary */
- if( p != rs->_self ) {
+ if( p != rs->_self ) {
rs->sethbmsg("error p != rs->self in checkNewState");
log() << "replSet " << p->fullName() << rsLog;
log() << "replSet " << rs->_self->fullName() << rsLog;
return;
}
- if( rs->elect.shouldRelinquish() ) {
+ if( rs->elect.shouldRelinquish() ) {
log() << "replSet can't see a majority of the set, relinquishing primary" << rsLog;
rs->relinquish();
}
@@ -165,7 +164,7 @@ namespace mongo {
/* TODO : CHECK PRIORITY HERE. can't be elected if priority zero. */
/* no one seems to be primary. shall we try to elect ourself? */
- if( !rs->elect.aMajoritySeemsToBeUp() ) {
+ if( !rs->elect.aMajoritySeemsToBeUp() ) {
static time_t last;
static int n;
int ll = 0;
@@ -178,15 +177,15 @@ namespace mongo {
busyWithElectSelf = true; // don't try to do further elections & such while we are already working on one.
}
- try {
- rs->elect.electSelf();
+ try {
+ rs->elect.electSelf();
}
catch(RetryAfterSleepException&) {
/* we want to process new inbounds before trying this again. so we just put a checkNewstate in the queue for eval later. */
requeue();
}
- catch(...) {
- log() << "replSet error unexpected assertion in rs manager" << rsLog;
+ catch(...) {
+ log() << "replSet error unexpected assertion in rs manager" << rsLog;
}
busyWithElectSelf = false;
}
diff --git a/db/repl/replset_commands.cpp b/db/repl/replset_commands.cpp
index 7474f05c039..dc8567a4083 100644
--- a/db/repl/replset_commands.cpp
+++ b/db/repl/replset_commands.cpp
@@ -26,7 +26,7 @@
using namespace bson;
-namespace mongo {
+namespace mongo {
void checkMembersUpForConfigChange(const ReplSetConfig& cfg, bool initial);
@@ -52,7 +52,7 @@ namespace mongo {
}
// may not need this, but if removed check all tests still work:
- if( !check(errmsg, result) )
+ if( !check(errmsg, result) )
return false;
if( cmdObj.hasElement("blind") ) {
@@ -71,11 +71,11 @@ namespace mongo {
virtual void help( stringstream &help ) const {
help << "internal";
}
- CmdReplSetGetRBID() : ReplSetCommand("replSetGetRBID") {
+ CmdReplSetGetRBID() : ReplSetCommand("replSetGetRBID") {
rbid = (int) curTimeMillis();
}
virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- if( !check(errmsg, result) )
+ if( !check(errmsg, result) )
return false;
result.append("rbid",rbid);
return true;
@@ -83,16 +83,16 @@ namespace mongo {
} cmdReplSetRBID;
/** we increment the rollback id on every rollback event. */
- void incRBID() {
+ void incRBID() {
cmdReplSetRBID.rbid++;
}
/** helper to get rollback id from another server. */
- int getRBID(DBClientConnection *c) {
+ int getRBID(DBClientConnection *c) {
bo info;
c->simpleCommand("admin", &info, "replSetGetRBID");
return info["rbid"].numberInt();
- }
+ }
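
incRBID()/getRBID() above implement the rollback id: a counter bumped on every rollback event and readable remotely via the replSetGetRBID command. A consumer that snapshots the id before reading from a server can tell afterwards whether a rollback happened in between. A small sketch of that check (RemoteNode is a hypothetical stand-in for DBClientConnection):

    struct RemoteNode {
        int _rbid;
        // the real code issues replSetGetRBID against "admin", as getRBID() does above
        int rbid() const { return _rbid; }
    };

    // If the rollback id moved while we were reading, anything we read in the
    // interim may since have been rolled back and must be treated as suspect.
    bool readsStillValid(const RemoteNode& n, int rbidAtStart) {
        return n.rbid() == rbidAtStart;
    }
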
class CmdReplSetGetStatus : public ReplSetCommand {
public:
@@ -106,7 +106,7 @@ namespace mongo {
if ( cmdObj["forShell"].trueValue() )
lastError.disableForCommand();
- if( !check(errmsg, result) )
+ if( !check(errmsg, result) )
return false;
theReplSet->summarizeStatus(result);
return true;
@@ -123,7 +123,7 @@ namespace mongo {
}
CmdReplSetReconfig() : ReplSetCommand("replSetReconfig"), mutex("rsreconfig") { }
virtual bool run(const string& a, BSONObj& b, string& errmsg, BSONObjBuilder& c, bool d) {
- try {
+ try {
rwlock_try_write lk(mutex);
return _run(a,b,errmsg,c,d);
}
@@ -133,16 +133,16 @@ namespace mongo {
}
private:
bool _run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- if( !check(errmsg, result) )
+ if( !check(errmsg, result) )
return false;
- if( !theReplSet->box.getState().primary() ) {
+ if( !theReplSet->box.getState().primary() ) {
errmsg = "replSetReconfig command must be sent to the current replica set primary.";
return false;
}
{
- // just make sure we can get a write lock before doing anything else. we'll reacquire one
- // later. of course it could be stuck then, but this check lowers the risk if weird things
+ // just make sure we can get a write lock before doing anything else. we'll reacquire one
+ // later. of course it could be stuck then, but this check lowers the risk if weird things
// are up - we probably don't want a change to apply 30 minutes after the initial attempt.
time_t t = time(0);
writelock lk("");
@@ -167,7 +167,7 @@ namespace mongo {
log() << "replSet replSetReconfig config object parses ok, " << newConfig.members.size() << " members specified" << rsLog;
- if( !ReplSetConfig::legalChange(theReplSet->getConfig(), newConfig, errmsg) ) {
+ if( !ReplSetConfig::legalChange(theReplSet->getConfig(), newConfig, errmsg) ) {
return false;
}
@@ -178,7 +178,7 @@ namespace mongo {
theReplSet->haveNewConfig(newConfig, true);
ReplSet::startupStatusMsg = "replSetReconfig'd";
}
- catch( DBException& e ) {
+ catch( DBException& e ) {
log() << "replSet replSetReconfig exception: " << e.what() << rsLog;
throw;
}
@@ -207,7 +207,7 @@ namespace mongo {
if( secs == 0 )
result.append("info","unfreezing");
}
- if( secs == 1 )
+ if( secs == 1 )
result.append("warning", "you really want to freeze for only 1 second?");
return true;
}
@@ -243,24 +243,25 @@ namespace mongo {
class ReplSetHandler : public DbWebHandler {
public:
- ReplSetHandler() : DbWebHandler( "_replSet" , 1 , true ){}
+ ReplSetHandler() : DbWebHandler( "_replSet" , 1 , true ) {}
virtual bool handles( const string& url ) const {
return startsWith( url , "/_replSet" );
}
- virtual void handle( const char *rq, string url, BSONObj params,
+ virtual void handle( const char *rq, string url, BSONObj params,
string& responseMsg, int& responseCode,
- vector<string>& headers, const SockAddr &from ){
-
+ vector<string>& headers, const SockAddr &from ) {
+
if( url == "/_replSetOplog" ) {
responseMsg = _replSetOplog(params);
- } else
+ }
+ else
responseMsg = _replSet();
responseCode = 200;
}
- string _replSetOplog(bo parms) {
+ string _replSetOplog(bo parms) {
int _id = (int) str::toUnsigned( parms["_id"].String() );
stringstream s;
@@ -268,11 +269,11 @@ namespace mongo {
s << start(t);
s << p(t);
- if( theReplSet == 0 ) {
- if( cmdLine._replSet.empty() )
+ if( theReplSet == 0 ) {
+ if( cmdLine._replSet.empty() )
s << p("Not using --replSet");
else {
- s << p("Still starting up, or else set is not yet " + a("http://www.mongodb.org/display/DOCS/Replica+Set+Configuration#InitialSetup", "", "initiated")
+ s << p("Still starting up, or else set is not yet " + a("http://www.mongodb.org/display/DOCS/Replica+Set+Configuration#InitialSetup", "", "initiated")
+ ".<br>" + ReplSet::startupStatusMsg);
}
}
@@ -280,8 +281,8 @@ namespace mongo {
try {
theReplSet->getOplogDiagsAsHtml(_id, s);
}
- catch(std::exception& e) {
- s << "error querying oplog: " << e.what() << '\n';
+ catch(std::exception& e) {
+ s << "error querying oplog: " << e.what() << '\n';
}
}
@@ -290,20 +291,20 @@ namespace mongo {
}
/* /_replSet show replica set status in html format */
- string _replSet() {
+ string _replSet() {
stringstream s;
s << start("Replica Set Status " + prettyHostName());
- s << p( a("/", "back", "Home") + " | " +
+ s << p( a("/", "back", "Home") + " | " +
a("/local/system.replset/?html=1", "", "View Replset Config") + " | " +
a("/replSetGetStatus?text=1", "", "replSetGetStatus") + " | " +
a("http://www.mongodb.org/display/DOCS/Replica+Sets", "", "Docs")
);
- if( theReplSet == 0 ) {
- if( cmdLine._replSet.empty() )
+ if( theReplSet == 0 ) {
+ if( cmdLine._replSet.empty() )
s << p("Not using --replSet");
else {
- s << p("Still starting up, or else set is not yet " + a("http://www.mongodb.org/display/DOCS/Replica+Set+Configuration#InitialSetup", "", "initiated")
+ s << p("Still starting up, or else set is not yet " + a("http://www.mongodb.org/display/DOCS/Replica+Set+Configuration#InitialSetup", "", "initiated")
+ ".<br>" + ReplSet::startupStatusMsg);
}
}
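
For reference, the handler above serves two HTML pages from mongod's HTTP console (which by default listens on the mongod port plus 1000, e.g. 28017 for a default 27017 server):

    // http://localhost:28017/_replSet               -- set status summary (_replSet())
    // http://localhost:28017/_replSetOplog?_id=1    -- oplog diagnostics for member _id 1

Both fall back to the "still starting up, or else set is not yet initiated" message when theReplSet is null.
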
diff --git a/db/repl/rs.cpp b/db/repl/rs.cpp
index ed0eb3b18c5..045f906b064 100644
--- a/db/repl/rs.cpp
+++ b/db/repl/rs.cpp
@@ -23,7 +23,7 @@
#include "rs.h"
#include "connections.h"
-namespace mongo {
+namespace mongo {
using namespace bson;
@@ -31,18 +31,18 @@ namespace mongo {
ReplSet *theReplSet = 0;
extern string *discoveredSeed;
- void ReplSetImpl::sethbmsg(string s, int logLevel) {
+ void ReplSetImpl::sethbmsg(string s, int logLevel) {
static time_t lastLogged;
_hbmsgTime = time(0);
- if( s == _hbmsg ) {
+ if( s == _hbmsg ) {
// unchanged
if( _hbmsgTime - lastLogged < 60 )
return;
}
unsigned sz = s.size();
- if( sz >= 256 )
+ if( sz >= 256 )
memcpy(_hbmsg, s.c_str(), 255);
else {
_hbmsg[sz] = 0;
@@ -54,7 +54,7 @@ namespace mongo {
}
}
- void ReplSetImpl::assumePrimary() {
+ void ReplSetImpl::assumePrimary() {
assert( iAmPotentiallyHot() );
writelock lk("admin."); // so we are synchronized with _logOp()
box.setSelfPrimary(_self);
@@ -65,11 +65,11 @@ namespace mongo {
const bool closeOnRelinquish = true;
- void ReplSetImpl::relinquish() {
+ void ReplSetImpl::relinquish() {
if( box.getState().primary() ) {
log() << "replSet relinquishing primary state" << rsLog;
changeState(MemberState::RS_SECONDARY);
-
+
if( closeOnRelinquish ) {
/* close sockets that were talking to us so they don't blithely send many writes that will fail
with "not master" (of course client could check result code, but in case they are not)
@@ -85,8 +85,8 @@ namespace mongo {
}
/* look freshly for who is primary - includes relinquishing ourself. */
- void ReplSetImpl::forgetPrimary() {
- if( box.getState().primary() )
+ void ReplSetImpl::forgetPrimary() {
+ if( box.getState().primary() )
relinquish();
else {
box.setOtherPrimary(0);
@@ -94,9 +94,9 @@ namespace mongo {
}
// for the replSetStepDown command
- bool ReplSetImpl::_stepDown(int secs) {
+ bool ReplSetImpl::_stepDown(int secs) {
lock lk(this);
- if( box.getState().primary() ) {
+ if( box.getState().primary() ) {
elect.steppedDown = time(0) + secs;
log() << "replSet info stepping down as primary secs=" << secs << rsLog;
relinquish();
@@ -105,17 +105,17 @@ namespace mongo {
return false;
}
- bool ReplSetImpl::_freeze(int secs) {
+ bool ReplSetImpl::_freeze(int secs) {
lock lk(this);
- /* note if we are primary we remain primary but won't try to elect ourself again until
- this time period expires.
+ /* note if we are primary we remain primary but won't try to elect ourself again until
+ this time period expires.
*/
- if( secs == 0 ) {
+ if( secs == 0 ) {
elect.steppedDown = 0;
log() << "replSet info 'unfreezing'" << rsLog;
}
else {
- if( !box.getState().primary() ) {
+ if( !box.getState().primary() ) {
elect.steppedDown = time(0) + secs;
log() << "replSet info 'freezing' for " << secs << " seconds" << rsLog;
}
@@ -126,7 +126,7 @@ namespace mongo {
return true;
}
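
Restated, the freeze logic above is a small state machine around elect.steppedDown, the earliest time this node may campaign again (0 meaning no freeze). A sketch covering exactly the branches visible in the hunk:

    #include <ctime>

    // Mirrors ReplSetImpl::_freeze: secs == 0 clears the freeze; otherwise only
    // a non-primary sets the freeze time. (Per the comment in the hunk, a
    // primary remains primary and simply won't try to elect itself again early.)
    void applyFreeze(time_t& steppedDown, int secs, bool primary) {
        if( secs == 0 )
            steppedDown = 0;                    // 'unfreezing'
        else if( !primary )
            steppedDown = time(0) + secs;       // 'freezing' for secs seconds
    }
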
- void ReplSetImpl::msgUpdateHBInfo(HeartbeatInfo h) {
+ void ReplSetImpl::msgUpdateHBInfo(HeartbeatInfo h) {
for( Member *m = _members.head(); m; m=m->next() ) {
if( m->id() == h.id() ) {
m->_hbinfo = h;
@@ -135,7 +135,7 @@ namespace mongo {
}
}
- list<HostAndPort> ReplSetImpl::memberHostnames() const {
+ list<HostAndPort> ReplSetImpl::memberHostnames() const {
list<HostAndPort> L;
L.push_back(_self->h());
for( Member *m = _members.head(); m; m = m->next() )
@@ -153,8 +153,9 @@ namespace mongo {
}
else if( !m->config().arbiterOnly ) {
if( m->config().slaveDelay ) {
- /* hmmm - we don't list these as they are stale. */
- } else {
+ /* hmmm - we don't list these as they are stale. */
+ }
+ else {
passives.push_back(m->h().toString());
}
}
@@ -189,7 +190,7 @@ namespace mongo {
}
}
- if( !isp ) {
+ if( !isp ) {
const Member *m = sp.primary;
if( m )
b.append("primary", m->h().toString());
@@ -208,8 +209,8 @@ namespace mongo {
/** @param cfgString <setname>/<seedhost1>,<seedhost2> */
- void parseReplsetCmdLine(string cfgString, string& setname, vector<HostAndPort>& seeds, set<HostAndPort>& seedSet ) {
- const char *p = cfgString.c_str();
+ void parseReplsetCmdLine(string cfgString, string& setname, vector<HostAndPort>& seeds, set<HostAndPort>& seedSet ) {
+ const char *p = cfgString.c_str();
const char *slash = strchr(p, '/');
if( slash )
setname = string(p, slash-p);
@@ -239,7 +240,8 @@ namespace mongo {
//uassert(13101, "can't use localhost in replset host list", !m.isLocalHost());
if( m.isSelf() ) {
log(1) << "replSet ignoring seed " << m.toString() << " (=self)" << rsLog;
- } else
+ }
+ else
seeds.push_back(m);
if( *comma == 0 )
break;
@@ -248,10 +250,9 @@ namespace mongo {
}
}
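
Per the @param comment above, the --replSet argument is <setname>/<seedhost1>,<seedhost2>: everything before the first '/' is the set name, and the remainder is a comma-separated seed list, with any seed that resolves to self skipped. A worked example (alpha/beta are hypothetical hosts):

    // parseReplsetCmdLine("rs0/alpha:27017,beta:27017", setname, seeds, seedSet)
    // yields:
    //   setname = "rs0"
    //   seeds   = [ alpha:27017, beta:27017 ]   (minus any entry that is self)
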
- ReplSetImpl::ReplSetImpl(ReplSetCmdline& replSetCmdline) : elect(this),
- _self(0),
- mgr( new Manager(this) )
- {
+ ReplSetImpl::ReplSetImpl(ReplSetCmdline& replSetCmdline) : elect(this),
+ _self(0),
+ mgr( new Manager(this) ) {
_cfg = 0;
memset(_hbmsg, 0, sizeof(_hbmsg));
*_hbmsg = '.'; // temp...just to see
@@ -272,20 +273,21 @@ namespace mongo {
}
for( set<HostAndPort>::iterator i = replSetCmdline.seedSet.begin(); i != replSetCmdline.seedSet.end(); i++ ) {
if( i->isSelf() ) {
- if( sss == 1 )
+ if( sss == 1 )
log(1) << "replSet warning self is listed in the seed list and there are no other seeds listed did you intend that?" << rsLog;
- } else
+ }
+ else
log() << "replSet warning command line seed " << i->toString() << " is not present in the current repl set config" << rsLog;
}
}
void newReplUp();
- void ReplSetImpl::loadLastOpTimeWritten() {
+ void ReplSetImpl::loadLastOpTimeWritten() {
//assert( lastOpTimeWritten.isNull() );
readlock lk(rsoplog);
BSONObj o;
- if( Helpers::getLast(rsoplog, o) ) {
+ if( Helpers::getLast(rsoplog, o) ) {
lastH = o["h"].numberLong();
lastOpTimeWritten = o["ts"]._opTime();
uassert(13290, "bad replSet oplog entry?", !lastOpTimeWritten.isNull());
@@ -293,11 +295,11 @@ namespace mongo {
}
/* call after constructing to start - returns fairly quickly after launching its threads */
- void ReplSetImpl::_go() {
- try {
+ void ReplSetImpl::_go() {
+ try {
loadLastOpTimeWritten();
}
- catch(std::exception& e) {
+ catch(std::exception& e) {
log() << "replSet error fatal couldn't query the local " << rsoplog << " collection. Terminating mongod after 30 seconds." << rsLog;
log() << e.what() << rsLog;
sleepsecs(30);
@@ -325,7 +327,7 @@ namespace mongo {
@return true if ok; throws if config really bad; false if config doesn't include self
*/
bool ReplSetImpl::initFromConfig(ReplSetConfig& c, bool reconf) {
- /* NOTE: haveNewConfig() writes the new config to disk before we get here. So
+ /* NOTE: haveNewConfig() writes the new config to disk before we get here. So
we cannot error out at this point, except fatally. Check errors earlier.
*/
lock lk(this);
@@ -340,24 +342,24 @@ namespace mongo {
{
unsigned nfound = 0;
int me = 0;
- for( vector<ReplSetConfig::MemberCfg>::iterator i = c.members.begin(); i != c.members.end(); i++ ) {
+ for( vector<ReplSetConfig::MemberCfg>::iterator i = c.members.begin(); i != c.members.end(); i++ ) {
const ReplSetConfig::MemberCfg& m = *i;
if( m.h.isSelf() ) {
nfound++;
me++;
if( !reconf || (_self && _self->id() == (unsigned) m._id) )
;
- else {
+ else {
log() << "replSet " << _self->id() << ' ' << m._id << rsLog;
assert(false);
}
}
- else if( reconf ) {
+ else if( reconf ) {
const Member *old = findById(m._id);
- if( old ) {
+ if( old ) {
nfound++;
assert( (int) old->id() == m._id );
- if( old->config() == m ) {
+ if( old->config() == m ) {
additive = false;
}
}
@@ -365,7 +367,7 @@ namespace mongo {
newOnes.push_back(&m);
}
}
-
+
// change timeout settings, if necessary
ScopedConn conn(m.h.toString());
conn.setTimeout(c.ho.heartbeatTimeoutMillis/1000.0);
@@ -378,7 +380,7 @@ namespace mongo {
}
uassert( 13302, "replSet error self appears twice in the repl set configuration", me<=1 );
- if( reconf && config().members.size() != nfound )
+ if( reconf && config().members.size() != nfound )
additive = false;
}
@@ -388,14 +390,14 @@ namespace mongo {
_name = _cfg->_id;
assert( !_name.empty() );
- if( additive ) {
+ if( additive ) {
log() << "replSet info : additive change to configuration" << rsLog;
for( list<const ReplSetConfig::MemberCfg*>::const_iterator i = newOnes.begin(); i != newOnes.end(); i++ ) {
const ReplSetConfig::MemberCfg* m = *i;
Member *mi = new Member(m->h, m->_id, m, false);
- /** we will indicate that new members are up() initially so that we don't relinquish our
- primary state because we can't (transiently) see a majority. they should be up as we
+ /** we will indicate that new members are up() initially so that we don't relinquish our
+ primary state because we can't (transiently) see a majority. they should be up as we
check that new members are up before getting here on reconfig anyway.
*/
mi->get_hbinfo().health = 0.1;
@@ -414,14 +416,14 @@ namespace mongo {
int oldPrimaryId = -1;
{
const Member *p = box.getPrimary();
- if( p )
+ if( p )
oldPrimaryId = p->id();
}
forgetPrimary();
-
+
bool iWasArbiterOnly = _self ? iAmArbiterOnly() : false;
setSelfTo(0);
- for( vector<ReplSetConfig::MemberCfg>::iterator i = _cfg->members.begin(); i != _cfg->members.end(); i++ ) {
+ for( vector<ReplSetConfig::MemberCfg>::iterator i = _cfg->members.begin(); i != _cfg->members.end(); i++ ) {
const ReplSetConfig::MemberCfg& m = *i;
Member *mi;
if( m.h.isSelf() ) {
@@ -433,10 +435,11 @@ namespace mongo {
if (iWasArbiterOnly ^ iAmArbiterOnly()) {
_changeArbiterState();
}
-
+
if( (int)mi->id() == oldPrimaryId )
box.setSelfPrimary(mi);
- } else {
+ }
+ else {
mi = new Member(m.h, m._id, &m, false);
_members.push(mi);
startHealthTaskFor(mi);
@@ -472,32 +475,32 @@ namespace mongo {
else {
changeState(MemberState::RS_RECOVERING);
- // oplog will be allocated when sync begins
+ // oplog will be allocated when sync begins
/* TODO : could this cause two sync threads to exist (race condition)? */
boost::thread t(startSyncThread);
}
}
// Our own config must be the first one.
- bool ReplSetImpl::_loadConfigFinish(vector<ReplSetConfig>& cfgs) {
+ bool ReplSetImpl::_loadConfigFinish(vector<ReplSetConfig>& cfgs) {
int v = -1;
ReplSetConfig *highest = 0;
int myVersion = -2000;
int n = 0;
- for( vector<ReplSetConfig>::iterator i = cfgs.begin(); i != cfgs.end(); i++ ) {
+ for( vector<ReplSetConfig>::iterator i = cfgs.begin(); i != cfgs.end(); i++ ) {
ReplSetConfig& cfg = *i;
if( ++n == 1 ) myVersion = cfg.version;
- if( cfg.ok() && cfg.version > v ) {
+ if( cfg.ok() && cfg.version > v ) {
highest = &cfg;
v = cfg.version;
}
}
assert( highest );
- if( !initFromConfig(*highest) )
+ if( !initFromConfig(*highest) )
return false;
- if( highest->version > myVersion && highest->version >= 0 ) {
+ if( highest->version > myVersion && highest->version >= 0 ) {
log() << "replSet got config version " << highest->version << " from a remote, saving locally" << rsLog;
writelock lk("admin.");
highest->saveConfigLocally(BSONObj());
@@ -511,7 +514,7 @@ namespace mongo {
startupStatusMsg = "loading " + rsConfigNs + " config (LOADINGCONFIG)";
try {
vector<ReplSetConfig> configs;
- try {
+ try {
configs.push_back( ReplSetConfig(HostAndPort::me()) );
}
catch(DBException& e) {
@@ -519,26 +522,26 @@ namespace mongo {
throw;
}
for( vector<HostAndPort>::const_iterator i = _seeds->begin(); i != _seeds->end(); i++ ) {
- try {
+ try {
configs.push_back( ReplSetConfig(*i) );
}
- catch( DBException& e ) {
+ catch( DBException& e ) {
log() << "replSet exception trying to load config from " << *i << " : " << e.toString() << rsLog;
}
}
- if( discoveredSeed ) {
+ if( discoveredSeed ) {
try {
configs.push_back( ReplSetConfig(HostAndPort(*discoveredSeed)) );
}
- catch( DBException& ) {
+ catch( DBException& ) {
log(1) << "replSet exception trying to load config from discovered seed " << *discoveredSeed << rsLog;
}
}
int nok = 0;
int nempty = 0;
- for( vector<ReplSetConfig>::iterator i = configs.begin(); i != configs.end(); i++ ) {
+ for( vector<ReplSetConfig>::iterator i = configs.begin(); i != configs.end(); i++ ) {
if( i->ok() )
nok++;
if( i->empty() )
@@ -551,7 +554,7 @@ namespace mongo {
startupStatusMsg = "can't get " + rsConfigNs + " config from self or any seed (EMPTYCONFIG)";
log() << "replSet can't get " << rsConfigNs << " config from self or any seed (EMPTYCONFIG)" << rsLog;
static unsigned once;
- if( ++once == 1 )
+ if( ++once == 1 )
log() << "replSet info you may need to run replSetInitiate -- rs.initiate() in the shell -- if that is not already done" << rsLog;
if( _seeds->size() == 0 )
log(1) << "replSet info no seed hosts were specified on the --replSet command line" << rsLog;
@@ -566,13 +569,13 @@ namespace mongo {
continue;
}
- if( !_loadConfigFinish(configs) ) {
+ if( !_loadConfigFinish(configs) ) {
log() << "replSet info Couldn't load config yet. Sleeping 20sec and will try again." << rsLog;
sleepsecs(20);
continue;
}
}
- catch(DBException& e) {
+ catch(DBException& e) {
startupStatus = BADCONFIG;
startupStatusMsg = "replSet error loading set config (BADCONFIG)";
log() << "replSet error loading configurations " << e.toString() << rsLog;
@@ -587,22 +590,21 @@ namespace mongo {
startupStatus = STARTED;
}
- void ReplSetImpl::_fatal()
- {
+ void ReplSetImpl::_fatal() {
//lock l(this);
box.set(MemberState::RS_FATAL, 0);
//sethbmsg("fatal error");
- log() << "replSet error fatal, stopping replication" << rsLog;
+ log() << "replSet error fatal, stopping replication" << rsLog;
}
- void ReplSet::haveNewConfig(ReplSetConfig& newConfig, bool addComment) {
+ void ReplSet::haveNewConfig(ReplSetConfig& newConfig, bool addComment) {
lock l(this); // convention is to lock replset before taking the db rwlock
writelock lk("");
bo comment;
if( addComment )
comment = BSON( "msg" << "Reconfig set" << "version" << newConfig.version );
newConfig.saveConfigLocally(comment);
- try {
+ try {
initFromConfig(newConfig, true);
log() << "replSet replSetReconfig new config saved locally" << rsLog;
}
@@ -615,7 +617,7 @@ namespace mongo {
log() << "replSet error unexpected exception in haveNewConfig() : " << e.toString() << rsLog;
_fatal();
}
- catch(...) {
+ catch(...) {
log() << "replSet error unexpected exception in haveNewConfig()" << rsLog;
_fatal();
}
@@ -626,20 +628,20 @@ namespace mongo {
ReplSetConfig c(o);
if( c.version > rs->config().version )
theReplSet->haveNewConfig(c, false);
- else {
- log() << "replSet info msgReceivedNewConfig but version isn't higher " <<
- c.version << ' ' << rs->config().version << rsLog;
+ else {
+ log() << "replSet info msgReceivedNewConfig but version isn't higher " <<
+ c.version << ' ' << rs->config().version << rsLog;
}
}
- /* forked as a thread during startup
- it can run quite a while looking for config. but once found,
+ /* forked as a thread during startup
+ it can run quite a while looking for config. but once found,
a separate thread takes over as ReplSetImpl::Manager, and this thread
terminates.
*/
void startReplSets(ReplSetCmdline *replSetCmdline) {
Client::initThread("startReplSets");
- try {
+ try {
assert( theReplSet == 0 );
if( replSetCmdline == 0 ) {
assert(!replSet);
@@ -650,9 +652,9 @@ namespace mongo {
}
(theReplSet = new ReplSet(*replSetCmdline))->go();
}
- catch(std::exception& e) {
+ catch(std::exception& e) {
log() << "replSet caught exception in startReplSets thread: " << e.what() << rsLog;
- if( theReplSet )
+ if( theReplSet )
theReplSet->fatal();
}
cc().shutdown();
@@ -660,10 +662,9 @@ namespace mongo {
}
-namespace boost {
+namespace boost {
- void assertion_failed(char const * expr, char const * function, char const * file, long line)
- {
+ void assertion_failed(char const * expr, char const * function, char const * file, long line) {
mongo::log() << "boost assertion failure " << expr << ' ' << function << ' ' << file << ' ' << line << endl;
}
diff --git a/db/repl/rs.h b/db/repl/rs.h
index b41807e3920..1419ad6e9f5 100644
--- a/db/repl/rs.h
+++ b/db/repl/rs.h
@@ -68,8 +68,8 @@ namespace mongo {
bool busyWithElectSelf;
int _primary;
- /** @param two - if true two primaries were seen. this can happen transiently, in addition to our
- polling being only occasional. in this case null is returned, but the caller should
+ /** @param two - if true two primaries were seen. this can happen transiently, in addition to our
+ polling being only occasional. in this case null is returned, but the caller should
not assume the primary role itself in that situation.
*/
const Member* findOtherPrimary(bool& two);
@@ -87,7 +87,7 @@ namespace mongo {
class Consensus {
ReplSetImpl &rs;
- struct LastYea {
+ struct LastYea {
LastYea() : when(0), who(0xffffffff) { }
time_t when;
unsigned who;
@@ -99,12 +99,12 @@ namespace mongo {
bool weAreFreshest(bool& allUp, int& nTies);
bool sleptLast; // slept last elect() pass
public:
- Consensus(ReplSetImpl *t) : rs(*t) {
+ Consensus(ReplSetImpl *t) : rs(*t) {
sleptLast = false;
steppedDown = 0;
}
- /* if we've stepped down, this is when we are allowed to try to elect ourself again.
+ /* if we've stepped down, this is when we are allowed to try to elect ourself again.
todo: handle possible weirdnesses at clock skews etc.
*/
time_t steppedDown;
@@ -118,7 +118,7 @@ namespace mongo {
};
/** most operations on a ReplSet object should be done while locked. that logic implemented here. */
- class RSBase : boost::noncopyable {
+ class RSBase : boost::noncopyable {
public:
const unsigned magic;
void assertValid() { assert( magic == 0x12345677 ); }
@@ -128,30 +128,30 @@ namespace mongo {
ThreadLocalValue<bool> _lockedByMe;
protected:
RSBase() : magic(0x12345677), m("RSBase"), _locked(0) { }
- ~RSBase() {
+ ~RSBase() {
/* this can happen if we throw in the constructor; otherwise never happens. thus we log it as it is quite unusual. */
log() << "replSet ~RSBase called" << rsLog;
}
- class lock {
+ class lock {
RSBase& rsbase;
auto_ptr<scoped_lock> sl;
public:
- lock(RSBase* b) : rsbase(*b) {
+ lock(RSBase* b) : rsbase(*b) {
if( rsbase._lockedByMe.get() )
return; // recursive is ok...
sl.reset( new scoped_lock(rsbase.m) );
DEV assert(rsbase._locked == 0);
- rsbase._locked++;
+ rsbase._locked++;
rsbase._lockedByMe.set(true);
}
- ~lock() {
+ ~lock() {
if( sl.get() ) {
assert( rsbase._lockedByMe.get() );
DEV assert(rsbase._locked == 1);
rsbase._lockedByMe.set(false);
- rsbase._locked--;
+ rsbase._locked--;
}
}
};
@@ -160,11 +160,11 @@ namespace mongo {
/* for asserts */
bool locked() const { return _locked != 0; }
- /* if true, is locked, and was locked by this thread. note if false, it could be in the lock or not for another thread.
+ /* if true, is locked, and was locked by this thread. note if false, it could be in the lock or not for another thread.
just for asserts & such so we can make the contracts clear on who locks what when.
we don't use these locks that frequently, so the little bit of overhead is fine.
*/
- bool lockedByMe() { return _lockedByMe.get(); }
+ bool lockedByMe() { return _lockedByMe.get(); }
};
class ReplSetHealthPollTask;
@@ -177,19 +177,19 @@ namespace mongo {
MemberState state;
const Member *primary;
};
- const SP get() {
+ const SP get() {
scoped_lock lk(m);
return sp;
}
MemberState getState() const { return sp.state; }
const Member* getPrimary() const { return sp.primary; }
- void change(MemberState s, const Member *self) {
+ void change(MemberState s, const Member *self) {
scoped_lock lk(m);
- if( sp.state != s ) {
+ if( sp.state != s ) {
log() << "replSet " << s.toString() << rsLog;
}
sp.state = s;
- if( s.primary() ) {
+ if( s.primary() ) {
sp.primary = self;
}
else {
@@ -197,17 +197,17 @@ namespace mongo {
sp.primary = 0;
}
}
- void set(MemberState s, const Member *p) {
+ void set(MemberState s, const Member *p) {
scoped_lock lk(m);
sp.state = s; sp.primary = p;
}
void setSelfPrimary(const Member *self) { change(MemberState::RS_PRIMARY, self); }
- void setOtherPrimary(const Member *mem) {
+ void setOtherPrimary(const Member *mem) {
scoped_lock lk(m);
assert( !sp.state.primary() );
sp.primary = mem;
}
- void noteRemoteIsPrimary(const Member *remote) {
+ void noteRemoteIsPrimary(const Member *remote) {
scoped_lock lk(m);
if( !sp.state.secondary() && !sp.state.fatal() )
sp.state = MemberState::RS_RECOVERING;
@@ -218,7 +218,7 @@ namespace mongo {
mongo::mutex m;
SP sp;
};
-
+
void parseReplsetCmdLine(string cfgString, string& setname, vector<HostAndPort>& seeds, set<HostAndPort>& seedSet );
/** Parameter given to the --replSet command line option (parsed).
@@ -233,15 +233,15 @@ namespace mongo {
};
/* information about the entire repl set, such as the various servers in the set, and their state */
- /* note: We currently do not free mem when the set goes away - it is assumed the replset is a
+ /* note: We currently do not free mem when the set goes away - it is assumed the replset is a
singleton and long lived.
*/
class ReplSetImpl : protected RSBase {
public:
/** info on our state if the replset isn't yet "up". for example, if we are pre-initiation. */
- enum StartupStatus {
- PRESTART=0, LOADINGCONFIG=1, BADCONFIG=2, EMPTYCONFIG=3,
- EMPTYUNREACHABLE=4, STARTED=5, SOON=6
+ enum StartupStatus {
+ PRESTART=0, LOADINGCONFIG=1, BADCONFIG=2, EMPTYCONFIG=3,
+ EMPTYUNREACHABLE=4, STARTED=5, SOON=6
};
static StartupStatus startupStatus;
static string startupStatusMsg;
@@ -273,11 +273,11 @@ namespace mongo {
void _changeArbiterState();
protected:
// "heartbeat message"
- // sent in requestHeartbeat responses in field "hbm"
+ // sent in requestHeartbeat responses in field "hbm"
char _hbmsg[256]; // we change this unlocked, thus not a std::string
time_t _hbmsgTime; // when it was logged
public:
- void sethbmsg(string s, int logLevel = 0);
+ void sethbmsg(string s, int logLevel = 0);
protected:
bool initFromConfig(ReplSetConfig& c, bool reconf=false); // true if ok; throws if config really bad; false if config doesn't include self
void _fillIsMaster(BSONObjBuilder&);
@@ -287,7 +287,7 @@ namespace mongo {
MemberState state() const { return box.getState(); }
void _fatal();
void _getOplogDiagsAsHtml(unsigned server_id, stringstream& ss) const;
- void _summarizeAsHtml(stringstream&) const;
+ void _summarizeAsHtml(stringstream&) const;
void _summarizeStatus(BSONObjBuilder&) const; // for replSetGetStatus command
/* throws exception if a problem initializing. */
@@ -301,7 +301,7 @@ namespace mongo {
const vector<HostAndPort> *_seeds;
ReplSetConfig *_cfg;
- /** load our configuration from local.system.replset. try seed machines too.
+ /** load our configuration from local.system.replset. try seed machines too.
@return true if ok; throws if config really bad; false if config doesn't include self
*/
bool _loadConfigFinish(vector<ReplSetConfig>& v);
@@ -312,10 +312,10 @@ namespace mongo {
bool iAmArbiterOnly() const { return myConfig().arbiterOnly; }
bool iAmPotentiallyHot() const { return myConfig().potentiallyHot(); }
protected:
- Member *_self;
+ Member *_self;
bool _buildIndexes; // = _self->config().buildIndexes
void setSelfTo(Member *); // use this as it sets buildIndexes var
- private:
+ private:
List1<Member> _members; /* all members of the set EXCEPT self. */
public:
@@ -354,7 +354,7 @@ namespace mongo {
void syncThread();
};
- class ReplSet : public ReplSetImpl {
+ class ReplSet : public ReplSetImpl {
public:
ReplSet(ReplSetCmdline& replSetCmdline) : ReplSetImpl(replSetCmdline) { }
@@ -364,7 +364,7 @@ namespace mongo {
// for the replSetFreeze command
bool freeze(int secs) { return _freeze(secs); }
- string selfFullName() {
+ string selfFullName() {
lock lk(this);
return _self->fullName();
}
@@ -385,7 +385,7 @@ namespace mongo {
void summarizeStatus(BSONObjBuilder& b) const { _summarizeStatus(b); }
void fillIsMaster(BSONObjBuilder& b) { _fillIsMaster(b); }
- /* we have a new config (reconfig) - apply it.
+ /* we have a new config (reconfig) - apply it.
@param comment write a no-op comment to the oplog about it. only makes sense if one is primary and initiating the reconf.
*/
void haveNewConfig(ReplSetConfig& c, bool comment);
@@ -396,16 +396,16 @@ namespace mongo {
bool lockedByMe() { return RSBase::lockedByMe(); }
// heartbeat msg to send to others; descriptive diagnostic info
- string hbmsg() const {
+ string hbmsg() const {
if( time(0)-_hbmsgTime > 120 ) return "";
- return _hbmsg;
+ return _hbmsg;
}
};
- /** base class for repl set commands. checks basic things such as in rs mode before the command
+ /** base class for repl set commands. checks basic things such as in rs mode before the command
does its real work
*/
- class ReplSetCommand : public Command {
+ class ReplSetCommand : public Command {
protected:
ReplSetCommand(const char * s, bool show=false) : Command(s, show) { }
virtual bool slaveOk() const { return true; }
@@ -414,14 +414,14 @@ namespace mongo {
virtual LockType locktype() const { return NONE; }
virtual void help( stringstream &help ) const { help << "internal"; }
bool check(string& errmsg, BSONObjBuilder& result) {
- if( !replSet ) {
+ if( !replSet ) {
errmsg = "not running with --replSet";
return false;
}
if( theReplSet == 0 ) {
result.append("startupStatus", ReplSet::startupStatus);
errmsg = ReplSet::startupStatusMsg.empty() ? "replset unknown error 2" : ReplSet::startupStatusMsg;
- if( ReplSet::startupStatus == 3 )
+ if( ReplSet::startupStatus == 3 )
result.append("info", "run rs.initiate(...) if not yet done for the set");
return false;
}
@@ -431,9 +431,8 @@ namespace mongo {
/** inlines ----------------- */
- inline Member::Member(HostAndPort h, unsigned ord, const ReplSetConfig::MemberCfg *c, bool self) :
- _config(*c), _h(h), _hbinfo(ord)
- {
+ inline Member::Member(HostAndPort h, unsigned ord, const ReplSetConfig::MemberCfg *c, bool self) :
+ _config(*c), _h(h), _hbinfo(ord) {
if( self )
_hbinfo.health = 1.0;
}
diff --git a/db/repl/rs_config.cpp b/db/repl/rs_config.cpp
index 8537873a851..03ab914aee9 100644
--- a/db/repl/rs_config.cpp
+++ b/db/repl/rs_config.cpp
@@ -27,11 +27,11 @@
using namespace bson;
-namespace mongo {
+namespace mongo {
void logOpInitiate(const bo&);
- void assertOnlyHas(BSONObj o, const set<string>& fields) {
+ void assertOnlyHas(BSONObj o, const set<string>& fields) {
BSONObj::iterator i(o);
while( i.more() ) {
BSONElement e = i.next();
@@ -41,7 +41,7 @@ namespace mongo {
}
}
- list<HostAndPort> ReplSetConfig::otherMemberHostnames() const {
+ list<HostAndPort> ReplSetConfig::otherMemberHostnames() const {
list<HostAndPort> L;
for( vector<MemberCfg>::const_iterator i = members.begin(); i != members.end(); i++ ) {
if( !i->h.isSelf() )
@@ -49,12 +49,12 @@ namespace mongo {
}
return L;
}
-
+
/* comment MUST only be set when initiating the set by the initiator */
- void ReplSetConfig::saveConfigLocally(bo comment) {
+ void ReplSetConfig::saveConfigLocally(bo comment) {
checkRsConfig();
log() << "replSet info saving a newer config version to local.system.replset" << rsLog;
- {
+ {
writelock lk("");
Client::Context cx( rsConfigNs );
cx.db()->flushFiles(true);
@@ -70,21 +70,21 @@ namespace mongo {
}
DEV log() << "replSet saveConfigLocally done" << rsLog;
}
-
- /*static*/
- /*void ReplSetConfig::receivedNewConfig(BSONObj cfg) {
+
+ /*static*/
+ /*void ReplSetConfig::receivedNewConfig(BSONObj cfg) {
if( theReplSet )
return; // this is for initial setup only, so far. todo
ReplSetConfig c(cfg);
writelock lk("admin.");
- if( theReplSet )
+ if( theReplSet )
return;
c.saveConfigLocally(bo());
}*/
- bo ReplSetConfig::MemberCfg::asBson() const {
+ bo ReplSetConfig::MemberCfg::asBson() const {
bob b;
b << "_id" << _id;
b.append("host", h.toString());
@@ -94,7 +94,7 @@ namespace mongo {
if( slaveDelay ) b << "slaveDelay" << slaveDelay;
if( hidden ) b << "hidden" << hidden;
if( !buildIndexes ) b << "buildIndexes" << buildIndexes;
- if( !tags.empty() ) {
+ if( !tags.empty() ) {
BSONArrayBuilder a;
for( set<string>::const_iterator i = tags.begin(); i != tags.end(); i++ )
a.append(*i);
@@ -103,15 +103,15 @@ namespace mongo {
return b.obj();
}
- bo ReplSetConfig::asBson() const {
+ bo ReplSetConfig::asBson() const {
bob b;
b.append("_id", _id).append("version", version);
if( !ho.isDefault() || !getLastErrorDefaults.isEmpty() ) {
bob settings;
if( !ho.isDefault() )
- settings << "heartbeatConnRetries " << ho.heartbeatConnRetries <<
- "heartbeatSleep" << ho.heartbeatSleepMillis / 1000.0 <<
- "heartbeatTimeout" << ho.heartbeatTimeoutMillis / 1000.0;
+ settings << "heartbeatConnRetries " << ho.heartbeatConnRetries <<
+ "heartbeatSleep" << ho.heartbeatSleepMillis / 1000.0 <<
+ "heartbeatTimeout" << ho.heartbeatTimeoutMillis / 1000.0;
if( !getLastErrorDefaults.isEmpty() )
settings << "getLastErrorDefaults" << getLastErrorDefaults;
b << "settings" << settings.obj();
@@ -129,7 +129,7 @@ namespace mongo {
uassert(13126, "bad Member config", expr);
}
- void ReplSetConfig::MemberCfg::check() const{
+ void ReplSetConfig::MemberCfg::check() const {
mchk(_id >= 0 && _id <= 255);
mchk(priority >= 0 && priority <= 1000);
mchk(votes >= 0 && votes <= 100);
@@ -143,7 +143,7 @@ namespace mongo {
static const string legal[] = {"state", "name", "_id","optime"};
static const set<string> legals(legal, legal + 4);
assertOnlyHas(initialSync, legals);
-
+
if (initialSync.hasElement("state")) {
uassert(13525, "initialSync source state must be 1 or 2",
initialSync["state"].isNumber() &&
@@ -167,50 +167,50 @@ namespace mongo {
}
/** @param o old config
- @param n new config
+ @param n new config
*/
- /*static*/
- bool ReplSetConfig::legalChange(const ReplSetConfig& o, const ReplSetConfig& n, string& errmsg) {
+ /*static*/
+ bool ReplSetConfig::legalChange(const ReplSetConfig& o, const ReplSetConfig& n, string& errmsg) {
assert( theReplSet );
- if( o._id != n._id ) {
- errmsg = "set name may not change";
+ if( o._id != n._id ) {
+ errmsg = "set name may not change";
return false;
}
/* TODO : wonder if we need to allow o.version < n.version only, which is more lenient.
- if someone had some intermediate config this node doesn't have, that could be
+ if someone had some intermediate config this node doesn't have, that could be
necessary. but then how did we become primary? so perhaps we are fine as-is.
*/
- if( o.version + 1 != n.version ) {
+ if( o.version + 1 != n.version ) {
errmsg = "version number wrong";
return false;
}
map<HostAndPort,const ReplSetConfig::MemberCfg*> old;
- for( vector<ReplSetConfig::MemberCfg>::const_iterator i = o.members.begin(); i != o.members.end(); i++ ) {
+ for( vector<ReplSetConfig::MemberCfg>::const_iterator i = o.members.begin(); i != o.members.end(); i++ ) {
old[i->h] = &(*i);
}
int me = 0;
- for( vector<ReplSetConfig::MemberCfg>::const_iterator i = n.members.begin(); i != n.members.end(); i++ ) {
+ for( vector<ReplSetConfig::MemberCfg>::const_iterator i = n.members.begin(); i != n.members.end(); i++ ) {
const ReplSetConfig::MemberCfg& m = *i;
- if( old.count(m.h) ) {
+ if( old.count(m.h) ) {
const ReplSetConfig::MemberCfg& oldCfg = *old[m.h];
- if( oldCfg._id != m._id ) {
+ if( oldCfg._id != m._id ) {
log() << "replSet reconfig error with member: " << m.h.toString() << rsLog;
uasserted(13432, "_id may not change for members");
}
- if( oldCfg.buildIndexes != m.buildIndexes ) {
+ if( oldCfg.buildIndexes != m.buildIndexes ) {
log() << "replSet reconfig error with member: " << m.h.toString() << rsLog;
uasserted(13476, "buildIndexes may not change for members");
}
/* are transitions to and from arbiterOnly guaranteed safe? if not, we should disallow here.
there is a test at replsets/replsetarb3.js */
- if( oldCfg.arbiterOnly != m.arbiterOnly ) {
+ if( oldCfg.arbiterOnly != m.arbiterOnly ) {
log() << "replSet reconfig error with member: " << m.h.toString() << " arbiterOnly cannot change. remove and readd the member instead " << rsLog;
uasserted(13510, "arbiterOnly may not change for members");
}
}
- if( m.h.isSelf() )
+ if( m.h.isSelf() )
me++;
}
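
Collected from the checks above, a reconfig is legal only if the set name is unchanged, the version is bumped by exactly one, and _id, buildIndexes, and arbiterOnly are unchanged for every host carried over. For example, against an old config { _id : "rs0", version : 3 }:

    // { _id : "rs0",   version : 4 }  -> ok (exactly old version + 1)
    // { _id : "rs0",   version : 6 }  -> "version number wrong"
    // { _id : "other", version : 4 }  -> "set name may not change"
    // flipping arbiterOnly on an existing member -> uasserted(13510):
    //   remove the member and re-add it instead
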
@@ -224,21 +224,21 @@ namespace mongo {
return true;
}
- void ReplSetConfig::clear() {
+ void ReplSetConfig::clear() {
version = -5;
_ok = false;
}
- void ReplSetConfig::checkRsConfig() const {
+ void ReplSetConfig::checkRsConfig() const {
uassert(13132,
- "nonmatching repl set name in _id field; check --replSet command line",
- _id == cmdLine.ourSetName());
+ "nonmatching repl set name in _id field; check --replSet command line",
+ _id == cmdLine.ourSetName());
uassert(13308, "replSet bad config version #", version > 0);
uassert(13133, "replSet bad config no members", members.size() >= 1);
uassert(13309, "replSet bad config maximum number of members is 12", members.size() <= 12);
{
unsigned voters = 0;
- for( vector<MemberCfg>::const_iterator i = members.begin(); i != members.end(); ++i ) {
+ for( vector<MemberCfg>::const_iterator i = members.begin(); i != members.end(); ++i ) {
if( i->votes )
voters++;
}
@@ -268,7 +268,8 @@ namespace mongo {
if( settings["heartbeatTimeout"].ok() )
ho.heartbeatTimeoutMillis = (unsigned) (settings["heartbeatTimeout"].Number() * 1000);
ho.check();
- try { getLastErrorDefaults = settings["getLastErrorDefaults"].Obj().copy(); } catch(...) { }
+ try { getLastErrorDefaults = settings["getLastErrorDefaults"].Obj().copy(); }
+ catch(...) { }
}
set<string> hosts;
@@ -286,33 +287,35 @@ namespace mongo {
BSONObj mobj = members[i].Obj();
MemberCfg m;
try {
- static const string legal[] =
- {"_id","votes","priority","host", "hidden","slaveDelay",
- "arbiterOnly","buildIndexes","tags","initialSync"};
+ static const string legal[] = {
+ "_id","votes","priority","host", "hidden","slaveDelay",
+ "arbiterOnly","buildIndexes","tags","initialSync"
+ };
static const set<string> legals(legal, legal + 10);
assertOnlyHas(mobj, legals);
- try {
+ try {
m._id = (int) mobj["_id"].Number();
- } catch(...) {
+ }
+ catch(...) {
/* TODO: use of string exceptions may be problematic for reconfig case! */
- throw "_id must be numeric";
+ throw "_id must be numeric";
}
string s;
try {
s = mobj["host"].String();
m.h = HostAndPort(s);
}
- catch(...) {
+ catch(...) {
throw string("bad or missing host field? ") + mobj.toString();
}
- if( m.h.isLocalHost() )
+ if( m.h.isLocalHost() )
localhosts++;
m.arbiterOnly = mobj.getBoolField("arbiterOnly");
m.slaveDelay = mobj["slaveDelay"].numberInt();
if( mobj.hasElement("hidden") )
m.hidden = mobj.getBoolField("hidden");
- if( mobj.hasElement("buildIndexes") )
+ if( mobj.hasElement("buildIndexes") )
m.buildIndexes = mobj.getBoolField("buildIndexes");
if( mobj.hasElement("priority") )
m.priority = mobj["priority"].Number();
@@ -328,13 +331,13 @@ namespace mongo {
}
m.check();
}
- catch( const char * p ) {
+ catch( const char * p ) {
log() << "replSet cfg parsing exception for members[" << i << "] " << p << rsLog;
stringstream ss;
ss << "replSet members[" << i << "] " << p;
uassert(13107, ss.str(), false);
}
- catch(DBException& e) {
+ catch(DBException& e) {
log() << "replSet cfg parsing exception for members[" << i << "] " << e.what() << rsLog;
stringstream ss;
ss << "bad config for member[" << i << "] " << e.what();
@@ -356,7 +359,7 @@ namespace mongo {
uassert(13122, "bad repl set config?", expr);
}
- ReplSetConfig::ReplSetConfig(BSONObj cfg) {
+ ReplSetConfig::ReplSetConfig(BSONObj cfg) {
clear();
from(cfg);
configAssert( version < 0 /*unspecified*/ || (version >= 1 && version <= 5000) );
@@ -384,17 +387,17 @@ namespace mongo {
BSONObj info;
log() << "trying to contact " << h.toString() << rsLog;
bool ok = requestHeartbeat(setname, "", h.toString(), info, -2, theirVersion);
- if( info["rs"].trueValue() ) {
+ if( info["rs"].trueValue() ) {
// yes, it is a replica set, although perhaps not yet initialized
}
else {
if( !ok ) {
log() << "replSet TEMP !ok heartbeating " << h.toString() << " on cfg load" << rsLog;
- if( !info.isEmpty() )
+ if( !info.isEmpty() )
log() << "replSet info " << h.toString() << " : " << info.toString() << rsLog;
return;
}
- {
+ {
stringstream ss;
ss << "replSet error: member " << h.toString() << " is not in --replSet mode";
msgassertedNoTrace(13260, ss.str().c_str()); // not caught as not a user exception - we want it not caught
@@ -424,14 +427,14 @@ namespace mongo {
if( count > 1 )
uasserted(13109, str::stream() << "multiple rows in " << rsConfigNs << " not supported host: " << h.toString());
-
+
if( cfg.isEmpty() ) {
version = EMPTYCONFIG;
return;
}
version = -1;
}
- catch( DBException& e) {
+ catch( DBException& e) {
version = v;
log(level) << "replSet load config couldn't get from " << h.toString() << ' ' << e.what() << rsLog;
return;
diff --git a/db/repl/rs_config.h b/db/repl/rs_config.h
index aef32389832..7d43fe6de6f 100644
--- a/db/repl/rs_config.h
+++ b/db/repl/rs_config.h
@@ -23,7 +23,7 @@
#include "../../util/hostandport.h"
#include "health.h"
-namespace mongo {
+namespace mongo {
/* singleton config object is stored here */
const string rsConfigNs = "local.system.replset";
@@ -31,7 +31,7 @@ namespace mongo {
class ReplSetConfig {
enum { EMPTYCONFIG = -2 };
public:
- /* if something is misconfigured, throws an exception.
+ /* if something is misconfigured, throws an exception.
if couldn't be queried or is just blank, ok() will be false.
*/
ReplSetConfig(const HostAndPort& h);
@@ -56,10 +56,10 @@ namespace mongo {
void check() const; /* check validity, assert if not. */
BSONObj asBson() const;
bool potentiallyHot() const { return !arbiterOnly && priority > 0; }
- bool operator==(const MemberCfg& r) const {
- return _id==r._id && votes == r.votes && h == r.h && priority == r.priority &&
- arbiterOnly == r.arbiterOnly && slaveDelay == r.slaveDelay && hidden == r.hidden &&
- buildIndexes == buildIndexes;
+ bool operator==(const MemberCfg& r) const {
+ return _id==r._id && votes == r.votes && h == r.h && priority == r.priority &&
+ arbiterOnly == r.arbiterOnly && slaveDelay == r.slaveDelay && hidden == r.hidden &&
+ buildIndexes == r.buildIndexes; // compare against r; self-comparison was always true
}
bool operator!=(const MemberCfg& r) const { return !(*this == r); }
};
diff --git a/db/repl/rs_exception.h b/db/repl/rs_exception.h
index e71cad277c1..fc372fc241c 100755..100644
--- a/db/repl/rs_exception.h
+++ b/db/repl/rs_exception.h
@@ -1,15 +1,15 @@
-// @file rs_exception.h
-
-#pragma once
-
-namespace mongo {
-
- class VoteException : public std::exception {
+// @file rs_exception.h
+
+#pragma once
+
+namespace mongo {
+
+ class VoteException : public std::exception {
public:
- const char * what() const throw () { return "VoteException"; }
+ const char * what() const throw () { return "VoteException"; }
};
- class RetryAfterSleepException : public std::exception {
+ class RetryAfterSleepException : public std::exception {
public:
const char * what() const throw () { return "RetryAfterSleepException"; }
};
diff --git a/db/repl/rs_initialsync.cpp b/db/repl/rs_initialsync.cpp
index 2d8b3303b1e..5a540594305 100644
--- a/db/repl/rs_initialsync.cpp
+++ b/db/repl/rs_initialsync.cpp
@@ -34,17 +34,17 @@ namespace mongo {
// add try/catch with sleep
- void isyncassert(const char *msg, bool expr) {
- if( !expr ) {
+ void isyncassert(const char *msg, bool expr) {
+ if( !expr ) {
string m = str::stream() << "initial sync " << msg;
theReplSet->sethbmsg(m, 0);
uasserted(13404, m);
}
}
- void ReplSetImpl::syncDoInitialSync() {
+ void ReplSetImpl::syncDoInitialSync() {
createOplog();
-
+
while( 1 ) {
try {
_syncDoInitialSync();
@@ -57,14 +57,14 @@ namespace mongo {
}
}
- bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication,
- bool slaveOk, bool useReplAuth, bool snapshot);
+ bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication,
+ bool slaveOk, bool useReplAuth, bool snapshot);
/* todo : progress metering to sethbmsg. */
static bool clone(const char *master, string db) {
string err;
return cloneFrom(master, err, db, false,
- /* slave_ok */ true, true, false);
+ /* slave_ok */ true, true, false);
}
void _logOpObjRS(const BSONObj& op);
@@ -74,11 +74,11 @@ namespace mongo {
static void emptyOplog() {
writelock lk(rsoplog);
Client::Context ctx(rsoplog);
- NamespaceDetails *d = nsdetails(rsoplog);
+ NamespaceDetails *d = nsdetails(rsoplog);
- // temp
- if( d && d->stats.nrecords == 0 )
- return; // already empty, ok.
+ // temp
+ if( d && d->stats.nrecords == 0 )
+ return; // already empty, ok.
log(1) << "replSet empty oplog" << rsLog;
d->emptyCappedCollection(rsoplog);
@@ -87,10 +87,10 @@ namespace mongo {
string errmsg;
bob res;
dropCollection(rsoplog, errmsg, res);
- log() << "replSet recreated oplog so it is empty. todo optimize this..." << rsLog;
- createOplog();*/
+ log() << "replSet recreated oplog so it is empty. todo optimize this..." << rsLog;
+ createOplog();*/
- // TEMP: restart to recreate empty oplog
+ // TEMP: restart to recreate empty oplog
//log() << "replSet FATAL error during initial sync. mongod restart required." << rsLog;
//dbexit( EXIT_CLEAN );
@@ -104,10 +104,10 @@ namespace mongo {
}
/**
- * Choose a member to sync from.
+ * Choose a member to sync from.
*
* The initialSync option is an object with one k/v pair, one of:
- *
+ *
* "state" : 1|2
* "name" : "host"
* "_id" : N
@@ -155,44 +155,44 @@ namespace mongo {
optime = sync["optime"]._opTime();
}
}
-
+
for( Member *m = head(); m; m = m->next() ) {
if (!m->hbinfo().up() ||
- (m->state() != MemberState::RS_SECONDARY &&
- m->state() != MemberState::RS_PRIMARY) ||
- (secondaryOnly && m->state() != MemberState::RS_SECONDARY) ||
- (id != -1 && (int)m->id() != id) ||
- (name != 0 && strcmp(name, m->fullName().c_str()) != 0) ||
- (isOpTime && optime >= m->hbinfo().opTime)) {
+ (m->state() != MemberState::RS_SECONDARY &&
+ m->state() != MemberState::RS_PRIMARY) ||
+ (secondaryOnly && m->state() != MemberState::RS_SECONDARY) ||
+ (id != -1 && (int)m->id() != id) ||
+ (name != 0 && strcmp(name, m->fullName().c_str()) != 0) ||
+ (isOpTime && optime >= m->hbinfo().opTime)) {
continue;
}
sethbmsg( str::stream() << "syncing to: " << m->fullName(), 0);
return const_cast<Member*>(m);
}
-
+
sethbmsg( str::stream() << "couldn't find a member matching the sync criteria: " <<
"\nstate? " << (secondaryOnly ? "2" : "none") <<
"\nname? " << (name ? name : "none") <<
"\n_id? " << id <<
"\noptime? " << optime.toStringPretty() );
-
+
return NULL;
}
-
+
/**
* Do the initial sync for this member.
*/
- void ReplSetImpl::_syncDoInitialSync() {
+ void ReplSetImpl::_syncDoInitialSync() {
sethbmsg("initial sync pending",0);
-
+
const Member *source = getMemberToSyncTo();
if (!source) {
sethbmsg("initial sync need a member to be primary or secondary to do our initial sync", 0);
sleepsecs(15);
return;
}
-
+
string sourceHostname = source->h().toString();
OplogReader r;
if( !r.connect(sourceHostname) ) {
@@ -202,13 +202,13 @@ namespace mongo {
}
BSONObj lastOp = r.getLastOp(rsoplog);
- if( lastOp.isEmpty() ) {
+ if( lastOp.isEmpty() ) {
sethbmsg("initial sync couldn't read remote oplog", 0);
sleepsecs(15);
return;
}
OpTime startingTS = lastOp["ts"]._opTime();
-
+
if (replSettings.fastsync) {
log() << "fastsync: skipping database clone" << rsLog;
}
@@ -229,7 +229,7 @@ namespace mongo {
Client::Context ctx(db);
ok = clone(sourceHostname.c_str(), db);
}
- if( !ok ) {
+ if( !ok ) {
sethbmsg( str::stream() << "initial sync error clone of " << db << " failed sleeping 5 minutes" ,0);
sleepsecs(300);
return;
@@ -242,14 +242,14 @@ namespace mongo {
isyncassert( "initial sync source must remain readable throughout our initial sync", source->state().readable() );
- /* our cloned copy will be strange until we apply oplog events that occurred
+ /* our cloned copy will be strange until we apply oplog events that occurred
through the process. we note that time point here. */
BSONObj minValid = r.getLastOp(rsoplog);
isyncassert( "getLastOp is empty ", !minValid.isEmpty() );
OpTime mvoptime = minValid["ts"]._opTime();
assert( !mvoptime.isNull() );
- /* apply relevant portion of the oplog
+ /* apply relevant portion of the oplog
*/
{
sethbmsg("initial sync initial oplog application");
@@ -257,13 +257,13 @@ namespace mongo {
if( ! initialSyncOplogApplication(source, /*applyGTE*/startingTS, /*minValid*/mvoptime) ) { // note we assume here that this call does not throw
log() << "replSet initial sync failed during applyoplog" << rsLog;
emptyOplog(); // otherwise we'll be up!
- lastOpTimeWritten = OpTime();
- lastH = 0;
+ lastOpTimeWritten = OpTime();
+ lastH = 0;
log() << "replSet cleaning up [1]" << rsLog;
{
writelock lk("local.");
Client::Context cx( "local." );
- cx.db()->flushFiles(true);
+ cx.db()->flushFiles(true);
}
log() << "replSet cleaning up [2]" << rsLog;
sleepsecs(5);
@@ -272,13 +272,13 @@ namespace mongo {
}
sethbmsg("initial sync finishing up",0);
-
+
assert( !box.getState().primary() ); // wouldn't make sense if we were.
{
writelock lk("local.");
Client::Context cx( "local." );
- cx.db()->flushFiles(true);
+ cx.db()->flushFiles(true);
try {
log() << "replSet set minValid=" << minValid["ts"]._opTime().toString() << rsLog;
}
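
Condensing the hunks above, the initial sync path runs the following sequence (a reading aid, not new behavior):

    // ReplSetImpl::_syncDoInitialSync, in outline:
    //  1. getMemberToSyncTo(): an up primary or secondary matching any
    //     initialSync criteria; if none, sleep 15s and return (caller retries).
    //  2. OplogReader r; r.connect(source); startingTS = last op in its oplog.
    //  3. clone each database from the source (skipped under --fastsync).
    //  4. minValid = the source's last op *after* cloning: our copy is not
    //     trustworthy until we have applied oplog entries up to that point.
    //  5. initialSyncOplogApplication(source, startingTS, minValid); on failure
    //     empty the oplog, zero lastOpTimeWritten/lastH, sleep, and start over.
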
diff --git a/db/repl/rs_initiate.cpp b/db/repl/rs_initiate.cpp
index 01178c52168..66134b6938b 100644
--- a/db/repl/rs_initiate.cpp
+++ b/db/repl/rs_initiate.cpp
@@ -31,10 +31,10 @@
using namespace bson;
using namespace mongoutils;
-namespace mongo {
+namespace mongo {
/* called on a reconfig AND on initiate
- throws
+ throws
@param initial true when initiating
*/
void checkMembersUpForConfigChange(const ReplSetConfig& cfg, bool initial) {
@@ -44,10 +44,10 @@ namespace mongo {
for( vector<ReplSetConfig::MemberCfg>::const_iterator i = cfg.members.begin(); i != cfg.members.end(); i++ ) {
if( i->h.isSelf() ) {
me++;
- if( me > 1 )
+ if( me > 1 )
selfs << ',';
selfs << i->h.toString();
- if( !i->potentiallyHot() ) {
+ if( !i->potentiallyHot() ) {
uasserted(13420, "initiation and reconfiguration of a replica set must be sent to a node that can become primary");
}
}
@@ -67,17 +67,17 @@ namespace mongo {
bool ok = false;
try {
int theirVersion = -1000;
- ok = requestHeartbeat(cfg._id, "", i->h.toString(), res, -1, theirVersion, initial/*check if empty*/);
- if( theirVersion >= cfg.version ) {
+ ok = requestHeartbeat(cfg._id, "", i->h.toString(), res, -1, theirVersion, initial/*check if empty*/);
+ if( theirVersion >= cfg.version ) {
stringstream ss;
ss << "replSet member " << i->h.toString() << " has too new a config version (" << theirVersion << ") to reconfigure";
uasserted(13259, ss.str());
}
}
- catch(DBException& e) {
+ catch(DBException& e) {
log() << "replSet cmufcc requestHeartbeat " << i->h.toString() << " : " << e.toString() << rsLog;
}
- catch(...) {
+ catch(...) {
log() << "replSet cmufcc error exception in requestHeartbeat?" << rsLog;
}
if( res.getBoolField("mismatch") )
@@ -107,7 +107,7 @@ namespace mongo {
trying to keep change small as release is near.
*/
const Member* m = theReplSet->findById( i->_id );
- if( m ) {
+ if( m ) {
// ok, so this was an existing member (wouldn't make sense to add to config a new member that is down)
assert( m->h().toString() == i->h.toString() );
allowFailure = true;
@@ -124,24 +124,24 @@ namespace mongo {
}
if( initial ) {
bool hasData = res["hasData"].Bool();
- uassert(13311, "member " + i->h.toString() + " has data already, cannot initiate set. All members except initiator must be empty.",
- !hasData || i->h.isSelf());
+ uassert(13311, "member " + i->h.toString() + " has data already, cannot initiate set. All members except initiator must be empty.",
+ !hasData || i->h.isSelf());
}
}
}
- class CmdReplSetInitiate : public ReplSetCommand {
+ class CmdReplSetInitiate : public ReplSetCommand {
public:
virtual LockType locktype() const { return NONE; }
CmdReplSetInitiate() : ReplSetCommand("replSetInitiate") { }
- virtual void help(stringstream& h) const {
- h << "Initiate/christen a replica set.";
+ virtual void help(stringstream& h) const {
+ h << "Initiate/christen a replica set.";
h << "\nhttp://www.mongodb.org/display/DOCS/Replica+Set+Commands";
}
virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
log() << "replSet replSetInitiate admin command received from client" << rsLog;
- if( !replSet ) {
+ if( !replSet ) {
errmsg = "server is not running with --replSet";
return false;
}
@@ -152,12 +152,12 @@ namespace mongo {
}
{
- // just make sure we can get a write lock before doing anything else. we'll reacquire one
- // later. of course it could be stuck then, but this check lowers the risk if weird things
+ // just make sure we can get a write lock before doing anything else. we'll reacquire one
+ // later. of course it could be stuck then, but this check lowers the risk if weird things
// are up.
time_t t = time(0);
writelock lk("");
- if( time(0)-t > 10 ) {
+ if( time(0)-t > 10 ) {
errmsg = "took a long time to get write lock, so not initiating. Initiate when server less busy?";
return false;
}
@@ -166,7 +166,7 @@ namespace mongo {
it is ok if the initiating member has *other* data than that.
*/
BSONObj o;
- if( Helpers::getFirst(rsoplog, o) ) {
+ if( Helpers::getFirst(rsoplog, o) ) {
errmsg = rsoplog + string(" is not empty on the initiating member. cannot initiate.");
return false;
}
@@ -205,7 +205,7 @@ namespace mongo {
configObj = b.obj();
log() << "replSet created this configuration for initiation : " << configObj.toString() << rsLog;
}
- else {
+ else {
configObj = cmdObj["replSetInitiate"].Obj();
}
@@ -214,7 +214,7 @@ namespace mongo {
ReplSetConfig newConfig(configObj);
parsed = true;
- if( newConfig.version > 1 ) {
+ if( newConfig.version > 1 ) {
errmsg = "can't initiate with a version number greater than 1";
return false;
}
@@ -226,7 +226,7 @@ namespace mongo {
log() << "replSet replSetInitiate all members seem up" << rsLog;
createOplog();
-
+
writelock lk("");
bo comment = BSON( "msg" << "initiating set");
newConfig.saveConfigLocally(comment);
@@ -235,9 +235,9 @@ namespace mongo {
ReplSet::startupStatus = ReplSet::SOON;
ReplSet::startupStatusMsg = "Received replSetInitiate - should come online shortly.";
}
- catch( DBException& e ) {
+ catch( DBException& e ) {
log() << "replSet replSetInitiate exception: " << e.what() << rsLog;
- if( !parsed )
+ if( !parsed )
errmsg = string("couldn't parse cfg object ") + e.what();
else
errmsg = string("couldn't initiate : ") + e.what();
diff --git a/db/repl/rs_member.h b/db/repl/rs_member.h
index 0f049280f7f..017b6ea3ea5 100644
--- a/db/repl/rs_member.h
+++ b/db/repl/rs_member.h
@@ -30,8 +30,8 @@ namespace mongo {
RS_FATAL something bad has occurred and server is not completely offline with regard to the replica set. fatal error.
RS_STARTUP2 loaded config, still determining who is primary
*/
- struct MemberState {
- enum MS {
+ struct MemberState {
+ enum MS {
RS_STARTUP = 0,
RS_PRIMARY = 1,
RS_SECONDARY = 2,
@@ -61,9 +61,9 @@ namespace mongo {
bool operator!=(const MemberState& r) const { return s != r.s; }
};
- /* this is supposed to be just basic information on a member,
+ /* this is supposed to be just basic information on a member,
and copy constructible. */
- class HeartbeatInfo {
+ class HeartbeatInfo {
unsigned _id;
public:
HeartbeatInfo() : _id(0xffffffff),hbstate(MemberState::RS_UNKNOWN),health(-1.0),downSince(0),skew(INT_MIN) { }
@@ -89,15 +89,15 @@ namespace mongo {
bool changed(const HeartbeatInfo& old) const;
};
- inline HeartbeatInfo::HeartbeatInfo(unsigned id) : _id(id) {
+ inline HeartbeatInfo::HeartbeatInfo(unsigned id) : _id(id) {
hbstate = MemberState::RS_UNKNOWN;
health = -1.0;
downSince = 0;
- lastHeartbeat = upSince = 0;
+ lastHeartbeat = upSince = 0;
skew = INT_MIN;
}
- inline bool HeartbeatInfo::changed(const HeartbeatInfo& old) const {
+ inline bool HeartbeatInfo::changed(const HeartbeatInfo& old) const {
return health != old.health ||
hbstate != old.hbstate;
}
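
As a small usage illustration (the two HeartbeatInfo values here are hypothetical), changed() deliberately compares only health and member state, so skew or heartbeat-time updates alone never register as a change:

    HeartbeatInfo prev(1), cur(1);              // same member id
    prev.hbstate = MemberState::RS_SECONDARY;  prev.health = 1.0;
    cur.hbstate  = MemberState::RS_PRIMARY;    cur.health  = 1.0;
    // true: the state flipped, even though nothing else differs
    bool stateFlip = cur.changed(prev);
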
diff --git a/db/repl/rs_optime.h b/db/repl/rs_optime.h
index fee3dcb62b5..f0ca56927ad 100644
--- a/db/repl/rs_optime.h
+++ b/db/repl/rs_optime.h
@@ -25,19 +25,19 @@ namespace mongo {
const char rsoplog[] = "local.oplog.rs";
/*
- class RSOpTime : public OpTime {
+ class RSOpTime : public OpTime {
public:
bool initiated() const { return getSecs() != 0; }
};*/
- /*struct RSOpTime {
+ /*struct RSOpTime {
unsigned long long ord;
RSOpTime() : ord(0) { }
bool initiated() const { return ord > 0; }
- void initiate() {
+ void initiate() {
assert( !initiated() );
ord = 1000000;
}
diff --git a/db/repl/rs_rollback.cpp b/db/repl/rs_rollback.cpp
index 0fcaaece31f..cf68b861ae8 100644
--- a/db/repl/rs_rollback.cpp
+++ b/db/repl/rs_rollback.cpp
@@ -1,5 +1,5 @@
/* @file rs_rollback.cpp
-*
+*
* Copyright (C) 2008 10gen Inc.
*
* This program is free software: you can redistribute it and/or modify
@@ -25,7 +25,7 @@
/* Scenarios
We went offline with ops not replicated out.
-
+
F = node that failed and coming back.
P = node that took over, new primary
@@ -33,11 +33,11 @@
F : a b c d e f g
P : a b c d q
- The design is "keep P". One could argue here that "keep F" has some merits, however, in most cases P
- will have significantly more data. Also note that P may have a proper subset of F's stream if there were
+ The design is "keep P". One could argue here that "keep F" has some merits, however, in most cases P
+ will have significantly more data. Also note that P may have a proper subset of F's stream if there were
no subsequent writes.
- For now the model is simply : get F back in sync with P. If P was really behind or something, we should have
+ For now the model is simply : get F back in sync with P. If P was really behind or something, we should have
just chosen not to fail over anyway.
#2:
@@ -50,9 +50,9 @@
Steps
find an event in common. 'd'.
- undo our events beyond that by:
+ undo our events beyond that by:
(1) taking copy from other server of those objects
- (2) do not consider copy valid until we reach an optime after when we fetched the new version of the object
+ (2) do not consider copy valid until we reach an optime after when we fetched the new version of the object
-- i.e., reset minvalid.
(3) we could skip operations on objects that are previous in time to our capture of the object as an optimization.
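
Reduced to a sketch, the search for the common event 'd' walks both oplogs backwards until timestamp and hash agree; the two next-older accessors stand in for the real reverse cursors and are hypothetical (the full loop, with its RS100/RS101 exhaustion checks, is syncRollbackFindCommonPoint below):

    while( 1 ) {
        if( ourTime == theirTime ) {
            if( ourObj["h"].Long() == theirObj["h"].Long() )
                break;                         // common point found
            ourObj   = olderLocalOp();         // same ts, different hash: step both back
            theirObj = olderRemoteOp();
        }
        else if( theirTime > ourTime ) theirObj = olderRemoteOp();   // remote is ahead
        else                           ourObj   = olderLocalOp();    // we are ahead
        ourTime   = ourObj["ts"]._opTime();
        theirTime = theirObj["ts"]._opTime();
    }
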
@@ -65,15 +65,15 @@ namespace mongo {
bool copyCollectionFromRemote(const string& host, const string& ns, const BSONObj& query, string& errmsg, bool logforrepl);
void incRBID();
- class rsfatal : public std::exception {
+ class rsfatal : public std::exception {
public:
- virtual const char* what() const throw(){ return "replica set fatal exception"; }
+ virtual const char* what() const throw() { return "replica set fatal exception"; }
};
struct DocID {
const char *ns;
be _id;
- bool operator<(const DocID& d) const {
+ bool operator<(const DocID& d) const {
int c = strcmp(ns, d.ns);
if( c < 0 ) return true;
if( c > 0 ) return false;
@@ -82,7 +82,7 @@ namespace mongo {
};
struct HowToFixUp {
- /* note this is a set -- if there are many $inc's on a single document we need to rollback, we only
+ /* note this is a set -- if there are many $inc's on a single document we need to rollback, we only
need to refetch it once. */
set<DocID> toRefetch;
@@ -97,9 +97,9 @@ namespace mongo {
int rbid; // remote server's current rollback sequence #
};
- static void refetch(HowToFixUp& h, const BSONObj& ourObj) {
+ static void refetch(HowToFixUp& h, const BSONObj& ourObj) {
const char *op = ourObj.getStringField("op");
- if( *op == 'n' )
+ if( *op == 'n' )
return;
unsigned long long totSize = 0;
@@ -109,52 +109,52 @@ namespace mongo {
DocID d;
d.ns = ourObj.getStringField("ns");
- if( *d.ns == 0 ) {
+ if( *d.ns == 0 ) {
log() << "replSet WARNING ignoring op on rollback no ns TODO : " << ourObj.toString() << rsLog;
return;
}
bo o = ourObj.getObjectField(*op=='u' ? "o2" : "o");
- if( o.isEmpty() ) {
+ if( o.isEmpty() ) {
log() << "replSet warning ignoring op on rollback : " << ourObj.toString() << rsLog;
return;
}
- if( *op == 'c' ) {
+ if( *op == 'c' ) {
be first = o.firstElement();
NamespaceString s(d.ns); // foo.$cmd
string cmdname = first.fieldName();
Command *cmd = Command::findCommand(cmdname.c_str());
- if( cmd == 0 ) {
+ if( cmd == 0 ) {
log() << "replSet warning rollback no suchcommand " << first.fieldName() << " - different mongod versions perhaps?" << rsLog;
return;
}
else {
/* findandmodify - translated?
- godinsert?,
+ godinsert?,
renamecollection a->b. just resync a & b
*/
if( cmdname == "create" ) {
- /* Create collection operation
+ /* Create collection operation
{ ts: ..., h: ..., op: "c", ns: "foo.$cmd", o: { create: "abc", ... } }
*/
string ns = s.db + '.' + o["create"].String(); // -> foo.abc
h.toDrop.insert(ns);
return;
}
- else if( cmdname == "drop" ) {
+ else if( cmdname == "drop" ) {
string ns = s.db + '.' + first.valuestr();
h.collectionsToResync.insert(ns);
return;
}
- else if( cmdname == "dropIndexes" || cmdname == "deleteIndexes" ) {
+ else if( cmdname == "dropIndexes" || cmdname == "deleteIndexes" ) {
/* TODO: this is bad. we simply do a full resync of the collection here, which could be very slow. */
log() << "replSet info rollback of dropIndexes is slow in this version of mongod" << rsLog;
string ns = s.db + '.' + first.valuestr();
h.collectionsToResync.insert(ns);
return;
}
- else if( cmdname == "renameCollection" ) {
+ else if( cmdname == "renameCollection" ) {
/* TODO: slow. */
log() << "replSet info rollback of renameCollection is slow in this version of mongod" << rsLog;
string from = first.valuestr();
@@ -163,15 +163,15 @@ namespace mongo {
h.collectionsToResync.insert(to);
return;
}
- else if( cmdname == "reIndex" ) {
+ else if( cmdname == "reIndex" ) {
return;
}
- else if( cmdname == "dropDatabase" ) {
+ else if( cmdname == "dropDatabase" ) {
log() << "replSet error rollback : can't rollback drop database full resync will be required" << rsLog;
log() << "replSet " << o.toString() << rsLog;
throw rsfatal();
}
- else {
+ else {
log() << "replSet error can't rollback this command yet: " << o.toString() << rsLog;
log() << "replSet cmdname=" << cmdname << rsLog;
throw rsfatal();
@@ -190,9 +190,9 @@ namespace mongo {
int getRBID(DBClientConnection*);
- static void syncRollbackFindCommonPoint(DBClientConnection *them, HowToFixUp& h) {
+ static void syncRollbackFindCommonPoint(DBClientConnection *them, HowToFixUp& h) {
static time_t last;
- if( time(0)-last < 60 ) {
+ if( time(0)-last < 60 ) {
throw "findcommonpoint waiting a while before trying again";
}
last = time(0);
@@ -226,7 +226,7 @@ namespace mongo {
log() << "replSet info rollback our last optime: " << ourTime.toStringPretty() << rsLog;
log() << "replSet info rollback their last optime: " << theirTime.toStringPretty() << rsLog;
log() << "replSet info rollback diff in end of log times: " << diff << " seconds" << rsLog;
- if( diff > 3600 ) {
+ if( diff > 3600 ) {
log() << "replSet rollback too long a time period for a rollback." << rsLog;
throw "error not willing to roll back more than one hour of data";
}
@@ -236,8 +236,8 @@ namespace mongo {
while( 1 ) {
scanned++;
/* todo add code to assure no excessive scanning for too long */
- if( ourTime == theirTime ) {
- if( ourObj["h"].Long() == theirObj["h"].Long() ) {
+ if( ourTime == theirTime ) {
+ if( ourObj["h"].Long() == theirObj["h"].Long() ) {
// found the point back in time where we match.
// todo : check a few more just to be careful about hash collisions.
log() << "replSet rollback found matching events at " << ourTime.toStringPretty() << rsLog;
@@ -249,7 +249,7 @@ namespace mongo {
refetch(h, ourObj);
- if( !t->more() ) {
+ if( !t->more() ) {
log() << "replSet rollback error RS100 reached beginning of remote oplog" << rsLog;
log() << "replSet them: " << them->toString() << " scanned: " << scanned << rsLog;
log() << "replSet theirTime: " << theirTime.toStringLong() << rsLog;
@@ -270,8 +270,8 @@ namespace mongo {
ourObj = u.current();
ourTime = ourObj["ts"]._opTime();
}
- else if( theirTime > ourTime ) {
- if( !t->more() ) {
+ else if( theirTime > ourTime ) {
+ if( !t->more() ) {
log() << "replSet rollback error RS100 reached beginning of remote oplog" << rsLog;
log() << "replSet them: " << them->toString() << " scanned: " << scanned << rsLog;
log() << "replSet theirTime: " << theirTime.toStringLong() << rsLog;
@@ -281,11 +281,11 @@ namespace mongo {
theirObj = t->nextSafe();
theirTime = theirObj["ts"]._opTime();
}
- else {
+ else {
// theirTime < ourTime
refetch(h, ourObj);
u.advance();
- if( !u.ok() ) {
+ if( !u.ok() ) {
log() << "replSet rollback error RS101 reached beginning of local oplog" << rsLog;
log() << "replSet them: " << them->toString() << " scanned: " << scanned << rsLog;
log() << "replSet theirTime: " << theirTime.toStringLong() << rsLog;
@@ -298,299 +298,301 @@ namespace mongo {
}
}
- struct X {
+ struct X {
const bson::bo *op;
bson::bo goodVersionOfObject;
};
- static void setMinValid(bo newMinValid) {
- try {
- log() << "replSet minvalid=" << newMinValid["ts"]._opTime().toStringLong() << rsLog;
- }
- catch(...) { }
- {
- Helpers::putSingleton("local.replset.minvalid", newMinValid);
- Client::Context cx( "local." );
- cx.db()->flushFiles(true);
- }
+ static void setMinValid(bo newMinValid) {
+ try {
+ log() << "replSet minvalid=" << newMinValid["ts"]._opTime().toStringLong() << rsLog;
+ }
+ catch(...) { }
+ {
+ Helpers::putSingleton("local.replset.minvalid", newMinValid);
+ Client::Context cx( "local." );
+ cx.db()->flushFiles(true);
+ }
}
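
setMinValid persists the "safe point" as a singleton document; the matching read side, which decides whether the member may leave RECOVERING, looks roughly like the sketch below (this mirrors tryToGoLiveAsASecondary in rs_sync.cpp further down; lastOpTimeWritten is the cached value reset by loadLastOpTimeWritten):

    BSONObj mv;
    if( Helpers::getSingleton( "local.replset.minvalid" , mv ) ) {
        OpTime minvalid = mv["ts"]._opTime();
        // only go live once our applied optime has caught up past the rollback writes
        bool golive = ( minvalid <= lastOpTimeWritten );
    }
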
void ReplSetImpl::syncFixUp(HowToFixUp& h, OplogReader& r) {
- DBClientConnection *them = r.conn();
-
- // fetch all first so we needn't handle interruption in a fancy way
-
- unsigned long long totSize = 0;
-
- list< pair<DocID,bo> > goodVersions;
-
- bo newMinValid;
-
- /* fetch all the goodVersions of each document from current primary */
- DocID d;
- unsigned long long n = 0;
- try {
- for( set<DocID>::iterator i = h.toRefetch.begin(); i != h.toRefetch.end(); i++ ) {
- d = *i;
-
- assert( !d._id.eoo() );
-
- {
- /* TODO : slow. lots of round trips. */
- n++;
- bo good= them->findOne(d.ns, d._id.wrap()).getOwned();
- totSize += good.objsize();
- uassert( 13410, "replSet too much data to roll back", totSize < 300 * 1024 * 1024 );
-
- // note good might be eoo, indicating we should delete it
- goodVersions.push_back(pair<DocID,bo>(d,good));
- }
- }
- newMinValid = r.getLastOp(rsoplog);
- if( newMinValid.isEmpty() ) {
- sethbmsg("rollback error newMinValid empty?");
- return;
- }
- }
- catch(DBException& e) {
- sethbmsg(str::stream() << "rollback re-get objects: " << e.toString(),0);
- log() << "rollback couldn't re-get ns:" << d.ns << " _id:" << d._id << ' ' << n << '/' << h.toRefetch.size() << rsLog;
- throw e;
- }
-
- MemoryMappedFile::flushAll(true);
-
- sethbmsg("rollback 3.5");
- if( h.rbid != getRBID(r.conn()) ) {
- // our source rolled back itself. so the data we received isn't necessarily consistent.
- sethbmsg("rollback rbid on source changed during rollback, cancelling this attempt");
- return;
- }
-
- // update them
- sethbmsg(str::stream() << "rollback 4 n:" << goodVersions.size());
-
- bool warn = false;
-
- assert( !h.commonPointOurDiskloc.isNull() );
-
- dbMutex.assertWriteLocked();
-
- /* we have items we are writing that aren't from a point-in-time. thus best not to come online
- until we get to that point in freshness. */
- setMinValid(newMinValid);
-
- /** any full collection resyncs required? */
- if( !h.collectionsToResync.empty() ) {
- for( set<string>::iterator i = h.collectionsToResync.begin(); i != h.collectionsToResync.end(); i++ ) {
- string ns = *i;
- sethbmsg(str::stream() << "rollback 4.1 coll resync " << ns);
- Client::Context c(*i, dbpath, 0, /*doauth*/false);
- try {
- bob res;
- string errmsg;
- dropCollection(ns, errmsg, res);
- {
- dbtemprelease r;
- bool ok = copyCollectionFromRemote(them->getServerAddress(), ns, bo(), errmsg, false);
- if( !ok ) {
- log() << "replSet rollback error resyncing collection " << ns << ' ' << errmsg << rsLog;
- throw "rollback error resyncing collection [1]";
- }
- }
- }
- catch(...) {
- log() << "replset rollback error resyncing collection " << ns << rsLog;
- throw "rollback error resyncing collection [2]";
- }
- }
-
- /* we did more reading from primary, so check it again for a rollback (which would mess us up), and
- make minValid newer.
- */
- sethbmsg("rollback 4.2");
- {
- string err;
- try {
- newMinValid = r.getLastOp(rsoplog);
- if( newMinValid.isEmpty() ) {
- err = "can't get minvalid from primary";
- } else {
- setMinValid(newMinValid);
- }
- }
- catch(...) {
- err = "can't get/set minvalid";
- }
- if( h.rbid != getRBID(r.conn()) ) {
- // our source rolled back itself. so the data we received isn't necessarily consistent.
- // however, we've now done writes. thus we have a problem.
- err += "rbid at primary changed during resync/rollback";
- }
- if( !err.empty() ) {
- log() << "replSet error rolling back : " << err << ". A full resync will be necessary." << rsLog;
- /* todo: reset minvalid so that we are permanently in fatal state */
- /* todo: don't be fatal, but rather, get all the data first. */
- sethbmsg("rollback error");
- throw rsfatal();
- }
- }
- sethbmsg("rollback 4.3");
- }
-
- sethbmsg("rollback 4.6");
- /** drop collections to drop before doing individual fixups - that might make things faster below actually if there were subsequent inserts to rollback */
- for( set<string>::iterator i = h.toDrop.begin(); i != h.toDrop.end(); i++ ) {
- Client::Context c(*i, dbpath, 0, /*doauth*/false);
- try {
- bob res;
- string errmsg;
- log(1) << "replSet rollback drop: " << *i << rsLog;
- dropCollection(*i, errmsg, res);
- }
- catch(...) {
- log() << "replset rollback error dropping collection " << *i << rsLog;
- }
- }
-
- sethbmsg("rollback 4.7");
- Client::Context c(rsoplog, dbpath, 0, /*doauth*/false);
- NamespaceDetails *oplogDetails = nsdetails(rsoplog);
- uassert(13423, str::stream() << "replSet error in rollback can't find " << rsoplog, oplogDetails);
-
- map<string,shared_ptr<RemoveSaver> > removeSavers;
-
- unsigned deletes = 0, updates = 0;
- for( list<pair<DocID,bo> >::iterator i = goodVersions.begin(); i != goodVersions.end(); i++ ) {
- const DocID& d = i->first;
- bo pattern = d._id.wrap(); // { _id : ... }
- try {
- assert( d.ns && *d.ns );
- if( h.collectionsToResync.count(d.ns) ) {
- /* we just synced this entire collection */
- continue;
- }
-
- /* keep an archive of items rolled back */
- shared_ptr<RemoveSaver>& rs = removeSavers[d.ns];
- if ( ! rs )
- rs.reset( new RemoveSaver( "rollback" , "" , d.ns ) );
-
- // todo: lots of overhead in context, this can be faster
- Client::Context c(d.ns, dbpath, 0, /*doauth*/false);
- if( i->second.isEmpty() ) {
- // wasn't on the primary; delete.
- /* TODO1.6 : can't delete from a capped collection. need to handle that here. */
- deletes++;
-
- NamespaceDetails *nsd = nsdetails(d.ns);
- if( nsd ) {
- if( nsd->capped ) {
- /* can't delete from a capped collection - so we truncate instead. if this item must go,
- so must all successors!!! */
- try {
- /** todo: IIRC cappedTruncateAfter does not handle a completely empty collection. */
- // this will be crazy slow if there is no _id index.
- long long start = Listener::getElapsedTimeMillis();
- DiskLoc loc = Helpers::findOne(d.ns, pattern, false);
- if( Listener::getElapsedTimeMillis() - start > 200 )
- log() << "replSet warning roll back slow no _id index for " << d.ns << " perhaps?" << rsLog;
- //would be faster but requires index: DiskLoc loc = Helpers::findById(nsd, pattern);
- if( !loc.isNull() ) {
- try {
- nsd->cappedTruncateAfter(d.ns, loc, true);
- }
- catch(DBException& e) {
- if( e.getCode() == 13415 ) {
- // hack: need to just make cappedTruncate do this...
- nsd->emptyCappedCollection(d.ns);
- } else {
- throw;
- }
- }
- }
- }
- catch(DBException& e) {
- log() << "replSet error rolling back capped collection rec " << d.ns << ' ' << e.toString() << rsLog;
- }
- }
- else {
- try {
- deletes++;
- deleteObjects(d.ns, pattern, /*justone*/true, /*logop*/false, /*god*/true, rs.get() );
- }
- catch(...) {
- log() << "replSet error rollback delete failed ns:" << d.ns << rsLog;
- }
- }
- // did we just empty the collection? if so let's check if it even exists on the source.
- if( nsd->stats.nrecords == 0 ) {
- try {
- string sys = cc().database()->name + ".system.namespaces";
- bo o = them->findOne(sys, QUERY("name"<<d.ns));
- if( o.isEmpty() ) {
- // we should drop
- try {
- bob res;
- string errmsg;
- dropCollection(d.ns, errmsg, res);
- }
- catch(...) {
- log() << "replset error rolling back collection " << d.ns << rsLog;
- }
- }
- }
- catch(DBException& ) {
- /* this isn't *that* big a deal, but is bad. */
- log() << "replSet warning rollback error querying for existence of " << d.ns << " at the primary, ignoring" << rsLog;
- }
- }
- }
- }
- else {
- // todo faster...
- OpDebug debug;
- updates++;
- _updateObjects(/*god*/true, d.ns, i->second, pattern, /*upsert=*/true, /*multi=*/false , /*logtheop=*/false , debug, rs.get() );
- }
- }
- catch(DBException& e) {
- log() << "replSet exception in rollback ns:" << d.ns << ' ' << pattern.toString() << ' ' << e.toString() << " ndeletes:" << deletes << rsLog;
- warn = true;
- }
- }
-
- removeSavers.clear(); // this effectively closes all of them
-
- sethbmsg(str::stream() << "rollback 5 d:" << deletes << " u:" << updates);
- MemoryMappedFile::flushAll(true);
- sethbmsg("rollback 6");
-
- // clean up oplog
- log(2) << "replSet rollback truncate oplog after " << h.commonPoint.toStringPretty() << rsLog;
- // todo: fatal error if this throws?
- oplogDetails->cappedTruncateAfter(rsoplog, h.commonPointOurDiskloc, false);
-
- /* reset cached lastoptimewritten and h value */
- loadLastOpTimeWritten();
-
- sethbmsg("rollback 7");
- MemoryMappedFile::flushAll(true);
-
- // done
- if( warn )
- sethbmsg("issues during syncRollback, see log");
- else
- sethbmsg("rollback done");
- }
-
- void ReplSetImpl::syncRollback(OplogReader&r) {
+ DBClientConnection *them = r.conn();
+
+ // fetch all first so we needn't handle interruption in a fancy way
+
+ unsigned long long totSize = 0;
+
+ list< pair<DocID,bo> > goodVersions;
+
+ bo newMinValid;
+
+ /* fetch all the goodVersions of each document from current primary */
+ DocID d;
+ unsigned long long n = 0;
+ try {
+ for( set<DocID>::iterator i = h.toRefetch.begin(); i != h.toRefetch.end(); i++ ) {
+ d = *i;
+
+ assert( !d._id.eoo() );
+
+ {
+ /* TODO : slow. lots of round trips. */
+ n++;
+ bo good= them->findOne(d.ns, d._id.wrap()).getOwned();
+ totSize += good.objsize();
+ uassert( 13410, "replSet too much data to roll back", totSize < 300 * 1024 * 1024 );
+
+ // note good might be eoo, indicating we should delete it
+ goodVersions.push_back(pair<DocID,bo>(d,good));
+ }
+ }
+ newMinValid = r.getLastOp(rsoplog);
+ if( newMinValid.isEmpty() ) {
+ sethbmsg("rollback error newMinValid empty?");
+ return;
+ }
+ }
+ catch(DBException& e) {
+ sethbmsg(str::stream() << "rollback re-get objects: " << e.toString(),0);
+ log() << "rollback couldn't re-get ns:" << d.ns << " _id:" << d._id << ' ' << n << '/' << h.toRefetch.size() << rsLog;
+ throw e;
+ }
+
+ MemoryMappedFile::flushAll(true);
+
+ sethbmsg("rollback 3.5");
+ if( h.rbid != getRBID(r.conn()) ) {
+ // our source rolled back itself. so the data we received isn't necessarily consistent.
+ sethbmsg("rollback rbid on source changed during rollback, cancelling this attempt");
+ return;
+ }
+
+ // update them
+ sethbmsg(str::stream() << "rollback 4 n:" << goodVersions.size());
+
+ bool warn = false;
+
+ assert( !h.commonPointOurDiskloc.isNull() );
+
+ dbMutex.assertWriteLocked();
+
+ /* we have items we are writing that aren't from a point-in-time. thus best not to come online
+ until we get to that point in freshness. */
+ setMinValid(newMinValid);
+
+ /** any full collection resyncs required? */
+ if( !h.collectionsToResync.empty() ) {
+ for( set<string>::iterator i = h.collectionsToResync.begin(); i != h.collectionsToResync.end(); i++ ) {
+ string ns = *i;
+ sethbmsg(str::stream() << "rollback 4.1 coll resync " << ns);
+ Client::Context c(*i, dbpath, 0, /*doauth*/false);
+ try {
+ bob res;
+ string errmsg;
+ dropCollection(ns, errmsg, res);
+ {
+ dbtemprelease r;
+ bool ok = copyCollectionFromRemote(them->getServerAddress(), ns, bo(), errmsg, false);
+ if( !ok ) {
+ log() << "replSet rollback error resyncing collection " << ns << ' ' << errmsg << rsLog;
+ throw "rollback error resyncing collection [1]";
+ }
+ }
+ }
+ catch(...) {
+ log() << "replset rollback error resyncing collection " << ns << rsLog;
+ throw "rollback error resyncing collection [2]";
+ }
+ }
+
+ /* we did more reading from primary, so check it again for a rollback (which would mess us up), and
+ make minValid newer.
+ */
+ sethbmsg("rollback 4.2");
+ {
+ string err;
+ try {
+ newMinValid = r.getLastOp(rsoplog);
+ if( newMinValid.isEmpty() ) {
+ err = "can't get minvalid from primary";
+ }
+ else {
+ setMinValid(newMinValid);
+ }
+ }
+ catch(...) {
+ err = "can't get/set minvalid";
+ }
+ if( h.rbid != getRBID(r.conn()) ) {
+ // our source rolled back itself. so the data we received isn't necessarily consistent.
+ // however, we've now done writes. thus we have a problem.
+ err += "rbid at primary changed during resync/rollback";
+ }
+ if( !err.empty() ) {
+ log() << "replSet error rolling back : " << err << ". A full resync will be necessary." << rsLog;
+ /* todo: reset minvalid so that we are permanently in fatal state */
+ /* todo: don't be fatal, but rather, get all the data first. */
+ sethbmsg("rollback error");
+ throw rsfatal();
+ }
+ }
+ sethbmsg("rollback 4.3");
+ }
+
+ sethbmsg("rollback 4.6");
+ /** drop collections to drop before doing individual fixups - that might make things faster below actually if there were subsequent inserts to rollback */
+ for( set<string>::iterator i = h.toDrop.begin(); i != h.toDrop.end(); i++ ) {
+ Client::Context c(*i, dbpath, 0, /*doauth*/false);
+ try {
+ bob res;
+ string errmsg;
+ log(1) << "replSet rollback drop: " << *i << rsLog;
+ dropCollection(*i, errmsg, res);
+ }
+ catch(...) {
+ log() << "replset rollback error dropping collection " << *i << rsLog;
+ }
+ }
+
+ sethbmsg("rollback 4.7");
+ Client::Context c(rsoplog, dbpath, 0, /*doauth*/false);
+ NamespaceDetails *oplogDetails = nsdetails(rsoplog);
+ uassert(13423, str::stream() << "replSet error in rollback can't find " << rsoplog, oplogDetails);
+
+ map<string,shared_ptr<RemoveSaver> > removeSavers;
+
+ unsigned deletes = 0, updates = 0;
+ for( list<pair<DocID,bo> >::iterator i = goodVersions.begin(); i != goodVersions.end(); i++ ) {
+ const DocID& d = i->first;
+ bo pattern = d._id.wrap(); // { _id : ... }
+ try {
+ assert( d.ns && *d.ns );
+ if( h.collectionsToResync.count(d.ns) ) {
+ /* we just synced this entire collection */
+ continue;
+ }
+
+ /* keep an archive of items rolled back */
+ shared_ptr<RemoveSaver>& rs = removeSavers[d.ns];
+ if ( ! rs )
+ rs.reset( new RemoveSaver( "rollback" , "" , d.ns ) );
+
+ // todo: lots of overhead in context, this can be faster
+ Client::Context c(d.ns, dbpath, 0, /*doauth*/false);
+ if( i->second.isEmpty() ) {
+ // wasn't on the primary; delete.
+ /* TODO1.6 : can't delete from a capped collection. need to handle that here. */
+ deletes++;
+
+ NamespaceDetails *nsd = nsdetails(d.ns);
+ if( nsd ) {
+ if( nsd->capped ) {
+ /* can't delete from a capped collection - so we truncate instead. if this item must go,
+ so must all successors!!! */
+ try {
+ /** todo: IIRC cappedTruncateAfter does not handle a completely empty collection. */
+ // this will be crazy slow if there is no _id index.
+ long long start = Listener::getElapsedTimeMillis();
+ DiskLoc loc = Helpers::findOne(d.ns, pattern, false);
+ if( Listener::getElapsedTimeMillis() - start > 200 )
+ log() << "replSet warning roll back slow no _id index for " << d.ns << " perhaps?" << rsLog;
+ //would be faster but requires index: DiskLoc loc = Helpers::findById(nsd, pattern);
+ if( !loc.isNull() ) {
+ try {
+ nsd->cappedTruncateAfter(d.ns, loc, true);
+ }
+ catch(DBException& e) {
+ if( e.getCode() == 13415 ) {
+ // hack: need to just make cappedTruncate do this...
+ nsd->emptyCappedCollection(d.ns);
+ }
+ else {
+ throw;
+ }
+ }
+ }
+ }
+ catch(DBException& e) {
+ log() << "replSet error rolling back capped collection rec " << d.ns << ' ' << e.toString() << rsLog;
+ }
+ }
+ else {
+ try {
+ deletes++;
+ deleteObjects(d.ns, pattern, /*justone*/true, /*logop*/false, /*god*/true, rs.get() );
+ }
+ catch(...) {
+ log() << "replSet error rollback delete failed ns:" << d.ns << rsLog;
+ }
+ }
+ // did we just empty the collection? if so let's check if it even exists on the source.
+ if( nsd->stats.nrecords == 0 ) {
+ try {
+ string sys = cc().database()->name + ".system.namespaces";
+ bo o = them->findOne(sys, QUERY("name"<<d.ns));
+ if( o.isEmpty() ) {
+ // we should drop
+ try {
+ bob res;
+ string errmsg;
+ dropCollection(d.ns, errmsg, res);
+ }
+ catch(...) {
+ log() << "replset error rolling back collection " << d.ns << rsLog;
+ }
+ }
+ }
+ catch(DBException& ) {
+ /* this isn't *that* big a deal, but is bad. */
+ log() << "replSet warning rollback error querying for existence of " << d.ns << " at the primary, ignoring" << rsLog;
+ }
+ }
+ }
+ }
+ else {
+ // todo faster...
+ OpDebug debug;
+ updates++;
+ _updateObjects(/*god*/true, d.ns, i->second, pattern, /*upsert=*/true, /*multi=*/false , /*logtheop=*/false , debug, rs.get() );
+ }
+ }
+ catch(DBException& e) {
+ log() << "replSet exception in rollback ns:" << d.ns << ' ' << pattern.toString() << ' ' << e.toString() << " ndeletes:" << deletes << rsLog;
+ warn = true;
+ }
+ }
+
+ removeSavers.clear(); // this effectively closes all of them
+
+ sethbmsg(str::stream() << "rollback 5 d:" << deletes << " u:" << updates);
+ MemoryMappedFile::flushAll(true);
+ sethbmsg("rollback 6");
+
+ // clean up oplog
+ log(2) << "replSet rollback truncate oplog after " << h.commonPoint.toStringPretty() << rsLog;
+ // todo: fatal error if this throws?
+ oplogDetails->cappedTruncateAfter(rsoplog, h.commonPointOurDiskloc, false);
+
+ /* reset cached lastoptimewritten and h value */
+ loadLastOpTimeWritten();
+
+ sethbmsg("rollback 7");
+ MemoryMappedFile::flushAll(true);
+
+ // done
+ if( warn )
+ sethbmsg("issues during syncRollback, see log");
+ else
+ sethbmsg("rollback done");
+ }
+
+ void ReplSetImpl::syncRollback(OplogReader&r) {
unsigned s = _syncRollback(r);
- if( s )
+ if( s )
sleepsecs(s);
}
- unsigned ReplSetImpl::_syncRollback(OplogReader&r) {
+ unsigned ReplSetImpl::_syncRollback(OplogReader&r) {
assert( !lockedByMe() );
assert( !dbMutex.atLeastReadLocked() );
@@ -604,7 +606,7 @@ namespace mongo {
if( box.getState().secondary() ) {
/* by doing this, we will not service reads (return an error as we aren't in secondary state.
- that perhaps is moot because of the write lock above, but that write lock probably gets deferred
+ that perhaps is moot because of the write lock above, but that write lock probably gets deferred
or removed or yielded later anyway.
also, this is better for status reporting - we know what is happening.
@@ -618,7 +620,7 @@ namespace mongo {
r.resetCursor();
/*DBClientConnection us(false, 0, 0);
string errmsg;
- if( !us.connect(HostAndPort::me().toString(),errmsg) ) {
+ if( !us.connect(HostAndPort::me().toString(),errmsg) ) {
sethbmsg("rollback connect to self failure" + errmsg);
return;
}*/
@@ -627,15 +629,15 @@ namespace mongo {
try {
syncRollbackFindCommonPoint(r.conn(), how);
}
- catch( const char *p ) {
+ catch( const char *p ) {
sethbmsg(string("rollback 2 error ") + p);
return 10;
}
- catch( rsfatal& ) {
+ catch( rsfatal& ) {
_fatal();
return 2;
}
- catch( DBException& e ) {
+ catch( DBException& e ) {
sethbmsg(string("rollback 2 exception ") + e.toString() + "; sleeping 1 min");
dbtemprelease r;
sleepsecs(60);
@@ -647,20 +649,20 @@ namespace mongo {
{
incRBID();
- try {
+ try {
syncFixUp(how, r);
}
- catch( rsfatal& ) {
+ catch( rsfatal& ) {
sethbmsg("rollback fixup error");
_fatal();
return 2;
}
- catch(...) {
+ catch(...) {
incRBID(); throw;
}
incRBID();
- /* success - leave "ROLLBACK" state
+ /* success - leave "ROLLBACK" state
can go to SECONDARY once minvalid is achieved
*/
box.change(MemberState::RS_RECOVERING, _self);
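
The rbid bookkeeping above follows a bracket pattern, sketched here in isolation (the rsfatal branch is elided): bump before the destructive work, bump again on an unexpected throw, and bump on success, so peers polling getRBID always observe that a rollback happened here:

    incRBID();                  // announce: rollback in progress
    try {
        syncFixUp(how, r);
    }
    catch(...) {
        incRBID();              // even on failure the id must move
        throw;
    }
    incRBID();                  // success
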
diff --git a/db/repl/rs_sync.cpp b/db/repl/rs_sync.cpp
index 0c0811fdf4a..6cb86ecd9a4 100644
--- a/db/repl/rs_sync.cpp
+++ b/db/repl/rs_sync.cpp
@@ -32,8 +32,8 @@ namespace mongo {
nsToDatabase(ns, db);
if ( *ns == '.' || *ns == 0 ) {
- if( *o.getStringField("op") == 'n' )
- return;
+ if( *o.getStringField("op") == 'n' )
+ return;
log() << "replSet skipping bad op in oplog: " << o.toString() << endl;
return;
}
@@ -45,22 +45,21 @@ namespace mongo {
applyOperation_inlock(o);
}
- /* initial oplog application, during initial sync, after cloning.
- @return false on failure.
+ /* initial oplog application, during initial sync, after cloning.
+ @return false on failure.
this method returns an error and doesn't throw exceptions (i think).
*/
bool ReplSetImpl::initialSyncOplogApplication(
const Member *source,
OpTime applyGTE,
- OpTime minValid)
- {
+ OpTime minValid) {
if( source == 0 ) return false;
const string hn = source->h().toString();
OpTime ts;
try {
OplogReader r;
- if( !r.connect(hn) ) {
+ if( !r.connect(hn) ) {
log() << "replSet initial sync error can't connect to " << hn << " to read " << rsoplog << rsLog;
return false;
}
@@ -79,7 +78,7 @@ namespace mongo {
writelock lk("");
{
- if( !r.more() ) {
+ if( !r.more() ) {
sethbmsg("replSet initial sync error reading remote oplog");
log() << "replSet initial sync error remote oplog (" << rsoplog << ") on host " << hn << " is empty?" << rsLog;
return false;
@@ -88,7 +87,7 @@ namespace mongo {
OpTime t = op["ts"]._opTime();
r.putBack(op);
- if( op.firstElement().fieldName() == string("$err") ) {
+ if( op.firstElement().fieldName() == string("$err") ) {
log() << "replSet initial sync error querying " << rsoplog << " on " << hn << " : " << op.toString() << rsLog;
return false;
}
@@ -104,7 +103,7 @@ namespace mongo {
// todo : use exhaust
unsigned long long n = 0;
- while( 1 ) {
+ while( 1 ) {
if( !r.more() )
break;
@@ -113,12 +112,12 @@ namespace mongo {
ts = o["ts"]._opTime();
/* if we have become primary, we don't want to apply things from elsewhere
- anymore. assumePrimary is in the db lock so we are safe as long as
+ anymore. assumePrimary is in the db lock so we are safe as long as
we check after we locked above. */
if( (source->state() != MemberState::RS_PRIMARY &&
- source->state() != MemberState::RS_SECONDARY) ||
- replSetForceInitialSyncFailure ) {
-
+ source->state() != MemberState::RS_SECONDARY) ||
+ replSetForceInitialSyncFailure ) {
+
int f = replSetForceInitialSyncFailure;
if( f > 0 ) {
replSetForceInitialSyncFailure = f-1;
@@ -135,13 +134,13 @@ namespace mongo {
}
_logOpObjRS(o); /* with repl sets we write the ops to our oplog too */
}
- if( ++n % 100000 == 0 ) {
+ if( ++n % 100000 == 0 ) {
// simple progress metering
log() << "replSet initialSyncOplogApplication " << n << rsLog;
}
}
}
- catch(DBException& e) {
+ catch(DBException& e) {
if( ts <= minValid ) {
// didn't make it far enough
log() << "replSet initial sync failing, error applying oplog " << e.toString() << rsLog;
@@ -151,22 +150,22 @@ namespace mongo {
return true;
}
- /* should be in RECOVERING state on arrival here.
+ /* should be in RECOVERING state on arrival here.
readlocks
@return true if transitioned to SECONDARY
*/
- bool ReplSetImpl::tryToGoLiveAsASecondary(OpTime& /*out*/ minvalid) {
+ bool ReplSetImpl::tryToGoLiveAsASecondary(OpTime& /*out*/ minvalid) {
bool golive = false;
{
readlock lk("local.replset.minvalid");
BSONObj mv;
- if( Helpers::getSingleton("local.replset.minvalid", mv) ) {
+ if( Helpers::getSingleton("local.replset.minvalid", mv) ) {
minvalid = mv["ts"]._opTime();
- if( minvalid <= lastOpTimeWritten ) {
+ if( minvalid <= lastOpTimeWritten ) {
golive=true;
}
}
- else
+ else
golive = true; /* must have been the original member */
}
if( golive ) {
@@ -189,12 +188,12 @@ namespace mongo {
OpTime ts = remoteOldestOp["ts"]._opTime();
DEV log() << "replSet remoteOldestOp: " << ts.toStringLong() << rsLog;
else log(3) << "replSet remoteOldestOp: " << ts.toStringLong() << rsLog;
- DEV {
+ DEV {
// debugging sync1.js...
log() << "replSet lastOpTimeWritten: " << lastOpTimeWritten.toStringLong() << rsLog;
log() << "replSet our state: " << state().toString() << rsLog;
}
- if( lastOpTimeWritten < ts ) {
+ if( lastOpTimeWritten < ts ) {
log() << "replSet error RS102 too stale to catch up, at least from " << hn << rsLog;
log() << "replSet our last optime : " << lastOpTimeWritten.toStringLong() << rsLog;
log() << "replSet oldest at " << hn << " : " << ts.toStringLong() << rsLog;
@@ -218,7 +217,7 @@ namespace mongo {
*/
bool ReplSetImpl::_getOplogReader(OplogReader& r, string& hn) {
assert(r.conn() == 0);
-
+
if( !r.connect(hn) ) {
log(2) << "replSet can't connect to " << hn << " to read operations" << rsLog;
r.resetConnection();
@@ -229,14 +228,14 @@ namespace mongo {
return false;
}
return true;
- }
-
+ }
+
/* tail an oplog. ok to return, will be re-called. */
- void ReplSetImpl::syncTail() {
+ void ReplSetImpl::syncTail() {
// todo : locking vis a vis the mgr...
OplogReader r;
string hn;
-
+
const Member *target = box.getPrimary();
if (target != 0) {
hn = target->h().toString();
@@ -246,15 +245,15 @@ namespace mongo {
target = 0;
}
}
-
+
// if we cannot reach the master but someone else is more up-to-date
- // than we are, sync from them.
+ // than we are, sync from them.
if( target == 0 ) {
for(Member *m = head(); m; m=m->next()) {
hn = m->h().toString();
if (m->hbinfo().up() && m->state().readable() &&
- (m->hbinfo().opTime > lastOpTimeWritten) &&
- _getOplogReader(r, hn)) {
+ (m->hbinfo().opTime > lastOpTimeWritten) &&
+ _getOplogReader(r, hn)) {
target = m;
break;
}
@@ -285,7 +284,7 @@ namespace mongo {
return;
}
OpTime theirTS = theirLastOp["ts"]._opTime();
- if( theirTS < lastOpTimeWritten ) {
+ if( theirTS < lastOpTimeWritten ) {
log() << "replSet we are ahead of the primary, will try to roll back" << rsLog;
syncRollback(r);
return;
@@ -294,7 +293,7 @@ namespace mongo {
log() << "replSet syncTail condition 1" << rsLog;
sleepsecs(1);
}
- catch(DBException& e) {
+ catch(DBException& e) {
log() << "replSet error querying " << hn << ' ' << e.toString() << rsLog;
sleepsecs(2);
}
@@ -312,7 +311,7 @@ namespace mongo {
BSONObj o = r.nextSafe();
OpTime ts = o["ts"]._opTime();
long long h = o["h"].numberLong();
- if( ts != lastOpTimeWritten || h != lastH ) {
+ if( ts != lastOpTimeWritten || h != lastH ) {
log() << "replSet our last op time written: " << lastOpTimeWritten.toStringPretty() << endl;
log() << "replset source's GTE: " << ts.toStringPretty() << endl;
syncRollback(r);
@@ -328,38 +327,38 @@ namespace mongo {
while( 1 ) {
while( 1 ) {
- if( !r.moreInCurrentBatch() ) {
- /* we need to occasionally check some things. between
+ if( !r.moreInCurrentBatch() ) {
+ /* we need to occasionally check some things. between
batches is probably a good time. */
/* perhaps we should check this earlier? but not before the rollback checks. */
- if( state().recovering() ) {
+ if( state().recovering() ) {
/* can we go to RS_SECONDARY state? we can if not too old and if minvalid achieved */
OpTime minvalid;
bool golive = ReplSetImpl::tryToGoLiveAsASecondary(minvalid);
if( golive ) {
;
}
- else {
+ else {
sethbmsg(str::stream() << "still syncing, not yet to minValid optime " << minvalid.toString());
}
/* todo: too stale capability */
}
-
+
if( !target->hbinfo().hbstate.readable() ) {
return;
}
}
if( !r.more() )
break;
- {
+ {
BSONObj o = r.nextSafe(); /* note we might get "not master" at some point */
int sd = myConfig().slaveDelay;
// ignore slaveDelay if the box is still initializing. once
// it becomes secondary we can worry about it.
- if( sd && box.getState().secondary() ) {
+ if( sd && box.getState().secondary() ) {
const OpTime ts = o["ts"]._opTime();
long long a = ts.getSecs();
long long b = time(0);
@@ -386,14 +385,14 @@ namespace mongo {
}
}
}
-
+
}
{
writelock lk("");
/* if we have become primary, we don't want to apply things from elsewhere
- anymore. assumePrimary is in the db lock so we are safe as long as
+ anymore. assumePrimary is in the db lock so we are safe as long as
we check after we locked above. */
if( box.getState().primary() ) {
log(0) << "replSet stopping syncTail we are now primary" << rsLog;
@@ -401,7 +400,7 @@ namespace mongo {
}
syncApply(o);
- _logOpObjRS(o); /* with repl sets we write the ops to our oplog too: */
+ _logOpObjRS(o); /* with repl sets we write the ops to our oplog too: */
}
}
}
@@ -424,7 +423,7 @@ namespace mongo {
sleepsecs(1);
return;
}
- if( sp.state.fatal() ) {
+ if( sp.state.fatal() ) {
sleepsecs(5);
return;
}
@@ -450,7 +449,7 @@ namespace mongo {
c.runCommand("admin", BSON("sleep"<<120), info);
log() << "temp" << endl;
}
- catch( DBException& e ) {
+ catch( DBException& e ) {
log() << e.toString() << endl;
c.runCommand("admin", BSON("sleep"<<120), info);
log() << "temp" << endl;
@@ -460,24 +459,24 @@ namespace mongo {
while( 1 ) {
if( myConfig().arbiterOnly )
return;
-
+
try {
_syncThread();
}
- catch(DBException& e) {
+ catch(DBException& e) {
sethbmsg("syncThread: " + e.toString());
sleepsecs(10);
}
- catch(...) {
+ catch(...) {
sethbmsg("unexpected exception in syncThread()");
// TODO : SET NOT SECONDARY here?
sleepsecs(60);
}
sleepsecs(1);
- /* normally msgCheckNewState gets called periodically, but in a single node repl set there
- are no heartbeat threads, so we do it here to be sure. this is relevant if the singleton
- member has done a stepDown() and needs to come back up.
+ /* normally msgCheckNewState gets called periodically, but in a single node repl set there
+ are no heartbeat threads, so we do it here to be sure. this is relevant if the singleton
+ member has done a stepDown() and needs to come back up.
*/
OCCASIONALLY mgr->send( boost::bind(&Manager::msgCheckNewState, theReplSet->mgr) );
}
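
The loop above is deliberately defensive; stripped of the arbiter check and the single-node msgCheckNewState nudge, its retry discipline is simply:

    while( 1 ) {
        try { _syncThread(); }
        catch(DBException& e) { sethbmsg("syncThread: " + e.toString()); sleepsecs(10); }
        catch(...)            { sethbmsg("unexpected exception in syncThread()"); sleepsecs(60); }
        sleepsecs(1);           // pace the loop even when nothing went wrong
    }
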
diff --git a/db/repl_block.cpp b/db/repl_block.cpp
index 8b6b439cc38..dbd27667186 100644
--- a/db/repl_block.cpp
+++ b/db/repl_block.cpp
@@ -40,8 +40,8 @@ namespace mongo {
static const char * NS;
struct Ident {
-
- Ident(BSONObj r,string h,string n){
+
+ Ident(BSONObj r,string h,string n) {
BSONObjBuilder b;
b.appendElements( r );
b.append( "host" , h );
@@ -52,14 +52,14 @@ namespace mongo {
bool operator<( const Ident& other ) const {
return obj.woCompare( other.obj ) < 0;
}
-
+
BSONObj obj;
};
struct Info {
- Info() : loc(0){}
- ~Info(){
- if ( loc && owned ){
+ Info() : loc(0) {}
+ ~Info() {
+ if ( loc && owned ) {
delete loc;
}
}
@@ -72,33 +72,33 @@ namespace mongo {
_started = false;
}
- void run(){
+ void run() {
Client::initThread( "slaveTracking" );
DBDirectClient db;
- while ( ! inShutdown() ){
+ while ( ! inShutdown() ) {
sleepsecs( 1 );
if ( ! _dirty )
continue;
-
+
writelock lk(NS);
list< pair<BSONObj,BSONObj> > todo;
-
+
{
scoped_lock mylk(_mutex);
-
- for ( map<Ident,Info>::iterator i=_slaves.begin(); i!=_slaves.end(); i++ ){
+
+ for ( map<Ident,Info>::iterator i=_slaves.begin(); i!=_slaves.end(); i++ ) {
BSONObjBuilder temp;
temp.appendTimestamp( "syncedTo" , i->second.loc[0].asDate() );
- todo.push_back( pair<BSONObj,BSONObj>( i->first.obj.getOwned() ,
+ todo.push_back( pair<BSONObj,BSONObj>( i->first.obj.getOwned() ,
BSON( "$set" << temp.obj() ).getOwned() ) );
}
-
+
_slaves.clear();
}
- for ( list< pair<BSONObj,BSONObj> >::iterator i=todo.begin(); i!=todo.end(); i++ ){
+ for ( list< pair<BSONObj,BSONObj> >::iterator i=todo.begin(); i!=todo.end(); i++ ) {
db.update( NS , i->first , i->second , true );
}
@@ -106,52 +106,52 @@ namespace mongo {
}
}
- void reset(){
+ void reset() {
scoped_lock mylk(_mutex);
_slaves.clear();
}
- void update( const BSONObj& rid , const string& host , const string& ns , OpTime last ){
+ void update( const BSONObj& rid , const string& host , const string& ns , OpTime last ) {
REPLDEBUG( host << " " << rid << " " << ns << " " << last );
scoped_lock mylk(_mutex);
-
+
#ifdef _DEBUG
MongoFileAllowWrites allowWrites;
#endif
Ident ident(rid,host,ns);
Info& i = _slaves[ ident ];
- if ( i.loc ){
+ if ( i.loc ) {
i.loc[0] = last;
return;
}
-
+
dbMutex.assertAtLeastReadLocked();
BSONObj res;
- if ( Helpers::findOne( NS , ident.obj , res ) ){
+ if ( Helpers::findOne( NS , ident.obj , res ) ) {
assert( res["syncedTo"].type() );
i.owned = false;
i.loc = (OpTime*)res["syncedTo"].value();
i.loc[0] = last;
return;
}
-
+
i.owned = true;
i.loc = new OpTime[1];
i.loc[0] = last;
_dirty = true;
- if ( ! _started ){
+ if ( ! _started ) {
// start background thread here since we definitely need it
_started = true;
go();
}
}
-
- bool opReplicatedEnough( OpTime op , int w ){
+
+ bool opReplicatedEnough( OpTime op , int w ) {
RARELY {
REPLDEBUG( "looking for : " << op << " w=" << w );
}
@@ -161,9 +161,9 @@ namespace mongo {
w--; // now this is the # of slaves i need
scoped_lock mylk(_mutex);
- for ( map<Ident,Info>::iterator i=_slaves.begin(); i!=_slaves.end(); i++){
+ for ( map<Ident,Info>::iterator i=_slaves.begin(); i!=_slaves.end(); i++) {
OpTime s = *(i->second.loc);
- if ( s < op ){
+ if ( s < op ) {
continue;
}
if ( --w == 0 )
@@ -177,7 +177,7 @@ namespace mongo {
return _slaves.size();
}
-
+
// need to be careful not to deadlock with this
mutable mongo::mutex _mutex;
map<Ident,Info> _slaves;
@@ -188,12 +188,12 @@ namespace mongo {
const char * SlaveTracking::NS = "local.slaves";
- void updateSlaveLocation( CurOp& curop, const char * ns , OpTime lastOp ){
+ void updateSlaveLocation( CurOp& curop, const char * ns , OpTime lastOp ) {
if ( lastOp.isNull() )
return;
-
+
assert( str::startsWith(ns, "local.oplog.") );
-
+
Client * c = curop.getClient();
assert(c);
BSONObj rid = c->getRemoteID();
@@ -203,15 +203,15 @@ namespace mongo {
slaveTracking.update( rid , curop.getRemoteString( false ) , ns , lastOp );
}
- bool opReplicatedEnough( OpTime op , int w ){
+ bool opReplicatedEnough( OpTime op , int w ) {
return slaveTracking.opReplicatedEnough( op , w );
}
- void resetSlaveCache(){
+ void resetSlaveCache() {
slaveTracking.reset();
}
- unsigned getSlaveCount(){
+ unsigned getSlaveCount() {
return slaveTracking.getSlaveCount();
}
}
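
opReplicatedEnough reads naturally as "besides the primary, find w-1 slaves at or past op"; a condensed sketch of just that count, with the map standing in for the _slaves member and iteration assumed to happen under _mutex as above:

    bool replicatedEnough( OpTime op , int w , map<Ident,Info>& slaves ) {
        if ( --w <= 0 ) return true;                 // the primary itself counts as one
        for ( map<Ident,Info>::iterator i = slaves.begin(); i != slaves.end(); i++ ) {
            if ( *(i->second.loc) < op ) continue;   // this slave is still behind
            if ( --w == 0 ) return true;
        }
        return false;
    }
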
diff --git a/db/repl_block.h b/db/repl_block.h
index 9ad05315bde..3e867113585 100644
--- a/db/repl_block.h
+++ b/db/repl_block.h
@@ -24,10 +24,10 @@
/**
local.slaves - current location for all slaves
-
+
*/
namespace mongo {
-
+
void updateSlaveLocation( CurOp& curop, const char * ns , OpTime lastOp );
bool opReplicatedEnough( OpTime op , int w );
void resetSlaveCache();
diff --git a/db/replpair.h b/db/replpair.h
index 753f8940606..a55130819cd 100644
--- a/db/replpair.h
+++ b/db/replpair.h
@@ -55,8 +55,8 @@ namespace mongo {
int remotePort;
string remoteHost;
string remote; // host:port if port specified.
- // int date; // -1 not yet set; 0=slave; 1=master
-
+ // int date; // -1 not yet set; 0=slave; 1=master
+
string getInfo() {
stringstream ss;
ss << " state: ";
@@ -113,12 +113,12 @@ namespace mongo {
*/
inline bool _isMaster() {
if( replSet ) {
- if( theReplSet )
+ if( theReplSet )
return theReplSet->isPrimary();
return false;
}
- if( ! replSettings.slave )
+ if( ! replSettings.slave )
return true;
if ( replAllDead )
@@ -128,17 +128,17 @@ namespace mongo {
if( replPair->state == ReplPair::State_Master )
return true;
}
- else {
+ else {
if( replSettings.master ) {
- // if running with --master --slave, allow. note that master is also true
+ // if running with --master --slave, allow. note that master is also true
// for repl pairs so the check for replPair above is important.
return true;
}
}
-
+
if ( cc().isGod() )
return true;
-
+
return false;
}
inline bool isMaster(const char *client = 0) {
@@ -152,12 +152,12 @@ namespace mongo {
return strcmp( client, "local" ) == 0;
}
- inline void notMasterUnless(bool expr) {
+ inline void notMasterUnless(bool expr) {
uassert( 10107 , "not master" , expr );
}
- /* we allow queries to SimpleSlaves -- but not to the slave (nonmaster) member of a replica pair
- so that queries to a pair are realtime-consistent as much as possible. use setSlaveOk() to
+ /* we allow queries to SimpleSlaves -- but not to the slave (nonmaster) member of a replica pair
+ so that queries to a pair are realtime-consistent as much as possible. use setSlaveOk() to
query the nonmaster member of a replica pair.
*/
inline void replVerifyReadsOk(ParsedQuery& pq) {
@@ -166,7 +166,8 @@ namespace mongo {
if( isMaster() ) return;
uassert(13435, "not master and slaveok=false", pq.hasOption(QueryOption_SlaveOk));
uassert(13436, "not master or secondary, can't read", theReplSet && theReplSet->isSecondary() );
- } else {
+ }
+ else {
notMasterUnless(isMaster() || pq.hasOption(QueryOption_SlaveOk) || replSettings.slave == SimpleSlave );
}
}
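
Laid out flat, _isMaster() resolves in a fixed order; this summary only restates the branches visible above (the replAllDead branch body is elided by the hunk, so its outcome is not asserted here):

    // 1. replica set mode:      primary of theReplSet, otherwise not master
    // 2. not running as slave:  master
    // 3. replAllDead:           (branch elided in this hunk)
    // 4. replica pair:          master only in ReplPair::State_Master
    // 5. --master --slave:      master (the master flag is also true for pairs, hence the order)
    // 6. cc().isGod():          master
    // 7. otherwise:             not master
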
diff --git a/db/resource.h b/db/resource.h
index bee8d30987f..9ba1ed26a0c 100755..100644
--- a/db/resource.h
+++ b/db/resource.h
@@ -1,16 +1,16 @@
-//{{NO_DEPENDENCIES}}
-// Microsoft Visual C++ generated include file.
-// Used by db.rc
-//
-#define IDI_ICON2 102
-
-// Next default values for new objects
-//
-#ifdef APSTUDIO_INVOKED
-#ifndef APSTUDIO_READONLY_SYMBOLS
-#define _APS_NEXT_RESOURCE_VALUE 104
-#define _APS_NEXT_COMMAND_VALUE 40001
-#define _APS_NEXT_CONTROL_VALUE 1001
-#define _APS_NEXT_SYMED_VALUE 101
-#endif
-#endif
+//{{NO_DEPENDENCIES}}
+// Microsoft Visual C++ generated include file.
+// Used by db.rc
+//
+#define IDI_ICON2 102
+
+// Next default values for new objects
+//
+#ifdef APSTUDIO_INVOKED
+#ifndef APSTUDIO_READONLY_SYMBOLS
+#define _APS_NEXT_RESOURCE_VALUE 104
+#define _APS_NEXT_COMMAND_VALUE 40001
+#define _APS_NEXT_CONTROL_VALUE 1001
+#define _APS_NEXT_SYMED_VALUE 101
+#endif
+#endif
diff --git a/db/restapi.cpp b/db/restapi.cpp
index 91ef7d50328..7460c94febe 100644
--- a/db/restapi.cpp
+++ b/db/restapi.cpp
@@ -41,17 +41,17 @@ namespace mongo {
class RESTHandler : public DbWebHandler {
public:
- RESTHandler() : DbWebHandler( "DUMMY REST" , 1000 , true ){}
+ RESTHandler() : DbWebHandler( "DUMMY REST" , 1000 , true ) {}
- virtual bool handles( const string& url ) const {
- return
+ virtual bool handles( const string& url ) const {
+ return
url[0] == '/' &&
url.find_last_of( '/' ) > 0;
}
- virtual void handle( const char *rq, string url, BSONObj params,
+ virtual void handle( const char *rq, string url, BSONObj params,
string& responseMsg, int& responseCode,
- vector<string>& headers, const SockAddr &from ){
+ vector<string>& headers, const SockAddr &from ) {
string::size_type first = url.find( "/" , 1 );
if ( first == string::npos ) {
@@ -103,7 +103,7 @@ namespace mongo {
out() << "don't know how to handle a [" << method << "]" << endl;
}
- if( html )
+ if( html )
headers.push_back("Content-Type: text/html;charset=utf-8");
else
headers.push_back("Content-Type: text/plain;charset=utf-8");
@@ -114,7 +114,7 @@ namespace mongo {
bool handleRESTQuery( string ns , string action , BSONObj & params , int & responseCode , stringstream & out ) {
Timer t;
- int html = _getOption( params["html"] , 0 );
+ int html = _getOption( params["html"] , 0 );
int skip = _getOption( params["skip"] , 0 );
int num = _getOption( params["limit"] , _getOption( params["count" ] , 1000 ) ); // count is old, limit is new
@@ -127,7 +127,7 @@ namespace mongo {
BSONObjBuilder queryBuilder;
BSONObjIterator i(params);
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
string name = e.fieldName();
if ( name.find( "filter_" ) != 0 )
@@ -163,10 +163,11 @@ namespace mongo {
if( html ) {
string title = string("query ") + ns;
- out << start(title)
+ out << start(title)
<< p(title)
<< "<pre>";
- } else {
+ }
+ else {
out << "{\n";
out << " \"offset\" : " << skip << ",\n";
out << " \"rows\": [\n";
@@ -191,7 +192,7 @@ namespace mongo {
}
}
- if( html ) {
+ if( html ) {
out << "</pre>\n";
if( howMany == 0 ) out << p("Collection is empty");
out << _end();
@@ -212,7 +213,8 @@ namespace mongo {
try {
BSONObj obj = fromjson( body );
db.insert( ns.c_str(), obj );
- } catch ( ... ) {
+ }
+ catch ( ... ) {
responseCode = 400; // Bad Request. Seems reasonable for now.
out << "{ \"ok\" : false }";
return;
@@ -229,7 +231,7 @@ namespace mongo {
return atoi( e.valuestr() );
return def;
}
-
+
DBDirectClient db;
} restHandler;
@@ -252,19 +254,19 @@ namespace mongo {
class LowLevelMongodStatus : public WebStatusPlugin {
public:
- LowLevelMongodStatus() : WebStatusPlugin( "low level" , 5 , "requires read lock" ){}
+ LowLevelMongodStatus() : WebStatusPlugin( "low level" , 5 , "requires read lock" ) {}
- virtual void init(){}
+ virtual void init() {}
- void _gotLock( int millis , stringstream& ss ){
+ void _gotLock( int millis , stringstream& ss ) {
ss << "<pre>\n";
ss << "time to get readlock: " << millis << "ms\n";
-
+
ss << "# databases: " << dbHolder.size() << '\n';
-
+
if( ClientCursor::numCursors()>500 )
ss << "# Cursors: " << ClientCursor::numCursors() << '\n';
-
+
ss << "\nreplication: ";
if( *replInfo )
ss << "\nreplInfo: " << replInfo << "\n\n";
@@ -292,10 +294,10 @@ namespace mongo {
ss << "</pre>\n";
}
- virtual void run( stringstream& ss ){
+ virtual void run( stringstream& ss ) {
Timer t;
readlocktry lk( "" , 300 );
- if ( lk.got() ){
+ if ( lk.got() ) {
_gotLock( t.millis() , ss );
}
else {
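
For the REST handler above, the filter_ convention means a URL such as /foo/bar/?filter_x=1&limit=10 becomes a query on field x with a limit of 10; a reduced sketch of the parameter translation, assuming string-typed values (the type handling in the full code is elided by the hunk):

    BSONObjBuilder queryBuilder;
    BSONObjIterator i( params );
    while ( i.more() ) {
        BSONElement e = i.next();
        string name = e.fieldName();
        if ( name.find( "filter_" ) != 0 )
            continue;                                // not a filter parameter
        queryBuilder.append( name.substr( 7 ) , e.valuestr() );   // strip "filter_"
    }
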
diff --git a/db/restapi.h b/db/restapi.h
index fa76d5f326b..e5ac52083fe 100644
--- a/db/restapi.h
+++ b/db/restapi.h
@@ -25,7 +25,7 @@ namespace mongo {
class RestAdminAccess : public AdminAccess {
public:
- virtual ~RestAdminAccess() { }
+ virtual ~RestAdminAccess() { }
virtual bool haveAdminUsers() const;
virtual BSONObj getAdminUser( const string& username ) const;
diff --git a/db/scanandorder.h b/db/scanandorder.h
index e829f65b6ff..4c491fa629c 100644
--- a/db/scanandorder.h
+++ b/db/scanandorder.h
@@ -57,16 +57,18 @@ namespace mongo {
if (loc)
b.append("$diskLoc", loc->toBSONObj());
b.done();
- } else if (loc) {
+ }
+ else if (loc) {
BSONObjBuilder b( bb );
b.appendElements(js);
b.append("$diskLoc", loc->toBSONObj());
b.done();
- } else {
+ }
+ else {
bb.appendBuf((void*) js.objdata(), js.objsize());
}
}
-
+
typedef multimap<BSONObj,BSONObj,BSONObjCmp> BestMap;
class ScanAndOrder {
BestMap best; // key -> full object
@@ -76,9 +78,10 @@ namespace mongo {
unsigned approxSize;
void _add(BSONObj& k, BSONObj o, DiskLoc* loc) {
- if (!loc){
+ if (!loc) {
best.insert(make_pair(k.getOwned(),o.getOwned()));
- } else {
+ }
+ else {
BSONObjBuilder b;
b.appendElements(o);
b.append("$diskLoc", loc->toBSONObj());
@@ -99,8 +102,8 @@ namespace mongo {
public:
ScanAndOrder(int _startFrom, int _limit, BSONObj _order) :
- best( BSONObjCmp( _order ) ),
- startFrom(_startFrom), order(_order) {
+ best( BSONObjCmp( _order ) ),
+ startFrom(_startFrom), order(_order) {
limit = _limit > 0 ? _limit + startFrom : 0x7fffffff;
approxSize = 0;
}
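
The BestMap gives top-k selection almost for free: rows are inserted keyed by their sort key, and once the map grows past the window the current worst entry is discarded. The eviction below is a sketch of the idea only, not the class's exact bookkeeping (which also tracks approxSize); k, o, order, and limit are stand-ins:

    multimap<BSONObj,BSONObj,BSONObjCmp> best( (BSONObjCmp( order )) );
    // per candidate row, with k and o the sort key and full object:
    best.insert( make_pair( k.getOwned() , o.getOwned() ) );
    if ( best.size() > (unsigned)limit )
        best.erase( --best.end() );                  // drop the current worst
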
diff --git a/db/security.cpp b/db/security.cpp
index 2b43e6798c7..1ec42189b57 100644
--- a/db/security.cpp
+++ b/db/security.cpp
@@ -28,9 +28,9 @@ namespace mongo {
int AuthenticationInfo::warned = 0;
- void AuthenticationInfo::print(){
+ void AuthenticationInfo::print() {
cout << "AuthenticationInfo: " << this << '\n';
- for ( map<string,Auth>::iterator i=m.begin(); i!=m.end(); i++ ){
+ for ( map<string,Auth>::iterator i=m.begin(); i!=m.end(); i++ ) {
cout << "\t" << i->first << "\t" << i->second.level << '\n';
}
cout << "END" << endl;
@@ -38,16 +38,16 @@ namespace mongo {
bool AuthenticationInfo::_isAuthorizedSpecialChecks( const string& dbname ) {
- if ( cc().isGod() ){
+ if ( cc().isGod() ) {
return true;
}
-
- if ( isLocalHost ){
- atleastreadlock l("");
+
+ if ( isLocalHost ) {
+ atleastreadlock l("");
Client::GodScope gs;
Client::Context c("admin.system.users");
BSONObj result;
- if( ! Helpers::getSingleton("admin.system.users", result) ){
+ if( ! Helpers::getSingleton("admin.system.users", result) ) {
if( warned == 0 ) {
warned++;
log() << "note: no users configured in admin.system.users, allowing localhost access" << endl;
diff --git a/db/security.h b/db/security.h
index 577af9b434a..2b947c1ace6 100644
--- a/db/security.h
+++ b/db/security.h
@@ -33,36 +33,36 @@ namespace mongo {
class AuthenticationInfo : boost::noncopyable {
mongo::mutex _lock;
map<string, Auth> m; // dbname -> auth
- static int warned;
+ static int warned;
public:
- bool isLocalHost;
+ bool isLocalHost;
AuthenticationInfo() : _lock("AuthenticationInfo") { isLocalHost = false; }
~AuthenticationInfo() {
}
- void logout(const string& dbname ) {
+ void logout(const string& dbname ) {
scoped_lock lk(_lock);
- m.erase(dbname);
- }
- void authorize(const string& dbname ) {
+ m.erase(dbname);
+ }
+ void authorize(const string& dbname ) {
scoped_lock lk(_lock);
m[dbname].level = 2;
}
void authorizeReadOnly(const string& dbname) {
scoped_lock lk(_lock);
- m[dbname].level = 1;
+ m[dbname].level = 1;
}
bool isAuthorized(const string& dbname) { return _isAuthorized( dbname, 2 ); }
bool isAuthorizedReads(const string& dbname) { return _isAuthorized( dbname, 1 ); }
bool isAuthorizedForLock(const string& dbname, int lockType ) { return _isAuthorized( dbname , lockType > 0 ? 2 : 1 ); }
-
+
void print();
protected:
- bool _isAuthorized(const string& dbname, int level) {
+ bool _isAuthorized(const string& dbname, int level) {
if( m[dbname].level >= level ) return true;
- if( noauth ) return true;
- if( m["admin"].level >= level ) return true;
- if( m["local"].level >= level ) return true;
+ if( noauth ) return true;
+ if( m["admin"].level >= level ) return true;
+ if( m["local"].level >= level ) return true;
return _isAuthorizedSpecialChecks( dbname );
}
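
Usage-wise, the two levels map to reads (1) and writes (2), checked per database before the blanket admin/local grants; a small illustration, assuming noauth is off and no localhost/god special case applies, so the short-circuits above do not fire:

    AuthenticationInfo ai;
    ai.authorizeReadOnly( "test" );                  // level 1
    bool canRead  = ai.isAuthorizedReads( "test" );  // true  : needs level >= 1
    bool canWrite = ai.isAuthorized( "test" );       // false : needs level >= 2
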
diff --git a/db/security_commands.cpp b/db/security_commands.cpp
index 19ebc55b93f..67605aab77d 100644
--- a/db/security_commands.cpp
+++ b/db/security_commands.cpp
@@ -22,7 +22,7 @@
#include "pch.h"
#include "security.h"
#include "../util/md5.hpp"
-#include "json.h"
+#include "json.h"
#include "pdfile.h"
#include "db.h"
#include "dbhelpers.h"
@@ -32,17 +32,17 @@
namespace mongo {
-/* authentication
+ /* authentication
- system.users contains
- { user : <username>, pwd : <pwd_digest>, ... }
+ system.users contains
+ { user : <username>, pwd : <pwd_digest>, ... }
- getnonce sends nonce to client
+ getnonce sends nonce to client
- client then sends { authenticate:1, nonce:<nonce_str>, user:<username>, key:<key> }
+ client then sends { authenticate:1, nonce:<nonce_str>, user:<username>, key:<key> }
- where <key> is md5(<nonce_str><username><pwd_digest_str>) as a string
-*/
+ where <key> is md5(<nonce_str><username><pwd_digest_str>) as a string
+ */
boost::thread_specific_ptr<nonce> lastNonce;
@@ -83,7 +83,7 @@ namespace mongo {
return true;
}
} cmdLogout;
-
+
class CmdAuthenticate : public Command {
public:
virtual bool requiresAuth() { return false; }
@@ -102,16 +102,16 @@ namespace mongo {
string user = cmdObj.getStringField("user");
string key = cmdObj.getStringField("key");
string received_nonce = cmdObj.getStringField("nonce");
-
- if( user.empty() || key.empty() || received_nonce.empty() ) {
- log() << "field missing/wrong type in received authenticate command "
- << dbname
- << endl;
+
+ if( user.empty() || key.empty() || received_nonce.empty() ) {
+ log() << "field missing/wrong type in received authenticate command "
+ << dbname
+ << endl;
errmsg = "auth fails";
sleepmillis(10);
return false;
}
-
+
stringstream digestBuilder;
{
@@ -120,12 +120,13 @@ namespace mongo {
if ( ln == 0 ) {
reject = true;
log(1) << "auth: no lastNonce" << endl;
- } else {
+ }
+ else {
digestBuilder << hex << *ln;
reject = digestBuilder.str() != received_nonce;
if ( reject ) log(1) << "auth: different lastNonce" << endl;
}
-
+
if ( reject ) {
log() << "auth: bad nonce received or getnonce not called. could be a driver bug or a security attack. db:" << cc().database()->name << endl;
errmsg = "auth fails";
@@ -133,7 +134,7 @@ namespace mongo {
return false;
}
}
-
+
BSONObj userObj;
string pwd;
@@ -143,12 +144,12 @@ namespace mongo {
else {
static BSONObj userPattern = fromjson("{\"user\":1}");
string systemUsers = dbname + ".system.users";
- OCCASIONALLY Helpers::ensureIndex(systemUsers.c_str(), userPattern, false, "user_1");
+ OCCASIONALLY Helpers::ensureIndex(systemUsers.c_str(), userPattern, false, "user_1");
{
BSONObjBuilder b;
b << "user" << user;
BSONObj query = b.done();
- if( !Helpers::findOne(systemUsers.c_str(), query, userObj) ) {
+ if( !Helpers::findOne(systemUsers.c_str(), query, userObj) ) {
log() << "auth: couldn't find user " << user << ", " << systemUsers << endl;
errmsg = "auth fails";
return false;
@@ -158,35 +159,36 @@ namespace mongo {
pwd = userObj.getStringField("pwd");
}
-
+
md5digest d;
{
digestBuilder << user << pwd;
string done = digestBuilder.str();
-
+
md5_state_t st;
md5_init(&st);
md5_append(&st, (const md5_byte_t *) done.c_str(), done.size());
md5_finish(&st, d);
}
-
+
string computed = digestToString( d );
-
- if ( key != computed ){
+
+ if ( key != computed ) {
log() << "auth: key mismatch " << user << ", ns:" << dbname << endl;
errmsg = "auth fails";
return false;
}
AuthenticationInfo *ai = cc().getAuthenticationInfo();
-
+
if ( userObj[ "readOnly" ].isBoolean() && userObj[ "readOnly" ].boolean() ) {
ai->authorizeReadOnly( cc().database()->name.c_str() );
- } else {
+ }
+ else {
ai->authorize( cc().database()->name.c_str() );
}
return true;
}
} cmdAuthenticate;
-
+
} // namespace mongo
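
The authentication comment above fully specifies the client's side of the handshake: key = md5(<nonce_str><username><pwd_digest_str>) rendered as hex, in exactly the order digestBuilder assembles it. A minimal standalone sketch of that computation, reusing the bundled public-domain md5 API this file already includes; hexDigest is a local helper, not a mongo function:

    #include <cstdio>
    #include <string>
    #include "md5.h"   // bundled public-domain md5 (md5_state_t et al.)

    // Local helper, not part of mongo: render a 16-byte digest as hex.
    static std::string hexDigest(const md5_byte_t d[16]) {
        char buf[33];
        for (int i = 0; i < 16; i++)
            std::sprintf(buf + 2 * i, "%02x", d[i]);
        return std::string(buf, 32);
    }

    // key = md5(<nonce_str><username><pwd_digest_str>), as verified by
    // CmdAuthenticate::run above.
    std::string computeAuthKey(const std::string& nonceHex,
                               const std::string& user,
                               const std::string& pwdDigest) {
        std::string done = nonceHex + user + pwdDigest;  // digestBuilder order
        md5_state_t st;
        md5_byte_t d[16];
        md5_init(&st);
        md5_append(&st, (const md5_byte_t*)done.c_str(), (int)done.size());
        md5_finish(&st, d);
        return hexDigest(d);
    }
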
diff --git a/db/security_key.cpp b/db/security_key.cpp
index 9796ef5cfff..1ea702127bd 100644
--- a/db/security_key.cpp
+++ b/db/security_key.cpp
@@ -31,7 +31,7 @@ namespace mongo {
bool noauth = true;
AuthInfo internalSecurity;
-
+
bool setUpSecurityKey(const string& filename) {
struct stat stats;
@@ -50,12 +50,12 @@ namespace mongo {
#endif
const unsigned long long fileLength = stats.st_size;
- if (fileLength < 6 || fileLength > 1024) {
+ if (fileLength < 6 || fileLength > 1024) {
log() << " key file " << filename << " has length " << stats.st_size
<< ", must be between 6 and 1024 chars" << endl;
return false;
}
-
+
FILE* file = fopen( filename.c_str(), "rb" );
if (!file) {
log() << "error opening file: " << filename << ": " << strerror(errno) << endl;
@@ -63,9 +63,9 @@ namespace mongo {
}
string str = "";
-
+
// strip key file
- unsigned long long read = 0;
+ unsigned long long read = 0;
while (read < fileLength) {
char buf;
int readLength = fread(&buf, 1, 1, file);
@@ -85,7 +85,7 @@ namespace mongo {
log() << "invalid char in key file " << filename << ": " << buf << endl;
return false;
}
-
+
str += buf;
}
@@ -93,13 +93,13 @@ namespace mongo {
log() << "security key must be at least 6 characters" << endl;
return false;
}
-
+
log(1) << "security key: " << str << endl;
// createPWDigest should really not be a member func
DBClientConnection conn;
internalSecurity.pwd = conn.createPasswordDigest(internalSecurity.user, str);
-
+
return true;
}
} // namespace mongo
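
The key-file loop above boils down to: skip whitespace, reject any other character that isn't a plausible key character, and require both the file and the stripped result to stay within the 6-1024 bound. A condensed standalone sketch; isValidKeyChar is a local stand-in for the per-character checks, not necessarily the exact set the server accepts:

    #include <cctype>
    #include <fstream>
    #include <string>

    // Local stand-in for the per-char checks in setUpSecurityKey.
    static bool isValidKeyChar(char c) {
        return std::isalnum((unsigned char)c) || c == '+' || c == '/' || c == '=';
    }

    // Returns the stripped key, or an empty string on any validation failure.
    std::string readKeyFile(const std::string& filename) {
        std::ifstream in(filename.c_str(), std::ios::binary);
        if (!in) return "";
        std::string key;
        char c;
        while (in.get(c)) {
            if (c == ' ' || c == '\t' || c == '\r' || c == '\n')
                continue;                 // whitespace is stripped, as above
            if (!isValidKeyChar(c))
                return "";                // "invalid char in key file"
            key += c;
        }
        if (key.size() < 6 || key.size() > 1024)
            return "";                    // mirrors the 6..1024 length check
        return key;
    }
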
diff --git a/db/stats/counters.cpp b/db/stats/counters.cpp
index eb82a9871d9..889e8a86c4c 100644
--- a/db/stats/counters.cpp
+++ b/db/stats/counters.cpp
@@ -22,7 +22,7 @@
namespace mongo {
- OpCounters::OpCounters(){
+ OpCounters::OpCounters() {
int zero = 0;
BSONObjBuilder b;
@@ -42,16 +42,16 @@ namespace mongo {
_command = (AtomicUInt*)_obj["command"].value();
}
- void OpCounters::gotOp( int op , bool isCommand ){
- switch ( op ){
+ void OpCounters::gotOp( int op , bool isCommand ) {
+ switch ( op ) {
case dbInsert: /*gotInsert();*/ break; // need to handle multi-insert
- case dbQuery:
+ case dbQuery:
if ( isCommand )
gotCommand();
- else
- gotQuery();
+ else
+ gotQuery();
break;
-
+
case dbUpdate: gotUpdate(); break;
case dbDelete: gotDelete(); break;
case dbGetMore: gotGetMore(); break;
@@ -62,19 +62,19 @@ namespace mongo {
default: log() << "OpCounters::gotOp unknown op: " << op << endl;
}
}
-
- BSONObj& OpCounters::getObj(){
+
+ BSONObj& OpCounters::getObj() {
const unsigned MAX = 1 << 30;
RARELY {
- bool wrap =
- _insert->get() > MAX ||
- _query->get() > MAX ||
- _update->get() > MAX ||
- _delete->get() > MAX ||
- _getmore->get() > MAX ||
- _command->get() > MAX;
-
- if ( wrap ){
+ bool wrap =
+ _insert->get() > MAX ||
+ _query->get() > MAX ||
+ _update->get() > MAX ||
+ _delete->get() > MAX ||
+ _getmore->get() > MAX ||
+ _command->get() > MAX;
+
+ if ( wrap ) {
_insert->zero();
_query->zero();
_update->zero();
@@ -82,28 +82,28 @@ namespace mongo {
_getmore->zero();
_command->zero();
}
-
+
}
- return _obj;
+ return _obj;
}
- IndexCounters::IndexCounters(){
+ IndexCounters::IndexCounters() {
_memSupported = _pi.blockCheckSupported();
-
+
_btreeMemHits = 0;
_btreeMemMisses = 0;
_btreeAccesses = 0;
-
-
+
+
_maxAllowed = ( numeric_limits< long long >::max() ) / 2;
_resets = 0;
_sampling = 0;
_samplingrate = 100;
}
-
- void IndexCounters::append( BSONObjBuilder& b ){
- if ( ! _memSupported ){
+
+ void IndexCounters::append( BSONObjBuilder& b ) {
+ if ( ! _memSupported ) {
b.append( "note" , "not supported on this platform" );
return;
}
@@ -114,33 +114,33 @@ namespace mongo {
bb.appendNumber( "misses" , _btreeMemMisses );
bb.append( "resets" , _resets );
-
+
bb.append( "missRatio" , (_btreeAccesses ? (_btreeMemMisses / (double)_btreeAccesses) : 0) );
-
+
bb.done();
-
- if ( _btreeAccesses > _maxAllowed ){
+
+ if ( _btreeAccesses > _maxAllowed ) {
_btreeAccesses = 0;
_btreeMemMisses = 0;
_btreeMemHits = 0;
_resets++;
}
}
-
+
FlushCounters::FlushCounters()
: _total_time(0)
, _flushes(0)
, _last()
{}
- void FlushCounters::flushed(int ms){
+ void FlushCounters::flushed(int ms) {
_flushes++;
_total_time += ms;
_last_time = ms;
_last = jsTime();
}
- void FlushCounters::append( BSONObjBuilder& b ){
+ void FlushCounters::append( BSONObjBuilder& b ) {
b.appendNumber( "flushes" , _flushes );
b.appendNumber( "total_ms" , _total_time );
b.appendNumber( "average_ms" , (_flushes ? (_total_time / double(_flushes)) : 0.0) );
@@ -149,16 +149,16 @@ namespace mongo {
}
- void GenericCounter::hit( const string& name , int count ){
+ void GenericCounter::hit( const string& name , int count ) {
scoped_lock lk( _mutex );
_counts[name]++;
}
-
+
BSONObj GenericCounter::getObj() {
BSONObjBuilder b(128);
{
mongo::mutex::scoped_lock lk( _mutex );
- for ( map<string,long long>::iterator i=_counts.begin(); i!=_counts.end(); i++ ){
+ for ( map<string,long long>::iterator i=_counts.begin(); i!=_counts.end(); i++ ) {
b.appendNumber( i->first , i->second );
}
}
@@ -172,7 +172,7 @@ namespace mongo {
// don't care about the race as it's just a counter
bool overflow = _bytesIn > MAX || _bytesOut > MAX;
- if ( overflow ){
+ if ( overflow ) {
_lock.lock();
_overflows++;
_bytesIn = bytesIn;
@@ -185,10 +185,10 @@ namespace mongo {
_bytesIn += bytesIn;
_bytesOut += bytesOut;
_requests++;
- _lock.unlock();
+ _lock.unlock();
}
}
-
+
void NetworkCounter::append( BSONObjBuilder& b ) {
_lock.lock();
b.appendNumber( "bytesIn" , _bytesIn );
@@ -196,12 +196,12 @@ namespace mongo {
b.appendNumber( "numRequests" , _requests );
_lock.unlock();
}
-
+
OpCounters globalOpCounters;
OpCounters replOpCounters;
IndexCounters globalIndexCounters;
FlushCounters globalFlushCounters;
NetworkCounter networkCounter;
-
+
}
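
The RARELY block in OpCounters::getObj above zeroes all six counters together once any of them crosses 2^30, so the ratios between them stay meaningful after a wrap. The same idea in isolation, as a sketch:

    #include <vector>

    // Zero every counter as soon as one nears overflow, so that ratios
    // between counters (inserts vs. queries, etc.) remain comparable.
    void wrapCountersIfNeeded(std::vector<unsigned>& counters) {
        const unsigned MAX = 1u << 30;   // same threshold as OpCounters::getObj
        for (size_t i = 0; i < counters.size(); i++) {
            if (counters[i] > MAX) {
                for (size_t j = 0; j < counters.size(); j++)
                    counters[j] = 0;
                return;
            }
        }
    }
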
diff --git a/db/stats/counters.h b/db/stats/counters.h
index 8e8f31de86f..ed95cc95184 100644
--- a/db/stats/counters.h
+++ b/db/stats/counters.h
@@ -31,22 +31,22 @@ namespace mongo {
*/
class OpCounters {
public:
-
+
OpCounters();
- AtomicUInt * getInsert(){ return _insert; }
- AtomicUInt * getQuery(){ return _query; }
- AtomicUInt * getUpdate(){ return _update; }
- AtomicUInt * getDelete(){ return _delete; }
- AtomicUInt * getGetMore(){ return _getmore; }
- AtomicUInt * getCommand(){ return _command; }
-
- void gotInsert(){ _insert[0]++; }
- void gotQuery(){ _query[0]++; }
- void gotUpdate(){ _update[0]++; }
- void gotDelete(){ _delete[0]++; }
- void gotGetMore(){ _getmore[0]++; }
- void gotCommand(){ _command[0]++; }
+ AtomicUInt * getInsert() { return _insert; }
+ AtomicUInt * getQuery() { return _query; }
+ AtomicUInt * getUpdate() { return _update; }
+ AtomicUInt * getDelete() { return _delete; }
+ AtomicUInt * getGetMore() { return _getmore; }
+ AtomicUInt * getCommand() { return _command; }
+
+ void gotInsert() { _insert[0]++; }
+ void gotQuery() { _query[0]++; }
+ void gotUpdate() { _update[0]++; }
+ void gotDelete() { _delete[0]++; }
+ void gotGetMore() { _getmore[0]++; }
+ void gotCommand() { _command[0]++; }
void gotOp( int op , bool isCommand );
@@ -61,7 +61,7 @@ namespace mongo {
AtomicUInt * _getmore;
AtomicUInt * _command;
};
-
+
extern OpCounters globalOpCounters;
extern OpCounters replOpCounters;
@@ -69,8 +69,8 @@ namespace mongo {
class IndexCounters {
public:
IndexCounters();
-
- void btree( char * node ){
+
+ void btree( char * node ) {
if ( ! _memSupported )
return;
if ( _sampling++ % _samplingrate )
@@ -78,28 +78,28 @@ namespace mongo {
btree( _pi.blockInMemory( node ) );
}
- void btree( bool memHit ){
+ void btree( bool memHit ) {
if ( memHit )
_btreeMemHits++;
else
_btreeMemMisses++;
_btreeAccesses++;
}
- void btreeHit(){ _btreeMemHits++; _btreeAccesses++; }
- void btreeMiss(){ _btreeMemMisses++; _btreeAccesses++; }
-
+ void btreeHit() { _btreeMemHits++; _btreeAccesses++; }
+ void btreeMiss() { _btreeMemMisses++; _btreeAccesses++; }
+
void append( BSONObjBuilder& b );
-
+
private:
ProcessInfo _pi;
bool _memSupported;
int _sampling;
int _samplingrate;
-
+
int _resets;
long long _maxAllowed;
-
+
long long _btreeMemMisses;
long long _btreeMemHits;
long long _btreeAccesses;
@@ -112,7 +112,7 @@ namespace mongo {
FlushCounters();
void flushed(int ms);
-
+
void append( BSONObjBuilder& b );
private:
@@ -137,7 +137,7 @@ namespace mongo {
class NetworkCounter {
public:
- NetworkCounter() : _bytesIn(0), _bytesOut(0), _requests(0), _overflows(0){}
+ NetworkCounter() : _bytesIn(0), _bytesOut(0), _requests(0), _overflows(0) {}
void hit( long long bytesIn , long long bytesOut );
void append( BSONObjBuilder& b );
private:
@@ -149,6 +149,6 @@ namespace mongo {
SpinLock _lock;
};
-
+
extern NetworkCounter networkCounter;
}
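
IndexCounters::btree(char*) above only pays for blockInMemory() on one call in _samplingrate (100 by default), since probing page residency is comparatively expensive. The sampling pattern on its own, sketched:

    // Sample 1-in-N: cheap modulo test up front, expensive probe only on
    // the sampled calls. Mirrors `if ( _sampling++ % _samplingrate ) return;`.
    struct SampledProbe {
        int sampling;
        int samplingrate;
        SampledProbe(int rate = 100) : sampling(0), samplingrate(rate) {}
        bool shouldProbe() { return (sampling++ % samplingrate) == 0; }
    };
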
diff --git a/db/stats/fine_clock.h b/db/stats/fine_clock.h
index 1f23175cd51..02600e718c4 100644
--- a/db/stats/fine_clock.h
+++ b/db/stats/fine_clock.h
@@ -36,29 +36,30 @@ namespace mongo {
* Really, you shouldn't be using this class in hot code paths on
* platforms where you're not sure the overhead is low.
*/
- class FineClock{
+ class FineClock {
public:
typedef timespec WallTime;
- static WallTime now(){
+ static WallTime now() {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return ts;
}
- static uint64_t diffInNanos( WallTime end, WallTime start ){
+ static uint64_t diffInNanos( WallTime end, WallTime start ) {
uint64_t diff;
- if ( end.tv_nsec < start.tv_nsec ){
+ if ( end.tv_nsec < start.tv_nsec ) {
diff = 1000000000 * ( end.tv_sec - start.tv_sec - 1);
diff += 1000000000 + end.tv_nsec - start.tv_nsec;
- } else {
+ }
+ else {
diff = 1000000000 * ( end.tv_sec - start.tv_sec );
diff += end.tv_nsec - start.tv_nsec;
}
return diff;
}
-
+
};
}
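
Typical use of FineClock as declared above, timing a section in nanoseconds (Linux-specific, since it rests on clock_gettime(CLOCK_MONOTONIC)):

    #include <stdint.h>
    #include <iostream>
    // using mongo::FineClock from this header

    void timedSection() {
        mongo::FineClock::WallTime start = mongo::FineClock::now();
        // ... work to be measured ...
        mongo::FineClock::WallTime end = mongo::FineClock::now();
        uint64_t nanos = mongo::FineClock::diffInNanos(end, start);
        std::cout << "section took " << nanos << " ns" << std::endl;
    }
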
diff --git a/db/stats/service_stats.cpp b/db/stats/service_stats.cpp
index 5574ecb108a..d69147fe969 100644
--- a/db/stats/service_stats.cpp
+++ b/db/stats/service_stats.cpp
@@ -25,7 +25,7 @@ namespace mongo {
using std::ostringstream;
- ServiceStats::ServiceStats(){
+ ServiceStats::ServiceStats() {
// Time histogram covers up to 128msec in exponential intervals
// starting at 125usec.
Histogram::Options timeOpts;
@@ -43,12 +43,12 @@ namespace mongo {
_spaceHistogram = new Histogram( spaceOpts );
}
- ServiceStats::~ServiceStats(){
+ ServiceStats::~ServiceStats() {
delete _timeHistogram;
delete _spaceHistogram;
}
- void ServiceStats::logResponse( uint64_t duration, uint64_t bytes ){
+ void ServiceStats::logResponse( uint64_t duration, uint64_t bytes ) {
_spinLock.lock();
_timeHistogram->insert( duration / 1000 /* in usecs */ );
_spaceHistogram->insert( bytes );
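
The constructor comment above pins down the time histogram's shape: exponential buckets starting at 125usec and doubling up to 128msec, i.e. 125us * 2^10 = 128000us, eleven boundaries in all. A quick standalone check:

    #include <cstdio>

    int main() {
        // boundaries: 125us, 250us, 500us, ..., 128000us (128ms)
        for (unsigned long long b = 125; b <= 128000; b *= 2)
            std::printf("%llu usec\n", b);
        return 0;
    }
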
diff --git a/db/stats/snapshots.cpp b/db/stats/snapshots.cpp
index 78f34345547..a81568dc319 100644
--- a/db/stats/snapshots.cpp
+++ b/db/stats/snapshots.cpp
@@ -27,28 +27,27 @@
handles snapshotting performance metrics and other such things
*/
namespace mongo {
- void SnapshotData::takeSnapshot(){
- _created = curTimeMicros64();
- _globalUsage = Top::global.getGlobalData();
+ void SnapshotData::takeSnapshot() {
+ _created = curTimeMicros64();
+ _globalUsage = Top::global.getGlobalData();
_totalWriteLockedTime = dbMutex.info().getTimeLocked();
Top::global.cloneMap(_usage);
}
SnapshotDelta::SnapshotDelta( const SnapshotData& older , const SnapshotData& newer )
- : _older( older ) , _newer( newer )
- {
+ : _older( older ) , _newer( newer ) {
assert( _newer._created > _older._created );
_elapsed = _newer._created - _older._created;
-
+
}
-
- Top::CollectionData SnapshotDelta::globalUsageDiff(){
+
+ Top::CollectionData SnapshotDelta::globalUsageDiff() {
return Top::CollectionData( _older._globalUsage , _newer._globalUsage );
}
- Top::UsageMap SnapshotDelta::collectionUsageDiff(){
+ Top::UsageMap SnapshotDelta::collectionUsageDiff() {
Top::UsageMap u;
-
- for ( Top::UsageMap::const_iterator i=_newer._usage.begin(); i != _newer._usage.end(); i++ ){
+
+ for ( Top::UsageMap::const_iterator i=_newer._usage.begin(); i != _newer._usage.end(); i++ ) {
Top::UsageMap::const_iterator j = _older._usage.find(i->first);
if (j != _older._usage.end())
u[i->first] = Top::CollectionData( j->second , i->second );
@@ -62,8 +61,8 @@ namespace mongo {
, _loc(0)
, _stored(0)
{}
-
- const SnapshotData* Snapshots::takeSnapshot(){
+
+ const SnapshotData* Snapshots::takeSnapshot() {
scoped_lock lk(_lock);
_loc = ( _loc + 1 ) % _n;
_snapshots[_loc].takeSnapshot();
@@ -72,7 +71,7 @@ namespace mongo {
return &_snapshots[_loc];
}
- auto_ptr<SnapshotDelta> Snapshots::computeDelta( int numBack ){
+ auto_ptr<SnapshotDelta> Snapshots::computeDelta( int numBack ) {
scoped_lock lk(_lock);
auto_ptr<SnapshotDelta> p;
if ( numBack < numDeltas() )
@@ -80,43 +79,43 @@ namespace mongo {
return p;
}
- const SnapshotData& Snapshots::getPrev( int numBack ){
+ const SnapshotData& Snapshots::getPrev( int numBack ) {
int x = _loc - numBack;
if ( x < 0 )
x += _n;
return _snapshots[x];
}
- void Snapshots::outputLockInfoHTML( stringstream& ss ){
+ void Snapshots::outputLockInfoHTML( stringstream& ss ) {
scoped_lock lk(_lock);
ss << "\n<div>";
- for ( int i=0; i<numDeltas(); i++ ){
+ for ( int i=0; i<numDeltas(); i++ ) {
SnapshotDelta d( getPrev(i+1) , getPrev(i) );
unsigned e = (unsigned) d.elapsed() / 1000;
ss << (unsigned)(100*d.percentWriteLocked());
- if( e < 3900 || e > 4100 )
+ if( e < 3900 || e > 4100 )
ss << '(' << e / 1000.0 << "s)";
ss << ' ';
}
ss << "</div>\n";
}
- void SnapshotThread::run(){
+ void SnapshotThread::run() {
Client::initThread("snapshotthread");
Client& client = cc();
long long numLoops = 0;
-
+
const SnapshotData* prev = 0;
- while ( ! inShutdown() ){
+ while ( ! inShutdown() ) {
try {
const SnapshotData* s = statsSnapshots.takeSnapshot();
-
- if ( prev ){
+
+ if ( prev ) {
unsigned long long elapsed = s->_created - prev->_created;
- if ( cmdLine.cpu ){
+ if ( cmdLine.cpu ) {
SnapshotDelta d( *prev , *s );
log() << "cpu: elapsed:" << (elapsed/1000) <<" writelock: " << (int)(100*d.percentWriteLocked()) << "%" << endl;
}
@@ -125,14 +124,14 @@ namespace mongo {
prev = s;
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
log() << "ERROR in SnapshotThread: " << e.what() << endl;
}
-
+
numLoops++;
sleepsecs(4);
}
-
+
client.shutdown();
}
@@ -140,15 +139,15 @@ namespace mongo {
class WriteLockStatus : public WebStatusPlugin {
public:
- WriteLockStatus() : WebStatusPlugin( "write lock" , 51 , "% time in write lock, by 4 sec periods" ){}
- virtual void init(){}
+ WriteLockStatus() : WebStatusPlugin( "write lock" , 51 , "% time in write lock, by 4 sec periods" ) {}
+ virtual void init() {}
- virtual void run( stringstream& ss ){
+ virtual void run( stringstream& ss ) {
statsSnapshots.outputLockInfoHTML( ss );
ss << "<a "
- "href=\"http://www.mongodb.org/pages/viewpage.action?pageId=7209296\" "
- "title=\"snapshot: was the db in the write lock when this page was generated?\">";
+ "href=\"http://www.mongodb.org/pages/viewpage.action?pageId=7209296\" "
+ "title=\"snapshot: was the db in the write lock when this page was generated?\">";
ss << "write locked now:</a> " << (dbMutex.info().isLocked() ? "true" : "false") << "\n";
}
@@ -156,9 +155,9 @@ namespace mongo {
class DBTopStatus : public WebStatusPlugin {
public:
- DBTopStatus() : WebStatusPlugin( "dbtop" , 50 , "(occurences|percent of elapsed)" ){}
+ DBTopStatus() : WebStatusPlugin( "dbtop" , 50 , "(occurrences|percent of elapsed)" ) {}
- void display( stringstream& ss , double elapsed , const Top::UsageData& usage ){
+ void display( stringstream& ss , double elapsed , const Top::UsageData& usage ) {
ss << "<td>";
ss << usage.count;
ss << "</td><td>";
@@ -171,11 +170,11 @@ namespace mongo {
ss << "</td>";
}
- void display( stringstream& ss , double elapsed , const string& ns , const Top::CollectionData& data ){
+ void display( stringstream& ss , double elapsed , const string& ns , const Top::CollectionData& data ) {
if ( ns != "TOTAL" && data.total.count == 0 )
return;
ss << "<tr><th>" << ns << "</th>";
-
+
display( ss , elapsed , data.total );
display( ss , elapsed , data.readLock );
@@ -186,43 +185,43 @@ namespace mongo {
display( ss , elapsed , data.insert );
display( ss , elapsed , data.update );
display( ss , elapsed , data.remove );
-
+
ss << "</tr>\n";
}
- void run( stringstream& ss ){
+ void run( stringstream& ss ) {
auto_ptr<SnapshotDelta> delta = statsSnapshots.computeDelta();
if ( ! delta.get() )
return;
-
+
ss << "<table border=1 cellpadding=2 cellspacing=0>";
ss << "<tr align='left'><th>";
- ss << a("http://www.mongodb.org/display/DOCS/Developer+FAQ#DeveloperFAQ-What%27sa%22namespace%22%3F", "namespace") <<
- "NS</a></th>"
- "<th colspan=2>total</th>"
- "<th colspan=2>Reads</th>"
- "<th colspan=2>Writes</th>"
- "<th colspan=2>Queries</th>"
- "<th colspan=2>GetMores</th>"
- "<th colspan=2>Inserts</th>"
- "<th colspan=2>Updates</th>"
- "<th colspan=2>Removes</th>";
+ ss << a("http://www.mongodb.org/display/DOCS/Developer+FAQ#DeveloperFAQ-What%27sa%22namespace%22%3F", "namespace") <<
+ "NS</a></th>"
+ "<th colspan=2>total</th>"
+ "<th colspan=2>Reads</th>"
+ "<th colspan=2>Writes</th>"
+ "<th colspan=2>Queries</th>"
+ "<th colspan=2>GetMores</th>"
+ "<th colspan=2>Inserts</th>"
+ "<th colspan=2>Updates</th>"
+ "<th colspan=2>Removes</th>";
ss << "</tr>\n";
-
+
display( ss , (double) delta->elapsed() , "TOTAL" , delta->globalUsageDiff() );
-
+
Top::UsageMap usage = delta->collectionUsageDiff();
- for ( Top::UsageMap::iterator i=usage.begin(); i != usage.end(); i++ ){
+ for ( Top::UsageMap::iterator i=usage.begin(); i != usage.end(); i++ ) {
display( ss , (double) delta->elapsed() , i->first , i->second );
}
-
+
ss << "</table>";
-
+
}
- virtual void init(){}
+ virtual void init() {}
} dbtopStatus;
Snapshots statsSnapshots;
- SnapshotThread snapshotThread;
+ SnapshotThread snapshotThread;
}
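
Snapshots above keeps the last _n SnapshotData entries in a ring: takeSnapshot advances _loc modulo _n, and getPrev walks backwards with a manual wrap. The index arithmetic extracted into a standalone sketch:

    // Ring-buffer index math used by Snapshots.
    struct Ring {
        int n;      // capacity (_n)
        int loc;    // index of newest entry (_loc)
        int advance() { loc = (loc + 1) % n; return loc; }
        int prev(int numBack) const {       // as in Snapshots::getPrev
            int x = loc - numBack;
            if (x < 0) x += n;
            return x;
        }
    };
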
diff --git a/db/stats/snapshots.h b/db/stats/snapshots.h
index e03bf37a608..2f12864f3a0 100644
--- a/db/stats/snapshots.h
+++ b/db/stats/snapshots.h
@@ -28,7 +28,7 @@
namespace mongo {
class SnapshotThread;
-
+
/**
* stores a point in time snapshot
* i.e. all counters at a given time
@@ -45,14 +45,14 @@ namespace mongo {
friend class SnapshotDelta;
friend class Snapshots;
};
-
+
/**
* contains performance information for a time period
*/
class SnapshotDelta {
public:
SnapshotDelta( const SnapshotData& older , const SnapshotData& newer );
-
+
unsigned long long start() const {
return _older._created;
}
@@ -60,7 +60,7 @@ namespace mongo {
unsigned long long elapsed() const {
return _elapsed;
}
-
+
unsigned long long timeInWriteLock() const {
return _newer._totalWriteLockedTime - _older._totalWriteLockedTime;
}
@@ -83,15 +83,15 @@ namespace mongo {
class Snapshots {
public:
Snapshots(int n=100);
-
+
const SnapshotData* takeSnapshot();
-
+
int numDeltas() const { return _stored-1; }
const SnapshotData& getPrev( int numBack = 0 );
auto_ptr<SnapshotDelta> computeDelta( int numBack = 0 );
-
-
+
+
void outputLockInfoHTML( stringstream& ss );
private:
mongo::mutex _lock;
@@ -106,7 +106,7 @@ namespace mongo {
string name() const { return "snapshot"; }
void run();
};
-
+
extern Snapshots statsSnapshots;
extern SnapshotThread snapshotThread;
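
The two accessors above are what percentWriteLocked() (used by outputLockInfoHTML earlier) divides: time spent write-locked over wall-clock elapsed. A sketch of the implied ratio; the zero-elapsed guard here is our own addition, not from the source:

    double percentWriteLocked(unsigned long long timeInWriteLock,
                              unsigned long long elapsed) {
        return elapsed ? (double)timeInWriteLock / (double)elapsed : 0.0;
    }
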
diff --git a/db/stats/top.cpp b/db/stats/top.cpp
index 7357fc63886..3f1426b4de2 100644
--- a/db/stats/top.cpp
+++ b/db/stats/top.cpp
@@ -22,17 +22,16 @@
#include "../commands.h"
namespace mongo {
-
- Top::UsageData::UsageData( const UsageData& older , const UsageData& newer )
- {
+
+ Top::UsageData::UsageData( const UsageData& older , const UsageData& newer ) {
// this won't be 100% accurate on rollovers and drop(), but at least it won't be negative
time = (newer.time > older.time) ? (newer.time - older.time) : newer.time;
count = (newer.count > older.count) ? (newer.count - older.count) : newer.count;
-
+
}
Top::CollectionData::CollectionData( const CollectionData& older , const CollectionData& newer )
- : total( older.total , newer.total ) ,
+ : total( older.total , newer.total ) ,
readLock( older.readLock , newer.readLock ) ,
writeLock( older.writeLock , newer.writeLock ) ,
queries( older.queries , newer.queries ) ,
@@ -40,16 +39,15 @@ namespace mongo {
insert( older.insert , newer.insert ) ,
update( older.update , newer.update ) ,
remove( older.remove , newer.remove ),
- commands( older.commands , newer.commands )
- {
-
+ commands( older.commands , newer.commands ) {
+
}
- void Top::record( const string& ns , int op , int lockType , long long micros , bool command ){
+ void Top::record( const string& ns , int op , int lockType , long long micros , bool command ) {
//cout << "record: " << ns << "\t" << op << "\t" << command << endl;
scoped_lock lk(_lock);
-
- if ( ( command || op == dbQuery ) && ns == _lastDropped ){
+
+ if ( ( command || op == dbQuery ) && ns == _lastDropped ) {
_lastDropped = "";
return;
}
@@ -59,15 +57,15 @@ namespace mongo {
_record( _global , op , lockType , micros , command );
}
- void Top::_record( CollectionData& c , int op , int lockType , long long micros , bool command ){
+ void Top::_record( CollectionData& c , int op , int lockType , long long micros , bool command ) {
c.total.inc( micros );
-
+
if ( lockType > 0 )
c.writeLock.inc( micros );
else if ( lockType < 0 )
c.readLock.inc( micros );
-
- switch ( op ){
+
+ switch ( op ) {
case 0:
// use 0 for unknown, non-specific
break;
@@ -91,7 +89,7 @@ namespace mongo {
break;
case dbKillCursors:
break;
- case opReply:
+ case opReply:
case dbMsg:
log() << "unexpected op in Top::record: " << op << endl;
break;
@@ -101,31 +99,31 @@ namespace mongo {
}
- void Top::collectionDropped( const string& ns ){
+ void Top::collectionDropped( const string& ns ) {
//cout << "collectionDropped: " << ns << endl;
scoped_lock lk(_lock);
_usage.erase(ns);
_lastDropped = ns;
}
-
+
void Top::cloneMap(Top::UsageMap& out) const {
scoped_lock lk(_lock);
out = _usage;
}
- void Top::append( BSONObjBuilder& b ){
+ void Top::append( BSONObjBuilder& b ) {
scoped_lock lk( _lock );
_appendToUsageMap( b , _usage );
}
void Top::_appendToUsageMap( BSONObjBuilder& b , const UsageMap& map ) const {
- for ( UsageMap::const_iterator i=map.begin(); i!=map.end(); i++ ){
+ for ( UsageMap::const_iterator i=map.begin(); i!=map.end(); i++ ) {
BSONObjBuilder bb( b.subobjStart( i->first ) );
-
+
const CollectionData& coll = i->second;
-
+
_appendStatsEntry( b , "total" , coll.total );
-
+
_appendStatsEntry( b , "readLock" , coll.readLock );
_appendStatsEntry( b , "writeLock" , coll.writeLock );
@@ -135,7 +133,7 @@ namespace mongo {
_appendStatsEntry( b , "update" , coll.update );
_appendStatsEntry( b , "remove" , coll.remove );
_appendStatsEntry( b , "commands" , coll.commands );
-
+
bb.done();
}
}
@@ -149,14 +147,14 @@ namespace mongo {
class TopCmd : public Command {
public:
- TopCmd() : Command( "top", true ){}
+ TopCmd() : Command( "top", true ) {}
virtual bool slaveOk() const { return true; }
virtual bool adminOnly() const { return true; }
- virtual LockType locktype() const { return READ; }
+ virtual LockType locktype() const { return READ; }
virtual void help( stringstream& help ) const { help << "usage by collection"; }
- virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl){
+ virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
{
BSONObjBuilder b( result.subobjStart( "totals" ) );
Top::global.append( b );
@@ -164,11 +162,11 @@ namespace mongo {
}
return true;
}
-
+
} topCmd;
Top Top::global;
-
+
TopOld::T TopOld::_snapshotStart = TopOld::currentTime();
TopOld::D TopOld::_snapshotDuration;
TopOld::UsageMap TopOld::_totalUsage;
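
UsageData's diff constructor above clamps deliberately: if the newer counter isn't larger (a rollover or a drop() happened in between), it falls back to the newer value instead of going negative. Standalone:

    // Rollover-safe delta, as in Top::UsageData's diff constructor.
    long long safeDelta(long long older, long long newer) {
        return (newer > older) ? (newer - older) : newer;
    }
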
diff --git a/db/stats/top.h b/db/stats/top.h
index b0efb97eea3..9645ed1a3a6 100644
--- a/db/stats/top.h
+++ b/db/stats/top.h
@@ -32,12 +32,12 @@ namespace mongo {
Top() : _lock("Top") { }
struct UsageData {
- UsageData() : time(0) , count(0){}
+ UsageData() : time(0) , count(0) {}
UsageData( const UsageData& older , const UsageData& newer );
long long time;
long long count;
- void inc( long long micros ){
+ void inc( long long micros ) {
count++;
time += micros;
}
@@ -47,11 +47,11 @@ namespace mongo {
/**
* constructs a diff
*/
- CollectionData(){}
+ CollectionData() {}
CollectionData( const CollectionData& older , const CollectionData& newer );
-
+
UsageData total;
-
+
UsageData readLock;
UsageData writeLock;
@@ -64,7 +64,7 @@ namespace mongo {
};
typedef map<string,CollectionData> UsageMap;
-
+
public:
void record( const string& ns , int op , int lockType , long long micros , bool command );
void append( BSONObjBuilder& b );
@@ -74,10 +74,10 @@ namespace mongo {
public: // static stuff
static Top global;
-
+
private:
- void _appendToUsageMap( BSONObjBuilder& b , const UsageMap& map ) const;
- void _appendStatsEntry( BSONObjBuilder& b , const char * statsName , const UsageData& map ) const;
+ void _appendToUsageMap( BSONObjBuilder& b , const UsageMap& map ) const;
+ void _appendStatsEntry( BSONObjBuilder& b , const char * statsName , const UsageData& map ) const;
void _record( CollectionData& c , int op , int lockType , long long micros , bool command );
mutable mongo::mutex _lock;
@@ -95,9 +95,9 @@ namespace mongo {
typedef boost::tuple< D, int, int, int > UsageData;
public:
TopOld() : _read(false), _write(false) { }
-
+
/* these are used to record activity: */
-
+
void clientStart( const char *client ) {
clientStop();
_currentStart = currentTime();
@@ -126,11 +126,11 @@ namespace mongo {
/* these are used to fetch the stats: */
- struct Usage {
- string ns;
- D time;
- double pct;
- int reads, writes, calls;
+ struct Usage {
+ string ns;
+ D time;
+ double pct;
+ int reads, writes, calls;
};
static void usage( vector< Usage > &res ) {
@@ -141,7 +141,7 @@ namespace mongo {
UsageMap totalUsage;
fillParentNamespaces( snapshot, _snapshot );
fillParentNamespaces( totalUsage, _totalUsage );
-
+
multimap< D, string, more > sorted;
for( UsageMap::iterator i = snapshot.begin(); i != snapshot.end(); ++i )
sorted.insert( make_pair( i->second.get<0>(), i->first ) );
@@ -177,7 +177,8 @@ namespace mongo {
if ( &_snapshot == &_snapshotA ) {
_snapshot = _snapshotB;
_nextSnapshot = _snapshotA;
- } else {
+ }
+ else {
_snapshot = _snapshotA;
_nextSnapshot = _snapshotB;
}
@@ -207,7 +208,7 @@ namespace mongo {
g.get< 1 >()++;
else if ( !_read && _write )
g.get< 2 >()++;
- g.get< 3 >()++;
+ g.get< 3 >()++;
}
static void fillParentNamespaces( UsageMap &to, const UsageMap &from ) {
for( UsageMap::const_iterator i = from.begin(); i != from.end(); ++i ) {
@@ -220,8 +221,8 @@ namespace mongo {
current = current.substr( 0, dot );
inc( to[ current ], i->second );
dot = current.rfind( "." );
- }
- }
+ }
+ }
}
static void inc( UsageData &to, const UsageData &from ) {
to.get<0>() += from.get<0>();
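
fillParentNamespaces above rolls each namespace's usage up into every dotted prefix, so "db.coll.sub" is also counted under "db.coll" and "db". A simplified standalone sketch over plain counters (it skips the trailing-dot special case in the original):

    #include <map>
    #include <string>

    // Credit `count` to ns and to every dotted ancestor of ns,
    // mirroring TopOld::fillParentNamespaces.
    void creditWithParents(std::map<std::string, long long>& to,
                           const std::string& ns, long long count) {
        std::string current = ns;
        to[current] += count;
        std::string::size_type dot = current.rfind('.');
        while (dot != std::string::npos) {
            current = current.substr(0, dot);
            to[current] += count;
            dot = current.rfind('.');
        }
    }
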
diff --git a/db/taskqueue.h b/db/taskqueue.h
index 62bb565d0a4..cfb29739e6b 100644
--- a/db/taskqueue.h
+++ b/db/taskqueue.h
@@ -22,38 +22,38 @@
namespace mongo {
- /** defer work items by queueing them for invocation by another thread. presumption is that
- consumer thread is outside of locks more than the source thread. Additional presumption
- is that several objects or micro-tasks will be queued and that having a single thread
- processing them in batch is hepful as they (in the first use case) use a common data
+ /** defer work items by queueing them for invocation by another thread. presumption is that
+ consumer thread is outside of locks more than the source thread. Additional presumption
+ is that several objects or micro-tasks will be queued and that having a single thread
+ processing them in batch is helpful as they (in the first use case) use a common data
structure that can then be in local cpu caches.
this class is in db/ as it is dbMutex (mongomutex) specific (so far).
- using a functor instead of go() might be more elegant too, once again, would like to test any
+ using a functor instead of go() might be more elegant too, once again, would like to test any
performance differential. also worry that operator() hides things?
MT - copyable "micro task" object we can queue
- must have a static method void MT::go(const MT&)
+ must have a static method void MT::go(const MT&)
see DefInvoke in dbtests/ for an example.
*/
template< class MT >
class TaskQueue {
public:
- TaskQueue() : _which(0), _invokeMutex("deferredinvoker"){ }
+ TaskQueue() : _which(0), _invokeMutex("deferredinvoker") { }
- void defer(MT mt) {
- // only one writer allowed. however the invoke processing below can occur concurrently with
+ void defer(MT mt) {
+ // only one writer allowed. however the invoke processing below can occur concurrently with
// writes (for the most part)
- DEV dbMutex.assertWriteLocked();
+ DEV dbMutex.assertWriteLocked();
_queues[_which].push_back(mt);
}
/** call to process deferrals.
- concurrency: handled herein. multiple threads could call invoke(), but their efforts will be
+ concurrency: handled herein. multiple threads could call invoke(), but their efforts will be
serialized. the common case is that there is a single processor calling invoke().
normally, you call this outside of any lock. but if you want to fully drain the queue,
@@ -67,7 +67,7 @@ namespace mongo {
}
you can also call invoke periodically to do some work and then pick up later on more.
*/
- void invoke() {
+ void invoke() {
{
// flip queueing to the other queue (we are double buffered)
readlock lk;
@@ -92,12 +92,12 @@ namespace mongo {
void _drain(Queue& queue) {
unsigned oldCap = queue.capacity();
- for( typename Queue::iterator i = queue.begin(); i != queue.end(); i++ ) {
+ for( typename Queue::iterator i = queue.begin(); i != queue.end(); i++ ) {
const MT& v = *i;
MT::go(v);
}
queue.clear();
- DEV assert( queue.capacity() == oldCap ); // just checking that clear() doesn't deallocate, we don't want that
+ DEV assert( queue.capacity() == oldCap ); // just checking that clear() doesn't deallocate, we don't want that
}
};
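
A minimal micro-task satisfying the MT contract spelled out in the comment above: copyable, with a static void go(const MT&). The type and its payload are illustrative, not from the tree:

    #include <iostream>

    // Illustrative micro-task for TaskQueue<MT>.
    struct LogLenTask {
        int len;
        explicit LogLenTask(int l) : len(l) {}
        static void go(const LogLenTask& t) {
            // batch-processed on the invoking (consumer) thread
            std::cout << "deferred len: " << t.len << std::endl;
        }
    };

    // producer side (under the write lock): q.defer( LogLenTask(n) );
    // consumer side (outside locks):        q.invoke();
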
diff --git a/db/update.cpp b/db/update.cpp
index 873b3913983..e4f68912b70 100644
--- a/db/update.cpp
+++ b/db/update.cpp
@@ -31,24 +31,25 @@
namespace mongo {
const char* Mod::modNames[] = { "$inc", "$set", "$push", "$pushAll", "$pull", "$pullAll" , "$pop", "$unset" ,
- "$bitand" , "$bitor" , "$bit" , "$addToSet", "$rename", "$rename" };
+ "$bitand" , "$bitor" , "$bit" , "$addToSet", "$rename", "$rename"
+ };
unsigned Mod::modNamesNum = sizeof(Mod::modNames)/sizeof(char*);
bool Mod::_pullElementMatch( BSONElement& toMatch ) const {
-
- if ( elt.type() != Object ){
+
+ if ( elt.type() != Object ) {
// if elt isn't an object, then comparison will work
return toMatch.woCompare( elt , false ) == 0;
}
-
+
if ( matcherOnPrimitive )
return matcher->matches( toMatch.wrap( "" ) );
- if ( toMatch.type() != Object ){
+ if ( toMatch.type() != Object ) {
// looking for an object, so this can't match
return false;
}
-
+
// now we have an object on both sides
return matcher->matches( toMatch.embeddedObject() );
}
@@ -57,18 +58,18 @@ namespace mongo {
void Mod::appendIncremented( Builder& bb , const BSONElement& in, ModState& ms ) const {
BSONType a = in.type();
BSONType b = elt.type();
-
- if ( a == NumberDouble || b == NumberDouble ){
+
+ if ( a == NumberDouble || b == NumberDouble ) {
ms.incType = NumberDouble;
ms.incdouble = elt.numberDouble() + in.numberDouble();
}
- else if ( a == NumberLong || b == NumberLong ){
+ else if ( a == NumberLong || b == NumberLong ) {
ms.incType = NumberLong;
ms.inclong = elt.numberLong() + in.numberLong();
}
else {
int x = elt.numberInt() + in.numberInt();
- if ( x < 0 && elt.numberInt() > 0 && in.numberInt() > 0 ){
+ if ( x < 0 && elt.numberInt() > 0 && in.numberInt() > 0 ) {
// overflow
ms.incType = NumberLong;
ms.inclong = elt.numberLong() + in.numberLong();
@@ -78,32 +79,32 @@ namespace mongo {
ms.incint = elt.numberInt() + in.numberInt();
}
}
-
+
ms.appendIncValue( bb , false );
}
template< class Builder >
void appendUnset( Builder &b ) {
}
-
+
template<>
void appendUnset( BSONArrayBuilder &b ) {
b.appendNull();
}
-
+
template< class Builder >
void Mod::apply( Builder& b , BSONElement in , ModState& ms ) const {
if ( ms.dontApply ) {
return;
}
-
- switch ( op ){
-
+
+ switch ( op ) {
+
case INC: {
appendIncremented( b , in , ms );
break;
}
-
+
case SET: {
_checkForAppending( elt );
b.appendAs( elt , shortFieldName );
@@ -114,13 +115,13 @@ namespace mongo {
appendUnset( b );
break;
}
-
+
case PUSH: {
uassert( 10131 , "$push can only be applied to an array" , in.type() == Array );
BSONObjBuilder bb( b.subarrayStart( shortFieldName ) );
BSONObjIterator i( in.embeddedObject() );
int n=0;
- while ( i.more() ){
+ while ( i.more() ) {
bb.append( i.next() );
n++;
}
@@ -131,31 +132,31 @@ namespace mongo {
bb.done();
break;
}
-
+
case ADDTOSET: {
uassert( 12592 , "$addToSet can only be applied to an array" , in.type() == Array );
BSONObjBuilder bb( b.subarrayStart( shortFieldName ) );
-
+
BSONObjIterator i( in.embeddedObject() );
- int n=0;
+ int n=0;
+
+ if ( isEach() ) {
- if ( isEach() ){
-
BSONElementSet toadd;
parseEach( toadd );
-
- while ( i.more() ){
+
+ while ( i.more() ) {
BSONElement cur = i.next();
bb.append( cur );
- n++;
+ n++;
toadd.erase( cur );
}
-
+
{
BSONObjIterator i( getEach() );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
- if ( toadd.count(e) ){
+ if ( toadd.count(e) ) {
bb.appendAs( e , BSONObjBuilder::numStr( n++ ) );
toadd.erase( e );
}
@@ -167,34 +168,34 @@ namespace mongo {
bool found = false;
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement cur = i.next();
bb.append( cur );
n++;
if ( elt.woCompare( cur , false ) == 0 )
found = true;
}
-
+
if ( ! found )
bb.appendAs( elt , bb.numStr( n ) );
-
+
}
-
+
bb.done();
break;
}
-
+
case PUSH_ALL: {
uassert( 10132 , "$pushAll can only be applied to an array" , in.type() == Array );
uassert( 10133 , "$pushAll has to be passed an array" , elt.type() );
BSONObjBuilder bb( b.subarrayStart( shortFieldName ) );
-
+
BSONObjIterator i( in.embeddedObject() );
int n=0;
- while ( i.more() ){
+ while ( i.more() ) {
bb.append( i.next() );
n++;
}
@@ -202,34 +203,34 @@ namespace mongo {
ms.pushStartSize = n;
i = BSONObjIterator( elt.embeddedObject() );
- while ( i.more() ){
+ while ( i.more() ) {
bb.appendAs( i.next() , bb.numStr( n++ ) );
}
bb.done();
break;
}
-
+
case PULL:
case PULL_ALL: {
uassert( 10134 , "$pull/$pullAll can only be applied to an array" , in.type() == Array );
BSONObjBuilder bb( b.subarrayStart( shortFieldName ) );
-
+
int n = 0;
BSONObjIterator i( in.embeddedObject() );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
bool allowed = true;
- if ( op == PULL ){
+ if ( op == PULL ) {
allowed = ! _pullElementMatch( e );
}
else {
BSONObjIterator j( elt.embeddedObject() );
while( j.more() ) {
BSONElement arrJ = j.next();
- if ( e.woCompare( arrJ, false ) == 0 ){
+ if ( e.woCompare( arrJ, false ) == 0 ) {
allowed = false;
break;
}
@@ -239,7 +240,7 @@ namespace mongo {
if ( allowed )
bb.appendAs( e , bb.numStr( n++ ) );
}
-
+
bb.done();
break;
}
@@ -247,13 +248,13 @@ namespace mongo {
case POP: {
uassert( 10135 , "$pop can only be applied to an array" , in.type() == Array );
BSONObjBuilder bb( b.subarrayStart( shortFieldName ) );
-
+
int n = 0;
BSONObjIterator i( in.embeddedObject() );
- if ( elt.isNumber() && elt.number() < 0 ){
+ if ( elt.isNumber() && elt.number() < 0 ) {
// pop from front
- if ( i.more() ){
+ if ( i.more() ) {
i.next();
n++;
}
@@ -268,7 +269,7 @@ namespace mongo {
while( i.more() ) {
n++;
BSONElement arrI = i.next();
- if ( i.more() ){
+ if ( i.more() ) {
bb.append( arrI );
}
}
@@ -284,23 +285,23 @@ namespace mongo {
uassert( 10136 , "$bit needs an array" , elt.type() == Object );
uassert( 10137 , "$bit can only be applied to numbers" , in.isNumber() );
uassert( 10138 , "$bit can't use a double" , in.type() != NumberDouble );
-
+
int x = in.numberInt();
long long y = in.numberLong();
BSONObjIterator it( elt.embeddedObject() );
- while ( it.more() ){
+ while ( it.more() ) {
BSONElement e = it.next();
uassert( 10139 , "$bit field must be number" , e.isNumber() );
- if ( strcmp( e.fieldName() , "and" ) == 0 ){
- switch( in.type() ){
+ if ( strcmp( e.fieldName() , "and" ) == 0 ) {
+ switch( in.type() ) {
case NumberInt: x = x&e.numberInt(); break;
case NumberLong: y = y&e.numberLong(); break;
default: assert( 0 );
}
}
- else if ( strcmp( e.fieldName() , "or" ) == 0 ){
- switch( in.type() ){
+ else if ( strcmp( e.fieldName() , "or" ) == 0 ) {
+ switch( in.type() ) {
case NumberInt: x = x|e.numberInt(); break;
case NumberLong: y = y|e.numberLong(); break;
default: assert( 0 );
@@ -311,8 +312,8 @@ namespace mongo {
throw UserException( 9016, (string)"unknown bit mod:" + e.fieldName() );
}
}
-
- switch( in.type() ){
+
+ switch( in.type() ) {
case NumberInt: b.append( shortFieldName , x ); break;
case NumberLong: b.append( shortFieldName , y ); break;
default: assert( 0 );
@@ -329,7 +330,7 @@ namespace mongo {
ms.handleRename( b, shortFieldName );
break;
}
-
+
default:
stringstream ss;
ss << "Mod::apply can't handle type: " << op;
@@ -355,12 +356,12 @@ namespace mongo {
}
return !obj.getField( path ).eoo();
}
-
+
auto_ptr<ModSetState> ModSet::prepare(const BSONObj &obj) const {
DEBUGUPDATE( "\t start prepare" );
ModSetState * mss = new ModSetState( obj );
-
-
+
+
// Perform this check first, so that we don't leave a partially modified object on uassert.
for ( ModHolder::const_iterator i = _mods.begin(); i != _mods.end(); ++i ) {
DEBUGUPDATE( "\t\t prepare : " << i->first );
@@ -368,7 +369,7 @@ namespace mongo {
const Mod& m = i->second;
BSONElement e = obj.getFieldDotted(m.fieldName);
-
+
ms.m = &m;
ms.old = e;
@@ -380,7 +381,7 @@ namespace mongo {
}
continue;
}
-
+
if ( m.op == Mod::RENAME_TO ) {
int source = validRenamePath( obj, m.renameFrom() );
if ( source == 1 ) {
@@ -388,29 +389,30 @@ namespace mongo {
uassert( 13490, "$rename target field invalid", target != -1 );
ms.newVal = obj.getFieldDotted( m.renameFrom() );
mss->amIInPlacePossible( false );
- } else {
+ }
+ else {
ms.dontApply = true;
}
- continue;
+ continue;
}
-
+
if ( e.eoo() ) {
mss->amIInPlacePossible( m.op == Mod::UNSET );
continue;
- }
-
+ }
+
switch( m.op ) {
case Mod::INC:
uassert( 10140 , "Cannot apply $inc modifier to non-number", e.isNumber() || e.eoo() );
- if ( mss->amIInPlacePossible( e.isNumber() ) ){
+ if ( mss->amIInPlacePossible( e.isNumber() ) ) {
// check more typing info here
- if ( m.elt.type() != e.type() ){
+ if ( m.elt.type() != e.type() ) {
// if I'm incrementing with a double, then the storage has to be a double
- mss->amIInPlacePossible( m.elt.type() != NumberDouble );
+ mss->amIInPlacePossible( m.elt.type() != NumberDouble );
}
-
+
// check for overflow
- if ( e.type() == NumberInt && e.numberLong() + m.elt.numberLong() > numeric_limits<int>::max() ){
+ if ( e.type() == NumberInt && e.numberLong() + m.elt.numberLong() > numeric_limits<int>::max() ) {
mss->amIInPlacePossible( false );
}
}
@@ -420,7 +422,7 @@ namespace mongo {
mss->amIInPlacePossible( m.elt.type() == e.type() &&
m.elt.valuesize() == e.valuesize() );
break;
-
+
case Mod::PUSH:
case Mod::PUSH_ALL:
uassert( 10141 , "Cannot apply $push/$pushAll modifier to non-array", e.type() == Array || e.eoo() );
@@ -435,7 +437,7 @@ namespace mongo {
BSONElement arrI = i.next();
if ( m.op == Mod::PULL ) {
mss->amIInPlacePossible( ! m._pullElementMatch( arrI ) );
- }
+ }
else if ( m.op == Mod::PULL_ALL ) {
BSONObjIterator j( m.elt.embeddedObject() );
while( mss->_inPlacePossible && j.moreWithEOO() ) {
@@ -454,12 +456,12 @@ namespace mongo {
mss->amIInPlacePossible( e.embeddedObject().isEmpty() );
break;
}
-
+
case Mod::ADDTOSET: {
uassert( 12591 , "Cannot apply $addToSet modifier to non-array", e.type() == Array || e.eoo() );
-
+
BSONObjIterator i( e.embeddedObject() );
- if ( m.isEach() ){
+ if ( m.isEach() ) {
BSONElementSet toadd;
m.parseEach( toadd );
while( i.more() ) {
@@ -472,7 +474,7 @@ namespace mongo {
bool found = false;
while( i.more() ) {
BSONElement arrI = i.next();
- if ( arrI.woCompare( m.elt , false ) == 0 ){
+ if ( arrI.woCompare( m.elt , false ) == 0 ) {
found = true;
break;
}
@@ -481,7 +483,7 @@ namespace mongo {
}
break;
}
-
+
default:
// mods we don't know about shouldn't be done in place
mss->amIInPlacePossible( false );
@@ -489,7 +491,7 @@ namespace mongo {
}
DEBUGUPDATE( "\t mss\n" << mss->toString() << "\t--" );
-
+
return auto_ptr<ModSetState>( mss );
}
@@ -497,8 +499,8 @@ namespace mongo {
if ( dontApply ) {
return;
}
-
- if ( incType ){
+
+ if ( incType ) {
DEBUGUPDATE( "\t\t\t\t\t appendForOpLog inc fieldname: " << m->fieldName << " short:" << m->shortFieldName );
BSONObjBuilder bb( b.subobjStart( "$set" ) );
appendIncValue( bb , true );
@@ -513,14 +515,14 @@ namespace mongo {
bb.done();
return;
}
-
+
if ( m->op == Mod::RENAME_TO ) {
DEBUGUPDATE( "\t\t\t\t\t appendForOpLog RENAME_TO fielName:" << m->fieldName );
BSONObjBuilder bb( b.subobjStart( "$set" ) );
bb.appendAs( newVal, m->fieldName );
return;
}
-
+
const char * name = fixedOpName ? fixedOpName : Mod::modNames[op()];
DEBUGUPDATE( "\t\t\t\t\t appendForOpLog name:" << name << " fixed: " << fixed << " fn: " << m->fieldName );
@@ -528,7 +530,8 @@ namespace mongo {
BSONObjBuilder bb( b.subobjStart( name ) );
if ( fixed ) {
bb.appendAs( *fixed , m->fieldName );
- } else {
+ }
+ else {
bb.appendAs( m->elt , m->fieldName );
}
bb.done();
@@ -542,7 +545,7 @@ namespace mongo {
ss << " fixed: " << fixed;
return ss.str();
}
-
+
template< class Builder >
void ModState::handleRename( Builder &newObjBuilder, const char *shortFieldName ) {
newObjBuilder.appendAs( newVal , shortFieldName );
@@ -550,8 +553,8 @@ namespace mongo {
b.appendAs( newVal, shortFieldName );
assert( _objData.isEmpty() );
_objData = b.obj();
- newVal = _objData.firstElement();
- }
+ newVal = _objData.firstElement();
+ }
void ModSetState::applyModsInPlace( bool isOnDisk ) {
// TODO i think this assert means that we can get rid of the isOnDisk param
@@ -559,13 +562,13 @@ namespace mongo {
DEV assert( isOnDisk == ! _obj.isOwned() );
for ( ModStateHolder::iterator i = _mods.begin(); i != _mods.end(); ++i ) {
- ModState& m = i->second;
-
+ ModState& m = i->second;
+
if ( m.dontApply ) {
continue;
}
-
- switch ( m.m->op ){
+
+ switch ( m.m->op ) {
case Mod::UNSET:
case Mod::PULL:
case Mod::PULL_ALL:
@@ -574,7 +577,7 @@ namespace mongo {
case Mod::RENAME_TO:
// this should have been handled by prepare
break;
- // [dm] the BSONElementManipulator statements below are for replication (correct?)
+ // [dm] the BSONElementManipulator statements below are for replication (correct?)
case Mod::INC:
if ( isOnDisk )
m.m->IncrementMe( m.old );
@@ -610,15 +613,15 @@ namespace mongo {
empty = false;
}
if ( empty )
- fields[ base + top.fieldName() ] = top;
+ fields[ base + top.fieldName() ] = top;
}
-
+
template< class Builder >
- void ModSetState::_appendNewFromMods( const string& root , ModState& m , Builder& b , set<string>& onedownseen ){
+ void ModSetState::_appendNewFromMods( const string& root , ModState& m , Builder& b , set<string>& onedownseen ) {
const char * temp = m.fieldName();
temp += root.size();
const char * dot = strchr( temp , '.' );
- if ( dot ){
+ if ( dot ) {
string nr( m.fieldName() , 0 , 1 + ( dot - m.fieldName() ) );
string nf( temp , 0 , dot - temp );
if ( onedownseen.count( nf ) )
@@ -631,40 +634,41 @@ namespace mongo {
else {
appendNewFromMod( m , b );
}
-
+
}
-
+
template< class Builder >
- void ModSetState::createNewFromMods( const string& root , Builder& b , const BSONObj &obj ){
+ void ModSetState::createNewFromMods( const string& root , Builder& b , const BSONObj &obj ) {
DEBUGUPDATE( "\t\t createNewFromMods root: " << root );
BSONObjIteratorSorted es( obj );
BSONElement e = es.next();
-
+
ModStateHolder::iterator m = _mods.lower_bound( root );
StringBuilder buf(root.size() + 2 );
buf << root << (char)255;
ModStateHolder::iterator mend = _mods.lower_bound( buf.str() );
-
+
set<string> onedownseen;
-
- while ( e.type() && m != mend ){
+
+ while ( e.type() && m != mend ) {
string field = root + e.fieldName();
FieldCompareResult cmp = compareDottedFieldNames( m->second.m->fieldName , field );
DEBUGUPDATE( "\t\t\t field:" << field << "\t mod:" << m->second.m->fieldName << "\t cmp:" << cmp << "\t short: " << e.fieldName() );
-
- switch ( cmp ){
-
+
+ switch ( cmp ) {
+
case LEFT_SUBFIELD: { // Mod is embeddeed under this element
uassert( 10145 , str::stream() << "LEFT_SUBFIELD only supports Object: " << field << " not: " << e.type() , e.type() == Object || e.type() == Array );
- if ( onedownseen.count( e.fieldName() ) == 0 ){
+ if ( onedownseen.count( e.fieldName() ) == 0 ) {
onedownseen.insert( e.fieldName() );
if ( e.type() == Object ) {
BSONObjBuilder bb( b.subobjStart( e.fieldName() ) );
stringstream nr; nr << root << e.fieldName() << ".";
createNewFromMods( nr.str() , bb , e.embeddedObject() );
- bb.done();
- } else {
+ bb.done();
+ }
+ else {
BSONArrayBuilder ba( b.subarrayStart( e.fieldName() ) );
stringstream nr; nr << root << e.fieldName() << ".";
createNewFromMods( nr.str() , ba , e.embeddedObject() );
@@ -700,22 +704,22 @@ namespace mongo {
e = es.next();
continue;
case RIGHT_SUBFIELD:
- massert( 10399 , "ModSet::createNewFromMods - RIGHT_SUBFIELD should be impossible" , 0 );
+ massert( 10399 , "ModSet::createNewFromMods - RIGHT_SUBFIELD should be impossible" , 0 );
break;
default:
massert( 10400 , "unhandled case" , 0 );
}
}
-
+
// finished looping the mods, just adding the rest of the elements
- while ( e.type() ){
+ while ( e.type() ) {
DEBUGUPDATE( "\t\t\t copying: " << e.fieldName() );
b.append( e ); // if array, ignore field name
e = es.next();
}
-
+
// do mods that don't have fields already
- for ( ; m != mend; m++ ){
+ for ( ; m != mend; m++ ) {
DEBUGUPDATE( "\t\t\t\t appending from mod at end: " << m->second.m->fieldName );
_appendNewFromMods( root , m->second , b , onedownseen );
}
@@ -729,25 +733,25 @@ namespace mongo {
string ModSetState::toString() const {
stringstream ss;
- for ( ModStateHolder::const_iterator i=_mods.begin(); i!=_mods.end(); ++i ){
+ for ( ModStateHolder::const_iterator i=_mods.begin(); i!=_mods.end(); ++i ) {
ss << "\t\t" << i->first << "\t" << i->second.toString() << "\n";
}
return ss.str();
}
- BSONObj ModSet::createNewFromQuery( const BSONObj& query ){
+ BSONObj ModSet::createNewFromQuery( const BSONObj& query ) {
BSONObj newObj;
{
BSONObjBuilder bb;
EmbeddedBuilder eb( &bb );
BSONObjIteratorSorted i( query );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
if ( e.fieldName()[0] == '$' ) // for $atomic and anything else we add
continue;
- if ( e.type() == Object && e.embeddedObject().firstElement().fieldName()[0] == '$' ){
+ if ( e.type() == Object && e.embeddedObject().firstElement().fieldName()[0] == '$' ) {
// this means this is a $gt type filter, so don't make part of the new object
continue;
}
@@ -757,17 +761,17 @@ namespace mongo {
eb.done();
newObj = bb.obj();
}
-
+
auto_ptr<ModSetState> mss = prepare( newObj );
if ( mss->canApplyInPlace() )
mss->applyModsInPlace( false );
else
newObj = mss->createNewFromMods();
-
+
return newObj;
}
-
+
/* get special operations like $inc
{ $inc: { a:1, b:1 } }
{ $set: { a:77 } }
@@ -778,21 +782,21 @@ namespace mongo {
NOTE: MODIFIES source from object!
*/
ModSet::ModSet(
- const BSONObj &from ,
+ const BSONObj &from ,
const set<string>& idxKeys,
const set<string> *backgroundKeys)
: _isIndexed(0) , _hasDynamicArray( false ) {
-
+
BSONObjIterator it(from);
-
+
while ( it.more() ) {
BSONElement e = it.next();
const char *fn = e.fieldName();
-
+
uassert( 10147 , "Invalid modifier specified" + string( fn ), e.type() == Object );
BSONObj j = e.embeddedObject();
DEBUGUPDATE( "\t" << j );
-
+
BSONObjIterator jt(j);
Mod::Op op = opFromStr( fn );
@@ -807,7 +811,7 @@ namespace mongo {
uassert( 10151 , "have conflicting mods in update" , ! haveConflictingMod( fieldName ) );
uassert( 10152 , "Modifier $inc allowed for numbers only", f.isNumber() || op != Mod::INC );
uassert( 10153 , "Modifier $pushAll/pullAll allowed for arrays only", f.type() == Array || ( op != Mod::PUSH_ALL && op != Mod::PULL_ALL ) );
-
+
if ( op == Mod::RENAME_TO ) {
uassert( 13494, "$rename target must be a string", f.type() == String );
const char *target = f.valuestr();
@@ -835,13 +839,13 @@ namespace mongo {
to.setFieldName( target );
updateIsIndexed( to, idxKeys, backgroundKeys );
_mods[ to.fieldName ] = to;
-
+
DEBUGUPDATE( "\t\t " << fieldName << "\t" << from.fieldName << "\t" << to.fieldName );
continue;
}
-
+
_hasDynamicArray = _hasDynamicArray || strstr( fieldName , ".$" ) > 0;
-
+
Mod m;
m.init( op , f );
m.setFieldName( f.fieldName() );
@@ -858,10 +862,10 @@ namespace mongo {
ModSet * n = new ModSet();
n->_isIndexed = _isIndexed;
n->_hasDynamicArray = _hasDynamicArray;
- for ( ModHolder::const_iterator i=_mods.begin(); i!=_mods.end(); i++ ){
+ for ( ModHolder::const_iterator i=_mods.begin(); i!=_mods.end(); i++ ) {
string s = i->first;
size_t idx = s.find( ".$" );
- if ( idx == string::npos ){
+ if ( idx == string::npos ) {
n->_mods[s] = i->second;
continue;
}
@@ -875,7 +879,7 @@ namespace mongo {
}
return n;
}
-
+
void checkNoMods( BSONObj o ) {
BSONObjIterator i( o );
while( i.moreWithEOO() ) {
@@ -885,10 +889,10 @@ namespace mongo {
uassert( 10154 , "Modifiers and non-modifiers cannot be mixed", e.fieldName()[ 0 ] != '$' );
}
}
-
+
class UpdateOp : public MultiCursor::CursorOp {
public:
- UpdateOp( bool hasPositionalField ) : _nscanned(), _hasPositionalField( hasPositionalField ){}
+ UpdateOp( bool hasPositionalField ) : _nscanned(), _hasPositionalField( hasPositionalField ) {}
virtual void _init() {
_c = qp().newCursor();
if ( ! _c->ok() ) {
@@ -900,14 +904,14 @@ namespace mongo {
_cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , _c , qp().ns() ) );
}
return _cc->prepareToYield( _yieldData );
- }
+ }
virtual void recoverFromYield() {
if ( !ClientCursor::recoverFromYield( _yieldData ) ) {
_c.reset();
_cc.reset();
massert( 13339, "cursor dropped during update", false );
}
- }
+ }
virtual long long nscanned() {
assert( _c.get() );
return _c->nscanned();
@@ -945,60 +949,59 @@ namespace mongo {
uassert( 12522 , "$ operator made object too large" , newObj.objsize() <= BSONObjMaxUserSize );
}
- /* note: this is only (as-is) called for
+ /* note: this is only (as-is) called for
- not multi
- not mods is indexed
- not upsert
*/
- static UpdateResult _updateById(bool isOperatorUpdate, int idIdxNo, ModSet *mods, int profile, NamespaceDetails *d,
+ static UpdateResult _updateById(bool isOperatorUpdate, int idIdxNo, ModSet *mods, int profile, NamespaceDetails *d,
NamespaceDetailsTransient *nsdt,
- bool god, const char *ns,
- const BSONObj& updateobj, BSONObj patternOrig, bool logop, OpDebug& debug)
- {
+ bool god, const char *ns,
+ const BSONObj& updateobj, BSONObj patternOrig, bool logop, OpDebug& debug) {
DiskLoc loc;
{
IndexDetails& i = d->idx(idIdxNo);
BSONObj key = i.getKeyFromQuery( patternOrig );
loc = i.head.btree()->findSingle(i, i.head, key);
- if( loc.isNull() ) {
+ if( loc.isNull() ) {
// no upsert support in _updateById yet, so we are done.
return UpdateResult(0, 0, 0);
}
}
Record *r = loc.rec();
-
+
/* look for $inc etc. note as listed here, all fields to inc must be this type, you can't set some
regular ones at the moment. */
- if ( isOperatorUpdate ) {
- const BSONObj& onDisk = loc.obj();
+ if ( isOperatorUpdate ) {
+ const BSONObj& onDisk = loc.obj();
auto_ptr<ModSetState> mss = mods->prepare( onDisk );
-
+
if( mss->canApplyInPlace() ) {
mss->applyModsInPlace(true);
DEBUGUPDATE( "\t\t\t updateById doing in place update" );
/*if ( profile )
ss << " fastmod "; */
- }
+ }
else {
BSONObj newObj = mss->createNewFromMods();
checkTooLarge(newObj);
assert(nsdt);
- DiskLoc newLoc = theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , newObj.objdata(), newObj.objsize(), debug);
+ DiskLoc newLoc = theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , newObj.objdata(), newObj.objsize(), debug);
}
-
+
if ( logop ) {
DEV assert( mods->size() );
-
+
BSONObj pattern = patternOrig;
if ( mss->haveArrayDepMod() ) {
BSONObjBuilder patternBuilder;
patternBuilder.appendElements( pattern );
mss->appendSizeSpecForArrayDepMods( patternBuilder );
- pattern = patternBuilder.obj();
+ pattern = patternBuilder.obj();
}
-
+
if( mss->needOpLogRewrite() ) {
DEBUGUPDATE( "\t rewrite update: " << mss->getOpLogRewrite() );
logOp("u", ns, mss->getOpLogRewrite() , &pattern );
@@ -1009,7 +1012,7 @@ namespace mongo {
}
return UpdateResult( 1 , 1 , 1);
} // end $operator update
-
+
// regular update
BSONElementManipulator::lookForTimestamps( updateobj );
checkNoMods( updateobj );
@@ -1020,7 +1023,7 @@ namespace mongo {
}
return UpdateResult( 1 , 0 , 1 );
}
-
+
UpdateResult _updateObjects(bool god, const char *ns, const BSONObj& updateobj, BSONObj patternOrig, bool upsert, bool multi, bool logop , OpDebug& debug, RemoveSaver* rs ) {
DEBUGUPDATE( "update: " << ns << " update: " << updateobj << " query: " << patternOrig << " upsert: " << upsert << " multi: " << multi );
Client& client = cc();
@@ -1029,18 +1032,18 @@ namespace mongo {
if ( logLevel > 2 )
ss << " update: " << updateobj.toString();
-
+
/* idea with these here it to make them loop invariant for multi updates, and thus be a bit faster for that case */
/* NOTE: when yield() is added herein, these must be refreshed after each call to yield! */
NamespaceDetails *d = nsdetails(ns); // can be null if an upsert...
NamespaceDetailsTransient *nsdt = &NamespaceDetailsTransient::get_w(ns);
/* end note */
-
+
auto_ptr<ModSet> mods;
bool isOperatorUpdate = updateobj.firstElement().fieldName()[0] == '$';
int modsIsIndexed = false; // really the # of indexes
- if ( isOperatorUpdate ){
- if( d && d->backgroundIndexBuildInProgress ) {
+ if ( isOperatorUpdate ) {
+ if( d && d->backgroundIndexBuildInProgress ) {
set<string> bgKeys;
d->backgroundIdx().keyPattern().getFieldNames(bgKeys);
mods.reset( new ModSet(updateobj, nsdt->indexKeys(), &bgKeys) );
@@ -1060,30 +1063,30 @@ namespace mongo {
}
set<DiskLoc> seenObjects;
-
+
int numModded = 0;
long long nscanned = 0;
MatchDetails details;
shared_ptr< MultiCursor::CursorOp > opPtr( new UpdateOp( mods.get() && mods->hasDynamicArray() ) );
shared_ptr< MultiCursor > c( new MultiCursor( ns, patternOrig, BSONObj(), opPtr, true ) );
-
+
auto_ptr<ClientCursor> cc;
-
+
while ( c->ok() ) {
nscanned++;
bool atomic = c->matcher()->docMatcher().atomic();
-
+
// May have already matched in UpdateOp, but do again to get details set correctly
- if ( ! c->matcher()->matches( c->currKey(), c->currLoc(), &details ) ){
+ if ( ! c->matcher()->matches( c->currKey(), c->currLoc(), &details ) ) {
c->advance();
-
- if ( nscanned % 256 == 0 && ! atomic ){
+
+ if ( nscanned % 256 == 0 && ! atomic ) {
if ( cc.get() == 0 ) {
shared_ptr< Cursor > cPtr = c;
cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , cPtr , ns ) );
}
- if ( ! cc->yield() ){
+ if ( ! cc->yield() ) {
cc.release();
// TODO should we assert or something?
break;
@@ -1094,20 +1097,20 @@ namespace mongo {
}
continue;
}
-
+
Record *r = c->_current();
DiskLoc loc = c->currLoc();
-
+
// TODO Maybe this is unnecessary since we have seenObjects
- if ( c->getsetdup( loc ) ){
+ if ( c->getsetdup( loc ) ) {
c->advance();
continue;
}
-
+
BSONObj js(r);
-
+
BSONObj pattern = patternOrig;
-
+
if ( logop ) {
BSONObjBuilder idPattern;
BSONElement id;
@@ -1123,54 +1126,54 @@ namespace mongo {
uassert( 10157 , "multi-update requires all modified objects to have an _id" , ! multi );
}
}
-
+
if ( profile )
ss << " nscanned:" << nscanned;
-
+
/* look for $inc etc. note as listed here, all fields to inc must be this type, you can't set some
regular ones at the moment. */
if ( isOperatorUpdate ) {
-
- if ( multi ){
+
+ if ( multi ) {
c->advance(); // go to next record in case this one moves
if ( seenObjects.count( loc ) )
continue;
}
-
+
const BSONObj& onDisk = loc.obj();
-
+
ModSet * useMods = mods.get();
bool forceRewrite = false;
-
+
auto_ptr<ModSet> mymodset;
- if ( details.elemMatchKey && mods->hasDynamicArray() ){
+ if ( details.elemMatchKey && mods->hasDynamicArray() ) {
useMods = mods->fixDynamicArray( details.elemMatchKey );
mymodset.reset( useMods );
forceRewrite = true;
}
-
+
auto_ptr<ModSetState> mss = useMods->prepare( onDisk );
-
+
bool indexHack = multi && ( modsIsIndexed || ! mss->canApplyInPlace() );
-
- if ( indexHack ){
+
+ if ( indexHack ) {
if ( cc.get() )
cc->updateLocation();
else
c->noteLocation();
}
-
- if ( modsIsIndexed <= 0 && mss->canApplyInPlace() ){
+
+ if ( modsIsIndexed <= 0 && mss->canApplyInPlace() ) {
mss->applyModsInPlace( true );// const_cast<BSONObj&>(onDisk) );
-
+
DEBUGUPDATE( "\t\t\t doing in place update" );
if ( profile )
ss << " fastmod ";
-
- if ( modsIsIndexed ){
+
+ if ( modsIsIndexed ) {
seenObjects.insert( loc );
}
- }
+ }
else {
if ( rs )
rs->goingToDelete( onDisk );
@@ -1182,20 +1185,20 @@ namespace mongo {
// object moved, need to make sure we don't get it again
seenObjects.insert( newLoc );
}
-
+
}
-
+
if ( logop ) {
DEV assert( mods->size() );
-
+
if ( mss->haveArrayDepMod() ) {
BSONObjBuilder patternBuilder;
patternBuilder.appendElements( pattern );
mss->appendSizeSpecForArrayDepMods( patternBuilder );
- pattern = patternBuilder.obj();
+ pattern = patternBuilder.obj();
}
-
- if ( forceRewrite || mss->needOpLogRewrite() ){
+
+ if ( forceRewrite || mss->needOpLogRewrite() ) {
DEBUGUPDATE( "\t rewrite update: " << mss->getOpLogRewrite() );
logOp("u", ns, mss->getOpLogRewrite() , &pattern );
}
@@ -1208,13 +1211,13 @@ namespace mongo {
return UpdateResult( 1 , 1 , numModded );
if ( indexHack )
c->checkLocation();
-
- if ( nscanned % 64 == 0 && ! atomic ){
+
+ if ( nscanned % 64 == 0 && ! atomic ) {
if ( cc.get() == 0 ) {
shared_ptr< Cursor > cPtr = c;
cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , cPtr , ns ) );
}
- if ( ! cc->yield() ){
+ if ( ! cc->yield() ) {
cc.release();
break;
}
@@ -1222,12 +1225,12 @@ namespace mongo {
break;
}
}
-
+
continue;
}
-
+
uassert( 10158 , "multi update only works with $ operators" , ! multi );
-
+
BSONElementManipulator::lookForTimestamps( updateobj );
checkNoMods( updateobj );
theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , updateobj.objdata(), updateobj.objsize(), debug, god);
@@ -1237,14 +1240,14 @@ namespace mongo {
}
return UpdateResult( 1 , 0 , 1 );
}
-
+
if ( numModded )
return UpdateResult( 1 , 1 , numModded );
-
+
if ( profile )
ss << " nscanned:" << nscanned;
-
+
if ( upsert ) {
if ( updateobj.firstElement().fieldName()[0] == '$' ) {
/* upsert of an $inc. build a default */
@@ -1254,7 +1257,7 @@ namespace mongo {
theDataFileMgr.insertWithObjMod(ns, newObj, god);
if ( logop )
logOp( "i", ns, newObj );
-
+
return UpdateResult( 0 , 1 , 1 , newObj );
}
uassert( 10159 , "multi update only works with $ operators" , ! multi );
@@ -1269,7 +1272,7 @@ namespace mongo {
}
return UpdateResult( 0 , 0 , 0 );
}
-
+
UpdateResult updateObjects(const char *ns, const BSONObj& updateobj, BSONObj patternOrig, bool upsert, bool multi, bool logop , OpDebug& debug ) {
uassert( 10155 , "cannot update reserved $ collection", strchr(ns, '$') == 0 );
if ( strstr(ns, ".system.") ) {
@@ -1278,5 +1281,5 @@ namespace mongo {
}
return _updateObjects(false, ns, updateobj, patternOrig, upsert, multi, logop, debug);
}
-
+
}
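
Annotation: the yield logic in _updateObjects above (the `nscanned % 256 == 0 && !atomic` check during non-matching scans and the `% 64` check after a modification) releases the lock periodically during long multi-updates. A minimal standalone sketch of that cadence follows; Cursor, ClientCursor and yield() here are simplified stand-ins, not the tree's types.

    #include <cstdio>
    #include <memory>

    // Simplified stand-ins; illustration only.
    struct Cursor {
        int pos;
        Cursor() : pos(0) {}
        bool ok() const { return pos < 1000; }
        void advance() { ++pos; }
    };
    struct ClientCursor {
        bool yield() { return true; }   // pretend to release/reacquire the lock
    };

    int main() {
        Cursor c;
        std::unique_ptr<ClientCursor> cc;
        long long nscanned = 0;
        const bool atomic = false;      // a {$atomic:1} match never yields
        while (c.ok()) {
            ++nscanned;
            // ... match and apply the update here ...
            c.advance();
            if (nscanned % 64 == 0 && !atomic) {   // same cadence as above
                if (!cc)
                    cc.reset(new ClientCursor());
                if (!cc->yield()) {
                    // in the real loop the ClientCursor deleted itself on a
                    // failed yield, hence the release() there; we just stop
                    break;
                }
            }
        }
        std::printf("scanned %lld\n", nscanned);
        return 0;
    }
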
diff --git a/db/update.h b/db/update.h
index 38614afcaef..d8396b5e815 100644
--- a/db/update.h
+++ b/db/update.h
@@ -26,7 +26,7 @@ namespace mongo {
class ModState;
class ModSetState;
- /* Used for modifiers such as $inc, $set, $push, ...
+ /* Used for modifiers such as $inc, $set, $push, ...
* stores the info about a single operation
* once created should never be modified
*/
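
Annotation: as a concrete reading of that comment, an update document like { $inc : { "a.b" : 1 } } yields one Mod with op INC, fieldName "a.b" and, per setFieldName() below, shortFieldName "b". A tiny sketch of that naming step, with BSON handling elided:

    #include <cstring>
    #include <cstdio>

    int main() {
        // from { $inc : { "a.b" : 1 } }
        const char* fieldName = "a.b";
        const char* shortFieldName = std::strrchr(fieldName, '.');
        shortFieldName = shortFieldName ? shortFieldName + 1 : fieldName;
        std::printf("op=$inc fieldName=%s shortFieldName=%s\n",
                    fieldName, shortFieldName);
        return 0;
    }
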
@@ -34,23 +34,23 @@ namespace mongo {
// See opFromStr below
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13
enum Op { INC, SET, PUSH, PUSH_ALL, PULL, PULL_ALL , POP, UNSET, BITAND, BITOR , BIT , ADDTOSET, RENAME_FROM, RENAME_TO } op;
-
+
static const char* modNames[];
static unsigned modNamesNum;
const char *fieldName;
const char *shortFieldName;
-
+
BSONElement elt; // x:5 note: this is the actual element from the updateobj
boost::shared_ptr<Matcher> matcher;
bool matcherOnPrimitive;
- void init( Op o , BSONElement& e ){
+ void init( Op o , BSONElement& e ) {
op = o;
elt = e;
- if ( op == PULL && e.type() == Object ){
+ if ( op == PULL && e.type() == Object ) {
BSONObj t = e.embeddedObject();
- if ( t.firstElement().getGtLtOp() == 0 ){
+ if ( t.firstElement().getGtLtOp() == 0 ) {
matcher.reset( new Matcher( t ) );
matcherOnPrimitive = false;
}
@@ -61,7 +61,7 @@ namespace mongo {
}
}
- void setFieldName( const char * s ){
+ void setFieldName( const char * s ) {
fieldName = s;
shortFieldName = strrchr( fieldName , '.' );
if ( shortFieldName )
@@ -69,13 +69,13 @@ namespace mongo {
else
shortFieldName = fieldName;
}
-
+
/**
* @param in increments the actual value inside in (modified in place)
*/
void incrementMe( BSONElement& in ) const {
- BSONElementManipulator manip( in );
- switch ( in.type() ){
+ BSONElementManipulator manip( in );
+ switch ( in.type() ) {
case NumberDouble:
manip.setNumber( elt.numberDouble() + in.numberDouble() );
break;
@@ -87,11 +87,11 @@ namespace mongo {
break;
default:
assert(0);
- }
+ }
}
void IncrementMe( BSONElement& in ) const {
- BSONElementManipulator manip( in );
- switch ( in.type() ){
+ BSONElementManipulator manip( in );
+ switch ( in.type() ) {
case NumberDouble:
manip.SetNumber( elt.numberDouble() + in.numberDouble() );
break;
@@ -103,18 +103,18 @@ namespace mongo {
break;
default:
assert(0);
- }
+ }
}
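
Annotation: incrementMe/IncrementMe above dispatch on the type of the element already stored, so $inc preserves the on-disk numeric width. A standalone sketch of that rule with BSON reduced to a tagged struct (all names here are stand-ins; the real code also converts the increment via numberDouble()/numberLong()/numberInt()):

    #include <cstdio>

    enum NumType { NumberDouble, NumberLong, NumberInt };
    struct Num { NumType type; double d; long long l; int i; };

    // dispatch on the *existing* element's type, as incrementMe does
    void incrementMe(Num& in, const Num& elt) {
        switch (in.type) {
        case NumberDouble: in.d += elt.d; break;
        case NumberLong:   in.l += elt.l; break;
        case NumberInt:    in.i += elt.i; break;
        }
    }

    int main() {
        Num onDisk = { NumberInt, 0, 0, 41 };   // x : 41, an int on disk
        Num inc    = { NumberInt, 0, 0, 1 };    // { $inc : { x : 1 } }
        incrementMe(onDisk, inc);
        std::printf("x=%d (still an int)\n", onDisk.i);   // 42
        return 0;
    }
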
-
+
template< class Builder >
void appendIncremented( Builder& bb , const BSONElement& in, ModState& ms ) const;
-
+
bool operator<( const Mod &other ) const {
return strcmp( fieldName, other.fieldName ) < 0;
}
-
+
bool arrayDep() const {
- switch (op){
+ switch (op) {
case PUSH:
case PUSH_ALL:
case POP:
@@ -123,8 +123,8 @@ namespace mongo {
return false;
}
}
-
- static bool isIndexed( const string& fullName , const set<string>& idxKeys ){
+
+ static bool isIndexed( const string& fullName , const set<string>& idxKeys ) {
const char * fieldName = fullName.c_str();
// check if there is an index key that is a parent of mod
for( const char *dot = strchr( fieldName, '.' ); dot; dot = strchr( dot + 1, '.' ) )
@@ -141,23 +141,23 @@ namespace mongo {
return false;
}
-
+
bool isIndexed( const set<string>& idxKeys ) const {
string fullName = fieldName;
-
+
if ( isIndexed( fullName , idxKeys ) )
return true;
-
- if ( strstr( fieldName , "." ) ){
+
+ if ( strstr( fieldName , "." ) ) {
// check for a.0.1
StringBuilder buf( fullName.size() + 1 );
- for ( size_t i=0; i<fullName.size(); i++ ){
+ for ( size_t i=0; i<fullName.size(); i++ ) {
char c = fullName[i];
-
- if ( c == '$' &&
- i > 0 && fullName[i-1] == '.' &&
- i+1<fullName.size() &&
- fullName[i+1] == '.' ){
+
+ if ( c == '$' &&
+ i > 0 && fullName[i-1] == '.' &&
+ i+1<fullName.size() &&
+ fullName[i+1] == '.' ) {
i++;
continue;
}
@@ -169,10 +169,10 @@ namespace mongo {
if ( ! isdigit( fullName[i+1] ) )
continue;
-
+
bool possible = true;
size_t j=i+2;
- for ( ; j<fullName.size(); j++ ){
+ for ( ; j<fullName.size(); j++ ) {
char d = fullName[j];
if ( d == '.' )
break;
@@ -181,7 +181,7 @@ namespace mongo {
possible = false;
break;
}
-
+
if ( possible )
i = j;
}
@@ -192,25 +192,25 @@ namespace mongo {
return false;
}
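
Annotation: the isIndexed() overloads above answer whether a mod on a dotted path touches any index key: a mod on "a" must count when "a.b" is indexed, and a mod on "a.b.c" must count when "a.b" is. The original's equality/child handling falls partly in unshown context, so the following is a simplified approximation with the same intent (std::set in place of the real key set; the function name is hypothetical):

    #include <cstdio>
    #include <cstring>
    #include <set>
    #include <string>

    bool touchesIndex(const std::string& fullName, const std::set<std::string>& idxKeys) {
        const char* fieldName = fullName.c_str();
        // index key that is a strict parent of the mod, e.g. key "a" vs mod "a.b"
        for (const char* dot = std::strchr(fieldName, '.'); dot; dot = std::strchr(dot + 1, '.'))
            if (idxKeys.count(std::string(fieldName, dot - fieldName)))
                return true;
        // exact match, or an index key the mod is a parent of, e.g. key "a.b" vs mod "a"
        std::set<std::string>::const_iterator it = idxKeys.lower_bound(fullName);
        return it != idxKeys.end() &&
               it->compare(0, fullName.size(), fullName) == 0 &&
               (it->size() == fullName.size() || (*it)[fullName.size()] == '.');
    }

    int main() {
        std::set<std::string> idx;
        idx.insert("a.b");
        std::printf("%d %d %d\n",
                    (int)touchesIndex("a", idx),       // 1: parent of indexed a.b
                    (int)touchesIndex("a.b.c", idx),   // 1: child of indexed a.b
                    (int)touchesIndex("c", idx));      // 0
        return 0;
    }
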
-
+
template< class Builder >
void apply( Builder& b , BSONElement in , ModState& ms ) const;
-
+
/**
* @return true iff toMatch should be removed from the array
*/
bool _pullElementMatch( BSONElement& toMatch ) const;
void _checkForAppending( const BSONElement& e ) const {
- if ( e.type() == Object ){
+ if ( e.type() == Object ) {
// this is a tiny bit slow, but rare and important
// only when setting something TO an object, not setting something in an object
- // and it checks for { $set : { x : { 'a.b' : 1 } } }
+ // and it checks for { $set : { x : { 'a.b' : 1 } } }
// which we feel has been common
uassert( 12527 , "not okForStorage" , e.embeddedObject().okForStorage() );
}
}
-
+
bool isEach() const {
if ( elt.type() != Object )
return false;
@@ -223,14 +223,14 @@ namespace mongo {
BSONObj getEach() const {
return elt.embeddedObjectUserCheck().firstElement().embeddedObjectUserCheck();
}
-
+
void parseEach( BSONElementSet& s ) const {
BSONObjIterator i(getEach());
- while ( i.more() ){
+ while ( i.more() ) {
s.insert( i.next() );
}
}
-
+
const char *renameFrom() const {
massert( 13492, "mod must be RENAME_TO type", op == Mod::RENAME_TO );
return elt.fieldName();
@@ -248,7 +248,7 @@ namespace mongo {
bool _hasDynamicArray;
static void extractFields( map< string, BSONElement > &fields, const BSONElement &top, const string &base );
-
+
FieldCompareResult compare( const ModHolder::iterator &m, map< string, BSONElement >::iterator &p, const map< string, BSONElement >::iterator &pEnd ) const {
bool mDone = ( m == _mods.end() );
bool pDone = ( p == pEnd );
@@ -264,11 +264,11 @@ namespace mongo {
return compareDottedFieldNames( m->first, p->first.c_str() );
}
-
+
bool mayAddEmbedded( map< string, BSONElement > &existing, string right ) {
for( string left = EmbeddedBuilder::splitDot( right );
- left.length() > 0 && left[ left.length() - 1 ] != '.';
- left += "." + EmbeddedBuilder::splitDot( right ) ) {
+ left.length() > 0 && left[ left.length() - 1 ] != '.';
+ left += "." + EmbeddedBuilder::splitDot( right ) ) {
if ( existing.count( left ) > 0 && existing[ left ].type() != Object )
return false;
if ( haveModForField( left.c_str() ) )
@@ -278,7 +278,7 @@ namespace mongo {
}
static Mod::Op opFromStr( const char *fn ) {
assert( fn[0] == '$' );
- switch( fn[1] ){
+ switch( fn[1] ) {
case 'i': {
if ( fn[2] == 'n' && fn[3] == 'c' && fn[4] == 0 )
return Mod::INC;
@@ -290,14 +290,14 @@ namespace mongo {
break;
}
case 'p': {
- if ( fn[2] == 'u' ){
- if ( fn[3] == 's' && fn[4] == 'h' ){
+ if ( fn[2] == 'u' ) {
+ if ( fn[3] == 's' && fn[4] == 'h' ) {
if ( fn[5] == 0 )
return Mod::PUSH;
if ( fn[5] == 'A' && fn[6] == 'l' && fn[7] == 'l' && fn[8] == 0 )
return Mod::PUSH_ALL;
}
- else if ( fn[3] == 'l' && fn[4] == 'l' ){
+ else if ( fn[3] == 'l' && fn[4] == 'l' ) {
if ( fn[5] == 0 )
return Mod::PULL;
if ( fn[5] == 'A' && fn[6] == 'l' && fn[7] == 'l' && fn[8] == 0 )
@@ -314,7 +314,7 @@ namespace mongo {
break;
}
case 'b': {
- if ( fn[2] == 'i' && fn[3] == 't' ){
+ if ( fn[2] == 'i' && fn[3] == 't' ) {
if ( fn[4] == 0 )
return Mod::BIT;
if ( fn[4] == 'a' && fn[5] == 'n' && fn[6] == 'd' && fn[7] == 0 )
@@ -325,11 +325,11 @@ namespace mongo {
break;
}
case 'a': {
- if ( fn[2] == 'd' && fn[3] == 'd' ){
+ if ( fn[2] == 'd' && fn[3] == 'd' ) {
// add
if ( fn[4] == 'T' && fn[5] == 'o' && fn[6] == 'S' && fn[7] == 'e' && fn[8] == 't' && fn[9] == 0 )
return Mod::ADDTOSET;
-
+
}
break;
}
@@ -344,22 +344,22 @@ namespace mongo {
uassert( 10161 , "Invalid modifier specified " + string( fn ), false );
return Mod::INC;
}
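
Annotation: opFromStr above resolves a modifier name one character at a time rather than via a lookup. A table-driven equivalent for comparison; the enum order mirrors the header, but the name list is reconstructed from the enum and is illustrative rather than authoritative:

    #include <cstring>
    #include <cstdio>

    enum Op { INC, SET, PUSH, PUSH_ALL, PULL, PULL_ALL, POP, UNSET,
              BITAND, BITOR, BIT, ADDTOSET, RENAME_FROM, RENAME_TO, BAD };

    Op opFromName(const char* fn) {
        static const struct { const char* name; Op op; } table[] = {
            { "$inc", INC },         { "$set", SET },
            { "$push", PUSH },       { "$pushAll", PUSH_ALL },
            { "$pull", PULL },       { "$pullAll", PULL_ALL },
            { "$pop", POP },         { "$unset", UNSET },
            { "$bitand", BITAND },   { "$bitor", BITOR },
            { "$bit", BIT },         { "$addToSet", ADDTOSET },
            { "$rename", RENAME_FROM },
        };
        for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); ++i)
            if (std::strcmp(fn, table[i].name) == 0)
                return table[i].op;
        return BAD;   // the real code uasserts "Invalid modifier specified"
    }

    int main() {
        std::printf("%d %d %d\n", opFromName("$inc"),
                    opFromName("$addToSet"), opFromName("$nope"));
        return 0;
    }

The original's per-character switch avoids scanning a table on a hot path; for the valid names the result is the same.
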
-
- ModSet(){}
+
+ ModSet() {}
void updateIsIndexed( const Mod &m, const set<string> &idxKeys, const set<string> *backgroundKeys ) {
if ( m.isIndexed( idxKeys ) ||
- (backgroundKeys && m.isIndexed(*backgroundKeys)) ) {
+ (backgroundKeys && m.isIndexed(*backgroundKeys)) ) {
_isIndexed++;
- }
+ }
}
-
+
public:
-
- ModSet( const BSONObj &from ,
- const set<string>& idxKeys = set<string>(),
- const set<string>* backgroundKeys = 0
- );
+
+ ModSet( const BSONObj &from ,
+ const set<string>& idxKeys = set<string>(),
+ const set<string>* backgroundKeys = 0
+ );
// TODO: this is inefficient - should probably just handle when iterating
ModSet * fixDynamicArray( const char * elemMatchKey ) const;
@@ -371,7 +371,7 @@ namespace mongo {
* doesn't change or modify this ModSet or any underlying Mod
*/
auto_ptr<ModSetState> prepare( const BSONObj& obj ) const;
-
+
/**
* given a query pattern, builds an object suitable for an upsert
* will take the query spec and combine all $ operators
@@ -391,15 +391,15 @@ namespace mongo {
return _mods.find( fieldName ) != _mods.end();
}
- bool haveConflictingMod( const string& fieldName ){
+ bool haveConflictingMod( const string& fieldName ) {
size_t idx = fieldName.find( '.' );
if ( idx == string::npos )
idx = fieldName.size();
-
+
ModHolder::const_iterator start = _mods.lower_bound(fieldName.substr(0,idx));
- for ( ; start != _mods.end(); start++ ){
+ for ( ; start != _mods.end(); start++ ) {
FieldCompareResult r = compareDottedFieldNames( fieldName , start->first );
- switch ( r ){
+ switch ( r ) {
case LEFT_SUBFIELD: return true;
case LEFT_BEFORE: return false;
case SAME: return true;
@@ -409,9 +409,9 @@ namespace mongo {
}
return false;
-
+
}
-
+
};
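
Annotation: haveConflictingMod() above walks the sorted mod map starting from the first path segment and reports a conflict when two dotted names are equal or one is a field-path prefix of the other, e.g. {$set:{a:1}} together with {$set:{"a.b":1}}. A self-contained approximation (compareDottedFieldNames reduced to a prefix test; names here are stand-ins):

    #include <cstdio>
    #include <map>
    #include <string>

    // conflict when paths are equal or one is a dotted-prefix of the other
    static bool conflicts(const std::string& a, const std::string& b) {
        size_t n = a.size() < b.size() ? a.size() : b.size();
        if (a.compare(0, n, b, 0, n) != 0) return false;
        if (a.size() == b.size()) return true;                 // SAME
        const std::string& longer = a.size() > b.size() ? a : b;
        return longer[n] == '.';                               // SUBFIELD
    }

    bool haveConflictingMod(const std::map<std::string, int>& mods, const std::string& f) {
        size_t idx = f.find('.');
        if (idx == std::string::npos) idx = f.size();
        std::map<std::string, int>::const_iterator it = mods.lower_bound(f.substr(0, idx));
        for (; it != mods.end(); ++it) {
            if (conflicts(f, it->first)) return true;
            if (it->first.compare(0, idx, f, 0, idx) != 0) break;  // left the subtree
        }
        return false;
    }

    int main() {
        std::map<std::string, int> mods;
        mods["a.b"] = 1;
        std::printf("%d %d %d\n",
                    (int)haveConflictingMod(mods, "a"),      // 1: parent of a.b
                    (int)haveConflictingMod(mods, "a.b.c"),  // 1: child of a.b
                    (int)haveConflictingMod(mods, "a.c"));   // 0
        return 0;
    }
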
/**
@@ -423,26 +423,26 @@ namespace mongo {
BSONElement old;
BSONElement newVal;
BSONObj _objData;
-
+
const char * fixedOpName;
BSONElement * fixed;
int pushStartSize;
-
+
BSONType incType;
int incint;
double incdouble;
long long inclong;
-
+
bool dontApply;
-
- ModState(){
+
+ ModState() {
fixedOpName = 0;
fixed = 0;
pushStartSize = -1;
incType = EOO;
dontApply = false;
}
-
+
Mod::Op op() const {
return m->op;
}
@@ -450,15 +450,15 @@ namespace mongo {
const char * fieldName() const {
return m->fieldName;
}
-
+
bool needOpLogRewrite() const {
if ( dontApply )
return false;
-
+
if ( fixed || fixedOpName || incType )
return true;
-
- switch( op() ){
+
+ switch( op() ) {
case Mod::RENAME_FROM:
case Mod::RENAME_TO:
return true;
@@ -471,19 +471,19 @@ namespace mongo {
return false;
}
}
-
+
void appendForOpLog( BSONObjBuilder& b ) const;
template< class Builder >
- void apply( Builder& b , BSONElement in ){
+ void apply( Builder& b , BSONElement in ) {
m->apply( b , in , *this );
}
-
+
template< class Builder >
void appendIncValue( Builder& b , bool useFullName ) const {
const char * n = useFullName ? m->fieldName : m->shortFieldName;
- switch ( incType ){
+ switch ( incType ) {
case NumberDouble:
b.append( n , incdouble ); break;
case NumberLong:
@@ -496,11 +496,11 @@ namespace mongo {
}
string toString() const;
-
+
template< class Builder >
void handleRename( Builder &newObjBuilder, const char *shortFieldName );
};
-
+
/**
* this is used to hold state and metadata while applying a ModSet to a BSONObj
* the goal is to make ModSet const so it's re-usable
@@ -516,15 +516,15 @@ namespace mongo {
ModStateHolder _mods;
bool _inPlacePossible;
BSONObj _newFromMods; // keep this data alive, as oplog generation may depend on it
-
- ModSetState( const BSONObj& obj )
- : _obj( obj ) , _inPlacePossible(true){
+
+ ModSetState( const BSONObj& obj )
+ : _obj( obj ) , _inPlacePossible(true) {
}
-
+
/**
* @return if in place is still possible
*/
- bool amIInPlacePossible( bool inPlacePossible ){
+ bool amIInPlacePossible( bool inPlacePossible ) {
if ( ! inPlacePossible )
_inPlacePossible = false;
return _inPlacePossible;
@@ -535,21 +535,21 @@ namespace mongo {
template< class Builder >
void _appendNewFromMods( const string& root , ModState& m , Builder& b , set<string>& onedownseen );
-
+
template< class Builder >
- void appendNewFromMod( ModState& ms , Builder& b ){
+ void appendNewFromMod( ModState& ms , Builder& b ) {
if ( ms.dontApply ) {
return;
}
-
+
//const Mod& m = *(ms.m); // HACK
Mod& m = *((Mod*)(ms.m)); // HACK
-
- switch ( m.op ){
-
- case Mod::PUSH:
- case Mod::ADDTOSET: {
- if ( m.isEach() ){
+
+ switch ( m.op ) {
+
+ case Mod::PUSH:
+ case Mod::ADDTOSET: {
+ if ( m.isEach() ) {
b.appendArray( m.shortFieldName , m.getEach() );
}
else {
@@ -558,19 +558,19 @@ namespace mongo {
arr.done();
}
break;
- }
-
+ }
+
case Mod::PUSH_ALL: {
b.appendAs( m.elt, m.shortFieldName );
break;
- }
-
+ }
+
case Mod::UNSET:
case Mod::PULL:
case Mod::PULL_ALL:
// no-op b/c unset/pull of nothing does nothing
break;
-
+
case Mod::INC:
ms.fixedOpName = "$set";
case Mod::SET: {
@@ -582,20 +582,20 @@ namespace mongo {
case Mod::RENAME_TO:
ms.handleRename( b, m.shortFieldName );
break;
- default:
+ default:
stringstream ss;
ss << "unknown mod in appendNewFromMod: " << m.op;
throw UserException( 9015, ss.str() );
}
-
+
}
public:
-
+
bool canApplyInPlace() const {
return _inPlacePossible;
}
-
+
/**
* modifies the underlying _obj
* @param isOnDisk - true means this is an on disk object, and this update needs to be made durable
@@ -610,9 +610,9 @@ namespace mongo {
for ( ModStateHolder::const_iterator i = _mods.begin(); i != _mods.end(); i++ )
if ( i->second.needOpLogRewrite() )
return true;
- return false;
+ return false;
}
-
+
BSONObj getOpLogRewrite() const {
BSONObjBuilder b;
for ( ModStateHolder::const_iterator i = _mods.begin(); i != _mods.end(); i++ )
@@ -630,7 +630,7 @@ namespace mongo {
void appendSizeSpecForArrayDepMods( BSONObjBuilder &b ) const {
for ( ModStateHolder::const_iterator i = _mods.begin(); i != _mods.end(); i++ ) {
const ModState& m = i->second;
- if ( m.m->arrayDep() ){
+ if ( m.m->arrayDep() ) {
if ( m.pushStartSize == -1 )
b.appendNull( m.fieldName() );
else
@@ -643,6 +643,6 @@ namespace mongo {
friend class ModSet;
};
-
+
}
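
Annotation: the ModSet/ModSetState split above lets one parsed update be prepared against each matched document and applied in place only when every mod fits. A sketch of that two-phase shape with the BSON machinery reduced to a flag (unique_ptr where the tree of this era used auto_ptr; everything here is a stand-in):

    #include <cstdio>
    #include <memory>

    struct ModSetState {
        bool _inPlacePossible;
        explicit ModSetState(bool p) : _inPlacePossible(p) {}
        bool canApplyInPlace() const { return _inPlacePossible; }
    };

    struct ModSet {                     // parsed once, never mutated
        std::unique_ptr<ModSetState> prepare(bool everyModFits) const {
            return std::unique_ptr<ModSetState>(new ModSetState(everyModFits));
        }
    };

    int main() {
        ModSet mods;                        // e.g. { $inc : { n : 1 } }
        for (int doc = 0; doc < 3; ++doc) { // one prepare() per matched document
            std::unique_ptr<ModSetState> mss = mods.prepare(doc != 1);
            if (mss->canApplyInPlace())
                std::printf("doc %d: fastmod (in place)\n", doc);
            else
                std::printf("doc %d: rewrite + move\n", doc);
        }
        return 0;
    }
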
diff --git a/dbtests/background_job_test.cpp b/dbtests/background_job_test.cpp
index 44190a8f145..f2bf7d86244 100644
--- a/dbtests/background_job_test.cpp
+++ b/dbtests/background_job_test.cpp
@@ -31,10 +31,10 @@ namespace BackgroundJobTests {
class IncTester : public mongo::BackgroundJob {
public:
- explicit IncTester( long long millis , bool selfDelete = false )
+ explicit IncTester( long long millis , bool selfDelete = false )
: BackgroundJob(selfDelete), _val(0), _millis(millis) { GLOBAL_val = 0; }
- void waitAndInc( long long millis ){
+ void waitAndInc( long long millis ) {
if ( millis )
mongo::sleepmillis( millis );
++_val;
@@ -48,14 +48,14 @@ namespace BackgroundJobTests {
string name() const { return "IncTester"; }
void run() { waitAndInc( _millis ); }
-
+
private:
int _val;
long long _millis;
};
- class NormalCase {
+ class NormalCase {
public:
void run() {
IncTester tester( 0 /* inc without wait */ );
@@ -85,20 +85,20 @@ namespace BackgroundJobTests {
BackgroundJob* j = new IncTester( 0 /* inc without wait */ , true /* self delete */ );
j->go();
-
- // the background thread should have continued running and this test should pass the
+
+ // the background thread should have continued running and this test should pass the
// heap-checker as well
mongo::sleepmillis( 1000 );
ASSERT_EQUALS( GLOBAL_val, 1 );
}
};
-
- class BackgroundJobSuite : public Suite{
+
+ class BackgroundJobSuite : public Suite {
public:
- BackgroundJobSuite() : Suite( "background_job" ){}
+ BackgroundJobSuite() : Suite( "background_job" ) {}
- void setupTests(){
+ void setupTests() {
add< NormalCase >();
add< TimeOutCase >();
add< SelfDeletingCase >();
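
Annotation: the contract these tests exercise is: subclass BackgroundJob, override run(), call go(), and the work proceeds on its own thread (optionally self-deleting when run() returns). A minimal stand-in built on std::thread; the real class predates C++11 and the self-delete variant is omitted here:

    #include <cstdio>
    #include <thread>

    struct BackgroundJob {
        std::thread t;
        virtual ~BackgroundJob() { if (t.joinable()) t.join(); }
        virtual void run() = 0;
        void go() { t = std::thread([this] { run(); }); }
    };

    struct IncTester : BackgroundJob {
        int val;
        IncTester() : val(0) {}
        void run() { ++val; }
    };

    int main() {
        IncTester j;
        j.go();
        j.t.join();                      // the tests poll GLOBAL_val; join is simpler
        std::printf("val=%d\n", j.val);  // 1; join() makes the write visible
        return 0;
    }
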
diff --git a/dbtests/balancer_policy_tests.cpp b/dbtests/balancer_policy_tests.cpp
index 36010719aaf..6f7c4a5dcd3 100644
--- a/dbtests/balancer_policy_tests.cpp
+++ b/dbtests/balancer_policy_tests.cpp
@@ -35,7 +35,7 @@ namespace BalancerPolicyTests {
class SizeMaxedShardTest {
public:
- void run(){
+ void run() {
BSONObj shard0 = BSON( sf::maxSize(0LL) << lf::currSize(0LL) );
ASSERT( ! BalancerPolicy::isSizeMaxed( shard0 ) );
@@ -52,7 +52,7 @@ namespace BalancerPolicyTests {
class DrainingShardTest {
public:
- void run(){
+ void run() {
BSONObj shard0 = BSON( sf::draining(true) );
ASSERT( BalancerPolicy::isDraining( shard0 ) );
@@ -66,7 +66,7 @@ namespace BalancerPolicyTests {
class BalanceNormalTest {
public:
- void run(){
+ void run() {
// 2 chunks and 0 chunk shards
BalancerPolicy::ShardToChunksMap chunkMap;
vector<BSONObj> chunks;
@@ -84,7 +84,7 @@ namespace BalancerPolicyTests {
BSONObj limits1 = BSON( sf::maxSize(0LL) << lf::currSize(0LL) << sf::draining(false) << lf::hasOpsQueued(false) );
limitsMap["shard0"] = limits0;
limitsMap["shard1"] = limits1;
-
+
BalancerPolicy::ChunkInfo* c = NULL;
c = BalancerPolicy::balance( "ns", limitsMap, chunkMap, 1 );
ASSERT( c );
@@ -93,7 +93,7 @@ namespace BalancerPolicyTests {
class BalanceDrainingTest {
public:
- void run(){
+ void run() {
// one normal, one draining
// 2 chunks and 0 chunk shards
BalancerPolicy::ShardToChunksMap chunkMap;
@@ -112,7 +112,7 @@ namespace BalancerPolicyTests {
BSONObj limits1 = BSON( sf::maxSize(0LL) << lf::currSize(0LL) << sf::draining(false) );
limitsMap["shard0"] = limits0;
limitsMap["shard1"] = limits1;
-
+
BalancerPolicy::ChunkInfo* c = NULL;
c = BalancerPolicy::balance( "ns", limitsMap, chunkMap, 0 );
ASSERT( c );
@@ -124,7 +124,7 @@ namespace BalancerPolicyTests {
class BalanceEndedDrainingTest {
public:
- void run(){
+ void run() {
// 2 chunks and 0 chunk (drain completed) shards
BalancerPolicy::ShardToChunksMap chunkMap;
vector<BSONObj> chunks;
@@ -142,7 +142,7 @@ namespace BalancerPolicyTests {
BSONObj limits1 = BSON( sf::maxSize(0LL) << lf::currSize(0LL) << sf::draining(true) );
limitsMap["shard0"] = limits0;
limitsMap["shard1"] = limits1;
-
+
BalancerPolicy::ChunkInfo* c = NULL;
c = BalancerPolicy::balance( "ns", limitsMap, chunkMap, 0 );
ASSERT( ! c );
@@ -151,7 +151,7 @@ namespace BalancerPolicyTests {
class BalanceImpasseTest {
public:
- void run(){
+ void run() {
// one maxed out, one draining
// 2 chunks and 0 chunk shards
BalancerPolicy::ShardToChunksMap chunkMap;
@@ -172,7 +172,7 @@ namespace BalancerPolicyTests {
limitsMap["shard0"] = limits0;
limitsMap["shard1"] = limits1;
limitsMap["shard2"] = limits2;
-
+
BalancerPolicy::ChunkInfo* c = NULL;
c = BalancerPolicy::balance( "ns", limitsMap, chunkMap, 0 );
ASSERT( ! c );
@@ -186,10 +186,10 @@ namespace BalancerPolicyTests {
class All : public Suite {
public:
- All() : Suite( "balancer_policy" ){
+ All() : Suite( "balancer_policy" ) {
}
- void setupTests(){
+ void setupTests() {
// TODO SERVER-1822
// add< SizeMaxedShardTest >();
// add< DrainingShardTest >();
@@ -197,7 +197,7 @@ namespace BalancerPolicyTests {
// add< BalanceDrainingTest >();
// add< BalanceEndedDrainingTest >();
// add< BalanceImpasseTest >();
- }
- } allTests;
-
+ }
+ } allTests;
+
} // namespace BalancerPolicyTests
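
Annotation: the decision these tests drive can be read as: compare the most- and least-loaded shards and propose one chunk move when the spread exceeds a threshold, skipping ineligible receivers. A heavily simplified sketch of that shape; receiver eligibility (draining, maxSize, ops queued) is reduced to a bool and all names are stand-ins, not BalancerPolicy's API:

    #include <cstdio>
    #include <map>
    #include <string>

    struct MoveSuggestion { std::string from, to; };

    bool balance(const std::map<std::string, int>& chunkCounts,
                 const std::map<std::string, bool>& canReceive,
                 int threshold, MoveSuggestion& out) {
        std::string maxShard, minShard;
        int maxN = -1, minN = 1 << 30;
        for (std::map<std::string, int>::const_iterator i = chunkCounts.begin();
             i != chunkCounts.end(); ++i) {
            if (i->second > maxN) { maxN = i->second; maxShard = i->first; }
            std::map<std::string, bool>::const_iterator r = canReceive.find(i->first);
            bool eligible = r != canReceive.end() && r->second;
            if (eligible && i->second < minN) { minN = i->second; minShard = i->first; }
        }
        if (minN == (1 << 30) || maxN - minN < threshold || maxShard == minShard)
            return false;               // impasse, or already even enough
        out.from = maxShard;
        out.to = minShard;
        return true;
    }

    int main() {
        std::map<std::string, int> counts;
        counts["shard0"] = 2; counts["shard1"] = 0;   // 2 chunks vs 0
        std::map<std::string, bool> recv;
        recv["shard0"] = true; recv["shard1"] = true;
        MoveSuggestion m;
        if (balance(counts, recv, 2, m))
            std::printf("move one chunk %s -> %s\n", m.from.c_str(), m.to.c_str());
        return 0;
    }
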
diff --git a/dbtests/basictests.cpp b/dbtests/basictests.cpp
index a417f05b28c..6b802db0092 100644
--- a/dbtests/basictests.cpp
+++ b/dbtests/basictests.cpp
@@ -51,21 +51,21 @@ namespace BasicTests {
RARELY ++c;
}
};
-
+
class Base64Tests {
public:
-
- void roundTrip( string s ){
+
+ void roundTrip( string s ) {
ASSERT_EQUALS( s , base64::decode( base64::encode( s ) ) );
}
-
- void roundTrip( const unsigned char * _data , int len ){
+
+ void roundTrip( const unsigned char * _data , int len ) {
const char *data = (const char *) _data;
string s = base64::encode( data , len );
string out = base64::decode( s );
ASSERT_EQUALS( out.size() , static_cast<size_t>(len) );
bool broke = false;
- for ( int i=0; i<len; i++ ){
+ for ( int i=0; i<len; i++ ) {
if ( data[i] != out[i] )
broke = true;
}
@@ -79,16 +79,16 @@ namespace BasicTests {
for ( int i=0; i<len; i++ )
cout << hex << ( out[i] & 0xFF ) << dec << " ";
cout << endl;
-
+
ASSERT(0);
}
-
- void run(){
+
+ void run() {
ASSERT_EQUALS( "ZWxp" , base64::encode( "eli" , 3 ) );
ASSERT_EQUALS( "ZWxpb3Rz" , base64::encode( "eliots" , 6 ) );
ASSERT_EQUALS( "ZWxpb3Rz" , base64::encode( "eliots" ) );
-
+
ASSERT_EQUALS( "ZQ==" , base64::encode( "e" , 1 ) );
ASSERT_EQUALS( "ZWw=" , base64::encode( "el" , 2 ) );
@@ -99,10 +99,10 @@ namespace BasicTests {
roundTrip( "eliot" );
roundTrip( "eliots" );
roundTrip( "eliotsz" );
-
+
unsigned char z[] = { 0x1 , 0x2 , 0x3 , 0x4 };
roundTrip( z , 4 );
-
+
unsigned char y[] = {
0x01, 0x10, 0x83, 0x10, 0x51, 0x87, 0x20, 0x92, 0x8B, 0x30,
0xD3, 0x8F, 0x41, 0x14, 0x93, 0x51, 0x55, 0x97, 0x61, 0x96,
@@ -117,15 +117,15 @@ namespace BasicTests {
namespace stringbuildertests {
#define SBTGB(x) ss << (x); sb << (x);
-
+
class Base {
virtual void pop() = 0;
-
+
public:
- Base(){}
- virtual ~Base(){}
+ Base() {}
+ virtual ~Base() {}
- void run(){
+ void run() {
pop();
ASSERT_EQUALS( ss.str() , sb.str() );
}
@@ -133,9 +133,9 @@ namespace BasicTests {
stringstream ss;
StringBuilder sb;
};
-
+
class simple1 : public Base {
- void pop(){
+ void pop() {
SBTGB(1);
SBTGB("yo");
SBTGB(2);
@@ -143,7 +143,7 @@ namespace BasicTests {
};
class simple2 : public Base {
- void pop(){
+ void pop() {
SBTGB(1);
SBTGB("yo");
SBTGB(2);
@@ -156,10 +156,10 @@ namespace BasicTests {
SBTGB( (short)(1231231231231LL) );
}
};
-
+
class reset1 {
public:
- void run(){
+ void run() {
StringBuilder sb;
sb << "1" << "abc" << "5.17";
ASSERT_EQUALS( "1abc5.17" , sb.str() );
@@ -173,7 +173,7 @@ namespace BasicTests {
class reset2 {
public:
- void run(){
+ void run() {
StringBuilder sb;
sb << "1" << "abc" << "5.17";
ASSERT_EQUALS( "1abc5.17" , sb.str() );
@@ -190,7 +190,7 @@ namespace BasicTests {
class sleeptest {
public:
- void run(){
+ void run() {
Timer t;
sleepsecs( 1 );
ASSERT_EQUALS( 1 , t.seconds() );
@@ -204,17 +204,17 @@ namespace BasicTests {
sleepmillis( 1727 );
ASSERT( t.millis() >= 1000 );
ASSERT( t.millis() <= 2500 );
-
+
{
int total = 1200;
int ms = 2;
t.reset();
- for ( int i=0; i<(total/ms); i++ ){
+ for ( int i=0; i<(total/ms); i++ ) {
sleepmillis( ms );
}
{
int x = t.millis();
- if ( x < 1000 || x > 2500 ){
+ if ( x < 1000 || x > 2500 ) {
cout << "sleeptest x: " << x << endl;
ASSERT( x >= 1000 );
ASSERT( x <= 20000 );
@@ -228,12 +228,12 @@ namespace BasicTests {
int micros = 100;
t.reset();
int numSleeps = 1000*(total/micros);
- for ( int i=0; i<numSleeps; i++ ){
+ for ( int i=0; i<numSleeps; i++ ) {
sleepmicros( micros );
}
{
int y = t.millis();
- if ( y < 1000 || y > 2500 ){
+ if ( y < 1000 || y > 2500 ) {
cout << "sleeptest y: " << y << endl;
ASSERT( y >= 1000 );
/* ASSERT( y <= 100000 ); */
@@ -241,9 +241,9 @@ namespace BasicTests {
}
}
#endif
-
+
}
-
+
};
class AssertTests {
@@ -251,15 +251,15 @@ namespace BasicTests {
int x;
- AssertTests(){
+ AssertTests() {
x = 0;
}
- string foo(){
+ string foo() {
x++;
return "";
}
- void run(){
+ void run() {
uassert( -1 , foo() , 1 );
if( x != 0 ) {
ASSERT_EQUALS( 0 , x );
@@ -267,7 +267,7 @@ namespace BasicTests {
try {
uassert( -1 , foo() , 0 );
}
- catch ( ... ){}
+ catch ( ... ) {}
ASSERT_EQUALS( 1 , x );
}
};
@@ -275,13 +275,13 @@ namespace BasicTests {
namespace ArrayTests {
class basic1 {
public:
- void run(){
+ void run() {
FastArray<int> a(100);
a.push_back( 5 );
a.push_back( 6 );
-
+
ASSERT_EQUALS( 2 , a.size() );
-
+
FastArray<int>::iterator i = a.begin();
ASSERT( i != a.end() );
ASSERT_EQUALS( 5 , *i );
@@ -293,10 +293,10 @@ namespace BasicTests {
}
};
};
-
+
class ThreadSafeStringTest {
public:
- void run(){
+ void run() {
ThreadSafeString s;
s = "eliot";
ASSERT_EQUALS( s , "eliot" );
@@ -304,8 +304,8 @@ namespace BasicTests {
ThreadSafeString s2 = s;
ASSERT_EQUALS( s2 , "eliot" );
-
-
+
+
{
string foo;
{
@@ -317,11 +317,11 @@ namespace BasicTests {
}
}
};
-
+
class LexNumCmp {
public:
void run() {
-
+
ASSERT( ! isNumber( (char)255 ) );
ASSERT_EQUALS( 0, lexNumCmp( "a", "a" ) );
@@ -357,7 +357,7 @@ namespace BasicTests {
ASSERT_EQUALS( -1, lexNumCmp( "a1{", "a1{a" ) );
ASSERT_EQUALS( 1, lexNumCmp("21", "11") );
ASSERT_EQUALS( -1, lexNumCmp("11", "21") );
-
+
ASSERT_EQUALS( -1 , lexNumCmp( "a.0" , "a.1" ) );
ASSERT_EQUALS( -1 , lexNumCmp( "a.0.b" , "a.1" ) );
@@ -365,39 +365,39 @@ namespace BasicTests {
ASSERT_EQUALS( -1 , lexNumCmp( "b.0e" , (string("b.") + (char)255).c_str() ) );
ASSERT_EQUALS( -1 , lexNumCmp( "b." , "b.0e" ) );
- ASSERT_EQUALS( 0, lexNumCmp( "238947219478347782934718234", "238947219478347782934718234"));
- ASSERT_EQUALS( 0, lexNumCmp( "000238947219478347782934718234", "238947219478347782934718234"));
- ASSERT_EQUALS( 1, lexNumCmp( "000238947219478347782934718235", "238947219478347782934718234"));
- ASSERT_EQUALS( -1, lexNumCmp( "238947219478347782934718234", "238947219478347782934718234.1"));
- ASSERT_EQUALS( 0, lexNumCmp( "238", "000238"));
- ASSERT_EQUALS( 0, lexNumCmp( "002384", "0002384"));
- ASSERT_EQUALS( 0, lexNumCmp( "00002384", "0002384"));
- ASSERT_EQUALS( 0, lexNumCmp( "0", "0"));
- ASSERT_EQUALS( 0, lexNumCmp( "0000", "0"));
+ ASSERT_EQUALS( 0, lexNumCmp( "238947219478347782934718234", "238947219478347782934718234"));
+ ASSERT_EQUALS( 0, lexNumCmp( "000238947219478347782934718234", "238947219478347782934718234"));
+ ASSERT_EQUALS( 1, lexNumCmp( "000238947219478347782934718235", "238947219478347782934718234"));
+ ASSERT_EQUALS( -1, lexNumCmp( "238947219478347782934718234", "238947219478347782934718234.1"));
+ ASSERT_EQUALS( 0, lexNumCmp( "238", "000238"));
+ ASSERT_EQUALS( 0, lexNumCmp( "002384", "0002384"));
+ ASSERT_EQUALS( 0, lexNumCmp( "00002384", "0002384"));
+ ASSERT_EQUALS( 0, lexNumCmp( "0", "0"));
+ ASSERT_EQUALS( 0, lexNumCmp( "0000", "0"));
ASSERT_EQUALS( 0, lexNumCmp( "0", "000"));
ASSERT_EQUALS( -1, lexNumCmp( "0000", "0.0"));
- ASSERT_EQUALS( 1, lexNumCmp( "2380", "238"));
- ASSERT_EQUALS( 1, lexNumCmp( "2385", "2384"));
- ASSERT_EQUALS( 1, lexNumCmp( "2385", "02384"));
- ASSERT_EQUALS( 1, lexNumCmp( "2385", "002384"));
- ASSERT_EQUALS( -1, lexNumCmp( "123.234.4567", "00238"));
- ASSERT_EQUALS( 0, lexNumCmp( "123.234", "00123.234"));
- ASSERT_EQUALS( 0, lexNumCmp( "a.123.b", "a.00123.b"));
- ASSERT_EQUALS( 1, lexNumCmp( "a.123.b", "a.b.00123.b"));
- ASSERT_EQUALS( -1, lexNumCmp( "a.00.0", "a.0.1"));
- ASSERT_EQUALS( 0, lexNumCmp( "01.003.02", "1.3.2"));
- ASSERT_EQUALS( -1, lexNumCmp( "1.3.2", "10.300.20"));
- ASSERT_EQUALS( 0, lexNumCmp( "10.300.20", "000000000000010.0000300.000000020"));
- ASSERT_EQUALS( 0, lexNumCmp( "0000a", "0a"));
- ASSERT_EQUALS( -1, lexNumCmp( "a", "0a"));
- ASSERT_EQUALS( -1, lexNumCmp( "000a", "001a"));
- ASSERT_EQUALS( 0, lexNumCmp( "010a", "0010a"));
+ ASSERT_EQUALS( 1, lexNumCmp( "2380", "238"));
+ ASSERT_EQUALS( 1, lexNumCmp( "2385", "2384"));
+ ASSERT_EQUALS( 1, lexNumCmp( "2385", "02384"));
+ ASSERT_EQUALS( 1, lexNumCmp( "2385", "002384"));
+ ASSERT_EQUALS( -1, lexNumCmp( "123.234.4567", "00238"));
+ ASSERT_EQUALS( 0, lexNumCmp( "123.234", "00123.234"));
+ ASSERT_EQUALS( 0, lexNumCmp( "a.123.b", "a.00123.b"));
+ ASSERT_EQUALS( 1, lexNumCmp( "a.123.b", "a.b.00123.b"));
+ ASSERT_EQUALS( -1, lexNumCmp( "a.00.0", "a.0.1"));
+ ASSERT_EQUALS( 0, lexNumCmp( "01.003.02", "1.3.2"));
+ ASSERT_EQUALS( -1, lexNumCmp( "1.3.2", "10.300.20"));
+ ASSERT_EQUALS( 0, lexNumCmp( "10.300.20", "000000000000010.0000300.000000020"));
+ ASSERT_EQUALS( 0, lexNumCmp( "0000a", "0a"));
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "0a"));
+ ASSERT_EQUALS( -1, lexNumCmp( "000a", "001a"));
+ ASSERT_EQUALS( 0, lexNumCmp( "010a", "0010a"));
}
};
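
Annotation: the core rule the first assertions above exercise is that runs of digits compare by numeric value with leading zeros ignored. A simplified natural-order comparator capturing just that rule; the real lexNumCmp has further behavior for mixed digit/letter cases (visible in assertions like lexNumCmp("a","0a") == -1) that this sketch deliberately does not reproduce:

    #include <cctype>
    #include <cstdio>

    int naturalCmp(const char* a, const char* b) {
        while (*a && *b) {
            if (std::isdigit((unsigned char)*a) && std::isdigit((unsigned char)*b)) {
                while (*a == '0') ++a;            // ignore leading zeros
                while (*b == '0') ++b;
                const char *ea = a, *eb = b;
                while (std::isdigit((unsigned char)*ea)) ++ea;
                while (std::isdigit((unsigned char)*eb)) ++eb;
                if (ea - a != eb - b)             // shorter digit run is smaller
                    return ea - a < eb - b ? -1 : 1;
                for (; a != ea; ++a, ++b)
                    if (*a != *b) return *a < *b ? -1 : 1;
            }
            else {
                if (*a != *b) return *a < *b ? -1 : 1;
                ++a; ++b;
            }
        }
        return *a == *b ? 0 : (*a ? 1 : -1);
    }

    int main() {
        std::printf("%d %d %d\n",
                    naturalCmp("2385", "002384"),   //  1
                    naturalCmp("0000", "0"),        //  0
                    naturalCmp("11", "21"));        // -1
        return 0;
    }
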
class DatabaseValidNames {
public:
- void run(){
+ void run() {
ASSERT( Database::validDBName( "foo" ) );
ASSERT( ! Database::validDBName( "foo/bar" ) );
ASSERT( ! Database::validDBName( "foo.bar" ) );
@@ -410,14 +410,14 @@ namespace BasicTests {
class DatabaseOwnsNS {
public:
- void run(){
-
+ void run() {
+
bool isNew = false;
// this leaks as ~Database is private
// if that changes, should put this on the stack
Database * db = new Database( "dbtests_basictests_ownsns" , isNew );
assert( isNew );
-
+
ASSERT( db->ownsNS( "dbtests_basictests_ownsns.x" ) );
ASSERT( db->ownsNS( "dbtests_basictests_ownsns.x.y" ) );
ASSERT( ! db->ownsNS( "dbtests_basictests_ownsn.x.y" ) );
@@ -427,16 +427,16 @@ namespace BasicTests {
class NSValidNames {
public:
- void run(){
+ void run() {
ASSERT( isValidNS( "test.foo" ) );
ASSERT( ! isValidNS( "test." ) );
ASSERT( ! isValidNS( "test" ) );
}
};
-
+
class PtrTests {
public:
- void run(){
+ void run() {
scoped_ptr<int> p1 (new int(1));
boost::shared_ptr<int> p2 (new int(2));
scoped_ptr<const int> p3 (new int(3));
@@ -447,7 +447,7 @@ namespace BasicTests {
ASSERT_EQUALS( p2.get() , ptr<int>(p2) );
ASSERT_EQUALS( p2.get() , ptr<int>(p2.get()) ); // T* constructor
ASSERT_EQUALS( p2.get() , ptr<int>(ptr<int>(p2)) ); // copy constructor
- ASSERT_EQUALS( *p2 , *ptr<int>(p2));
+ ASSERT_EQUALS( *p2 , *ptr<int>(p2));
ASSERT_EQUALS( p2.get() , ptr<boost::shared_ptr<int> >(&p2)->get() ); // operator->
//const
@@ -459,14 +459,14 @@ namespace BasicTests {
ASSERT_EQUALS( p4.get() , ptr<const int>(p4.get()) );
ASSERT_EQUALS( p2.get() , ptr<const int>(ptr<const int>(p2)) );
ASSERT_EQUALS( p2.get() , ptr<const int>(ptr<int>(p2)) ); // constizing copy constructor
- ASSERT_EQUALS( *p2 , *ptr<int>(p2));
+ ASSERT_EQUALS( *p2 , *ptr<int>(p2));
ASSERT_EQUALS( p2.get() , ptr<const boost::shared_ptr<int> >(&p2)->get() );
//bool context
ASSERT( ptr<int>(p1) );
ASSERT( !ptr<int>(NULL) );
ASSERT( !ptr<int>() );
-
+
#if 0
// These shouldn't compile
ASSERT_EQUALS( p3.get() , ptr<int>(p3) );
@@ -478,12 +478,12 @@ namespace BasicTests {
struct StringSplitterTest {
- void test( string s ){
+ void test( string s ) {
vector<string> v = StringSplitter::split( s , "," );
ASSERT_EQUALS( s , StringSplitter::join( v , "," ) );
}
- void run(){
+ void run() {
test( "a" );
test( "a,b" );
test( "a,b,c" );
@@ -526,7 +526,7 @@ namespace BasicTests {
class QueueTest {
public:
- void run(){
+ void run() {
BlockingQueue<int> q;
Timer t;
int x;
@@ -538,32 +538,32 @@ namespace BasicTests {
class StrTests {
public:
-
- void run(){
+
+ void run() {
ASSERT_EQUALS( 1u , str::count( "abc" , 'b' ) );
ASSERT_EQUALS( 3u , str::count( "babab" , 'b' ) );
}
-
+
};
-
+
class HostAndPortTests {
public:
- void run(){
+ void run() {
HostAndPort a( "x1" , 1000 );
HostAndPort b( "x1" , 1000 );
HostAndPort c( "x1" , 1001 );
HostAndPort d( "x2" , 1000 );
-
+
ASSERT( a == b );
ASSERT( a != c );
ASSERT( a != d );
-
+
}
};
class RelativePathTest {
public:
- void run(){
+ void run() {
RelativePath a = RelativePath::fromRelativePath( "a" );
RelativePath b = RelativePath::fromRelativePath( "a" );
RelativePath c = RelativePath::fromRelativePath( "b" );
@@ -579,13 +579,13 @@ namespace BasicTests {
class All : public Suite {
public:
- All() : Suite( "basic" ){
+ All() : Suite( "basic" ) {
}
-
- void setupTests(){
+
+ void setupTests() {
add< Rarely >();
add< Base64Tests >();
-
+
add< stringbuildertests::simple1 >();
add< stringbuildertests::simple2 >();
add< stringbuildertests::reset1 >();
@@ -593,7 +593,7 @@ namespace BasicTests {
add< sleeptest >();
add< AssertTests >();
-
+
add< ArrayTests::basic1 >();
add< LexNumCmp >();
@@ -610,11 +610,11 @@ namespace BasicTests {
add< QueueTest >();
add< StrTests >();
-
+
add< HostAndPortTests >();
add< RelativePathTest >();
}
} myall;
-
+
} // namespace BasicTests
diff --git a/dbtests/btreetests.cpp b/dbtests/btreetests.cpp
index 50e17ac64e2..d29f460e645 100644
--- a/dbtests/btreetests.cpp
+++ b/dbtests/btreetests.cpp
@@ -33,8 +33,8 @@ namespace BtreeTests {
// dummy, valid record loc
const DiskLoc recordLoc() {
return DiskLoc( 0, 2 );
- }
-
+ }
+
class Ensure {
public:
Ensure() {
@@ -46,11 +46,11 @@ namespace BtreeTests {
private:
DBDirectClient _c;
};
-
+
class Base : public Ensure {
public:
- Base() :
- _context( ns() ) {
+ Base() :
+ _context( ns() ) {
{
bool f = false;
assert( f = true );
@@ -64,9 +64,9 @@ namespace BtreeTests {
string val( len, ' ' );
for( int i = 0; i < len; ++i ) {
val[ i ] = sub[ i % 16 ];
- }
+ }
return val;
- }
+ }
protected:
const BtreeBucket* bt() {
return id().head.btree();
@@ -125,7 +125,8 @@ namespace BtreeTests {
DiskLoc d;
if ( i == b->nKeys() ) {
d = b->getNextChild();
- } else {
+ }
+ else {
d = const_cast< DiskLoc& >( b->keyNode( i ).prevChildBucket );
}
assert( !d.isNull() );
@@ -140,8 +141,8 @@ namespace BtreeTests {
BSONObj key = BSON( "" << k );
// log() << "key: " << key << endl;
ASSERT( present( key, 1 ) );
- ASSERT( present( key, -1 ) );
- }
+ ASSERT( present( key, -1 ) );
+ }
private:
dblock lk_;
Client::Context _context;
@@ -206,7 +207,7 @@ namespace BtreeTests {
}
virtual void checkSplit() {
ASSERT_EQUALS( 15, child( bt(), 0 )->nKeys() );
- ASSERT_EQUALS( 4, child( bt(), 1 )->nKeys() );
+ ASSERT_EQUALS( 4, child( bt(), 1 )->nKeys() );
}
};
@@ -220,7 +221,7 @@ namespace BtreeTests {
}
virtual void checkSplit() {
ASSERT_EQUALS( 4, child( bt(), 0 )->nKeys() );
- ASSERT_EQUALS( 15, child( bt(), 1 )->nKeys() );
+ ASSERT_EQUALS( 15, child( bt(), 1 )->nKeys() );
}
};
@@ -277,7 +278,7 @@ namespace BtreeTests {
}
void insert( int i ) {
BSONObj k = key( 'b' + 2 * i );
- Base::insert( k );
+ Base::insert( k );
}
};
@@ -299,10 +300,10 @@ namespace BtreeTests {
}
void insert( int i ) {
BSONObj k = key( 'b' + 2 * i );
- Base::insert( k );
- }
+ Base::insert( k );
+ }
};
-
+
class DontReuseUnused : public Base {
public:
void run() {
@@ -321,10 +322,10 @@ namespace BtreeTests {
}
void insert( int i ) {
BSONObj k = key( 'b' + 2 * i );
- Base::insert( k );
- }
+ Base::insert( k );
+ }
};
-
+
class PackUnused : public Base {
public:
void run() {
@@ -348,8 +349,9 @@ namespace BtreeTests {
while( c->ok() ) {
if ( !c->currKeyNode().prevChildBucket.isNull() ) {
toDel.push_back( c->currKey().firstElement().valuestr() );
- } else {
- other.push_back( c->currKey().firstElement().valuestr() );
+ }
+ else {
+ other.push_back( c->currKey().firstElement().valuestr() );
}
c->advance();
}
@@ -369,21 +371,21 @@ namespace BtreeTests {
for ( long long i = 50000; i < 50100; ++i ) {
insert( i );
- }
+ }
int unused2 = 0;
ASSERT_EQUALS( 100, bt()->fullValidate( dl(), order(), &unused2, true ) );
// log() << "old unused: " << unused << ", new unused: " << unused2 << endl;
-//
+//
ASSERT( unused2 <= unused );
}
protected:
void insert( long long n ) {
string val = bigNumString( n );
BSONObj k = BSON( "a" << val );
- Base::insert( k );
- }
+ Base::insert( k );
+ }
};
class DontDropReferenceKey : public PackUnused {
@@ -393,7 +395,7 @@ namespace BtreeTests {
for ( long long i = 0; i < 80; i += 1 ) {
insert( i );
}
-
+
BSONObjBuilder start;
start.appendMinKey( "a" );
BSONObjBuilder end;
@@ -440,8 +442,8 @@ namespace BtreeTests {
}
void insert( int i ) {
BSONObj k = key( 'b' + 2 * i );
- Base::insert( k );
- }
+ Base::insert( k );
+ }
virtual int unindexKeys() = 0;
};
@@ -471,7 +473,7 @@ namespace BtreeTests {
// class MergeBucketsHead : public MergeBuckets {
// virtual BSONObj unindexKey() { return key( 'p' ); }
// };
-
+
class MergeBucketsDontReplaceHead : public Base {
public:
void run() {
@@ -494,8 +496,8 @@ namespace BtreeTests {
}
void insert( int i ) {
BSONObj k = key( 'a' + i );
- Base::insert( k );
- }
+ Base::insert( k );
+ }
};
// Tool to construct custom trees for tests.
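
Annotation: the spec strings passed to ArtificialTree::setTree below, e.g. "{d:{b:{a:null},bb:null,_:{c:null}}}", describe a bucket as key:left-subtree pairs, with "_" naming the bucket's rightmost (next) child and null meaning no child. A sketch that serializes a hand-built two-level bucket back into that notation; Node and toSpec are stand-ins, not the test's types:

    #include <cstdio>
    #include <map>
    #include <string>

    struct Node {
        std::map<std::string, Node*> keys;   // key -> left child (may be null)
        Node* next = nullptr;                // rightmost child, spelled "_"
        std::string toSpec() const {
            std::string s = "{";
            for (std::map<std::string, Node*>::const_iterator i = keys.begin();
                 i != keys.end(); ++i) {
                if (s.size() > 1) s += ",";
                s += i->first + ":" + (i->second ? i->second->toSpec()
                                                 : std::string("null"));
            }
            if (next) s += ",_:" + next->toSpec();
            return s + "}";
        }
    };

    int main() {
        Node left, right, root;
        left.keys["a"] = nullptr;            // leaf bucket holding key a
        right.keys["c"] = nullptr;           // leaf bucket holding key c
        root.keys["b"] = &left;              // b's left child is {a:null}
        root.next = &right;                  // rightmost child is {c:null}
        std::printf("%s\n", root.toSpec().c_str());  // {b:{a:null},_:{c:null}}
        return 0;
    }
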
@@ -530,7 +532,8 @@ namespace BtreeTests {
}
if ( e.fieldName() == string( "_" ) ) {
n->setNext( child );
- } else {
+ }
+ else {
n->push( BSON( "" << expectedKey( e.fieldName() ) ), child );
}
}
@@ -570,19 +573,21 @@ namespace BtreeTests {
ASSERT_EQUALS( expected, kn.key.firstElement().valuestr() );
if ( kn.prevChildBucket.isNull() ) {
ASSERT( e.type() == jstNULL );
- } else {
+ }
+ else {
ASSERT( e.type() == Object );
checkStructure( e.embeddedObject(), id, kn.prevChildBucket );
}
}
if ( n->nextChild.isNull() ) {
// maybe should allow '_' field with null value?
- ASSERT( !j.more() );
- } else {
+ ASSERT( !j.more() );
+ }
+ else {
BSONElement e = j.next();
ASSERT_EQUALS( string( "_" ), e.fieldName() );
ASSERT( e.type() == Object );
- checkStructure( e.embeddedObject(), id, n->nextChild );
+ checkStructure( e.embeddedObject(), id, n->nextChild );
}
ASSERT( !j.more() );
}
@@ -594,7 +599,7 @@ namespace BtreeTests {
bool found;
id.head.btree()->locate( id, id.head, key, Ordering::make(id.keyPattern()), pos, found, recordLoc(), direction );
return found;
- }
+ }
int headerSize() const { return BtreeBucket::headerSize(); }
int packedDataSize( int pos ) const { return BtreeBucket::packedDataSize( pos ); }
void fixParentPtrs( const DiskLoc &thisLoc ) { BtreeBucket::fixParentPtrs( thisLoc ); }
@@ -611,11 +616,11 @@ namespace BtreeTests {
* We could probably refactor the following tests, but it's easier to debug
* them in the present state.
*/
-
+
class MergeBucketsDelInternal : public Base {
public:
void run() {
- ArtificialTree::setTree( "{d:{b:{a:null},bb:null,_:{c:null}},_:{f:{e:null},_:{g:null}}}", id() );
+ ArtificialTree::setTree( "{d:{b:{a:null},bb:null,_:{c:null}},_:{f:{e:null},_:{g:null}}}", id() );
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
@@ -633,30 +638,30 @@ namespace BtreeTests {
class MergeBucketsRightNull : public Base {
public:
void run() {
- ArtificialTree::setTree( "{d:{b:{a:null},bb:null,cc:{c:null}},_:{f:{e:null},h:{g:null}}}", id() );
+ ArtificialTree::setTree( "{d:{b:{a:null},bb:null,cc:{c:null}},_:{f:{e:null},h:{g:null}}}", id() );
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
-
+
BSONObj k = BSON( "" << "bb" );
assert( unindex( k ) );
// dump();
ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
ArtificialTree::checkStructure( "{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}", id() );
- }
+ }
};
-
+
// not yet handling this case
class DontMergeSingleBucket : public Base {
public:
void run() {
- ArtificialTree::setTree( "{d:{b:{a:null},c:null}}", id() );
+ ArtificialTree::setTree( "{d:{b:{a:null},c:null}}", id() );
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), 0, true ) );
- ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
BSONObj k = BSON( "" << "c" );
assert( unindex( k ) );
// dump();
@@ -669,12 +674,12 @@ namespace BtreeTests {
class ParentMergeNonRightToLeft : public Base {
public:
void run() {
- ArtificialTree::setTree( "{d:{b:{a:null},bb:null,cc:{c:null}},i:{f:{e:null},h:{g:null}}}", id() );
+ ArtificialTree::setTree( "{d:{b:{a:null},bb:null,cc:{c:null}},i:{f:{e:null},h:{g:null}}}", id() );
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
-
+
BSONObj k = BSON( "" << "bb" );
assert( unindex( k ) );
// dump();
@@ -682,18 +687,18 @@ namespace BtreeTests {
// child does not currently replace parent in this case
ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
ArtificialTree::checkStructure( "{i:{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}}", id() );
- }
+ }
};
class ParentMergeNonRightToRight : public Base {
public:
void run() {
- ArtificialTree::setTree( "{d:{b:{a:null},cc:{c:null}},i:{f:{e:null},ff:null,h:{g:null}}}", id() );
+ ArtificialTree::setTree( "{d:{b:{a:null},cc:{c:null}},i:{f:{e:null},ff:null,h:{g:null}}}", id() );
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
-
+
BSONObj k = BSON( "" << "ff" );
assert( unindex( k ) );
// dump();
@@ -701,108 +706,108 @@ namespace BtreeTests {
// child does not currently replace parent in this case
ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
ArtificialTree::checkStructure( "{i:{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}}", id() );
- }
+ }
};
-
+
class CantMergeRightNoMerge : public Base {
public:
void run() {
- ArtificialTree::setTree( "{d:{b:{a:null},bb:null,cc:{c:null}},dd:null,_:{f:{e:null},h:{g:null}}}", id() );
+ ArtificialTree::setTree( "{d:{b:{a:null},bb:null,cc:{c:null}},dd:null,_:{f:{e:null},h:{g:null}}}", id() );
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
-
+
BSONObj k = BSON( "" << "bb" );
assert( unindex( k ) );
// dump();
ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
ArtificialTree::checkStructure( "{d:{b:{a:null},cc:{c:null}},dd:null,_:{f:{e:null},h:{g:null}}}", id() );
- }
+ }
};
class CantMergeLeftNoMerge : public Base {
public:
void run() {
- ArtificialTree::setTree( "{c:{b:{a:null}},d:null,_:{f:{e:null},g:null}}", id() );
+ ArtificialTree::setTree( "{c:{b:{a:null}},d:null,_:{f:{e:null},g:null}}", id() );
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 7, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
-
+
BSONObj k = BSON( "" << "g" );
assert( unindex( k ) );
// dump();
ASSERT_EQUALS( 6, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
ArtificialTree::checkStructure( "{c:{b:{a:null}},d:null,_:{f:{e:null}}}", id() );
- }
+ }
};
class MergeOption : public Base {
public:
void run() {
- ArtificialTree::setTree( "{c:{b:{a:null}},f:{e:{d:null},ee:null},_:{h:{g:null}}}", id() );
+ ArtificialTree::setTree( "{c:{b:{a:null}},f:{e:{d:null},ee:null},_:{h:{g:null}}}", id() );
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
-
+
BSONObj k = BSON( "" << "ee" );
assert( unindex( k ) );
// dump();
ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
ArtificialTree::checkStructure( "{c:{b:{a:null}},_:{e:{d:null},f:null,h:{g:null}}}", id() );
- }
+ }
};
class ForceMergeLeft : public Base {
public:
void run() {
- ArtificialTree::setTree( "{c:{b:{a:null}},f:{e:{d:null},ee:null},ff:null,_:{h:{g:null}}}", id() );
+ ArtificialTree::setTree( "{c:{b:{a:null}},f:{e:{d:null},ee:null},ff:null,_:{h:{g:null}}}", id() );
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
-
+
BSONObj k = BSON( "" << "ee" );
assert( unindex( k ) );
// dump();
ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
ArtificialTree::checkStructure( "{f:{b:{a:null},c:null,e:{d:null}},ff:null,_:{h:{g:null}}}", id() );
- }
- };
+ }
+ };
class ForceMergeRight : public Base {
public:
void run() {
- ArtificialTree::setTree( "{c:{b:{a:null}},cc:null,f:{e:{d:null},ee:null},_:{h:{g:null}}}", id() );
+ ArtificialTree::setTree( "{c:{b:{a:null}},cc:null,f:{e:{d:null},ee:null},_:{h:{g:null}}}", id() );
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
-
+
BSONObj k = BSON( "" << "ee" );
assert( unindex( k ) );
// dump();
ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
ArtificialTree::checkStructure( "{c:{b:{a:null}},cc:null,_:{e:{d:null},f:null,h:{g:null}}}", id() );
- }
- };
-
+ }
+ };
+
class RecursiveMerge : public Base {
public:
void run() {
- ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},j:{i:null}}", id() );
+ ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},j:{i:null}}", id() );
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
-
+
BSONObj k = BSON( "" << "c" );
assert( unindex( k ) );
// dump();
@@ -810,36 +815,36 @@ namespace BtreeTests {
ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
// height is not currently reduced in this case
ArtificialTree::checkStructure( "{j:{g:{b:{a:null},d:null,e:null,f:null},h:null,i:null}}", id() );
- }
+ }
};
-
+
class RecursiveMergeRightBucket : public Base {
public:
void run() {
- ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},_:{i:null}}", id() );
+ ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},_:{i:null}}", id() );
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
-
+
BSONObj k = BSON( "" << "c" );
assert( unindex( k ) );
// dump();
ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
ArtificialTree::checkStructure( "{g:{b:{a:null},d:null,e:null,f:null},h:null,i:null}", id() );
- }
+ }
};
class RecursiveMergeDoubleRightBucket : public Base {
public:
void run() {
- ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},_:{f:null}},_:{i:null}}", id() );
+ ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},_:{f:null}},_:{i:null}}", id() );
// dump();
string ns = id().indexNamespace();
ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
-
+
BSONObj k = BSON( "" << "c" );
assert( unindex( k ) );
// dump();
@@ -847,9 +852,9 @@ namespace BtreeTests {
ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
// no recursion currently in this case
ArtificialTree::checkStructure( "{h:{b:{a:null},d:null,e:null,f:null},_:{i:null}}", id() );
- }
+ }
};
-
+
class MergeSizeBase : public Base {
public:
MergeSizeBase() : _count() {}
@@ -866,7 +871,7 @@ namespace BtreeTests {
root->setNext( right );
A* r = A::is( right );
root->fixParentPtrs( dl() );
-
+
ASSERT_EQUALS( bigSize(), bigSize() / 2 * 2 );
fillToExactSize( l, leftSize(), 'a' );
fillToExactSize( r, rightSize(), 'n' );
@@ -887,7 +892,7 @@ namespace BtreeTests {
_count += leftAdditional() + rightAdditional();
// dump();
-
+
initCheck();
string ns = id().indexNamespace();
const char *keys = delKeys();
@@ -901,18 +906,19 @@ namespace BtreeTests {
// dump();
--_count;
}
-
+
// dump();
-
+
int unused = 0;
ASSERT_EQUALS( _count, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
validate();
if ( !merge() ) {
- ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- } else {
- ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
- }
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ }
+ else {
+ ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
+ }
}
protected:
virtual int leftAdditional() const { return 2; }
@@ -957,7 +963,7 @@ namespace BtreeTests {
}
int _count;
};
-
+
class MergeSizeJustRightRight : public MergeSizeBase {
protected:
virtual int rightSize() const { return BtreeBucket::getLowWaterMark() - 1; }
@@ -975,12 +981,12 @@ namespace BtreeTests {
virtual int rightSize() const { return MergeSizeJustRightRight::rightSize() - 1; }
virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() + 1; }
};
-
+
class MergeSizeLeft : public MergeSizeJustRightLeft {
virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() + 1; }
virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize() - 1; }
- };
-
+ };
+
class NoMergeBelowMarkRight : public MergeSizeJustRightRight {
virtual int rightSize() const { return MergeSizeJustRightRight::rightSize() + 1; }
virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() - 1; }
@@ -992,7 +998,7 @@ namespace BtreeTests {
virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize() + 1; }
virtual bool merge() const { return false; }
};
-
+
class MergeSizeRightTooBig : public MergeSizeJustRightLeft {
virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() + 1; }
virtual bool merge() const { return false; }
@@ -1002,7 +1008,7 @@ namespace BtreeTests {
virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() + 1; }
virtual bool merge() const { return false; }
};
-
+
class BalanceOneLeftToRight : public Base {
public:
void run() {
@@ -1016,7 +1022,7 @@ namespace BtreeTests {
// dump();
ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},b:{$10:null,$20:null,$30:null,$50:null,a:null},_:{c:null}}", id() );
+ ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},b:{$10:null,$20:null,$30:null,$50:null,a:null},_:{c:null}}", id() );
}
};
@@ -1033,7 +1039,7 @@ namespace BtreeTests {
// dump();
ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$20:{$1:null,$2:null,$4:null,$10:null},b:{$30:null,$40:null,$50:null,$60:null,$70:null},_:{c:null}}", id() );
+ ArtificialTree::checkStructure( "{$20:{$1:null,$2:null,$4:null,$10:null},b:{$30:null,$40:null,$50:null,$60:null,$70:null},_:{c:null}}", id() );
}
};
@@ -1050,7 +1056,7 @@ namespace BtreeTests {
// dump();
ASSERT_EQUALS( 22, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 14, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$9:{$1:{$0:null},$3:{$2:null},$5:{$4:null},$7:{$6:null},_:{$8:null}},b:{$11:{$10:null},$13:{$12:null},$20:{$14:null},$40:{$35:null},$50:{$45:null}},_:{c:null}}", id() );
+ ArtificialTree::checkStructure( "{$9:{$1:{$0:null},$3:{$2:null},$5:{$4:null},$7:{$6:null},_:{$8:null}},b:{$11:{$10:null},$13:{$12:null},$20:{$14:null},$40:{$35:null},$50:{$45:null}},_:{c:null}}", id() );
}
};
@@ -1067,7 +1073,7 @@ namespace BtreeTests {
// dump();
ASSERT_EQUALS( 24, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 15, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$50:{$1:{$0:null},$3:{$2:null},$20:{$14:null},$30:{$25:null},$40:{$35:null},_:{$45:null}},b:{$60:{$55:null},$70:{$65:null},$80:{$75:null},$90:{$85:null},$100:{$95:null}},_:{c:null}}", id() );
+ ArtificialTree::checkStructure( "{$50:{$1:{$0:null},$3:{$2:null},$20:{$14:null},$30:{$25:null},$40:{$35:null},_:{$45:null}},b:{$60:{$55:null},$70:{$65:null},$80:{$75:null},$90:{$85:null},$100:{$95:null}},_:{c:null}}", id() );
}
};
@@ -1084,8 +1090,8 @@ namespace BtreeTests {
// dump();
ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$10:null,$20:null,$30:null,$50:null,a:null}}", id() );
- }
+ ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$10:null,$20:null,$30:null,$50:null,a:null}}", id() );
+ }
};
class PackEmpty : public Base {
@@ -1115,7 +1121,7 @@ namespace BtreeTests {
}
};
};
-
+
class PackedDataSizeEmpty : public Base {
public:
void run() {
@@ -1137,9 +1143,9 @@ namespace BtreeTests {
ASSERT_EQUALS( 0, t->packedDataSize( zero ) );
ASSERT( !( t->flags & Packed ) );
}
- };
+ };
};
-
+
class BalanceSingleParentKeyPackParent : public Base {
public:
void run() {
@@ -1155,8 +1161,8 @@ namespace BtreeTests {
// dump();
ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$10:null,$20:null,$30:null,$50:null,a:null}}", id() );
- }
+ ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$10:null,$20:null,$30:null,$50:null,a:null}}", id() );
+ }
};
class BalanceSplitParent : public Base {
@@ -1172,8 +1178,8 @@ namespace BtreeTests {
// dump();
ASSERT_EQUALS( 21, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$500:{$30:{$1:null,$2:null,$4:null,$10$10:null,$20:null},$100:{$40:null,$50:null,$60:null,$70:null,$80:null},$200:null,$300:null,$400:null},_:{$600:null,$700:null,$800:null,$900:null,_:{c:null}}}", id() );
- }
+ ArtificialTree::checkStructure( "{$500:{$30:{$1:null,$2:null,$4:null,$10$10:null,$20:null},$100:{$40:null,$50:null,$60:null,$70:null,$80:null},$200:null,$300:null,$400:null},_:{$600:null,$700:null,$800:null,$900:null,_:{c:null}}}", id() );
+ }
};
class RebalancedSeparatorBase : public Base {
@@ -1192,7 +1198,7 @@ namespace BtreeTests {
}
};
};
-
+
class EvenRebalanceLeft : public RebalancedSeparatorBase {
virtual string treeSpec() const { return "{$7:{$1:null,$2$31f:null,$3:null,$4$31f:null,$5:null,$6:null},_:{$8:null,$9:null,$10$31e:null}}"; }
virtual int expectedSeparator() const { return 4; }
@@ -1202,7 +1208,7 @@ namespace BtreeTests {
virtual string treeSpec() const { return "{$6:{$1:null,$2$31f:null,$3:null,$4$31f:null,$5:null},_:{$7:null,$8:null,$9$31e:null,$10:null}}"; }
virtual int expectedSeparator() const { return 4; }
};
-
+
class EvenRebalanceRight : public RebalancedSeparatorBase {
virtual string treeSpec() const { return "{$3:{$1:null,$2$31f:null},_:{$4$31f:null,$5:null,$6:null,$7:null,$8$31e:null,$9:null,$10:null}}"; }
virtual int expectedSeparator() const { return 4; }
@@ -1212,12 +1218,12 @@ namespace BtreeTests {
virtual string treeSpec() const { return "{$4$31f:{$1:null,$2$31f:null,$3:null},_:{$5:null,$6:null,$7$31e:null,$8:null,$9:null,$10:null}}"; }
virtual int expectedSeparator() const { return 4; }
};
-
+
class EvenRebalanceCenter : public RebalancedSeparatorBase {
virtual string treeSpec() const { return "{$5:{$1:null,$2$31f:null,$3:null,$4$31f:null},_:{$6:null,$7$31e:null,$8:null,$9:null,$10:null}}"; }
- virtual int expectedSeparator() const { return 4; }
+ virtual int expectedSeparator() const { return 4; }
};
-
+
class OddRebalanceLeft : public RebalancedSeparatorBase {
virtual string treeSpec() const { return "{$6$31f:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$7:null,$8:null,$9:null,$10:null}}"; }
virtual int expectedSeparator() const { return 4; }
@@ -1232,7 +1238,7 @@ namespace BtreeTests {
virtual string treeSpec() const { return "{$5:{$1:null,$2:null,$3:null,$4:null},_:{$6:null,$7:null,$8:null,$9:null,$10$31f:null}}"; }
virtual int expectedSeparator() const { return 4; }
};
-
+
class RebalanceEmptyRight : public RebalancedSeparatorBase {
virtual string treeSpec() const { return "{$a:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null,$7:null,$8:null,$9:null},_:{$b:null}}"; }
virtual void modTree() {
@@ -1250,7 +1256,7 @@ namespace BtreeTests {
}
virtual int expectedSeparator() const { return 4; }
};
-
+
class NoMoveAtLowWaterMarkRight : public MergeSizeJustRightRight {
virtual int rightSize() const { return MergeSizeJustRightRight::rightSize() + 1; }
virtual void initCheck() { _oldTop = bt()->keyNode( 0 ).key; }
@@ -1275,14 +1281,14 @@ namespace BtreeTests {
protected:
BSONObj _oldTop;
};
-
+
class MoveBelowLowWaterMarkLeft : public NoMoveAtLowWaterMarkLeft {
virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize(); }
virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() + 1; }
// different top means we rebalanced
virtual void validate() { ASSERT( !( _oldTop == bt()->keyNode( 0 ).key ) ); }
};
-
+
class PreferBalanceLeft : public Base {
public:
void run() {
@@ -1296,8 +1302,8 @@ namespace BtreeTests {
// dump();
ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$5:{$1:null,$2:null,$3:null,$4:null},$20:{$6:null,$10:null,$11:null,$13:null,$14:null},_:{$30:null}}", id() );
- }
+ ArtificialTree::checkStructure( "{$5:{$1:null,$2:null,$3:null,$4:null},$20:{$6:null,$10:null,$11:null,$13:null,$14:null},_:{$30:null}}", id() );
+ }
};
class PreferBalanceRight : public Base {
@@ -1313,8 +1319,8 @@ namespace BtreeTests {
// dump();
ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$10:{$1:null},$31:{$11:null,$13:null,$14:null,$20:null},_:{$32:null,$33:null,$34:null,$35:null,$36:null}}", id() );
- }
+ ArtificialTree::checkStructure( "{$10:{$1:null},$31:{$11:null,$13:null,$14:null,$20:null},_:{$32:null,$33:null,$34:null,$35:null,$36:null}}", id() );
+ }
};
class RecursiveMergeThenBalance : public Base {
@@ -1330,10 +1336,10 @@ namespace BtreeTests {
// dump();
ASSERT_EQUALS( 14, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$40:{$8:{$1:null,$2:null,$5:null,$6:null},$10:null,$20:null,$30:null},_:{$50:null,$60:null,$70:null,$80:null,$90:null}}", id() );
+ ArtificialTree::checkStructure( "{$40:{$8:{$1:null,$2:null,$5:null,$6:null},$10:null,$20:null,$30:null},_:{$50:null,$60:null,$70:null,$80:null,$90:null}}", id() );
}
};
-
+
class MergeRightEmpty : public MergeSizeBase {
protected:
virtual int rightAdditional() const { return 1; }
@@ -1341,7 +1347,7 @@ namespace BtreeTests {
virtual const char * delKeys() const { return "lz"; }
virtual int rightSize() const { return 0; }
virtual int leftSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ); }
- };
+ };
class MergeMinRightEmpty : public MergeSizeBase {
protected:
@@ -1350,8 +1356,8 @@ namespace BtreeTests {
virtual const char * delKeys() const { return "z"; }
virtual int rightSize() const { return 0; }
virtual int leftSize() const { return bigSize() + sizeof( _KeyNode ); }
- };
-
+ };
+
class MergeLeftEmpty : public MergeSizeBase {
protected:
virtual int rightAdditional() const { return 1; }
@@ -1359,7 +1365,7 @@ namespace BtreeTests {
virtual const char * delKeys() const { return "zl"; }
virtual int leftSize() const { return 0; }
virtual int rightSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ); }
- };
+ };
class MergeMinLeftEmpty : public MergeSizeBase {
protected:
@@ -1368,8 +1374,8 @@ namespace BtreeTests {
virtual const char * delKeys() const { return "l"; }
virtual int leftSize() const { return 0; }
virtual int rightSize() const { return bigSize() + sizeof( _KeyNode ); }
- };
-
+ };
+
class BalanceRightEmpty : public MergeRightEmpty {
protected:
virtual int leftSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ) + 1; }
@@ -1403,7 +1409,7 @@ namespace BtreeTests {
// dump();
ASSERT_EQUALS( 1, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{b:null}", id() );
+ ArtificialTree::checkStructure( "{b:null}", id() );
}
};
@@ -1420,10 +1426,10 @@ namespace BtreeTests {
// dump();
ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), 0, true ) );
ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{a:null,c:null,d:null}", id() );
+ ArtificialTree::checkStructure( "{a:null,c:null,d:null}", id() );
}
};
-
+
class DelInternal : public Base {
public:
void run() {
@@ -1440,10 +1446,10 @@ namespace BtreeTests {
ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{a:null,b:null,d:null}", id() );
+ ArtificialTree::checkStructure( "{a:null,b:null,d:null}", id() );
}
};
-
+
class DelInternalReplaceWithUnused : public Base {
public:
void run() {
@@ -1463,10 +1469,10 @@ namespace BtreeTests {
ASSERT_EQUALS( 1, unused );
ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
// doesn't discriminate between used and unused
- ArtificialTree::checkStructure( "{a:null,b:null,d:null}", id() );
+ ArtificialTree::checkStructure( "{a:null,b:null,d:null}", id() );
}
};
-
+
class DelInternalReplaceRight : public Base {
public:
void run() {
@@ -1484,10 +1490,10 @@ namespace BtreeTests {
ASSERT_EQUALS( 1, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{b:null}", id() );
+ ArtificialTree::checkStructure( "{b:null}", id() );
}
};
-
+
class DelInternalPromoteKey : public Base {
public:
void run() {
@@ -1505,7 +1511,7 @@ namespace BtreeTests {
ASSERT_EQUALS( 6, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{a:null,e:{c:{b:null},d:null},z:null}", id() );
+ ArtificialTree::checkStructure( "{a:null,e:{c:{b:null},d:null},z:null}", id() );
}
};
@@ -1526,10 +1532,10 @@ namespace BtreeTests {
ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{c:null,_:{e:null,f:null}}", id() );
+ ArtificialTree::checkStructure( "{c:null,_:{e:null,f:null}}", id() );
}
};
-
+
class DelInternalReplacementPrevNonNull : public Base {
public:
void run() {
@@ -1546,10 +1552,10 @@ namespace BtreeTests {
ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 1, unused );
ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{a:null,d:{c:{b:null}},e:null}", id() );
+ ArtificialTree::checkStructure( "{a:null,d:{c:{b:null}},e:null}", id() );
ASSERT( bt()->keyNode( 1 ).recordLoc.getOfs() & 1 ); // check 'unused' key
}
- };
+ };
class DelInternalReplacementNextNonNull : public Base {
public:
@@ -1567,11 +1573,11 @@ namespace BtreeTests {
ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 1, unused );
ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{a:null,_:{c:null,_:{d:null}}}", id() );
+ ArtificialTree::checkStructure( "{a:null,_:{c:null,_:{d:null}}}", id() );
ASSERT( bt()->keyNode( 0 ).recordLoc.getOfs() & 1 ); // check 'unused' key
}
};
-
+
class DelInternalSplitPromoteLeft : public Base {
public:
void run() {
@@ -1588,7 +1594,7 @@ namespace BtreeTests {
ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$60:{$10:null,$20:null,$27:{$23:null,$25:null},$40:null,$50:null},_:{$70:null,$80:null,$90:null,$100:null}}", id() );
+ ArtificialTree::checkStructure( "{$60:{$10:null,$20:null,$27:{$23:null,$25:null},$40:null,$50:null},_:{$70:null,$80:null,$90:null,$100:null}}", id() );
}
};
@@ -1608,16 +1614,16 @@ namespace BtreeTests {
ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), &unused, true ) );
ASSERT_EQUALS( 0, unused );
ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
- ArtificialTree::checkStructure( "{$80:{$10:null,$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},_:{$90:null,$97:{$93:null,$95:null}}}", id() );
+ ArtificialTree::checkStructure( "{$80:{$10:null,$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},_:{$90:null,$97:{$93:null,$95:null}}}", id() );
}
};
-
+
class All : public Suite {
public:
- All() : Suite( "btree" ){
+ All() : Suite( "btree" ) {
}
-
- void setupTests(){
+
+ void setupTests() {
add< Create >();
add< SimpleInsertDelete >();
add< SplitRightHeavyBucket >();
diff --git a/dbtests/clienttests.cpp b/dbtests/clienttests.cpp
index 58287e90bde..317aad92365 100644
--- a/dbtests/clienttests.cpp
+++ b/dbtests/clienttests.cpp
@@ -20,40 +20,40 @@
#include "../client/dbclient.h"
#include "dbtests.h"
#include "../db/concurrency.h"
-
+
namespace ClientTests {
-
+
class Base {
public:
-
- Base( string coll ){
+
+ Base( string coll ) {
_ns = (string)"test." + coll;
}
-
- virtual ~Base(){
+
+ virtual ~Base() {
db.dropCollection( _ns );
}
-
- const char * ns(){ return _ns.c_str(); }
-
+
+ const char * ns() { return _ns.c_str(); }
+
string _ns;
DBDirectClient db;
};
-
+
class DropIndex : public Base {
public:
- DropIndex() : Base( "dropindex" ){}
- void run(){
+ DropIndex() : Base( "dropindex" ) {}
+ void run() {
db.insert( ns() , BSON( "x" << 2 ) );
ASSERT_EQUALS( 1 , db.getIndexes( ns() )->itcount() );
-
+
db.ensureIndex( ns() , BSON( "x" << 1 ) );
ASSERT_EQUALS( 2 , db.getIndexes( ns() )->itcount() );
-
+
db.dropIndex( ns() , BSON( "x" << 1 ) );
ASSERT_EQUALS( 1 , db.getIndexes( ns() )->itcount() );
-
+
db.ensureIndex( ns() , BSON( "x" << 1 ) );
ASSERT_EQUALS( 2 , db.getIndexes( ns() )->itcount() );
@@ -61,18 +61,18 @@ namespace ClientTests {
ASSERT_EQUALS( 1 , db.getIndexes( ns() )->itcount() );
}
};
-
+
class ReIndex : public Base {
public:
- ReIndex() : Base( "reindex" ){}
- void run(){
-
+ ReIndex() : Base( "reindex" ) {}
+ void run() {
+
db.insert( ns() , BSON( "x" << 2 ) );
ASSERT_EQUALS( 1 , db.getIndexes( ns() )->itcount() );
-
+
db.ensureIndex( ns() , BSON( "x" << 1 ) );
ASSERT_EQUALS( 2 , db.getIndexes( ns() )->itcount() );
-
+
db.reIndex( ns() );
ASSERT_EQUALS( 2 , db.getIndexes( ns() )->itcount() );
}
@@ -81,15 +81,15 @@ namespace ClientTests {
class ReIndex2 : public Base {
public:
- ReIndex2() : Base( "reindex2" ){}
- void run(){
-
+ ReIndex2() : Base( "reindex2" ) {}
+ void run() {
+
db.insert( ns() , BSON( "x" << 2 ) );
ASSERT_EQUALS( 1 , db.getIndexes( ns() )->itcount() );
-
+
db.ensureIndex( ns() , BSON( "x" << 1 ) );
ASSERT_EQUALS( 2 , db.getIndexes( ns() )->itcount() );
-
+
BSONObj out;
ASSERT( db.runCommand( "test" , BSON( "reIndex" << "reindex2" ) , out ) );
ASSERT_EQUALS( 2 , out["nIndexes"].number() );
@@ -106,7 +106,7 @@ namespace ClientTests {
for( int i = 0; i < 1111; ++i )
db.insert( ns(), BSON( "a" << i << "b" << longs ) );
db.ensureIndex( ns(), BSON( "a" << 1 << "b" << 1 ) );
-
+
auto_ptr< DBClientCursor > c = db.query( ns(), Query().sort( BSON( "a" << 1 << "b" << 1 ) ) );
ASSERT_EQUALS( 1111, c->itcount() );
}
@@ -164,10 +164,10 @@ namespace ClientTests {
class All : public Suite {
public:
- All() : Suite( "client" ){
+ All() : Suite( "client" ) {
}
- void setupTests(){
+ void setupTests() {
add<DropIndex>();
add<ReIndex>();
add<ReIndex2>();
@@ -175,6 +175,6 @@ namespace ClientTests {
add<PushBack>();
add<Create>();
}
-
+
} all;
}
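
The index-count assertions above all follow one invariant: the first insert into a collection creates the implicit _id index, each distinct ensureIndex() adds exactly one more, and getIndexes() yields one cursor entry per index. A minimal sketch of that pattern against a DBDirectClient (the collection name is illustrative):

    // Sketch: the _id index plus one secondary index, counted as the tests count them.
    DBDirectClient db;
    const char *ns = "test.indexdemo";                      // hypothetical namespace
    db.insert( ns , BSON( "x" << 2 ) );                     // creates the _id index
    ASSERT_EQUALS( 1 , db.getIndexes( ns )->itcount() );    // _id only
    db.ensureIndex( ns , BSON( "x" << 1 ) );                // adds {x:1}
    ASSERT_EQUALS( 2 , db.getIndexes( ns )->itcount() );
    db.dropIndex( ns , BSON( "x" << 1 ) );                  // back to _id only
    ASSERT_EQUALS( 1 , db.getIndexes( ns )->itcount() );
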
diff --git a/dbtests/commandtests.cpp b/dbtests/commandtests.cpp
index fa0014dae1b..fa6204d25fd 100644
--- a/dbtests/commandtests.cpp
+++ b/dbtests/commandtests.cpp
@@ -23,19 +23,19 @@ using namespace mongo;
namespace CommandTests {
// one namespace per command
- namespace FileMD5{
+ namespace FileMD5 {
struct Base {
- Base(){
+ Base() {
db.dropCollection(ns());
db.ensureIndex(ns(), BSON( "files_id" << 1 << "n" << 1 ));
}
const char* ns() { return "test.fs.chunks"; }
-
+
DBDirectClient db;
};
struct Type0 : Base {
- void run(){
+ void run() {
{
BSONObjBuilder b;
b.genOID();
@@ -58,8 +58,8 @@ namespace CommandTests {
ASSERT_EQUALS( string("5eb63bbbe01eeed093cb22bb8f5acdc3") , result["md5"].valuestr() );
}
};
- struct Type2 : Base{
- void run(){
+ struct Type2 : Base {
+ void run() {
{
BSONObjBuilder b;
b.genOID();
@@ -86,13 +86,13 @@ namespace CommandTests {
class All : public Suite {
public:
- All() : Suite( "commands" ){
+ All() : Suite( "commands" ) {
}
- void setupTests(){
+ void setupTests() {
add< FileMD5::Type0 >();
add< FileMD5::Type2 >();
}
-
+
} all;
}
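
For context on the FileMD5 cases: the chunks collection is keyed by (files_id, n), and the server command walks the chunks in n order to produce a single digest; 5eb63bbbe01eeed093cb22bb8f5acdc3 is the MD5 of "hello world". The invocation itself is elided from this hunk, so the command document below is an assumption about its shape, not taken from the test:

    // Assumed shape of the filemd5 invocation (not shown in this hunk).
    BSONObj result;
    ASSERT( db.runCommand( "test" , BSON( "filemd5" << 0 << "root" << "fs" ) , result ) );
    ASSERT_EQUALS( string("5eb63bbbe01eeed093cb22bb8f5acdc3") , result["md5"].valuestr() );
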
diff --git a/dbtests/cursortests.cpp b/dbtests/cursortests.cpp
index 954c8b01a1a..ddd7b03b9b3 100644
--- a/dbtests/cursortests.cpp
+++ b/dbtests/cursortests.cpp
@@ -25,12 +25,12 @@
#include "dbtests.h"
namespace CursorTests {
-
+
namespace BtreeCursorTests {
// The ranges expressed in these tests are impossible given our query
            // syntax, so we exercise them in a hacky way.
-
+
class Base {
protected:
FieldRangeVector *vec( int *vals, int len, int direction = 1 ) {
@@ -40,7 +40,8 @@ namespace CursorTests {
FieldRangeSet s2( "", _objs.back() );
if ( i == 0 ) {
s.range( "a" ) = s2.range( "a" );
- } else {
+ }
+ else {
s.range( "a" ) |= s2.range( "a" );
}
}
@@ -49,7 +50,7 @@ namespace CursorTests {
private:
vector< BSONObj > _objs;
};
-
+
class MultiRange : public Base {
public:
void run() {
@@ -103,7 +104,7 @@ namespace CursorTests {
ASSERT( !c.ok() );
}
};
-
+
class MultiRangeReverse : public Base {
public:
void run() {
@@ -129,7 +130,7 @@ namespace CursorTests {
ASSERT( !c.ok() );
}
};
-
+
class Base2 {
public:
virtual ~Base2() { _c.dropCollection( ns() ); }
@@ -167,7 +168,7 @@ namespace CursorTests {
dblock _lk;
vector< BSONObj > _objs;
};
-
+
class EqEq : public Base2 {
public:
void run() {
@@ -194,7 +195,7 @@ namespace CursorTests {
check( BSON( "a" << 4 << "b" << BSON( "$gte" << 1 << "$lte" << 10 ) ) );
}
virtual BSONObj idx() const { return BSON( "a" << 1 << "b" << 1 ); }
- };
+ };
class EqIn : public Base2 {
public:
@@ -210,7 +211,7 @@ namespace CursorTests {
check( BSON( "a" << 4 << "b" << BSON( "$in" << BSON_ARRAY( 5 << 6 << 11 ) ) ) );
}
virtual BSONObj idx() const { return BSON( "a" << 1 << "b" << 1 ); }
- };
+ };
class RangeEq : public Base2 {
public:
@@ -227,7 +228,7 @@ namespace CursorTests {
check( BSON( "a" << BSON( "$gte" << 1 << "$lte" << 10 ) << "b" << 4 ) );
}
virtual BSONObj idx() const { return BSON( "a" << 1 << "b" << 1 ); }
- };
+ };
class RangeIn : public Base2 {
public:
@@ -244,15 +245,15 @@ namespace CursorTests {
check( BSON( "a" << BSON( "$gte" << 1 << "$lte" << 10 ) << "b" << BSON( "$in" << BSON_ARRAY( 4 << 6 ) ) ) );
}
virtual BSONObj idx() const { return BSON( "a" << 1 << "b" << 1 ); }
- };
-
+ };
+
} // namespace BtreeCursorTests
-
+
class All : public Suite {
public:
- All() : Suite( "cursor" ){}
-
- void setupTests(){
+ All() : Suite( "cursor" ) {}
+
+ void setupTests() {
add< BtreeCursorTests::MultiRange >();
add< BtreeCursorTests::MultiRangeGap >();
add< BtreeCursorTests::MultiRangeReverse >();
diff --git a/dbtests/d_chunk_manager_tests.cpp b/dbtests/d_chunk_manager_tests.cpp
index 5794d629d61..bcfe9fa9ec1 100644
--- a/dbtests/d_chunk_manager_tests.cpp
+++ b/dbtests/d_chunk_manager_tests.cpp
@@ -32,13 +32,13 @@ namespace {
"unique" << false );
// single-chunk collection
- BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey" <<
- "ns" << "test.foo" <<
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey" <<
+ "ns" << "test.foo" <<
"min" << BSON( "a" << MINKEY ) <<
"max" << BSON( "a" << MAXKEY ) ) );
ShardChunkManager s ( collection , chunks );
-
+
BSONObj k1 = BSON( "a" << MINKEY );
ASSERT( s.belongsToMe( k1 ) );
BSONObj k2 = BSON( "a" << MAXKEY );
@@ -57,13 +57,13 @@ namespace {
"unique" << false );
// single-chunk collection
- BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKeyb_MinKey" <<
- "ns" << "test.foo" <<
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKeyb_MinKey" <<
+ "ns" << "test.foo" <<
"min" << BSON( "a" << MINKEY << "b" << MINKEY ) <<
"max" << BSON( "a" << MAXKEY << "b" << MAXKEY ) ) );
ShardChunkManager s ( collection , chunks );
-
+
BSONObj k1 = BSON( "a" << MINKEY << "b" << MINKEY );
ASSERT( s.belongsToMe( k1 ) );
BSONObj k2 = BSON( "a" << MAXKEY << "b" << MAXKEY );
@@ -71,13 +71,13 @@ namespace {
BSONObj k3 = BSON( "a" << MINKEY << "b" << 10 );
ASSERT( s.belongsToMe( k3 ) );
BSONObj k4 = BSON( "a" << 10 << "b" << 20 );
- ASSERT( s.belongsToMe( k4 ) );
+ ASSERT( s.belongsToMe( k4 ) );
}
};
- class RangeTests {
+ class RangeTests {
public:
- void run() {
+ void run() {
BSONObj collection = BSON( "_id" << "x.y" <<
"dropped" << false <<
"key" << BSON( "a" << 1 ) <<
@@ -85,23 +85,23 @@ namespace {
// 3-chunk collection, 2 of them being contiguous
// [min->10) , [10->20) , <gap> , [30->max)
- BSONArray chunks = BSON_ARRAY( BSON( "_id" << "x.y-a_MinKey" <<
- "ns" << "x.y" <<
- "min" << BSON( "a" << MINKEY ) <<
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "x.y-a_MinKey" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << MINKEY ) <<
"max" << BSON( "a" << 10 ) ) <<
- BSON( "_id" << "x.y-a_10" <<
- "ns" << "x.y" <<
- "min" << BSON( "a" << 10 ) <<
+ BSON( "_id" << "x.y-a_10" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << 10 ) <<
"max" << BSON( "a" << 20 ) ) <<
- BSON( "_id" << "x.y-a_30" <<
- "ns" << "x.y" <<
- "min" << BSON( "a" << 30 ) <<
+ BSON( "_id" << "x.y-a_30" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << 30 ) <<
"max" << BSON( "a" << MAXKEY ) ) );
ShardChunkManager s ( collection , chunks );
BSONObj k1 = BSON( "a" << 5 );
- ASSERT( s.belongsToMe( k1 ) );
+ ASSERT( s.belongsToMe( k1 ) );
BSONObj k2 = BSON( "a" << 10 );
ASSERT( s.belongsToMe( k2 ) );
BSONObj k3 = BSON( "a" << 25 );
@@ -113,10 +113,10 @@ namespace {
}
};
- class GetNextTests {
+ class GetNextTests {
public:
- void run() {
-
+ void run() {
+
BSONObj collection = BSON( "_id" << "x.y" <<
"dropped" << false <<
"key" << BSON( "a" << 1 ) <<
@@ -140,9 +140,9 @@ namespace {
// [10->20]
BSONObj key_a10 = BSON( "a" << 10 );
BSONObj key_a20 = BSON( "a" << 20 );
- BSONArray chunks2 = BSON_ARRAY( BSON( "_id" << "x.y-a_10" <<
- "ns" << "x.y" <<
- "min" << key_a10 <<
+ BSONArray chunks2 = BSON_ARRAY( BSON( "_id" << "x.y-a_10" <<
+ "ns" << "x.y" <<
+ "min" << key_a10 <<
"max" << key_a20 ) );
ShardChunkManager s2( collection , chunks2 );
ASSERT( s2.getNextChunk( empty , &foundMin , &foundMax ) );
@@ -154,17 +154,17 @@ namespace {
BSONObj key_a30 = BSON( "a" << 30 );
BSONObj key_min = BSON( "a" << MINKEY );
BSONObj key_max = BSON( "a" << MAXKEY );
- BSONArray chunks3 = BSON_ARRAY( BSON( "_id" << "x.y-a_MinKey" <<
- "ns" << "x.y" <<
- "min" << key_min <<
+ BSONArray chunks3 = BSON_ARRAY( BSON( "_id" << "x.y-a_MinKey" <<
+ "ns" << "x.y" <<
+ "min" << key_min <<
"max" << key_a10 ) <<
- BSON( "_id" << "x.y-a_10" <<
- "ns" << "x.y" <<
- "min" << key_a10 <<
+ BSON( "_id" << "x.y-a_10" <<
+ "ns" << "x.y" <<
+ "min" << key_a10 <<
"max" << key_a20 ) <<
- BSON( "_id" << "x.y-a_30" <<
- "ns" << "x.y" <<
- "min" << key_a30 <<
+ BSON( "_id" << "x.y-a_30" <<
+ "ns" << "x.y" <<
+ "min" << key_a30 <<
"max" << key_max ) );
ShardChunkManager s3( collection , chunks3 );
ASSERT( ! s3.getNextChunk( empty , &foundMin , &foundMax ) ); // not eof
@@ -198,8 +198,8 @@ namespace {
"unique" << false );
// 1-chunk collection
// [10,0-20,0)
- BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey" <<
- "ns" << "test.foo" <<
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey" <<
+ "ns" << "test.foo" <<
"min" << BSON( "a" << 10 << "b" << 0 ) <<
"max" << BSON( "a" << 20 << "b" << 0 ) ) );
@@ -211,13 +211,13 @@ namespace {
ShardChunkManagerPtr cloned( s.clonePlus( min , max , 1 /* TODO test version */ ) );
BSONObj k1 = BSON( "a" << 5 << "b" << 0 );
- ASSERT( ! cloned->belongsToMe( k1 ) );
+ ASSERT( ! cloned->belongsToMe( k1 ) );
BSONObj k2 = BSON( "a" << 20 << "b" << 0 );
- ASSERT( cloned->belongsToMe( k2 ) );
+ ASSERT( cloned->belongsToMe( k2 ) );
BSONObj k3 = BSON( "a" << 25 << "b" << 0 );
- ASSERT( cloned->belongsToMe( k3 ) );
+ ASSERT( cloned->belongsToMe( k3 ) );
BSONObj k4 = BSON( "a" << 30 << "b" << 0 );
- ASSERT( ! cloned->belongsToMe( k4 ) );
+ ASSERT( ! cloned->belongsToMe( k4 ) );
}
};
@@ -230,8 +230,8 @@ namespace {
"unique" << false );
// 1-chunk collection
// [10,0-20,0)
- BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey" <<
- "ns" << "test.foo" <<
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey" <<
+ "ns" << "test.foo" <<
"min" << BSON( "a" << 10 << "b" << 0 ) <<
"max" << BSON( "a" << 20 << "b" << 0 ) ) );
@@ -249,18 +249,18 @@ namespace {
void run() {
BSONObj collection = BSON( "_id" << "x.y" <<
"dropped" << false <<
- "key" << BSON( "a" << 1 << "b" << 1 ) <<
+ "key" << BSON( "a" << 1 << "b" << 1 ) <<
"unique" << false );
// 2-chunk collection
// [10,0->20,0) , <gap> , [30,0->40,0)
- BSONArray chunks = BSON_ARRAY( BSON( "_id" << "x.y-a_10b_0" <<
- "ns" << "x.y" <<
- "min" << BSON( "a" << 10 << "b" << 0 ) <<
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "x.y-a_10b_0" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << 10 << "b" << 0 ) <<
"max" << BSON( "a" << 20 << "b" << 0 ) ) <<
- BSON( "_id" << "x.y-a_30b_0" <<
- "ns" << "x.y" <<
- "min" << BSON( "a" << 30 << "b" << 0 ) <<
+ BSON( "_id" << "x.y-a_30b_0" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << 30 << "b" << 0 ) <<
"max" << BSON( "a" << 40 << "b" << 0 ) ) );
ShardChunkManager s ( collection , chunks );
@@ -271,9 +271,9 @@ namespace {
ShardChunkManagerPtr cloned( s.cloneMinus( min , max , 1 /* TODO test version */ ) );
BSONObj k1 = BSON( "a" << 5 << "b" << 0 );
- ASSERT( ! cloned->belongsToMe( k1 ) );
+ ASSERT( ! cloned->belongsToMe( k1 ) );
BSONObj k2 = BSON( "a" << 15 << "b" << 0 );
- ASSERT( ! cloned->belongsToMe( k2 ) );
+ ASSERT( ! cloned->belongsToMe( k2 ) );
BSONObj k3 = BSON( "a" << 30 << "b" << 0 );
ASSERT( cloned->belongsToMe( k3 ) );
BSONObj k4 = BSON( "a" << 35 << "b" << 0 );
@@ -288,18 +288,18 @@ namespace {
void run() {
BSONObj collection = BSON( "_id" << "x.y" <<
"dropped" << false <<
- "key" << BSON( "a" << 1 << "b" << 1 ) <<
+ "key" << BSON( "a" << 1 << "b" << 1 ) <<
"unique" << false );
// 2-chunk collection
// [10,0->20,0) , <gap> , [30,0->40,0)
- BSONArray chunks = BSON_ARRAY( BSON( "_id" << "x.y-a_10b_0" <<
- "ns" << "x.y" <<
- "min" << BSON( "a" << 10 << "b" << 0 ) <<
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "x.y-a_10b_0" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << 10 << "b" << 0 ) <<
"max" << BSON( "a" << 20 << "b" << 0 ) ) <<
- BSON( "_id" << "x.y-a_30b_0" <<
- "ns" << "x.y" <<
- "min" << BSON( "a" << 30 << "b" << 0 ) <<
+ BSON( "_id" << "x.y-a_30b_0" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << 30 << "b" << 0 ) <<
"max" << BSON( "a" << 40 << "b" << 0 ) ) );
ShardChunkManager s ( collection , chunks );
@@ -328,9 +328,9 @@ namespace {
// [10,0-20,0)
BSONObj min = BSON( "a" << 10 << "b" << 0 );
BSONObj max = BSON( "a" << 20 << "b" << 0 );
- BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey"
- << "ns" << "test.foo"
- << "min" << min
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey"
+ << "ns" << "test.foo"
+ << "min" << min
<< "max" << max ) );
ShardChunkManager s ( collection , chunks );
@@ -339,7 +339,7 @@ namespace {
BSONObj split2 = BSON( "a" << 18 << "b" << 0 );
vector<BSONObj> splitKeys;
splitKeys.push_back( split1 );
- splitKeys.push_back( split2 );
+ splitKeys.push_back( split2 );
ShardChunkVersion version( 1 , 99 ); // first chunk 1|99 , second 1|100
ShardChunkManagerPtr cloned( s.cloneSplit( min , max , splitKeys , version ) );
@@ -349,9 +349,9 @@ namespace {
ASSERT_EQUALS( s.getNumChunks() , 1u );
ASSERT_EQUALS( cloned->getNumChunks() , 3u );
ASSERT( cloned->belongsToMe( min ) );
- ASSERT( cloned->belongsToMe( split1 ) );
- ASSERT( cloned->belongsToMe( split2 ) );
- ASSERT( ! cloned->belongsToMe( max ) );
+ ASSERT( cloned->belongsToMe( split1 ) );
+ ASSERT( cloned->belongsToMe( split2 ) );
+ ASSERT( ! cloned->belongsToMe( max ) );
}
};
@@ -367,8 +367,8 @@ namespace {
BSONObj min = BSON( "a" << 10 << "b" << 0 );
BSONObj max = BSON( "a" << 20 << "b" << 0 );
BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey"
- << "ns" << "test.foo"
- << "min" << min
+ << "ns" << "test.foo"
+ << "min" << min
<< "max" << max ) );
ShardChunkManager s ( collection , chunks );
@@ -415,8 +415,8 @@ namespace {
// 1-chunk collection
// [10->20)
- BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_10" <<
- "ns" << "test.foo" <<
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_10" <<
+ "ns" << "test.foo" <<
"min" << BSON( "a" << 10 ) <<
"max" << BSON( "a" << 20 ) ) );
@@ -451,7 +451,7 @@ namespace {
add< BasicTests >();
add< BasicCompoundTests >();
add< RangeTests >();
- add< GetNextTests >();
+ add< GetNextTests >();
add< DeletedTests >();
add< ClonePlusTests >();
add< ClonePlusExceptionTests >();
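
A detail worth keeping in mind while reading these assertions: chunk ranges are half-open, so a key equal to a chunk's min belongs to it while a key equal to its max does not (hence the [10,0-20,0) comments, and why k4 = {a:30, b:0} fails after clonePlus of [20,0-30,0)). A generic sketch of that lookup, not ShardChunkManager's actual implementation, keeps a map from each chunk's min key to its max key:

    #include <map>
    #include <string>

    // Half-open interval lookup: 'chunks' maps each chunk's min to its max.
    bool belongsToMe( const std::map<std::string,std::string>& chunks ,
                      const std::string& key ) {
        std::map<std::string,std::string>::const_iterator it = chunks.upper_bound( key );
        if ( it == chunks.begin() )
            return false;           // key sorts below every chunk's min
        --it;                       // now it->first <= key
        return key < it->second;    // min is inclusive, max is exclusive
    }
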
diff --git a/dbtests/directclienttests.cpp b/dbtests/directclienttests.cpp
index e30fa3a29cb..204bf927a63 100644
--- a/dbtests/directclienttests.cpp
+++ b/dbtests/directclienttests.cpp
@@ -46,9 +46,9 @@ namespace DirectClientTests {
const char *ns = "a.b";
- class Capped : public ClientBase {
+ class Capped : public ClientBase {
public:
- virtual void run() {
+ virtual void run() {
for( int pass=0; pass < 3; pass++ ) {
client().createCollection(ns, 1024 * 1024, true, 999);
for( int j =0; j < pass*3; j++ )
@@ -68,12 +68,12 @@ namespace DirectClientTests {
}
}
};
-
+
class All : public Suite {
public:
All() : Suite( "directclient" ) {
}
- void setupTests(){
+ void setupTests() {
add< Capped >();
}
} myall;
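
The Capped case leans on createCollection's (ns, size, capped, max) signature: the collection here holds at most 1MB and 999 documents, evicting the oldest entries once a bound is hit. A minimal usage sketch (assuming the client's count() helper; the loop bound is illustrative):

    DBDirectClient c;
    c.createCollection( ns , 1024 * 1024 , true , 999 );    // capped, max 999 docs
    for ( int j = 0; j < 2000; j++ )
        c.insert( ns , BSON( "x" << j ) );
    ASSERT( c.count( ns ) <= 999 );    // oldest inserts were evicted first
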
diff --git a/dbtests/framework.cpp b/dbtests/framework.cpp
index 5f784882b2a..e2e73d37948 100644
--- a/dbtests/framework.cpp
+++ b/dbtests/framework.cpp
@@ -35,7 +35,7 @@
namespace po = boost::program_options;
namespace mongo {
-
+
CmdLine cmdLine;
namespace regression {
@@ -47,21 +47,21 @@ namespace mongo {
Result( string name ) : _name( name ) , _rc(0) , _tests(0) , _fails(0) , _asserts(0) {
}
- string toString(){
+ string toString() {
stringstream ss;
char result[128];
sprintf(result, "%-20s | tests: %4d | fails: %4d | assert calls: %6d\n", _name.c_str(), _tests, _fails, _asserts);
ss << result;
- for ( list<string>::iterator i=_messages.begin(); i!=_messages.end(); i++ ){
+ for ( list<string>::iterator i=_messages.begin(); i!=_messages.end(); i++ ) {
ss << "\t" << *i << '\n';
}
-
+
return ss.str();
}
- int rc(){
+ int rc() {
return _rc;
}
@@ -78,7 +78,7 @@ namespace mongo {
Result * Result::cur = 0;
- Result * Suite::run( const string& filter ){
+ Result * Suite::run( const string& filter ) {
tlogLevel = -1;
log(1) << "\t about to setupTests" << endl;
@@ -91,53 +91,53 @@ namespace mongo {
/* see note in SavedContext */
//writelock lk("");
- for ( list<TestCase*>::iterator i=_tests.begin(); i!=_tests.end(); i++ ){
+ for ( list<TestCase*>::iterator i=_tests.begin(); i!=_tests.end(); i++ ) {
TestCase * tc = *i;
- if ( filter.size() && tc->getName().find( filter ) == string::npos ){
+ if ( filter.size() && tc->getName().find( filter ) == string::npos ) {
log(1) << "\t skipping test: " << tc->getName() << " because doesn't match filter" << endl;
continue;
}
r->_tests++;
-
+
bool passes = false;
-
+
log(1) << "\t going to run test: " << tc->getName() << endl;
-
+
stringstream err;
err << tc->getName() << "\t";
-
+
try {
tc->run();
passes = true;
}
- catch ( MyAssertionException * ae ){
+ catch ( MyAssertionException * ae ) {
err << ae->ss.str();
delete( ae );
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
err << " exception: " << e.what();
}
- catch ( int x ){
+ catch ( int x ) {
err << " caught int : " << x << endl;
}
- catch ( ... ){
+ catch ( ... ) {
cerr << "unknown exception in test: " << tc->getName() << endl;
}
-
- if ( ! passes ){
+
+ if ( ! passes ) {
string s = err.str();
log() << "FAIL: " << s << endl;
r->_fails++;
r->_messages.push_back( s );
- }
+ }
}
-
+
if ( r->_fails )
r->_rc = 17;
log(1) << "\t DONE running tests" << endl;
-
+
return r;
}
@@ -156,23 +156,23 @@ namespace mongo {
po::positional_options_description positional_options;
shell_options.add_options()
- ("help,h", "show this usage information")
- ("dbpath", po::value<string>(&dbpathSpec)->default_value(default_dbpath),
- "db data path for this test run. NOTE: the contents of this "
- "directory will be overwritten if it already exists")
- ("debug", "run tests with verbose output")
- ("list,l", "list available test suites")
- ("bigfiles", "use big datafiles instead of smallfiles which is the default")
- ("filter,f" , po::value<string>() , "string substring filter on test name" )
- ("verbose,v", "verbose")
- ("dur", "enable journaling")
- ("nodur", "disable journaling (currently the default)")
- ("seed", po::value<unsigned long long>(&seed), "random number seed")
- ;
-
+ ("help,h", "show this usage information")
+ ("dbpath", po::value<string>(&dbpathSpec)->default_value(default_dbpath),
+ "db data path for this test run. NOTE: the contents of this "
+ "directory will be overwritten if it already exists")
+ ("debug", "run tests with verbose output")
+ ("list,l", "list available test suites")
+ ("bigfiles", "use big datafiles instead of smallfiles which is the default")
+ ("filter,f" , po::value<string>() , "string substring filter on test name" )
+ ("verbose,v", "verbose")
+ ("dur", "enable journaling")
+ ("nodur", "disable journaling (currently the default)")
+ ("seed", po::value<unsigned long long>(&seed), "random number seed")
+ ;
+
hidden_options.add_options()
- ("suites", po::value< vector<string> >(), "test suites to run")
- ;
+ ("suites", po::value< vector<string> >(), "test suites to run")
+ ;
positional_options.add("suites", -1);
@@ -189,7 +189,8 @@ namespace mongo {
positional(positional_options).
style(command_line_style).run(), params);
po::notify(params);
- } catch (po::error &e) {
+ }
+ catch (po::error &e) {
cout << "ERROR: " << e.what() << endl << endl;
show_help_text(argv[0], shell_options);
return EXIT_BADOPTIONS;
@@ -200,10 +201,10 @@ namespace mongo {
return EXIT_CLEAN;
}
- if( params.count("nodur") ) {
+ if( params.count("nodur") ) {
cmdLine.dur = false;
}
- if( params.count("dur") || cmdLine.dur ) {
+ if( params.count("dur") || cmdLine.dur ) {
cmdLine.dur = true;
}
@@ -228,21 +229,22 @@ namespace mongo {
}
boost::filesystem::directory_iterator end_iter;
for (boost::filesystem::directory_iterator dir_iter(p);
- dir_iter != end_iter; ++dir_iter) {
+ dir_iter != end_iter; ++dir_iter) {
boost::filesystem::remove_all(*dir_iter);
}
- } else {
+ }
+ else {
boost::filesystem::create_directory(p);
}
string dbpathString = p.native_directory_string();
dbpath = dbpathString.c_str();
-
+
cmdLine.prealloc = false;
// dbtest defaults to smallfiles
cmdLine.smallfiles = true;
- if( params.count("bigfiles") ) {
+ if( params.count("bigfiles") ) {
cmdLine.dur = true;
}
@@ -261,9 +263,9 @@ namespace mongo {
if (params.count("suites")) {
suites = params["suites"].as< vector<string> >();
}
-
+
string filter = "";
- if ( params.count( "filter" ) ){
+ if ( params.count( "filter" ) ) {
filter = params["filter"].as<string>();
}
@@ -274,13 +276,13 @@ namespace mongo {
#if !defined(_WIN32) && !defined(__sunos__)
flock( lockFile, LOCK_UN );
#endif
-
+
cc().shutdown();
dbexit( (ExitCode)ret ); // so everything shuts down cleanly
return ret;
}
- int Suite::run( vector<string> suites , const string& filter ){
+ int Suite::run( vector<string> suites , const string& filter ) {
for ( unsigned int i = 0; i < suites.size(); i++ ) {
if ( _suites->find( suites[i] ) == _suites->end() ) {
cout << "invalid test [" << suites[i] << "], use --list to see valid names" << endl;
@@ -296,7 +298,7 @@ namespace mongo {
list<Result*> results;
- for ( list<string>::iterator i=torun.begin(); i!=torun.end(); i++ ){
+ for ( list<string>::iterator i=torun.begin(); i!=torun.end(); i++ ) {
string name = *i;
Suite * s = (*_suites)[name];
assert( s );
@@ -317,12 +319,12 @@ namespace mongo {
int fails = 0;
int asserts = 0;
- for ( list<Result*>::iterator i=results.begin(); i!=results.end(); i++ ){
+ for ( list<Result*>::iterator i=results.begin(); i!=results.end(); i++ ) {
Result * r = *i;
cout << r->toString();
if ( abs( r->rc() ) > abs( rc ) )
rc = r->rc();
-
+
tests += r->_tests;
fails += r->_fails;
asserts += r->_asserts;
@@ -332,13 +334,13 @@ namespace mongo {
totals._tests = tests;
totals._fails = fails;
totals._asserts = asserts;
-
+
cout << totals.toString(); // includes endl
return rc;
}
- void Suite::registerSuite( string name , Suite * s ){
+ void Suite::registerSuite( string name , Suite * s ) {
if ( ! _suites )
_suites = new map<string,Suite*>();
Suite*& m = (*_suites)[name];
@@ -346,37 +348,37 @@ namespace mongo {
m = s;
}
- void assert_pass(){
+ void assert_pass() {
Result::cur->_asserts++;
}
- void assert_fail( const char * exp , const char * file , unsigned line ){
+ void assert_fail( const char * exp , const char * file , unsigned line ) {
Result::cur->_asserts++;
-
+
MyAssertionException * e = new MyAssertionException();
e->ss << "ASSERT FAILED! " << file << ":" << line << endl;
throw e;
}
- void fail( const char * exp , const char * file , unsigned line ){
+ void fail( const char * exp , const char * file , unsigned line ) {
assert(0);
}
- MyAssertionException * MyAsserts::getBase(){
+ MyAssertionException * MyAsserts::getBase() {
MyAssertionException * e = new MyAssertionException();
e->ss << _file << ":" << _line << " " << _aexp << " != " << _bexp << " ";
return e;
}
-
- void MyAsserts::printLocation(){
+
+ void MyAsserts::printLocation() {
log() << _file << ":" << _line << " " << _aexp << " != " << _bexp << " ";
}
- void MyAsserts::_gotAssert(){
+ void MyAsserts::_gotAssert() {
Result::cur->_asserts++;
}
}
- void setupSignals( bool inFork ){}
+ void setupSignals( bool inFork ) {}
}
diff --git a/dbtests/framework.h b/dbtests/framework.h
index bec14a2f28d..29ba58bd96b 100644
--- a/dbtests/framework.h
+++ b/dbtests/framework.h
@@ -49,7 +49,7 @@ namespace mongo {
class TestCase {
public:
- virtual ~TestCase(){}
+ virtual ~TestCase() {}
virtual void run() = 0;
virtual string getName() = 0;
};
@@ -57,15 +57,15 @@ namespace mongo {
template< class T >
class TestHolderBase : public TestCase {
public:
- TestHolderBase(){}
- virtual ~TestHolderBase(){}
- virtual void run(){
+ TestHolderBase() {}
+ virtual ~TestHolderBase() {}
+ virtual void run() {
auto_ptr<T> t;
t.reset( create() );
t->run();
}
virtual T * create() = 0;
- virtual string getName(){
+ virtual string getName() {
return demangleName( typeid(T) );
}
};
@@ -73,7 +73,7 @@ namespace mongo {
template< class T >
class TestHolder0 : public TestHolderBase<T> {
public:
- virtual T * create(){
+ virtual T * create() {
return new T();
}
};
@@ -81,8 +81,8 @@ namespace mongo {
template< class T , typename A >
class TestHolder1 : public TestHolderBase<T> {
public:
- TestHolder1( const A& a ) : _a(a){}
- virtual T * create(){
+ TestHolder1( const A& a ) : _a(a) {}
+ virtual T * create() {
return new T( _a );
}
const A& _a;
@@ -90,25 +90,25 @@ namespace mongo {
class Suite {
public:
- Suite( string name ) : _name( name ){
+ Suite( string name ) : _name( name ) {
registerSuite( name , this );
_ran = 0;
}
virtual ~Suite() {
- if ( _ran ){
+ if ( _ran ) {
DBDirectClient c;
c.dropDatabase( "unittests" );
}
}
template<class T>
- void add(){
+ void add() {
_tests.push_back( new TestHolder0<T>() );
}
template<class T , typename A >
- void add( const A& a ){
+ void add( const A& a ) {
_tests.push_back( new TestHolder1<T,A>(a) );
}
@@ -137,7 +137,7 @@ namespace mongo {
class MyAssertionException : boost::noncopyable {
public:
- MyAssertionException(){
+ MyAssertionException() {
ss << "assertion: ";
}
stringstream ss;
@@ -148,32 +148,32 @@ namespace mongo {
class MyAsserts {
public:
MyAsserts( const char * aexp , const char * bexp , const char * file , unsigned line )
- : _aexp( aexp ) , _bexp( bexp ) , _file( file ) , _line( line ){
+ : _aexp( aexp ) , _bexp( bexp ) , _file( file ) , _line( line ) {
}
-
+
template<typename A,typename B>
- void ae( A a , B b ){
+ void ae( A a , B b ) {
_gotAssert();
if ( a == b )
return;
-
+
printLocation();
-
+
MyAssertionException * e = getBase();
e->ss << a << " != " << b << endl;
log() << e->ss.str() << endl;
throw e;
}
-
+
template<typename A,typename B>
- void nae( A a , B b ){
+ void nae( A a , B b ) {
_gotAssert();
if ( a != b )
return;
-
+
printLocation();
-
+
MyAssertionException * e = getBase();
e->ss << a << " == " << b << endl;
log() << e->ss.str() << endl;
@@ -182,13 +182,13 @@ namespace mongo {
void printLocation();
-
+
private:
-
+
void _gotAssert();
-
+
MyAssertionException * getBase();
-
+
string _aexp;
string _bexp;
string _file;
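
The add<T>() plumbing above is a compact type-erasure pattern: TestHolderBase fixes the run()/getName() interface, while TestHolder0 and TestHolder1 each remember how to construct the concrete test (default constructor or one stored argument), so a suite can hold heterogeneous tests in one list and instantiate each lazily when it runs. Boiled down to its essentials:

    #include <list>
    #include <memory>

    struct TestIface {
        virtual ~TestIface() {}
        virtual void run() = 0;
    };

    // Erases T behind TestIface; construction is deferred until run time.
    template< class T >
    struct Holder : TestIface {
        virtual void run() {
            std::auto_ptr<T> t( new T() );   // auto_ptr matches this era of the code
            t->run();
        }
    };

    struct MiniSuite {
        std::list<TestIface*> _tests;
        template< class T >
        void add() { _tests.push_back( new Holder<T>() ); }
    };
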
diff --git a/dbtests/histogram_test.cpp b/dbtests/histogram_test.cpp
index 5a8970d3333..e9cbb5bdf25 100644
--- a/dbtests/histogram_test.cpp
+++ b/dbtests/histogram_test.cpp
@@ -25,9 +25,9 @@ namespace mongo {
using mongo::Histogram;
- class BoundariesInit{
+ class BoundariesInit {
public:
- void run(){
+ void run() {
Histogram::Options opts;
opts.numBuckets = 3;
opts.bucketSize = 10;
@@ -45,9 +45,9 @@ namespace mongo {
}
};
- class BoundariesExponential{
+ class BoundariesExponential {
public:
- void run(){
+ void run() {
Histogram::Options opts;
opts.numBuckets = 4;
opts.bucketSize = 125;
@@ -57,13 +57,13 @@ namespace mongo {
ASSERT_EQUALS( h.getBoundary( 0 ), 125u );
ASSERT_EQUALS( h.getBoundary( 1 ), 250u );
ASSERT_EQUALS( h.getBoundary( 2 ), 500u );
- ASSERT_EQUALS( h.getBoundary( 3 ), numeric_limits<uint32_t>::max() );
+ ASSERT_EQUALS( h.getBoundary( 3 ), numeric_limits<uint32_t>::max() );
}
};
- class BoundariesFind{
+ class BoundariesFind {
public:
- void run(){
+ void run() {
Histogram::Options opts;
opts.numBuckets = 3;
opts.bucketSize = 10;
@@ -81,14 +81,14 @@ namespace mongo {
class HistogramSuite : public Suite {
public:
- HistogramSuite() : Suite( "histogram" ){}
+ HistogramSuite() : Suite( "histogram" ) {}
- void setupTests(){
+ void setupTests() {
add< BoundariesInit >();
add< BoundariesExponential >();
add< BoundariesFind >();
// TODO: complete the test suite
- }
+ }
} histogramSuite;
} // anonymous namespace
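
The expected boundaries in these cases follow a simple rule that can be read off the asserted values: with linear buckets the i-th boundary is (i + 1) * bucketSize (10, 20, ...), with exponential buckets it is bucketSize << i (125, 250, 500, ...), and the final bucket is always capped at numeric_limits<uint32_t>::max() so no sample can fall past the end. A reconstruction of that rule from the assertions, not Histogram's actual code:

    #include <limits>
    #include <stdint.h>

    // Reconstructed boundary rule; the last bucket always ends at UINT32_MAX.
    uint32_t boundary( int i , int numBuckets , uint32_t bucketSize , bool exponential ) {
        if ( i == numBuckets - 1 )
            return std::numeric_limits<uint32_t>::max();
        return exponential ? ( bucketSize << i ) : bucketSize * ( i + 1 );
    }
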
diff --git a/dbtests/jsobjtests.cpp b/dbtests/jsobjtests.cpp
index e1e7f04e95d..a8570e15bfb 100644
--- a/dbtests/jsobjtests.cpp
+++ b/dbtests/jsobjtests.cpp
@@ -150,7 +150,7 @@ namespace JsobjTests {
class MultiKeySortOrder : public Base {
public:
- void run(){
+ void run() {
ASSERT( BSON( "x" << "a" ).woCompare( BSON( "x" << "b" ) ) < 0 );
ASSERT( BSON( "x" << "b" ).woCompare( BSON( "x" << "a" ) ) > 0 );
@@ -255,9 +255,9 @@ namespace JsobjTests {
}
};
- class AsTempObj{
+ class AsTempObj {
public:
- void run(){
+ void run() {
{
BSONObjBuilder bb;
bb << "a" << 1;
@@ -267,7 +267,7 @@ namespace JsobjTests {
ASSERT(tmp.hasField("a"));
ASSERT(!tmp.hasField("b"));
ASSERT(tmp == BSON("a" << 1));
-
+
bb << "b" << 2;
BSONObj obj = bb.obj();
ASSERT_EQUALS(obj.objsize() , 4+(1+2+4)+(1+2+4)+1);
@@ -285,7 +285,7 @@ namespace JsobjTests {
ASSERT(tmp.hasField("a"));
ASSERT(!tmp.hasField("b"));
ASSERT(tmp == BSON("a" << BSON("$gt" << 1)));
-
+
bb << "b" << LT << 2;
BSONObj obj = bb.obj();
ASSERT(obj.objsize() == 4+(1+2+(4+1+4+4+1))+(1+2+(4+1+4+4+1))+1);
@@ -293,7 +293,7 @@ namespace JsobjTests {
ASSERT(obj.hasField("a"));
ASSERT(obj.hasField("b"));
ASSERT(obj == BSON("a" << BSON("$gt" << 1)
- << "b" << BSON("$lt" << 2)));
+ << "b" << BSON("$lt" << 2)));
}
{
BSONObjBuilder bb(32);
@@ -304,10 +304,10 @@ namespace JsobjTests {
ASSERT(tmp.hasField("a"));
ASSERT(!tmp.hasField("b"));
ASSERT(tmp == BSON("a" << 1));
-
+
//force a realloc
BSONArrayBuilder arr;
- for (int i=0; i < 10000; i++){
+ for (int i=0; i < 10000; i++) {
arr << i;
}
bb << "b" << arr.arr();
@@ -319,8 +319,8 @@ namespace JsobjTests {
}
};
- struct AppendIntOrLL{
- void run(){
+ struct AppendIntOrLL {
+ void run() {
const long long billion = 1000*1000*1000;
BSONObjBuilder b;
b.appendIntOrLL("i1", 1);
@@ -362,16 +362,16 @@ namespace JsobjTests {
};
struct AppendNumber {
- void run(){
+ void run() {
BSONObjBuilder b;
b.appendNumber( "a" , 5 );
b.appendNumber( "b" , 5.5 );
b.appendNumber( "c" , (1024LL*1024*1024)-1 );
b.appendNumber( "d" , (1024LL*1024*1024*1024)-1 );
b.appendNumber( "e" , 1024LL*1024*1024*1024*1024*1024 );
-
+
BSONObj o = b.obj();
-
+
ASSERT( o["a"].type() == NumberInt );
ASSERT( o["b"].type() == NumberDouble );
ASSERT( o["c"].type() == NumberInt );
@@ -380,7 +380,7 @@ namespace JsobjTests {
}
};
-
+
class ToStringArray {
public:
void run() {
@@ -391,28 +391,28 @@ namespace JsobjTests {
class ToStringNumber {
public:
-
- void run(){
+
+ void run() {
BSONObjBuilder b;
b.append( "a" , (int)4 );
b.append( "b" , (double)5 );
b.append( "c" , (long long)6 );
-
+
b.append( "d" , 123.456789123456789123456789123456789 );
b.append( "e" , 123456789.123456789123456789123456789 );
b.append( "f" , 1234567891234567891234.56789123456789 );
b.append( "g" , -123.456 );
-
+
BSONObj x = b.obj();
ASSERT_EQUALS( "4", x["a"].toString( false , true ) );
ASSERT_EQUALS( "5.0", x["b"].toString( false , true ) );
- ASSERT_EQUALS( "6", x["c"].toString( false , true ) );
+ ASSERT_EQUALS( "6", x["c"].toString( false , true ) );
ASSERT_EQUALS( "123.4567891234568" , x["d"].toString( false , true ) );
ASSERT_EQUALS( "123456789.1234568" , x["e"].toString( false , true ) );
//            ASSERT_EQUALS( "1.234567891234568e+21" , x["f"].toString( false , true ) ); // Windows and *nix format this differently - TODO: work around in the test, or don't bother?
-
+
ASSERT_EQUALS( "-123.456" , x["g"].toString( false , true ) );
}
@@ -441,7 +441,7 @@ namespace JsobjTests {
}
};
-
+
class AppendAs {
public:
void run() {
@@ -468,7 +468,7 @@ namespace JsobjTests {
ASSERT_EQUALS( 4, a.nFields() );
}
};
-
+
namespace Validation {
class Base {
@@ -718,12 +718,12 @@ namespace JsobjTests {
a.valid();
BSONObj b = fromjson( "{\"one\":2, \"two\":5, \"three\": {},"
- "\"four\": { \"five\": { \"six\" : 11 } },"
- "\"seven\": [ \"a\", \"bb\", \"ccc\", 5 ],"
- "\"eight\": Dbref( \"rrr\", \"01234567890123456789aaaa\" ),"
- "\"_id\": ObjectId( \"deadbeefdeadbeefdeadbeef\" ),"
- "\"nine\": { \"$binary\": \"abc=\", \"$type\": \"00\" },"
- "\"ten\": Date( 44 ), \"eleven\": /foooooo/i }" );
+ "\"four\": { \"five\": { \"six\" : 11 } },"
+ "\"seven\": [ \"a\", \"bb\", \"ccc\", 5 ],"
+ "\"eight\": Dbref( \"rrr\", \"01234567890123456789aaaa\" ),"
+ "\"_id\": ObjectId( \"deadbeefdeadbeefdeadbeef\" ),"
+ "\"nine\": { \"$binary\": \"abc=\", \"$type\": \"00\" },"
+ "\"ten\": Date( 44 ), \"eleven\": /foooooo/i }" );
fuzz( b );
b.valid();
}
@@ -750,7 +750,7 @@ namespace JsobjTests {
class init1 {
public:
- void run(){
+ void run() {
OID a;
OID b;
@@ -763,7 +763,7 @@ namespace JsobjTests {
class initParse1 {
public:
- void run(){
+ void run() {
OID a;
OID b;
@@ -777,7 +777,7 @@ namespace JsobjTests {
class append {
public:
- void run(){
+ void run() {
BSONObjBuilder b;
b.appendOID( "a" , 0 );
b.appendOID( "b" , 0 , false );
@@ -793,18 +793,18 @@ namespace JsobjTests {
class increasing {
public:
- BSONObj g(){
+ BSONObj g() {
BSONObjBuilder b;
b.appendOID( "_id" , 0 , true );
return b.obj();
}
- void run(){
+ void run() {
BSONObj a = g();
BSONObj b = g();
-
+
ASSERT( a.woCompare( b ) < 0 );
-
- // yes, there is a 1/1000 chance this won't increase time(0)
+
+ // yes, there is a 1/1000 chance this won't increase time(0)
                // and therefore inaccurately suggest the function is behaving,
                // but if it's broken it will fail 999 times out of 1000, so that's good enough
sleepsecs( 1 );
@@ -815,7 +815,7 @@ namespace JsobjTests {
class ToDate {
public:
- void run(){
+ void run() {
OID oid;
{
@@ -839,7 +839,7 @@ namespace JsobjTests {
class FromDate {
public:
- void run(){
+ void run() {
OID min, oid, max;
Date_t now = jsTime();
            oid.init(); // slight chance this has a different time. If it's a problem, we can change it.
@@ -917,26 +917,26 @@ namespace JsobjTests {
class LabelMulti : public LabelBase {
BSONObj expected() {
return BSON( "z" << "q"
- << "a" << BSON( "$gt" << 1 << "$lte" << "x" )
- << "b" << BSON( "$ne" << 1 << "$ne" << "f" << "$ne" << 22.3 )
- << "x" << "p" );
+ << "a" << BSON( "$gt" << 1 << "$lte" << "x" )
+ << "b" << BSON( "$ne" << 1 << "$ne" << "f" << "$ne" << 22.3 )
+ << "x" << "p" );
}
BSONObj actual() {
return BSON( "z" << "q"
- << "a" << GT << 1 << LTE << "x"
- << "b" << NE << 1 << NE << "f" << NE << 22.3
- << "x" << "p" );
+ << "a" << GT << 1 << LTE << "x"
+ << "b" << NE << 1 << NE << "f" << NE << 22.3
+ << "x" << "p" );
}
};
class LabelishOr : public LabelBase {
BSONObj expected() {
return BSON( "$or" << BSON_ARRAY(
- BSON("a" << BSON( "$gt" << 1 << "$lte" << "x" ))
- << BSON("b" << BSON( "$ne" << 1 << "$ne" << "f" << "$ne" << 22.3 ))
- << BSON("x" << "p" )));
+ BSON("a" << BSON( "$gt" << 1 << "$lte" << "x" ))
+ << BSON("b" << BSON( "$ne" << 1 << "$ne" << "f" << "$ne" << 22.3 ))
+ << BSON("x" << "p" )));
}
BSONObj actual() {
- return OR( BSON( "a" << GT << 1 << LTE << "x"),
+ return OR( BSON( "a" << GT << 1 << LTE << "x"),
BSON( "b" << NE << 1 << NE << "f" << NE << 22.3),
BSON( "x" << "p" ) );
}
@@ -952,7 +952,7 @@ namespace JsobjTests {
class ElementAppend {
public:
- void run(){
+ void run() {
BSONObj a = BSON( "a" << 17 );
BSONObj b = BSON( "b" << a["a"] );
ASSERT_EQUALS( NumberInt , a["a"].type() );
@@ -1044,20 +1044,20 @@ namespace JsobjTests {
class MinMaxElementTest {
public:
- BSONObj min( int t ){
+ BSONObj min( int t ) {
BSONObjBuilder b;
b.appendMinForType( "a" , t );
return b.obj();
}
- BSONObj max( int t ){
+ BSONObj max( int t ) {
BSONObjBuilder b;
b.appendMaxForType( "a" , t );
return b.obj();
}
- void run(){
- for ( int t=1; t<JSTypeMax; t++ ){
+ void run() {
+ for ( int t=1; t<JSTypeMax; t++ ) {
stringstream ss;
ss << "type: " << t;
string s = ss.str();
@@ -1072,7 +1072,7 @@ namespace JsobjTests {
class ExtractFieldsTest {
public:
- void run(){
+ void run() {
BSONObj x = BSON( "a" << 10 << "b" << 11 );
assert( BSON( "a" << 10 ).woCompare( x.extractFields( BSON( "a" << 1 ) ) ) == 0 );
assert( BSON( "b" << 11 ).woCompare( x.extractFields( BSON( "b" << 1 ) ) ) == 0 );
@@ -1084,10 +1084,10 @@ namespace JsobjTests {
class ComparatorTest {
public:
- BSONObj one( string s ){
+ BSONObj one( string s ) {
return BSON( "x" << s );
}
- BSONObj two( string x , string y ){
+ BSONObj two( string x , string y ) {
BSONObjBuilder b;
b.append( "x" , x );
if ( y.size() )
@@ -1097,7 +1097,7 @@ namespace JsobjTests {
return b.obj();
}
- void test( BSONObj order , BSONObj l , BSONObj r , bool wanted ){
+ void test( BSONObj order , BSONObj l , BSONObj r , bool wanted ) {
BSONObjCmp c( order );
bool got = c(l,r);
if ( got == wanted )
@@ -1105,11 +1105,11 @@ namespace JsobjTests {
cout << " order: " << order << " l: " << l << "r: " << r << " wanted: " << wanted << " got: " << got << endl;
}
- void lt( BSONObj order , BSONObj l , BSONObj r ){
+ void lt( BSONObj order , BSONObj l , BSONObj r ) {
test( order , l , r , 1 );
}
- void run(){
+ void run() {
BSONObj s = BSON( "x" << 1 );
BSONObj c = BSON( "x" << 1 << "y" << 1 );
test( s , one( "A" ) , one( "B" ) , 1 );
@@ -1133,7 +1133,7 @@ namespace JsobjTests {
namespace external_sort {
class Basic1 {
public:
- void run(){
+ void run() {
BSONObjExternalSorter sorter;
sorter.add( BSON( "x" << 10 ) , 5 , 1);
sorter.add( BSON( "x" << 2 ) , 3 , 1 );
@@ -1141,14 +1141,14 @@ namespace JsobjTests {
sorter.add( BSON( "x" << 5 ) , 7 , 1 );
sorter.sort();
-
+
auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
int num=0;
- while ( i->more() ){
+ while ( i->more() ) {
pair<BSONObj,DiskLoc> p = i->next();
if ( num == 0 )
assert( p.first["x"].number() == 2 );
- else if ( num <= 2 ){
+ else if ( num <= 2 ) {
assert( p.first["x"].number() == 5 );
}
else if ( num == 3 )
@@ -1157,15 +1157,15 @@ namespace JsobjTests {
ASSERT( 0 );
num++;
}
-
-
+
+
ASSERT_EQUALS( 0 , sorter.numFiles() );
}
};
class Basic2 {
public:
- void run(){
+ void run() {
BSONObjExternalSorter sorter( BSONObj() , 10 );
sorter.add( BSON( "x" << 10 ) , 5 , 11 );
sorter.add( BSON( "x" << 2 ) , 3 , 1 );
@@ -1173,18 +1173,18 @@ namespace JsobjTests {
sorter.add( BSON( "x" << 5 ) , 7 , 1 );
sorter.sort();
-
+
auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
int num=0;
- while ( i->more() ){
+ while ( i->more() ) {
pair<BSONObj,DiskLoc> p = i->next();
- if ( num == 0 ){
+ if ( num == 0 ) {
assert( p.first["x"].number() == 2 );
ASSERT_EQUALS( p.second.toString() , "3:1" );
}
else if ( num <= 2 )
assert( p.first["x"].number() == 5 );
- else if ( num == 3 ){
+ else if ( num == 3 ) {
assert( p.first["x"].number() == 10 );
ASSERT_EQUALS( p.second.toString() , "5:b" );
}
@@ -1198,7 +1198,7 @@ namespace JsobjTests {
class Basic3 {
public:
- void run(){
+ void run() {
BSONObjExternalSorter sorter( BSONObj() , 10 );
sorter.sort();
@@ -1211,23 +1211,23 @@ namespace JsobjTests {
class ByDiskLock {
public:
- void run(){
+ void run() {
BSONObjExternalSorter sorter;
sorter.add( BSON( "x" << 10 ) , 5 , 4);
sorter.add( BSON( "x" << 2 ) , 3 , 0 );
sorter.add( BSON( "x" << 5 ) , 6 , 2 );
sorter.add( BSON( "x" << 5 ) , 7 , 3 );
sorter.add( BSON( "x" << 5 ) , 2 , 1 );
-
+
sorter.sort();
auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
int num=0;
- while ( i->more() ){
+ while ( i->more() ) {
pair<BSONObj,DiskLoc> p = i->next();
if ( num == 0 )
assert( p.first["x"].number() == 2 );
- else if ( num <= 3 ){
+ else if ( num <= 3 ) {
assert( p.first["x"].number() == 5 );
}
else if ( num == 4 )
@@ -1245,9 +1245,9 @@ namespace JsobjTests {
class Big1 {
public:
- void run(){
+ void run() {
BSONObjExternalSorter sorter( BSONObj() , 2000 );
- for ( int i=0; i<10000; i++ ){
+ for ( int i=0; i<10000; i++ ) {
sorter.add( BSON( "x" << rand() % 10000 ) , 5 , i );
}
@@ -1256,7 +1256,7 @@ namespace JsobjTests {
auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
int num=0;
double prev = 0;
- while ( i->more() ){
+ while ( i->more() ) {
pair<BSONObj,DiskLoc> p = i->next();
num++;
double cur = p.first["x"].number();
@@ -1266,22 +1266,22 @@ namespace JsobjTests {
assert( num == 10000 );
}
};
-
+
class Big2 {
public:
- void run(){
+ void run() {
const int total = 100000;
BSONObjExternalSorter sorter( BSONObj() , total * 2 );
- for ( int i=0; i<total; i++ ){
+ for ( int i=0; i<total; i++ ) {
sorter.add( BSON( "a" << "b" ) , 5 , i );
}
sorter.sort();
-
+
auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
int num=0;
double prev = 0;
- while ( i->more() ){
+ while ( i->more() ) {
pair<BSONObj,DiskLoc> p = i->next();
num++;
double cur = p.first["x"].number();
@@ -1295,21 +1295,21 @@ namespace JsobjTests {
class D1 {
public:
- void run(){
-
+ void run() {
+
BSONObjBuilder b;
b.appendNull("");
BSONObj x = b.obj();
-
+
BSONObjExternalSorter sorter;
sorter.add(x, DiskLoc(3,7));
sorter.add(x, DiskLoc(4,7));
sorter.add(x, DiskLoc(2,7));
sorter.add(x, DiskLoc(1,7));
sorter.add(x, DiskLoc(3,77));
-
+
sorter.sort();
-
+
auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
while( i->more() ) {
BSONObjExternalSorter::Data d = i->next();
@@ -1320,14 +1320,14 @@ namespace JsobjTests {
}
};
}
-
+
class CompatBSON {
public:
-
+
#define JSONBSONTEST(j,s,m) ASSERT_EQUALS( fromjson( j ).objsize() , s ); ASSERT_EQUALS( fromjson( j ).md5() , m );
#define RAWBSONTEST(j,s,m) ASSERT_EQUALS( j.objsize() , s ); ASSERT_EQUALS( j.md5() , m );
- void run(){
+ void run() {
JSONBSONTEST( "{ 'x' : true }" , 9 , "6fe24623e4efc5cf07f027f9c66b5456" );
JSONBSONTEST( "{ 'x' : null }" , 8 , "12d43430ff6729af501faf0638e68888" );
@@ -1337,20 +1337,20 @@ namespace JsobjTests {
JSONBSONTEST( "{ 'a' : { 'b' : 1.1 } }" , 24 , "31887a4b9d55cd9f17752d6a8a45d51f" );
JSONBSONTEST( "{ 'x' : 5.2 , 'y' : { 'a' : 'eliot' , b : true } , 'z' : null }" , 44 , "b3de8a0739ab329e7aea138d87235205" );
JSONBSONTEST( "{ 'x' : 5.2 , 'y' : [ 'a' , 'eliot' , 'b' , true ] , 'z' : null }" , 62 , "cb7bad5697714ba0cbf51d113b6a0ee8" );
-
+
RAWBSONTEST( BSON( "x" << 4 ) , 12 , "d1ed8dbf79b78fa215e2ded74548d89d" );
-
+
}
};
-
+
class CompareDottedFieldNamesTest {
public:
- void t( FieldCompareResult res , const string& l , const string& r ){
+ void t( FieldCompareResult res , const string& l , const string& r ) {
ASSERT_EQUALS( res , compareDottedFieldNames( l , r ) );
ASSERT_EQUALS( -1 * res , compareDottedFieldNames( r , l ) );
}
-
- void run(){
+
+ void run() {
t( SAME , "x" , "x" );
t( SAME , "x.a" , "x.a" );
t( LEFT_BEFORE , "a" , "b" );
@@ -1360,13 +1360,13 @@ namespace JsobjTests {
}
};
- struct NestedDottedConversions{
- void t(const BSONObj& nest, const BSONObj& dot){
+ struct NestedDottedConversions {
+ void t(const BSONObj& nest, const BSONObj& dot) {
ASSERT_EQUALS( nested2dotted(nest), dot);
ASSERT_EQUALS( nest, dotted2nested(dot));
}
- void run(){
+ void run() {
t( BSON("a" << BSON("b" << 1)), BSON("a.b" << 1) );
t( BSON("a" << BSON("b" << 1 << "c" << 1)), BSON("a.b" << 1 << "a.c" << 1) );
t( BSON("a" << BSON("b" << 1 << "c" << 1) << "d" << 1), BSON("a.b" << 1 << "a.c" << 1 << "d" << 1) );
@@ -1374,8 +1374,8 @@ namespace JsobjTests {
}
};
- struct BSONArrayBuilderTest{
- void run(){
+ struct BSONArrayBuilderTest {
+ void run() {
int i = 0;
BSONObjBuilder objb;
BSONArrayBuilder arrb;
@@ -1414,13 +1414,13 @@ namespace JsobjTests {
ASSERT_EQUALS(o["arr2"].type(), Array);
}
};
-
- struct ArrayMacroTest{
- void run(){
+
+ struct ArrayMacroTest {
+ void run() {
BSONArray arr = BSON_ARRAY( "hello" << 1 << BSON( "foo" << BSON_ARRAY( "bar" << "baz" << "qux" ) ) );
BSONObj obj = BSON( "0" << "hello"
- << "1" << 1
- << "2" << BSON( "foo" << BSON_ARRAY( "bar" << "baz" << "qux" ) ) );
+ << "1" << 1
+ << "2" << BSON( "foo" << BSON_ARRAY( "bar" << "baz" << "qux" ) ) );
ASSERT_EQUALS(arr, obj);
ASSERT_EQUALS(arr["2"].type(), Object);
@@ -1430,25 +1430,25 @@ namespace JsobjTests {
class NumberParsing {
public:
- void run(){
+ void run() {
BSONObjBuilder a;
BSONObjBuilder b;
a.append( "a" , (int)1 );
ASSERT( b.appendAsNumber( "a" , "1" ) );
-
+
a.append( "b" , 1.1 );
ASSERT( b.appendAsNumber( "b" , "1.1" ) );
a.append( "c" , (int)-1 );
ASSERT( b.appendAsNumber( "c" , "-1" ) );
-
+
a.append( "d" , -1.1 );
ASSERT( b.appendAsNumber( "d" , "-1.1" ) );
a.append( "e" , (long long)32131231231232313LL );
ASSERT( b.appendAsNumber( "e" , "32131231231232313" ) );
-
+
ASSERT( ! b.appendAsNumber( "f" , "zz" ) );
ASSERT( ! b.appendAsNumber( "f" , "5zz" ) );
ASSERT( ! b.appendAsNumber( "f" , "zz5" ) );
@@ -1456,10 +1456,10 @@ namespace JsobjTests {
ASSERT_EQUALS( a.obj() , b.obj() );
}
};
-
+
class bson2settest {
public:
- void run(){
+ void run() {
BSONObj o = BSON( "z" << 1 << "a" << 2 << "m" << 3 << "c" << 4 );
BSONObjIteratorSorted i( o );
stringstream ss;
@@ -1469,7 +1469,7 @@ namespace JsobjTests {
{
Timer t;
- for ( int i=0; i<10000; i++ ){
+ for ( int i=0; i<10000; i++ ) {
BSONObjIteratorSorted j( o );
int l = 0;
while ( j.more() )
@@ -1484,22 +1484,22 @@ namespace JsobjTests {
class checkForStorageTests {
public:
-
- void good( string s ){
+
+ void good( string s ) {
BSONObj o = fromjson( s );
if ( o.okForStorage() )
return;
throw UserException( 12528 , (string)"should be ok for storage:" + s );
}
- void bad( string s ){
+ void bad( string s ) {
BSONObj o = fromjson( s );
if ( ! o.okForStorage() )
return;
throw UserException( 12529 , (string)"should NOT be ok for storage:" + s );
}
- void run(){
+ void run() {
good( "{x:1}" );
bad( "{'x.y':1}" );
@@ -1510,7 +1510,7 @@ namespace JsobjTests {
class InvalidIDFind {
public:
- void run(){
+ void run() {
BSONObj x = BSON( "_id" << 5 << "t" << 2 );
{
char * crap = (char*)malloc( x.objsize() );
@@ -1519,7 +1519,7 @@ namespace JsobjTests {
ASSERT_EQUALS( x , y );
free( crap );
}
-
+
{
char * crap = (char*)malloc( x.objsize() );
memcpy( crap , x.objdata() , x.objsize() );
@@ -1530,21 +1530,21 @@ namespace JsobjTests {
BSONObj y( crap , false );
state = 1;
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
state = 2;
ASSERT( strstr( e.what() , "_id: 5" ) > 0 );
}
free( crap );
ASSERT_EQUALS( 2 , state );
}
-
-
+
+
}
};
class ElementSetTest {
public:
- void run(){
+ void run() {
BSONObj x = BSON( "a" << 1 << "b" << 1 << "c" << 2 );
BSONElement a = x["a"];
BSONElement b = x["b"];
@@ -1552,7 +1552,7 @@ namespace JsobjTests {
cout << "c: " << c << endl;
ASSERT( a.woCompare( b ) != 0 );
ASSERT( a.woCompare( b , false ) == 0 );
-
+
BSONElementSet s;
s.insert( a );
ASSERT_EQUALS( 1U , s.size() );
@@ -1563,8 +1563,8 @@ namespace JsobjTests {
ASSERT( s.find( a ) != s.end() );
ASSERT( s.find( b ) != s.end() );
ASSERT( s.find( c ) == s.end() );
-
-
+
+
s.insert( c );
ASSERT_EQUALS( 2U , s.size() );
@@ -1581,7 +1581,7 @@ namespace JsobjTests {
BSONElementSet x;
BSONObj o = fromjson( "{ 'a' : [ 1 , 2 , 1 ] }" );
BSONObjIterator i( o["a"].embeddedObjectUserCheck() );
- while ( i.more() ){
+ while ( i.more() ) {
x.insert( i.next() );
}
ASSERT_EQUALS( 2U , x.size() );
@@ -1591,7 +1591,7 @@ namespace JsobjTests {
class EmbeddedNumbers {
public:
- void run(){
+ void run() {
BSONObj x = BSON( "a" << BSON( "b" << 1 ) );
BSONObj y = BSON( "a" << BSON( "b" << 1.0 ) );
ASSERT_EQUALS( x , y );
@@ -1601,12 +1601,12 @@ namespace JsobjTests {
class BuilderPartialItearte {
public:
- void run(){
+ void run() {
{
BSONObjBuilder b;
b.append( "x" , 1 );
b.append( "y" , 2 );
-
+
BSONObjIterator i = b.iterator();
ASSERT( i.more() );
ASSERT_EQUALS( 1 , i.next().numberInt() );
@@ -1627,13 +1627,13 @@ namespace JsobjTests {
ASSERT_EQUALS( BSON( "x" << 1 << "y" << 2 << "z" << 3 ) , b.obj() );
}
-
+
}
};
class BSONFieldTests {
public:
- void run(){
+ void run() {
{
BSONField<int> x("x");
BSONObj o = BSON( x << 5 );
@@ -1660,11 +1660,11 @@ namespace JsobjTests {
class BSONForEachTest {
public:
- void run(){
+ void run() {
BSONObj obj = BSON("a" << 1 << "a" << 2 << "a" << 3);
-
+
int count = 0;
- BSONForEach(e, obj){
+ BSONForEach(e, obj) {
ASSERT_EQUALS( e.fieldName() , string("a") );
count += e.Int();
}
@@ -1675,7 +1675,7 @@ namespace JsobjTests {
class StringDataTest {
public:
- void run(){
+ void run() {
StringData a( string( "aaa" ) );
ASSERT_EQUALS( 3u , a.size() );
@@ -1695,8 +1695,8 @@ namespace JsobjTests {
class CompareOps {
public:
- void run(){
-
+ void run() {
+
BSONObj a = BSON("a"<<1);
BSONObj b = BSON("a"<<1);
BSONObj c = BSON("a"<<2);
@@ -1707,7 +1707,7 @@ namespace JsobjTests {
ASSERT( ! ( a < b ) );
ASSERT( a <= b );
ASSERT( a < c );
-
+
ASSERT( f > d );
ASSERT( f >= e );
ASSERT( ! ( f > e ) );
@@ -1716,12 +1716,12 @@ namespace JsobjTests {
class HashingTest {
public:
- void run(){
+ void run() {
int N = 100000;
- BSONObj x = BSON( "name" << "eliot was here"
+ BSONObj x = BSON( "name" << "eliot was here"
<< "x" << 5
<< "asdasdasdas" << "asldkasldjasldjasldjlasjdlasjdlasdasdasdasdasdasdasd" );
-
+
{
Timer t;
for ( int i=0; i<N; i++ )
@@ -1729,7 +1729,7 @@ namespace JsobjTests {
int millis = t.millis();
cout << "md5 : " << millis << endl;
}
-
+
{
Timer t;
for ( int i=0; i<N; i++ )
@@ -1744,17 +1744,17 @@ namespace JsobjTests {
checksum( x.objdata() , x.objsize() );
int millis = t.millis();
cout << "checksum : " << millis << endl;
- }
-
+ }
+
}
};
-
+
class All : public Suite {
public:
- All() : Suite( "jsobj" ){
+ All() : Suite( "jsobj" ) {
}
- void setupTests(){
+ void setupTests() {
add< BufBuilderBasic >();
add< BSONElementBasic >();
add< BSONObjTests::Create >();
@@ -1859,6 +1859,6 @@ namespace JsobjTests {
add< HashingTest >();
}
} myall;
-
+
} // namespace JsobjTests
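
The jsobj tests above exercise the BSON/BSON_ARRAY builder macros and sorted field iteration. A minimal standalone sketch of the same two APIs, assuming the legacy mongo C++ driver headers (the include path is an assumption):

    #include <iostream>
    #include "bson/bson.h"
    using namespace mongo;

    int main() {
        // BSON builds an object; BSON_ARRAY builds an array keyed "0", "1", ...
        BSONObj obj = BSON( "greeting" << "hello" << "n" << 1
                            << "tags" << BSON_ARRAY( "bar" << "baz" ) );

        // BSONObjIteratorSorted visits fields in sorted field-name order,
        // which is what the bson2settest case above relies on
        BSONObjIteratorSorted it( obj );
        while ( it.more() )
            std::cout << it.next().fieldName() << std::endl;
        return 0;
    }
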
diff --git a/dbtests/jsontests.cpp b/dbtests/jsontests.cpp
index 990558e361c..b63052342ed 100644
--- a/dbtests/jsontests.cpp
+++ b/dbtests/jsontests.cpp
@@ -205,11 +205,11 @@ namespace JsonTests {
b.appendDBRef( "a", "namespace", oid );
BSONObj built = b.done();
ASSERT_EQUALS( "{ \"a\" : { \"$ref\" : \"namespace\", \"$id\" : \"ffffffffffffffffffffffff\" } }",
- built.jsonString( Strict ) );
+ built.jsonString( Strict ) );
ASSERT_EQUALS( "{ \"a\" : { \"$ref\" : \"namespace\", \"$id\" : \"ffffffffffffffffffffffff\" } }",
- built.jsonString( JS ) );
+ built.jsonString( JS ) );
ASSERT_EQUALS( "{ \"a\" : Dbref( \"namespace\", \"ffffffffffffffffffffffff\" ) }",
- built.jsonString( TenGen ) );
+ built.jsonString( TenGen ) );
}
};
@@ -221,7 +221,7 @@ namespace JsonTests {
BSONObjBuilder b;
b.appendDBRef( "a", "namespace", oid );
ASSERT_EQUALS( "{ \"a\" : { \"$ref\" : \"namespace\", \"$id\" : \"000000000000000000000000\" } }",
- b.done().jsonString( Strict ) );
+ b.done().jsonString( Strict ) );
}
};
@@ -234,9 +234,9 @@ namespace JsonTests {
b.appendOID( "a", &oid );
BSONObj built = b.done();
ASSERT_EQUALS( "{ \"a\" : { \"$oid\" : \"ffffffffffffffffffffffff\" } }",
- built.jsonString( Strict ) );
+ built.jsonString( Strict ) );
ASSERT_EQUALS( "{ \"a\" : ObjectId( \"ffffffffffffffffffffffff\" ) }",
- built.jsonString( TenGen ) );
+ built.jsonString( TenGen ) );
}
};
@@ -258,12 +258,12 @@ namespace JsonTests {
BSONObjBuilder c;
c.appendBinData( "a", 2, BinDataGeneral, z );
ASSERT_EQUALS( "{ \"a\" : { \"$binary\" : \"YWI=\", \"$type\" : \"00\" } }",
- c.done().jsonString( Strict ) );
+ c.done().jsonString( Strict ) );
BSONObjBuilder d;
d.appendBinData( "a", 1, BinDataGeneral, z );
ASSERT_EQUALS( "{ \"a\" : { \"$binary\" : \"YQ==\", \"$type\" : \"00\" } }",
- d.done().jsonString( Strict ) );
+ d.done().jsonString( Strict ) );
}
};
@@ -295,7 +295,7 @@ namespace JsonTests {
b.appendRegex( "a", "abc", "i" );
BSONObj built = b.done();
ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"abc\", \"$options\" : \"i\" } }",
- built.jsonString( Strict ) );
+ built.jsonString( Strict ) );
ASSERT_EQUALS( "{ \"a\" : /abc/i }", built.jsonString( TenGen ) );
ASSERT_EQUALS( "{ \"a\" : /abc/i }", built.jsonString( JS ) );
}
@@ -308,7 +308,7 @@ namespace JsonTests {
b.appendRegex( "a", "/\"", "i" );
BSONObj built = b.done();
ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"/\\\"\", \"$options\" : \"i\" } }",
- built.jsonString( Strict ) );
+ built.jsonString( Strict ) );
ASSERT_EQUALS( "{ \"a\" : /\\/\\\"/i }", built.jsonString( TenGen ) );
ASSERT_EQUALS( "{ \"a\" : /\\/\\\"/i }", built.jsonString( JS ) );
}
@@ -321,7 +321,7 @@ namespace JsonTests {
b.appendRegex( "a", "z", "abcgimx" );
BSONObj built = b.done();
ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"z\", \"$options\" : \"abcgimx\" } }",
- built.jsonString( Strict ) );
+ built.jsonString( Strict ) );
ASSERT_EQUALS( "{ \"a\" : /z/gim }", built.jsonString( TenGen ) );
ASSERT_EQUALS( "{ \"a\" : /z/gim }", built.jsonString( JS ) );
}
@@ -329,17 +329,17 @@ namespace JsonTests {
class CodeTests {
public:
- void run(){
+ void run() {
BSONObjBuilder b;
b.appendCode( "x" , "function(){ return 1; }" );
BSONObj o = b.obj();
ASSERT_EQUALS( "{ \"x\" : function(){ return 1; } }" , o.jsonString() );
}
};
-
+
class TimestampTests {
public:
- void run(){
+ void run() {
BSONObjBuilder b;
b.appendTimestamp( "x" , 4000 , 10 );
BSONObj o = b.obj();
@@ -349,7 +349,7 @@ namespace JsonTests {
class NullString {
public:
- void run(){
+ void run() {
BSONObjBuilder b;
b.append( "x" , "a\0b" , 4 );
BSONObj o = b.obj();
@@ -359,7 +359,7 @@ namespace JsonTests {
class AllTypes {
public:
- void run(){
+ void run() {
OID oid;
oid.init();
@@ -384,12 +384,12 @@ namespace JsonTests {
b.appendTimestamp( "s" , 123123123123123LL );
b.append( "t" , 12321312312LL );
b.appendMaxKey( "u" );
-
+
BSONObj o = b.obj();
cout << o.jsonString() << endl;
}
};
-
+
} // namespace JsonStringTests
namespace FromJsonTests {
@@ -504,7 +504,7 @@ namespace JsonTests {
virtual ~FancyNumber() {}
void run() {
ASSERT_EQUALS( int( 1000000 * bson().firstElement().number() ),
- int( 1000000 * fromjson( json() ).firstElement().number() ) );
+ int( 1000000 * fromjson( json() ).firstElement().number() ) );
}
virtual BSONObj bson() const {
BSONObjBuilder b;
@@ -978,8 +978,8 @@ namespace JsonTests {
};
class NumericTypes : public Base {
- public:
- void run(){
+ public:
+ void run() {
Base::run();
BSONObj o = fromjson(json());
@@ -990,12 +990,12 @@ namespace JsonTests {
ASSERT(o["long"].numberLong() == 9223372036854775807ll);
}
-
+
virtual BSONObj bson() const {
return BSON( "int" << 123
- << "long" << 9223372036854775807ll // 2**63 - 1
- << "double" << 3.14
- );
+ << "long" << 9223372036854775807ll // 2**63 - 1
+ << "double" << 3.14
+ );
}
virtual string json() const {
return "{ \"int\": 123, \"long\": 9223372036854775807, \"double\": 3.14 }";
@@ -1003,8 +1003,8 @@ namespace JsonTests {
};
class NegativeNumericTypes : public Base {
- public:
- void run(){
+ public:
+ void run() {
Base::run();
BSONObj o = fromjson(json());
@@ -1015,12 +1015,12 @@ namespace JsonTests {
ASSERT(o["long"].numberLong() == -9223372036854775807ll);
}
-
+
virtual BSONObj bson() const {
return BSON( "int" << -123
- << "long" << -9223372036854775807ll // -1 * (2**63 - 1)
- << "double" << -3.14
- );
+ << "long" << -9223372036854775807ll // -1 * (2**63 - 1)
+ << "double" << -3.14
+ );
}
virtual string json() const {
return "{ \"int\": -123, \"long\": -9223372036854775807, \"double\": -3.14 }";
@@ -1029,8 +1029,8 @@ namespace JsonTests {
class EmbeddedDatesBase : public Base {
public:
-
- virtual void run(){
+
+ virtual void run() {
BSONObj o = fromjson( json() );
ASSERT_EQUALS( 3 , (o["time.valid"].type()) );
BSONObj e = o["time.valid"].embeddedObjectUserCheck();
@@ -1038,7 +1038,7 @@ namespace JsonTests {
ASSERT_EQUALS( 9 , e["$lt"].type() );
Base::run();
}
-
+
BSONObj bson() const {
BSONObjBuilder e;
e.appendDate( "$gt" , 1257829200000LL );
@@ -1082,10 +1082,10 @@ namespace JsonTests {
class All : public Suite {
public:
- All() : Suite( "json" ){
+ All() : Suite( "json" ) {
}
- void setupTests(){
+ void setupTests() {
add< JsonStringTests::Empty >();
add< JsonStringTests::SingleStringMember >();
add< JsonStringTests::EscapedCharacters >();
@@ -1116,7 +1116,7 @@ namespace JsonTests {
add< JsonStringTests::TimestampTests >();
add< JsonStringTests::NullString >();
add< JsonStringTests::AllTypes >();
-
+
add< FromJsonTests::Empty >();
add< FromJsonTests::EmptyWithSpace >();
add< FromJsonTests::SingleString >();
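
The jsonString tests above boil down to one knob: the formatting mode changes only how special types are rendered. A small sketch of two modes plus the fromjson round trip (same driver headers assumed):

    OID oid;
    oid.init();
    BSONObjBuilder b;
    b.appendOID( "a", &oid );
    BSONObj o = b.done();

    cout << o.jsonString( Strict ) << endl;   // { "a" : { "$oid" : "..." } }
    cout << o.jsonString( TenGen ) << endl;   // { "a" : ObjectId( "..." ) }

    // the strict form parses back to an equivalent object
    BSONObj back = fromjson( o.jsonString( Strict ) );
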
diff --git a/dbtests/jstests.cpp b/dbtests/jstests.cpp
index d502b42aa76..c33b2005b38 100644
--- a/dbtests/jstests.cpp
+++ b/dbtests/jstests.cpp
@@ -1,4 +1,4 @@
-// javajstests.cpp
+// javajstests.cpp
//
/**
@@ -31,7 +31,7 @@ namespace mongo {
} // namespace mongo
namespace JSTests {
-
+
class Fundamental {
public:
void run() {
@@ -43,26 +43,26 @@ namespace JSTests {
globalScriptEngine->runTest();
}
};
-
+
class BasicScope {
public:
- void run(){
+ void run() {
auto_ptr<Scope> s;
s.reset( globalScriptEngine->newScope() );
s->setNumber( "x" , 5 );
ASSERT( 5 == s->getNumber( "x" ) );
-
+
s->setNumber( "x" , 1.67 );
ASSERT( 1.67 == s->getNumber( "x" ) );
s->setString( "s" , "eliot was here" );
ASSERT( "eliot was here" == s->getString( "s" ) );
-
+
s->setBoolean( "b" , true );
ASSERT( s->getBoolean( "b" ) );
- if ( 0 ){
+ if ( 0 ) {
s->setBoolean( "b" , false );
ASSERT( ! s->getBoolean( "b" ) );
}
@@ -71,12 +71,12 @@ namespace JSTests {
class ResetScope {
public:
- void run(){
+ void run() {
// Not worrying about this for now SERVER-446.
/*
auto_ptr<Scope> s;
s.reset( globalScriptEngine->newScope() );
-
+
s->setBoolean( "x" , true );
ASSERT( s->getBoolean( "x" ) );
@@ -85,36 +85,36 @@ namespace JSTests {
*/
}
};
-
+
class FalseTests {
public:
- void run(){
+ void run() {
Scope * s = globalScriptEngine->newScope();
ASSERT( ! s->getBoolean( "x" ) );
-
+
s->setString( "z" , "" );
ASSERT( ! s->getBoolean( "z" ) );
-
-
+
+
delete s ;
}
};
class SimpleFunctions {
public:
- void run(){
+ void run() {
Scope * s = globalScriptEngine->newScope();
s->invoke( "x=5;" , BSONObj() );
ASSERT( 5 == s->getNumber( "x" ) );
-
+
s->invoke( "return 17;" , BSONObj() );
ASSERT( 17 == s->getNumber( "return" ) );
-
+
s->invoke( "function(){ return 17; }" , BSONObj() );
ASSERT( 17 == s->getNumber( "return" ) );
-
+
s->setNumber( "x" , 1.76 );
s->invoke( "return x == 1.76; " , BSONObj() );
ASSERT( s->getBoolean( "return" ) );
@@ -122,7 +122,7 @@ namespace JSTests {
s->setNumber( "x" , 1.76 );
s->invoke( "return x == 1.79; " , BSONObj() );
ASSERT( ! s->getBoolean( "return" ) );
-
+
s->invoke( "function( z ){ return 5 + z; }" , BSON( "" << 11 ) );
ASSERT_EQUALS( 16 , s->getNumber( "return" ) );
@@ -132,9 +132,9 @@ namespace JSTests {
class ObjectMapping {
public:
- void run(){
+ void run() {
Scope * s = globalScriptEngine->newScope();
-
+
BSONObj o = BSON( "x" << 17 << "y" << "eliot" << "z" << "sara" );
s->setObject( "blah" , o );
@@ -155,7 +155,7 @@ namespace JSTests {
s->invoke( "this.z == 'asara';" , BSONObj() );
ASSERT_EQUALS( false , s->getBoolean( "return" ) );
-
+
s->invoke( "return this.x == 17;" , BSONObj() );
ASSERT_EQUALS( true , s->getBoolean( "return" ) );
@@ -170,28 +170,28 @@ namespace JSTests {
s->invoke( "function (){ return this.x == 17; }" , BSONObj() );
ASSERT_EQUALS( true , s->getBoolean( "return" ) );
-
+
s->invoke( "function z(){ return this.x == 18; }" , BSONObj() );
ASSERT_EQUALS( false , s->getBoolean( "return" ) );
s->invoke( "function (){ this.x == 17; }" , BSONObj() );
ASSERT_EQUALS( false , s->getBoolean( "return" ) );
-
+
s->invoke( "function z(){ this.x == 18; }" , BSONObj() );
ASSERT_EQUALS( false , s->getBoolean( "return" ) );
s->invoke( "x = 5; for( ; x <10; x++){ a = 1; }" , BSONObj() );
ASSERT_EQUALS( 10 , s->getNumber( "x" ) );
-
+
delete s;
}
};
class ObjectDecoding {
public:
- void run(){
+ void run() {
Scope * s = globalScriptEngine->newScope();
-
+
s->invoke( "z = { num : 1 };" , BSONObj() );
BSONObj out = s->getObject( "z" );
ASSERT_EQUALS( 1 , out["num"].number() );
@@ -201,43 +201,43 @@ namespace JSTests {
out = s->getObject( "z" );
ASSERT_EQUALS( (string)"eliot" , out["x"].valuestr() );
ASSERT_EQUALS( 1 , out.nFields() );
-
+
BSONObj o = BSON( "x" << 17 );
- s->setObject( "blah" , o );
+ s->setObject( "blah" , o );
out = s->getObject( "blah" );
ASSERT_EQUALS( 17 , out["x"].number() );
-
+
delete s;
}
};
-
+
class JSOIDTests {
public:
- void run(){
+ void run() {
#ifdef MOZJS
Scope * s = globalScriptEngine->newScope();
-
+
s->localConnect( "blah" );
-
+
s->invoke( "z = { _id : new ObjectId() , a : 123 };" , BSONObj() );
BSONObj out = s->getObject( "z" );
ASSERT_EQUALS( 123 , out["a"].number() );
ASSERT_EQUALS( jstOID , out["_id"].type() );
-
+
OID save = out["_id"].__oid();
-
+
s->setObject( "a" , out );
-
- s->invoke( "y = { _id : a._id , a : 124 };" , BSONObj() );
+
+ s->invoke( "y = { _id : a._id , a : 124 };" , BSONObj() );
out = s->getObject( "y" );
ASSERT_EQUALS( 124 , out["a"].number() );
- ASSERT_EQUALS( jstOID , out["_id"].type() );
+ ASSERT_EQUALS( jstOID , out["_id"].type() );
ASSERT_EQUALS( out["_id"].__oid().str() , save.str() );
- s->invoke( "y = { _id : new ObjectId( a._id ) , a : 125 };" , BSONObj() );
+ s->invoke( "y = { _id : new ObjectId( a._id ) , a : 125 };" , BSONObj() );
out = s->getObject( "y" );
ASSERT_EQUALS( 125 , out["a"].number() );
- ASSERT_EQUALS( jstOID , out["_id"].type() );
+ ASSERT_EQUALS( jstOID , out["_id"].type() );
ASSERT_EQUALS( out["_id"].__oid().str() , save.str() );
delete s;
@@ -268,9 +268,9 @@ namespace JSTests {
class ObjectModReadonlyTests {
public:
- void run(){
+ void run() {
Scope * s = globalScriptEngine->newScope();
-
+
BSONObj o = BSON( "x" << 17 << "y" << "eliot" << "z" << "sara" << "zz" << BSONObj() );
s->setObject( "blah" , o , true );
@@ -289,16 +289,16 @@ namespace JSTests {
s->setObject( "blah.zz", BSON( "a" << 19 ) );
out = s->getObject( "blah" );
ASSERT( out["zz"].embeddedObject()["a"].eoo() );
-
+
s->invoke( "delete blah['x']" , BSONObj() );
out = s->getObject( "blah" );
ASSERT( !out["x"].eoo() );
-
+
// read-only object itself can be overwritten
s->invoke( "blah = {}", BSONObj() );
out = s->getObject( "blah" );
ASSERT( out.isEmpty() );
-
+
// test array - can't implement this in v8
// o = fromjson( "{a:[1,2,3]}" );
// s->setObject( "blah", o, true );
@@ -308,45 +308,47 @@ namespace JSTests {
// out = s->getObject( "blah" );
// ASSERT_EQUALS( 1.0, out[ "a" ].embeddedObject()[ 0 ].number() );
// ASSERT_EQUALS( 3.0, out[ "a" ].embeddedObject()[ 2 ].number() );
-
+
delete s;
}
};
class OtherJSTypes {
public:
- void run(){
+ void run() {
Scope * s = globalScriptEngine->newScope();
-
- { // date
+
+ {
+ // date
BSONObj o;
- {
+ {
BSONObjBuilder b;
b.appendDate( "d" , 123456789 );
o = b.obj();
}
s->setObject( "x" , o );
-
+
s->invoke( "return x.d.getTime() != 12;" , BSONObj() );
ASSERT_EQUALS( true, s->getBoolean( "return" ) );
-
+
s->invoke( "z = x.d.getTime();" , BSONObj() );
ASSERT_EQUALS( 123456789 , s->getNumber( "z" ) );
-
+
s->invoke( "z = { z : x.d }" , BSONObj() );
BSONObj out = s->getObject( "z" );
ASSERT( out["z"].type() == Date );
}
- { // regex
+ {
+ // regex
BSONObj o;
- {
+ {
BSONObjBuilder b;
b.appendRegex( "r" , "^a" , "i" );
o = b.obj();
}
s->setObject( "x" , o );
-
+
s->invoke( "z = x.r.test( 'b' );" , BSONObj() );
ASSERT_EQUALS( false , s->getBoolean( "z" ) );
@@ -363,26 +365,26 @@ namespace JSTests {
ASSERT_EQUALS( (string)"i" , out["a"].regexFlags() );
}
-
+
// array
{
BSONObj o = fromjson( "{r:[1,2,3]}" );
- s->setObject( "x", o, false );
+ s->setObject( "x", o, false );
BSONObj out = s->getObject( "x" );
ASSERT_EQUALS( Array, out.firstElement().type() );
- s->setObject( "x", o, true );
+ s->setObject( "x", o, true );
out = s->getObject( "x" );
ASSERT_EQUALS( Array, out.firstElement().type() );
}
-
+
delete s;
}
};
class SpecialDBTypes {
public:
- void run(){
+ void run() {
Scope * s = globalScriptEngine->newScope();
BSONObjBuilder b;
@@ -390,7 +392,7 @@ namespace JSTests {
b.appendMinKey( "b" );
b.appendMaxKey( "c" );
b.appendTimestamp( "d" , 1234000 , 9876 );
-
+
{
BSONObj t = b.done();
@@ -399,7 +401,7 @@ namespace JSTests {
}
s->setObject( "z" , b.obj() );
-
+
ASSERT( s->invoke( "y = { a : z.a , b : z.b , c : z.c , d: z.d }" , BSONObj() ) == 0 );
BSONObj out = s->getObject( "y" );
@@ -415,14 +417,14 @@ namespace JSTests {
delete s;
}
};
-
+
class TypeConservation {
public:
- void run(){
+ void run() {
Scope * s = globalScriptEngine->newScope();
-
+
// -- A --
-
+
BSONObj o;
{
BSONObjBuilder b ;
@@ -432,7 +434,7 @@ namespace JSTests {
}
ASSERT_EQUALS( NumberInt , o["a"].type() );
ASSERT_EQUALS( NumberDouble , o["b"].type() );
-
+
s->setObject( "z" , o );
s->invoke( "return z" , BSONObj() );
BSONObj out = s->getObject( "return" );
@@ -443,7 +445,7 @@ namespace JSTests {
ASSERT_EQUALS( NumberInt , out["a"].type() );
// -- B --
-
+
{
BSONObjBuilder b ;
b.append( "a" , (int)5 );
@@ -460,31 +462,31 @@ namespace JSTests {
ASSERT_EQUALS( NumberDouble , out["b"].type() );
ASSERT_EQUALS( NumberInt , out["a"].type() );
-
+
// -- C --
-
+
{
BSONObjBuilder b ;
-
+
{
BSONObjBuilder c;
c.append( "0" , 5.5 );
c.append( "1" , 6 );
b.appendArray( "a" , c.obj() );
}
-
+
o = b.obj();
}
-
+
ASSERT_EQUALS( NumberDouble , o["a"].embeddedObjectUserCheck()["0"].type() );
ASSERT_EQUALS( NumberInt , o["a"].embeddedObjectUserCheck()["1"].type() );
-
+
s->setObject( "z" , o , false );
out = s->getObject( "z" );
ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["0"].type() );
ASSERT_EQUALS( NumberInt , out["a"].embeddedObjectUserCheck()["1"].type() );
-
+
s->invokeSafe( "z.z = 5;" , BSONObj() );
out = s->getObject( "z" );
ASSERT_EQUALS( 5 , out["z"].number() );
@@ -494,9 +496,9 @@ namespace JSTests {
// Eliot says I don't have to worry about this case
-
+
// // -- D --
-//
+//
// o = fromjson( "{a:3.0,b:4.5}" );
// ASSERT_EQUALS( NumberDouble , o["a"].type() );
// ASSERT_EQUALS( NumberDouble , o["b"].type() );
@@ -506,16 +508,16 @@ namespace JSTests {
// out = s->getObject( "return" );
// ASSERT_EQUALS( 3 , out["a"].number() );
// ASSERT_EQUALS( 4.5 , out["b"].number() );
-//
+//
// ASSERT_EQUALS( NumberDouble , out["b"].type() );
// ASSERT_EQUALS( NumberDouble , out["a"].type() );
-//
-
+//
+
delete s;
}
-
+
};
-
+
class NumberLong {
public:
void run() {
@@ -528,7 +530,7 @@ namespace JSTests {
s->setObject( "a", in );
BSONObj out = s->getObject( "a" );
ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
-
+
ASSERT( s->exec( "printjson( a ); b = {b:a.a}", "foo", false, true, false ) );
out = s->getObject( "b" );
ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
@@ -538,7 +540,7 @@ namespace JSTests {
cout << out.toString() << endl;
ASSERT_EQUALS( val, out.firstElement().numberLong() );
}
-
+
ASSERT( s->exec( "c = {c:a.a.toString()}", "foo", false, true, false ) );
out = s->getObject( "c" );
stringstream ss;
@@ -553,12 +555,12 @@ namespace JSTests {
ASSERT( s->exec( "e = {e:a.a.floatApprox}", "foo", false, true, false ) );
out = s->getObject( "e" );
ASSERT_EQUALS( NumberDouble, out.firstElement().type() );
- ASSERT_EQUALS( double( val ), out.firstElement().number() );
+ ASSERT_EQUALS( double( val ), out.firstElement().number() );
ASSERT( s->exec( "f = {f:a.a.top}", "foo", false, true, false ) );
out = s->getObject( "f" );
ASSERT( NumberDouble == out.firstElement().type() || NumberInt == out.firstElement().type() );
-
+
s->setObject( "z", BSON( "z" << (long long)( 4 ) ) );
ASSERT( s->exec( "y = {y:z.z.top}", "foo", false, true, false ) );
out = s->getObject( "y" );
@@ -567,13 +569,13 @@ namespace JSTests {
ASSERT( s->exec( "x = {x:z.z.floatApprox}", "foo", false, true, false ) );
out = s->getObject( "x" );
ASSERT( NumberDouble == out.firstElement().type() || NumberInt == out.firstElement().type() );
- ASSERT_EQUALS( double( 4 ), out.firstElement().number() );
+ ASSERT_EQUALS( double( 4 ), out.firstElement().number() );
ASSERT( s->exec( "w = {w:z.z}", "foo", false, true, false ) );
out = s->getObject( "w" );
ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
- ASSERT_EQUALS( 4, out.firstElement().numberLong() );
-
+ ASSERT_EQUALS( 4, out.firstElement().numberLong() );
+
}
};
@@ -582,7 +584,7 @@ namespace JSTests {
void run() {
auto_ptr<Scope> s( globalScriptEngine->newScope() );
s->localConnect( "blah" );
-
+
BSONObj in;
{
BSONObjBuilder b;
@@ -595,7 +597,7 @@ namespace JSTests {
in = b.obj();
}
s->setObject( "a" , in );
-
+
ASSERT( s->exec( "x = tojson( a ); " ,"foo" , false , true , false ) );
string outString = s->getString( "x" );
@@ -604,27 +606,27 @@ namespace JSTests {
ASSERT_EQUALS( in , out );
}
};
-
+
class WeirdObjects {
public:
- BSONObj build( int depth ){
+ BSONObj build( int depth ) {
BSONObjBuilder b;
b.append( "0" , depth );
if ( depth > 0 )
b.appendArray( "1" , build( depth - 1 ) );
return b.obj();
}
-
- void run(){
+
+ void run() {
Scope * s = globalScriptEngine->newScope();
s->localConnect( "blah" );
-
- for ( int i=5; i<100 ; i += 10 ){
+
+ for ( int i=5; i<100 ; i += 10 ) {
s->setObject( "a" , build(i) , false );
s->invokeSafe( "tojson( a )" , BSONObj() );
-
+
s->setObject( "a" , build(5) , true );
s->invokeSafe( "tojson( a )" , BSONObj() );
}
@@ -643,7 +645,7 @@ namespace JSTests {
}
DBDirectClient client;
-
+
class Utf8Check {
public:
Utf8Check() { reset(); }
@@ -668,7 +670,7 @@ namespace JSTests {
}
void reset() {
client.dropCollection( ns() );
- }
+ }
static const char *ns() { return "unittest.jstests.utf8check"; }
};
@@ -684,13 +686,13 @@ namespace JSTests {
private:
void reset() {
client.dropCollection( ns() );
- }
+ }
static const char *ns() { return "unittest.jstests.longutf8string"; }
};
class InvalidUTF8Check {
public:
- void run(){
+ void run() {
if( !globalScriptEngine->utf8Ok() )
return;
@@ -706,24 +708,24 @@ namespace JSTests {
crap[2] = (char) 128;
crap[3] = 17;
crap[4] = 0;
-
+
BSONObjBuilder bb;
bb.append( "x" , crap );
b = bb.obj();
}
-
+
//cout << "ELIOT: " << b.jsonString() << endl;
s->setThis( &b );
// its ok if this is handled by js, just can't create a c++ exception
- s->invoke( "x=this.x.length;" , BSONObj() );
+ s->invoke( "x=this.x.length;" , BSONObj() );
}
};
-
+
class CodeTests {
public:
- void run(){
+ void run() {
Scope * s = globalScriptEngine->newScope();
-
+
{
BSONObjBuilder b;
b.append( "a" , 1 );
@@ -732,10 +734,10 @@ namespace JSTests {
b.appendCodeWScope( "d" , "function(){ out.d = 13 + bleh; }" , BSON( "bleh" << 5 ) );
s->setObject( "foo" , b.obj() );
}
-
+
s->invokeSafe( "out = {}; out.a = foo.a; foo.b(); foo.c();" , BSONObj() );
BSONObj out = s->getObject( "out" );
-
+
ASSERT_EQUALS( 1 , out["a"].number() );
ASSERT_EQUALS( 11 , out["b"].number() );
ASSERT_EQUALS( 12 , out["c"].number() );
@@ -744,7 +746,7 @@ namespace JSTests {
//s->invokeSafe( "foo.d() " , BSONObj() );
//out = s->getObject( "out" );
//ASSERT_EQUALS( 18 , out["d"].number() );
-
+
delete s;
}
@@ -752,19 +754,19 @@ namespace JSTests {
class DBRefTest {
public:
- DBRefTest(){
+ DBRefTest() {
_a = "unittest.dbref.a";
_b = "unittest.dbref.b";
reset();
}
- ~DBRefTest(){
+ ~DBRefTest() {
//reset();
}
-
- void run(){
+
+ void run() {
client.insert( _a , BSON( "a" << "17" ) );
-
+
{
BSONObj fromA = client.findOne( _a , BSONObj() );
assert( fromA.valid() );
@@ -774,28 +776,28 @@ namespace JSTests {
b.appendDBRef( "c" , "dbref.a" , fromA["_id"].__oid() );
client.insert( _b , b.obj() );
}
-
+
ASSERT( client.eval( "unittest" , "x = db.dbref.b.findOne(); assert.eq( 17 , x.c.fetch().a , 'ref working' );" ) );
-
+
// BSON DBRef <=> JS DBPointer
ASSERT( client.eval( "unittest", "x = db.dbref.b.findOne(); db.dbref.b.drop(); x.c = new DBPointer( x.c.ns, x.c.id ); db.dbref.b.insert( x );" ) );
ASSERT_EQUALS( DBRef, client.findOne( "unittest.dbref.b", "" )[ "c" ].type() );
-
+
// BSON Object <=> JS DBRef
ASSERT( client.eval( "unittest", "x = db.dbref.b.findOne(); db.dbref.b.drop(); x.c = new DBRef( x.c.ns, x.c.id ); db.dbref.b.insert( x );" ) );
ASSERT_EQUALS( Object, client.findOne( "unittest.dbref.b", "" )[ "c" ].type() );
ASSERT_EQUALS( string( "dbref.a" ), client.findOne( "unittest.dbref.b", "" )[ "c" ].embeddedObject().getStringField( "$ref" ) );
}
-
- void reset(){
+
+ void reset() {
client.dropCollection( _a );
client.dropCollection( _b );
}
-
+
const char * _a;
const char * _b;
};
-
+
class InformalDBRef {
public:
void run() {
@@ -805,20 +807,20 @@ namespace JSTests {
client.insert( ns(), BSON( "r" << BSON( "$ref" << "jstests.informaldbref" << "$id" << obj["_id"].__oid() << "foo" << "bar" ) ) );
obj = client.findOne( ns(), BSONObj() );
ASSERT_EQUALS( "bar", obj[ "r" ].embeddedObject()[ "foo" ].str() );
-
+
ASSERT( client.eval( "unittest", "x = db.jstests.informaldbref.findOne(); y = { r:x.r }; db.jstests.informaldbref.drop(); y.r[ \"a\" ] = \"b\"; db.jstests.informaldbref.save( y );" ) );
obj = client.findOne( ns(), BSONObj() );
- ASSERT_EQUALS( "bar", obj[ "r" ].embeddedObject()[ "foo" ].str() );
- ASSERT_EQUALS( "b", obj[ "r" ].embeddedObject()[ "a" ].str() );
+ ASSERT_EQUALS( "bar", obj[ "r" ].embeddedObject()[ "foo" ].str() );
+ ASSERT_EQUALS( "b", obj[ "r" ].embeddedObject()[ "a" ].str() );
}
private:
static const char *ns() { return "unittest.jstests.informaldbref"; }
};
-
+
class BinDataType {
public:
-
- void pp( const char * s , BSONElement e ){
+
+ void pp( const char * s , BSONElement e ) {
int len;
const char * data = e.binData( len );
cout << s << ":" << e.binDataType() << "\t" << len << endl;
@@ -828,12 +830,12 @@ namespace JSTests {
cout << endl;
}
- void run(){
+ void run() {
Scope * s = globalScriptEngine->newScope();
s->localConnect( "asd" );
const char * foo = "asdas\0asdasd";
const char * base64 = "YXNkYXMAYXNkYXNk";
-
+
BSONObj in;
{
BSONObjBuilder b;
@@ -842,10 +844,10 @@ namespace JSTests {
in = b.obj();
s->setObject( "x" , in );
}
-
+
s->invokeSafe( "myb = x.b; print( myb ); printjson( myb );" , BSONObj() );
s->invokeSafe( "y = { c : myb };" , BSONObj() );
-
+
BSONObj out = s->getObject( "y" );
ASSERT_EQUALS( BinData , out["c"].type() );
// pp( "in " , in["b"] );
@@ -857,14 +859,14 @@ namespace JSTests {
stringstream expected;
expected << "BinData(" << BinDataGeneral << ",\"" << base64 << "\")";
ASSERT_EQUALS( expected.str(), s->getString( "q" ) );
-
+
stringstream scriptBuilder;
scriptBuilder << "z = { c : new BinData( " << BinDataGeneral << ", \"" << base64 << "\" ) };";
string script = scriptBuilder.str();
s->invokeSafe( script.c_str(), BSONObj() );
out = s->getObject( "z" );
// pp( "out" , out["c"] );
- ASSERT_EQUALS( 0 , in["b"].woCompare( out["c"] , false ) );
+ ASSERT_EQUALS( 0 , in["b"].woCompare( out["c"] , false ) );
s->invokeSafe( "a = { f: new BinData( 128, \"\" ) };", BSONObj() );
out = s->getObject( "a" );
@@ -872,16 +874,16 @@ namespace JSTests {
out[ "f" ].binData( len );
ASSERT_EQUALS( 0, len );
ASSERT_EQUALS( 128, out[ "f" ].binDataType() );
-
+
delete s;
}
};
class VarTests {
public:
- void run(){
+ void run() {
Scope * s = globalScriptEngine->newScope();
-
+
ASSERT( s->exec( "a = 5;" , "a" , false , true , false ) );
ASSERT_EQUALS( 5 , s->getNumber("a" ) );
@@ -893,19 +895,19 @@ namespace JSTests {
class Speed1 {
public:
- void run(){
+ void run() {
BSONObj start = BSON( "x" << 5 );
BSONObj empty;
auto_ptr<Scope> s;
s.reset( globalScriptEngine->newScope() );
-
+
ScriptingFunction f = s->createFunction( "return this.x + 6;" );
s->setThis( &start );
-
+
Timer t;
double n = 0;
- for ( ; n < 100000; n++ ){
+ for ( ; n < 100000; n++ ) {
s->invoke( f , empty );
ASSERT_EQUALS( 11 , s->getNumber( "return" ) );
}
@@ -915,10 +917,10 @@ namespace JSTests {
class ScopeOut {
public:
- void run(){
+ void run() {
auto_ptr<Scope> s;
s.reset( globalScriptEngine->newScope() );
-
+
s->invokeSafe( "x = 5;" , BSONObj() );
{
BSONObjBuilder b;
@@ -942,14 +944,14 @@ namespace JSTests {
class RenameTest {
public:
- void run(){
+ void run() {
auto_ptr<Scope> s;
s.reset( globalScriptEngine->newScope() );
-
+
s->setNumber( "x" , 5 );
ASSERT_EQUALS( 5 , s->getNumber( "x" ) );
ASSERT_EQUALS( Undefined , s->type( "y" ) );
-
+
s->rename( "x" , "y" );
ASSERT_EQUALS( 5 , s->getNumber( "y" ) );
ASSERT_EQUALS( Undefined , s->type( "x" ) );
@@ -959,20 +961,20 @@ namespace JSTests {
ASSERT_EQUALS( Undefined , s->type( "y" ) );
}
};
-
+
class All : public Suite {
public:
All() : Suite( "js" ) {
}
-
- void setupTests(){
+
+ void setupTests() {
add< Fundamental >();
add< BasicScope >();
add< ResetScope >();
add< FalseTests >();
add< SimpleFunctions >();
-
+
add< ObjectMapping >();
add< ObjectDecoding >();
add< JSOIDTests >();
@@ -990,9 +992,9 @@ namespace JSTests {
add< DBRefTest >();
add< InformalDBRef >();
add< BinDataType >();
-
+
add< VarTests >();
-
+
add< Speed1 >();
add< InvalidUTF8Check >();
@@ -1002,6 +1004,6 @@ namespace JSTests {
add< ScopeOut >();
}
} myall;
-
+
} // namespace JavaJSTests
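
All of the Scope tests above share one round-trip pattern: push values into a script scope, invoke a snippet, then read results back out. Condensed, with globalScriptEngine set up as in the suite:

    Scope * s = globalScriptEngine->newScope();

    s->setNumber( "x" , 5 );
    s->invoke( "return x + 12;" , BSONObj() );
    ASSERT( 17 == s->getNumber( "return" ) );   // return values land under "return"

    s->invoke( "z = { num : x };" , BSONObj() );
    BSONObj out = s->getObject( "z" );          // decode a JS object back to BSON
    ASSERT_EQUALS( 5 , out["num"].number() );

    delete s;
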
diff --git a/dbtests/matchertests.cpp b/dbtests/matchertests.cpp
index e418343712b..380b8b802d4 100644
--- a/dbtests/matchertests.cpp
+++ b/dbtests/matchertests.cpp
@@ -37,26 +37,26 @@ namespace MatcherTests {
ASSERT( m.matches( fromjson( "{\"a\":\"b\"}" ) ) );
}
};
-
+
class DoubleEqual {
public:
void run() {
BSONObj query = fromjson( "{\"a\":5}" );
Matcher m( query );
- ASSERT( m.matches( fromjson( "{\"a\":5}" ) ) );
+ ASSERT( m.matches( fromjson( "{\"a\":5}" ) ) );
}
};
-
+
class MixedNumericEqual {
public:
void run() {
BSONObjBuilder query;
query.append( "a", 5 );
Matcher m( query.done() );
- ASSERT( m.matches( fromjson( "{\"a\":5}" ) ) );
- }
+ ASSERT( m.matches( fromjson( "{\"a\":5}" ) ) );
+ }
};
-
+
class MixedNumericGt {
public:
void run() {
@@ -65,16 +65,16 @@ namespace MatcherTests {
BSONObjBuilder b;
b.append( "a", 5 );
ASSERT( m.matches( b.done() ) );
- }
+ }
};
-
+
class MixedNumericIN {
public:
- void run(){
+ void run() {
BSONObj query = fromjson( "{ a : { $in : [4,6] } }" );
ASSERT_EQUALS( 4 , query["a"].embeddedObject()["$in"].embeddedObject()["0"].number() );
ASSERT_EQUALS( NumberInt , query["a"].embeddedObject()["$in"].embeddedObject()["0"].type() );
-
+
Matcher m( query );
{
@@ -95,19 +95,19 @@ namespace MatcherTests {
b.append( "a" , 4 );
ASSERT( m.matches( b.done() ) );
}
-
+
}
};
class MixedNumericEmbedded {
public:
- void run(){
+ void run() {
Matcher m( BSON( "a" << BSON( "x" << 1 ) ) );
ASSERT( m.matches( BSON( "a" << BSON( "x" << 1 ) ) ) );
ASSERT( m.matches( BSON( "a" << BSON( "x" << 1.0 ) ) ) );
}
};
-
+
class Size {
public:
void run() {
@@ -116,16 +116,16 @@ namespace MatcherTests {
ASSERT( !m.matches( fromjson( "{a:[1,2,3]}" ) ) );
ASSERT( !m.matches( fromjson( "{a:[1,2,3,'a','b']}" ) ) );
ASSERT( !m.matches( fromjson( "{a:[[1,2,3,4]]}" ) ) );
- }
+ }
};
-
-
+
+
class TimingBase {
public:
- long time( const BSONObj& patt , const BSONObj& obj ){
+ long time( const BSONObj& patt , const BSONObj& obj ) {
Matcher m( patt );
Timer t;
- for ( int i=0; i<10000; i++ ){
+ for ( int i=0; i<10000; i++ ) {
ASSERT( m.matches( obj ) );
}
return t.millis();
@@ -134,20 +134,20 @@ namespace MatcherTests {
class AllTiming : public TimingBase {
public:
- void run(){
+ void run() {
long normal = time( BSON( "x" << 5 ) , BSON( "x" << 5 ) );
long all = time( BSON( "x" << BSON( "$all" << BSON_ARRAY( 5 ) ) ) , BSON( "x" << 5 ) );
-
+
cout << "normal: " << normal << " all: " << all << endl;
}
};
class All : public Suite {
public:
- All() : Suite( "matcher" ){
+ All() : Suite( "matcher" ) {
}
-
- void setupTests(){
+
+ void setupTests() {
add< Basic >();
add< DoubleEqual >();
add< MixedNumericEqual >();
@@ -158,6 +158,6 @@ namespace MatcherTests {
add< AllTiming >();
}
} dball;
-
+
} // namespace MatcherTests
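
Every matcher case above reduces to the same two-step API: build a Matcher from a query pattern, then test candidate documents against it. In isolation:

    Matcher m( fromjson( "{ a : { $gt : 4 } }" ) );
    ASSERT( m.matches( BSON( "a" << 5 ) ) );
    ASSERT( ! m.matches( BSON( "a" << 3 ) ) );

    // numeric comparison is by value, not by BSON type, per MixedNumericEmbedded
    Matcher eq( BSON( "a" << 5 ) );            // int in the pattern
    ASSERT( eq.matches( BSON( "a" << 5.0 ) ) ); // double in the document still matches
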
diff --git a/dbtests/mmaptests.cpp b/dbtests/mmaptests.cpp
index e83bf45d599..2773987d276 100755..100644
--- a/dbtests/mmaptests.cpp
+++ b/dbtests/mmaptests.cpp
@@ -26,15 +26,17 @@ namespace MMapTests {
class LeakTest {
const string fn;
public:
- LeakTest() :
- fn( (path(dbpath) / "testfile.map").string() )
+ LeakTest() :
+ fn( (path(dbpath) / "testfile.map").string() )
{ }
- ~LeakTest() {
- try { boost::filesystem::remove(fn); } catch(...) { }
+ ~LeakTest() {
+ try { boost::filesystem::remove(fn); }
+ catch(...) { }
}
void run() {
- try { boost::filesystem::remove(fn); } catch(...) { }
+ try { boost::filesystem::remove(fn); }
+ catch(...) { }
writelock lk;
@@ -48,7 +50,7 @@ namespace MMapTests {
// write something to the private view as a test
strcpy(p, "hello");
}
- if( cmdLine.dur ) {
+ if( cmdLine.dur ) {
char *w = (char *) f.view_write();
strcpy(w + 6, "world");
}
@@ -77,24 +79,24 @@ namespace MMapTests {
assert(p);
strcpy(p, "zzz");
}
- if( cmdLine.dur ) {
+ if( cmdLine.dur ) {
char *w = (char *) f.view_write();
if( i % 2 == 0 )
++(*w);
assert( w[6] == 'w' );
}
}
- if( t.millis() > 10000 ) {
+ if( t.millis() > 10000 ) {
log() << "warning: MMap LeakTest is unusually slow N:" << N << ' ' << t.millis() << "ms" << endl;
}
}
};
-
+
class All : public Suite {
public:
- All() : Suite( "mmap" ){}
- void setupTests(){
+ All() : Suite( "mmap" ) {}
+ void setupTests() {
add< LeakTest >();
}
} myall;
@@ -139,10 +141,10 @@ namespace MMapTests {
cout << "view unview: " << t.millis() << "ms" << endl;
}
- f.flush(true);
+ f.flush(true);
/* plain old mmaped writes */
- {
+ {
Timer t;
for( int i = 0; i < 10; i++ ) {
memset(p+100, 'c', 200 * 1024 * 1024);
@@ -150,10 +152,10 @@ namespace MMapTests {
cout << "traditional writes: " << t.millis() << "ms" << endl;
}
- f.flush(true);
+ f.flush(true);
/* test doing some writes */
- {
+ {
Timer t;
char *q = (char *) f.testGetCopyOnWriteView();
for( int i = 0; i < 10; i++ ) {
@@ -166,7 +168,7 @@ namespace MMapTests {
}
/* test doing some writes */
- {
+ {
Timer t;
for( int i = 0; i < 10; i++ ) {
char *q = (char *) f.testGetCopyOnWriteView();
@@ -179,7 +181,7 @@ namespace MMapTests {
}
/* more granular */
- {
+ {
Timer t;
for( int i = 0; i < 100; i++ ) {
char *q = (char *) f.testGetCopyOnWriteView();
@@ -189,17 +191,17 @@ namespace MMapTests {
}
cout << "more granular some writes: " << t.millis() << "ms" << endl;
- }
+ }
- p[10] = 0;
- cout << p << endl;
+ p[10] = 0;
+ cout << p << endl;
}
};
-
+
class All : public Suite {
public:
- All() : Suite( "mmap" ){}
- void setupTests(){
+ All() : Suite( "mmap" ) {}
+ void setupTests() {
add< CopyOnWriteSpeedTest >();
}
} myall;
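
LeakTest above writes through a private, copy-on-write view and, when durability is enabled, through a shared write view as well. A POSIX-level sketch of that distinction, not the MongoMMF API itself (file name and length are arbitrary):

    #include <sys/mman.h>
    #include <sys/types.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <cstring>

    int main() {
        int fd = open( "testfile.map", O_RDWR | O_CREAT, 0644 );
        ftruncate( fd, 4096 );

        // MAP_PRIVATE: copy-on-write view; these stores never reach the file
        char *p = (char*) mmap( 0, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0 );
        strcpy( p, "hello" );

        // MAP_SHARED: the write view; these stores hit the file once flushed
        char *w = (char*) mmap( 0, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 );
        strcpy( w + 6, "world" );
        msync( w, 4096, MS_SYNC );

        munmap( p, 4096 );
        munmap( w, 4096 );
        close( fd );
        return 0;
    }
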
diff --git a/dbtests/mockdbclient.h b/dbtests/mockdbclient.h
index 9119075b9ba..fda09630a18 100644
--- a/dbtests/mockdbclient.h
+++ b/dbtests/mockdbclient.h
@@ -64,8 +64,8 @@ public:
virtual void afterCommand() {}
};
DirectDBClientConnection( ReplPair *rp, ConnectionCallback *cc = 0 ) :
- rp_( rp ),
- cc_( cc ) {
+ rp_( rp ),
+ cc_( cc ) {
}
virtual BSONObj findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0) {
BSONObj c = query.obj.copy();
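
mockdbclient.h works by overriding the client's query entry points so tests can run without a server. A stripped-down sketch of the same pattern (FakeClient and its canned document are illustrative names, not part of the header):

    class FakeClient : public DBClientConnection {
        BSONObj _canned;
    public:
        FakeClient( const BSONObj &canned ) : _canned( canned ) {}
        virtual BSONObj findOne( const string &ns, const Query &query,
                                 const BSONObj *fieldsToReturn = 0, int queryOptions = 0 ) {
            return _canned.copy();   // serve a fixed document, no network round trip
        }
    };
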
diff --git a/dbtests/namespacetests.cpp b/dbtests/namespacetests.cpp
index 3ebb8d1d34b..c2be0b0439e 100644
--- a/dbtests/namespacetests.cpp
+++ b/dbtests/namespacetests.cpp
@@ -32,7 +32,7 @@ namespace NamespaceTests {
dblock lk;
Client::Context _context;
public:
- Base() : _context(ns()){
+ Base() : _context(ns()) {
}
virtual ~Base() {
if ( id_.info.isNull() )
@@ -323,7 +323,7 @@ namespace NamespaceTests {
return k.obj();
}
};
-
+
class ArraySubobjectSingleMissing : public Base {
public:
void run() {
@@ -336,7 +336,7 @@ namespace NamespaceTests {
elts.push_back( simpleBC( i ) );
BSONObjBuilder b;
b.append( "a", elts );
-
+
BSONObjSetDefaultOrder keys;
id().getKeysFromObject( b.done(), keys );
checkSize( 4, keys );
@@ -353,7 +353,7 @@ namespace NamespaceTests {
return aDotB();
}
};
-
+
class ArraySubobjectMissing : public Base {
public:
void run() {
@@ -376,7 +376,7 @@ namespace NamespaceTests {
return aDotB();
}
};
-
+
class MissingField : public Base {
public:
void run() {
@@ -391,7 +391,7 @@ namespace NamespaceTests {
return BSON( "a" << 1 );
}
};
-
+
class SubobjectMissing : public Base {
public:
void run() {
@@ -406,12 +406,12 @@ namespace NamespaceTests {
return aDotB();
}
};
-
+
class CompoundMissing : public Base {
public:
- void run(){
+ void run() {
create();
-
+
{
BSONObjSetDefaultOrder keys;
id().getKeysFromObject( fromjson( "{x:'a',y:'b'}" ) , keys );
@@ -428,16 +428,16 @@ namespace NamespaceTests {
b.appendNull( "" );
assertEquals( b.obj() , *keys.begin() );
}
-
+
}
private:
virtual BSONObj key() const {
return BSON( "x" << 1 << "y" << 1 );
}
-
+
};
-
+
class ArraySubelementComplex : public Base {
public:
void run() {
@@ -508,17 +508,17 @@ namespace NamespaceTests {
return aDotB();
}
};
-
+
class EmptyArray : Base {
public:
- void run(){
+ void run() {
create();
BSONObjSetDefaultOrder keys;
id().getKeysFromObject( fromjson( "{a:[1,2]}" ), keys );
checkSize(2, keys );
keys.clear();
-
+
id().getKeysFromObject( fromjson( "{a:[1]}" ), keys );
checkSize(1, keys );
keys.clear();
@@ -535,14 +535,14 @@ namespace NamespaceTests {
class MultiEmptyArray : Base {
public:
- void run(){
+ void run() {
create();
BSONObjSetDefaultOrder keys;
id().getKeysFromObject( fromjson( "{a:1,b:[1,2]}" ), keys );
checkSize(2, keys );
keys.clear();
-
+
id().getKeysFromObject( fromjson( "{a:1,b:[1]}" ), keys );
checkSize(1, keys );
keys.clear();
@@ -551,7 +551,7 @@ namespace NamespaceTests {
//cout << "YO : " << *(keys.begin()) << endl;
checkSize(1, keys );
keys.clear();
-
+
id().getKeysFromObject( fromjson( "{a:1,b:[]}" ), keys );
checkSize(1, keys );
//cout << "YO : " << *(keys.begin()) << endl;
@@ -600,7 +600,7 @@ namespace NamespaceTests {
if ( fileNo == -1 )
continue;
for ( int j = i.ext()->firstRecord.getOfs(); j != DiskLoc::NullOfs;
- j = DiskLoc( fileNo, j ).rec()->nextOfs ) {
+ j = DiskLoc( fileNo, j ).rec()->nextOfs ) {
++count;
}
}
@@ -700,7 +700,7 @@ namespace NamespaceTests {
}
};
- /* test NamespaceDetails::cappedTruncateAfter(const char *ns, DiskLoc loc)
+ /* test NamespaceDetails::cappedTruncateAfter(const char *ns, DiskLoc loc)
*/
class TruncateCapped : public Base {
virtual string spec() const {
@@ -820,15 +820,15 @@ namespace NamespaceTests {
ASSERT_EQUALS( 496U, sizeof( NamespaceDetails ) );
}
};
-
+
} // namespace NamespaceDetailsTests
class All : public Suite {
public:
- All() : Suite( "namespace" ){
+ All() : Suite( "namespace" ) {
}
- void setupTests(){
+ void setupTests() {
add< IndexDetailsTests::Create >();
add< IndexDetailsTests::GetKeysFromObjectSimple >();
add< IndexDetailsTests::GetKeysFromObjectDotted >();
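
The IndexDetails tests above all revolve around getKeysFromObject: indexing an array field emits one key per element, using the empty field names that index keys carry internally. A toy illustration of that fan-out using only the public bson API (this is not the server's key generator):

    BSONObj doc = fromjson( "{ a : [ 1, 2 ] }" );
    BSONObjIterator i( doc["a"].embeddedObject() );
    while ( i.more() ) {
        BSONObjBuilder k;
        k.appendAs( i.next(), "" );   // index keys use empty field names
        cout << k.obj() << endl;      // { : 1 } then { : 2 }
    }
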
diff --git a/dbtests/pairingtests.cpp b/dbtests/pairingtests.cpp
index 68d4c0ee215..9cca5480efe 100644
--- a/dbtests/pairingtests.cpp
+++ b/dbtests/pairingtests.cpp
@@ -37,7 +37,7 @@ namespace PairingTests {
~Base() {
pairSync = backup;
dblock lk;
- Helpers::emptyCollection( "local.pair.sync" );
+ Helpers::emptyCollection( "local.pair.sync" );
if ( pairSync->initialSyncCompleted() ) {
// save to db
pairSync->setInitialSyncCompleted();
@@ -63,7 +63,7 @@ namespace PairingTests {
private:
static void init() {
dblock lk;
- Helpers::emptyCollection( "local.pair.sync" );
+ Helpers::emptyCollection( "local.pair.sync" );
if ( synced != 0 && notSynced != 0 )
return;
notSynced = new PairSync();
@@ -71,7 +71,7 @@ namespace PairingTests {
synced = new PairSync();
synced->init();
synced->setInitialSyncCompleted();
- Helpers::emptyCollection( "local.pair.sync" );
+ Helpers::emptyCollection( "local.pair.sync" );
}
PairSync *backup;
static PairSync *synced;
@@ -199,24 +199,24 @@ namespace PairingTests {
TestableReplPair rp4( true, fromjson( "{ok:1,you_are:1}" ) );
rp4.arbitrate();
- ASSERT( rp4.state == ReplPair::State_Master );
+ ASSERT( rp4.state == ReplPair::State_Master );
TestableReplPair rp5( true, fromjson( "{ok:1,you_are:0}" ) );
rp5.arbitrate();
- ASSERT( rp5.state == ReplPair::State_Slave );
+ ASSERT( rp5.state == ReplPair::State_Slave );
TestableReplPair rp6( true, fromjson( "{ok:1,you_are:-1}" ) );
rp6.arbitrate();
// unchanged from initial value
- ASSERT( rp6.state == ReplPair::State_Negotiating );
+ ASSERT( rp6.state == ReplPair::State_Negotiating );
}
private:
class TestableReplPair : public ReplPair {
public:
TestableReplPair( bool connect, const BSONObj &one ) :
- ReplPair( "a", "z" ),
- connect_( connect ),
- one_( one ) {
+ ReplPair( "a", "z" ),
+ connect_( connect ),
+ one_( one ) {
}
virtual
DBClientConnection *newClientConnection() const {
@@ -326,10 +326,10 @@ namespace PairingTests {
class All : public Suite {
public:
- All() : Suite( "pairing" ){
+ All() : Suite( "pairing" ) {
}
-
- void setupTests(){
+
+ void setupTests() {
add< ReplPairTests::Create >();
add< ReplPairTests::Dominant >();
add< ReplPairTests::SetMaster >();
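
The arbitrate cases above pin down a small decision table on the arbiter's reply: you_are:1 makes this side master, you_are:0 makes it slave, and you_are:-1 leaves the state untouched. Restated as a sketch of just that table (the function is an illustration, not ReplPair::arbitrate itself):

    int nextState( const BSONObj &res , int current ) {
        if ( ! res["ok"].trueValue() )
            return current;                        // negotiation failed; no change
        int you = (int) res["you_are"].number();
        if ( you == 1 ) return ReplPair::State_Master;
        if ( you == 0 ) return ReplPair::State_Slave;
        return current;                            // you_are:-1 : unchanged
    }
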
diff --git a/dbtests/pdfiletests.cpp b/dbtests/pdfiletests.cpp
index 1d02a1a175e..3345ba520ee 100644
--- a/dbtests/pdfiletests.cpp
+++ b/dbtests/pdfiletests.cpp
@@ -31,7 +31,7 @@ namespace PdfileTests {
class Base {
public:
- Base() : _context( ns() ){
+ Base() : _context( ns() ) {
}
virtual ~Base() {
if ( !nsd() )
@@ -268,11 +268,11 @@ namespace PdfileTests {
};
} // namespace ScanCapped
-
+
namespace Insert {
class Base {
public:
- Base() : _context( ns() ){
+ Base() : _context( ns() ) {
}
virtual ~Base() {
if ( !nsd() )
@@ -291,7 +291,7 @@ namespace PdfileTests {
dblock lk_;
Client::Context _context;
};
-
+
class UpdateDate : public Base {
public:
void run() {
@@ -308,33 +308,33 @@ namespace PdfileTests {
class ExtentSizing {
public:
struct SmallFilesControl {
- SmallFilesControl(){
+ SmallFilesControl() {
old = cmdLine.smallfiles;
cmdLine.smallfiles = false;
}
- ~SmallFilesControl(){
+ ~SmallFilesControl() {
cmdLine.smallfiles = old;
}
bool old;
};
- void run(){
+ void run() {
SmallFilesControl c;
// test that no matter what we start with, we always get to max extent size
- for ( int obj=16; obj<BSONObjMaxUserSize; obj += 111 ){
+ for ( int obj=16; obj<BSONObjMaxUserSize; obj += 111 ) {
int sz = Extent::initialSize( obj );
- for ( int i=0; i<100; i++ ){
+ for ( int i=0; i<100; i++ ) {
sz = Extent::followupSize( obj , sz );
}
ASSERT_EQUALS( Extent::maxSize() , sz );
}
}
};
-
+
class ExtentAllocOrder {
public:
- void run(){
+ void run() {
string dbname = "unittest_ex";
-
+
string c1 = dbname + ".x1";
string c2 = dbname + ".x2";
@@ -345,23 +345,23 @@ namespace PdfileTests {
dblock mylock;
Client::Context cx( dbname );
-
+
bool isnew;
Database * d = dbHolder.getOrCreate( dbname , dbpath , isnew );
assert( d );
int big = 10 * 1024;
//int small = 1024;
-
+
unsigned long long l = 0;
- while ( 1 ){
+ while ( 1 ) {
MongoDataFile * f = d->addAFile( big , false );
cout << f->length() << endl;
if ( f->length() == l )
break;
l = f->length();
}
-
+
int start = d->numFiles();
for ( int i=0; i<start; i++ )
d->allocExtent( c1.c_str() , d->getFile( i )->getHeader()->unusedLength , false );
@@ -374,12 +374,12 @@ namespace PdfileTests {
}
};
-
+
class All : public Suite {
public:
- All() : Suite( "pdfile" ){}
-
- void setupTests(){
+ All() : Suite( "pdfile" ) {}
+
+ void setupTests() {
add< ScanCapped::Empty >();
add< ScanCapped::EmptyLooped >();
add< ScanCapped::EmptyMultiExtentLooped >();
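
ExtentSizing above asserts that, whatever object size you start from, repeated followupSize calls converge on the maximum extent size. The convergence loop, lifted from the test:

    int sz = Extent::initialSize( obj );          // obj = a document size in bytes
    for ( int i = 0; i < 100; i++ )
        sz = Extent::followupSize( obj , sz );
    ASSERT_EQUALS( Extent::maxSize() , sz );      // growth compounds until capped
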
diff --git a/dbtests/perf/btreeperf.cpp b/dbtests/perf/btreeperf.cpp
index 005cf66d819..7d68d8f5cc7 100644
--- a/dbtests/perf/btreeperf.cpp
+++ b/dbtests/perf/btreeperf.cpp
@@ -58,7 +58,7 @@ protected:
* Helper functions for converting a sample value to a sample object with
* specified _id, to be inserted or removed.
*/
-
+
template< class T >
BSONObj insertObjWithVal( const T &val ) {
BSONObjBuilder b;
@@ -177,7 +177,7 @@ protected:
b2.append( "$gte", val );
b2.done();
return b1.obj();
- }
+ }
virtual T insertVal() = 0;
virtual T removeVal() = 0;
};
@@ -190,8 +190,8 @@ protected:
class UniformInsertRangedUniformRemoveInteger : public InsertAndRangedRemoveStrategy< long long > {
public:
UniformInsertRangedUniformRemoveInteger() :
- _uniform_int( 0ULL, ~0ULL ),
- _nextLongLong( randomNumberGenerator, _uniform_int ) {
+ _uniform_int( 0ULL, ~0ULL ),
+ _nextLongLong( randomNumberGenerator, _uniform_int ) {
}
/** Small likelihood of duplicates */
virtual long long insertVal() { return _nextLongLong(); }
@@ -216,10 +216,10 @@ private:
class UniformInsertRangedUniformRemoveString : public InsertAndRangedRemoveStrategy< string > {
public:
UniformInsertRangedUniformRemoveString() :
- _geometric_distribution( 0.9 ),
- _nextLength( randomNumberGenerator, _geometric_distribution ),
- _uniform_char( 'a', 'z' ),
- _nextChar( randomNumberGenerator, _uniform_char ) {
+ _geometric_distribution( 0.9 ),
+ _nextLength( randomNumberGenerator, _geometric_distribution ),
+ _uniform_char( 'a', 'z' ),
+ _nextChar( randomNumberGenerator, _uniform_char ) {
}
/** Small likelihood of duplicates */
virtual string insertVal() { return nextString(); }
@@ -256,7 +256,7 @@ private:
class IncreasingInsertRangedUniformRemoveOID : public InsertAndRangedRemoveStrategy< OID > {
public:
IncreasingInsertRangedUniformRemoveOID() :
- _max( -1 ) {
+ _max( -1 ) {
}
virtual OID insertVal() { return oidFromULL( ++_max ); }
virtual OID removeVal() {
@@ -290,10 +290,10 @@ private:
class IncreasingInsertIncreasingRemoveInteger : public InsertAndRemoveStrategy {
public:
IncreasingInsertIncreasingRemoveInteger() :
- // Start with a large value so data type will be preserved if we round
- // trip through json.
- _min( 1LL << 32 ),
- _max( 1LL << 32 ) {
+ // Start with a large value so data type will be preserved if we round
+ // trip through json.
+ _min( 1LL << 32 ),
+ _max( 1LL << 32 ) {
}
virtual BSONObj insertObj() { return insertObjWithVal( ++_max ); }
virtual BSONObj removeObj() { return removeObjWithVal( _min < _max ? ++_min : _min ); }
@@ -311,27 +311,28 @@ public:
* specify 5 for this argument.
*/
BernoulliGenerator( int excessFalsePercent ) :
- _bernoulli_distribution( 1.0 / ( 2.0 + excessFalsePercent / 100.0 ) ),
- _generator( randomNumberGenerator, _bernoulli_distribution ) {
+ _bernoulli_distribution( 1.0 / ( 2.0 + excessFalsePercent / 100.0 ) ),
+ _generator( randomNumberGenerator, _bernoulli_distribution ) {
}
bool operator()() { return _generator(); }
private:
bernoulli_distribution<> _bernoulli_distribution;
- variate_generator< mt19937&, bernoulli_distribution<> > _generator;
+ variate_generator< mt19937&, bernoulli_distribution<> > _generator;
};
/** Runs a strategy on a connection, with specified mix of inserts and removes. */
class InsertAndRemoveRunner {
public:
InsertAndRemoveRunner( DBClientConnection &conn, InsertAndRemoveStrategy &strategy, int excessInsertPercent ) :
- _conn( conn ),
- _strategy( strategy ),
- _nextOpTypeRemove( excessInsertPercent ) {
+ _conn( conn ),
+ _strategy( strategy ),
+ _nextOpTypeRemove( excessInsertPercent ) {
}
void writeOne() {
if ( _nextOpTypeRemove() ) {
_conn.remove( ns, _strategy.removeObj(), true );
- } else {
+ }
+ else {
_conn.insert( ns, _strategy.insertObj() );
}
}
@@ -356,14 +357,15 @@ private:
class InsertAndRemoveScriptGenerator {
public:
InsertAndRemoveScriptGenerator( InsertAndRemoveStrategy &strategy, int excessInsertPercent ) :
- _strategy( strategy ),
- _nextOpTypeRemove( excessInsertPercent ) {
+ _strategy( strategy ),
+ _nextOpTypeRemove( excessInsertPercent ) {
}
void writeOne() {
if ( _nextOpTypeRemove() ) {
- cout << "r " << _strategy.removeObj().jsonString() << endl;
- } else {
- cout << "i " << _strategy.insertObj().jsonString() << endl;
+ cout << "r " << _strategy.removeObj().jsonString() << endl;
+ }
+ else {
+ cout << "i " << _strategy.insertObj().jsonString() << endl;
}
}
private:
@@ -379,14 +381,15 @@ private:
class InsertAndRemoveScriptRunner {
public:
InsertAndRemoveScriptRunner( DBClientConnection &conn ) :
- _conn( conn ) {
+ _conn( conn ) {
}
void writeOne() {
cin.getline( _buf, 1024 );
BSONObj val = fromjson( _buf + 2 );
if ( _buf[ 0 ] == 'r' ) {
_conn.remove( ns, val, true );
- } else {
+ }
+ else {
_conn.insert( ns, val );
}
}
@@ -396,7 +399,7 @@ private:
};
int main( int argc, const char **argv ) {
-
+
DBClientConnection conn;
conn.connect( "127.0.0.1:27017" );
conn.dropCollection( ns );
@@ -410,13 +413,13 @@ int main( int argc, const char **argv ) {
// IncreasingInsertIncreasingRemoveInteger strategy;
// InsertAndRemoveScriptGenerator runner( strategy, 5 );
InsertAndRemoveScriptRunner runner( conn );
-
+
Timer t;
BSONObj statsCmd = BSON( "collstats" << index_collection );
-
+
// Print header, unless we are generating a script (in that case, comment this out).
cout << "ops,milliseconds,docs,totalBucketSize" << endl;
-
+
long long i = 0;
long long n = 10000000000;
while( i < n ) {
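
The value strategies in btreeperf.cpp all draw from boost's random library through the same plumbing: one mt19937 engine, a distribution, and a variate_generator binding the two. The recipe on its own:

    #include <boost/random.hpp>
    using namespace boost;

    mt19937 randomNumberGenerator;                 // shared engine, as in the file
    uniform_int<unsigned long long> dist( 0ULL, ~0ULL );
    variate_generator< mt19937&, uniform_int<unsigned long long> >
        nextLongLong( randomNumberGenerator, dist );

    unsigned long long draw() { return nextLongLong(); }   // one uniform 64-bit value
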
diff --git a/dbtests/perf/perftest.cpp b/dbtests/perf/perftest.cpp
index f86a1c3235f..e233f5c3d21 100644
--- a/dbtests/perf/perftest.cpp
+++ b/dbtests/perf/perftest.cpp
@@ -81,7 +81,7 @@ public:
class RunnerSuite : public Suite {
public:
- RunnerSuite( string name ) : Suite( name ){}
+ RunnerSuite( string name ) : Suite( name ) {}
protected:
template< class T >
void add() {
@@ -168,9 +168,9 @@ namespace Insert {
class All : public RunnerSuite {
public:
- All() : RunnerSuite( "insert" ){}
+ All() : RunnerSuite( "insert" ) {}
- void setupTests(){
+ void setupTests() {
add< IdIndex >();
add< TwoIndex >();
add< TenIndex >();
@@ -252,8 +252,8 @@ namespace Update {
class All : public RunnerSuite {
public:
- All() : RunnerSuite( "update" ){}
- void setupTests(){
+ All() : RunnerSuite( "update" ) {}
+ void setupTests() {
add< Smaller >();
add< Bigger >();
add< Inc >();
@@ -266,33 +266,33 @@ namespace Update {
namespace BSON {
const char *sample =
- "{\"one\":2, \"two\":5, \"three\": {},"
- "\"four\": { \"five\": { \"six\" : 11 } },"
- "\"seven\": [ \"a\", \"bb\", \"ccc\", 5 ],"
- "\"eight\": Dbref( \"rrr\", \"01234567890123456789aaaa\" ),"
- "\"_id\": ObjectId( \"deadbeefdeadbeefdeadbeef\" ),"
- "\"nine\": { \"$binary\": \"abc=\", \"$type\": \"02\" },"
- "\"ten\": Date( 44 ), \"eleven\": /foooooo/i }";
+ "{\"one\":2, \"two\":5, \"three\": {},"
+ "\"four\": { \"five\": { \"six\" : 11 } },"
+ "\"seven\": [ \"a\", \"bb\", \"ccc\", 5 ],"
+ "\"eight\": Dbref( \"rrr\", \"01234567890123456789aaaa\" ),"
+ "\"_id\": ObjectId( \"deadbeefdeadbeefdeadbeef\" ),"
+ "\"nine\": { \"$binary\": \"abc=\", \"$type\": \"02\" },"
+ "\"ten\": Date( 44 ), \"eleven\": /foooooo/i }";
const char *shopwikiSample =
- "{ '_id' : '289780-80f85380b5c1d4a0ad75d1217673a4a2' , 'site_id' : 289780 , 'title'"
- ": 'Jubilee - Margaret Walker' , 'image_url' : 'http://www.heartlanddigsandfinds.c"
- "om/store/graphics/Product_Graphics/Product_8679.jpg' , 'url' : 'http://www.heartla"
- "nddigsandfinds.com/store/store_product_detail.cfm?Product_ID=8679&Category_ID=2&Su"
- "b_Category_ID=910' , 'url_hash' : 3450626119933116345 , 'last_update' : null , '"
- "features' : { '$imagePrefetchDate' : '2008Aug30 22:39' , '$image.color.rgb' : '5a7"
- "574' , 'Price' : '$10.99' , 'Description' : 'Author--s 1st Novel. A Houghton Miffl"
- "in Literary Fellowship Award novel by the esteemed poet and novelist who has demon"
- "strated a lifelong commitment to the heritage of black culture. An acclaimed story"
- "of Vyry, a negro slave during the 19th Century, facing the biggest challenge of h"
- "er lifetime - that of gaining her freedom, fighting for all the things she had nev"
- "er known before. The author, great-granddaughter of Vyry, reveals what the Civil W"
- "ar in America meant to the Negroes. Slavery W' , '$priceHistory-1' : '2008Dec03 $1"
- "0.99' , 'Brand' : 'Walker' , '$brands_in_title' : 'Walker' , '--path' : '//HTML[1]"
- "/BODY[1]/TABLE[1]/TR[1]/TD[1]/P[1]/TABLE[1]/TR[1]/TD[1]/TABLE[1]/TR[2]/TD[2]/TABLE"
- "[1]/TR[1]/TD[1]/P[1]/TABLE[1]/TR[1]' , '~location' : 'en_US' , '$crawled' : '2009J"
- "an11 03:22' , '$priceHistory-2' : '2008Nov15 $10.99' , '$priceHistory-0' : '2008De"
- "c24 $10.99'}}";
+ "{ '_id' : '289780-80f85380b5c1d4a0ad75d1217673a4a2' , 'site_id' : 289780 , 'title'"
+ ": 'Jubilee - Margaret Walker' , 'image_url' : 'http://www.heartlanddigsandfinds.c"
+ "om/store/graphics/Product_Graphics/Product_8679.jpg' , 'url' : 'http://www.heartla"
+ "nddigsandfinds.com/store/store_product_detail.cfm?Product_ID=8679&Category_ID=2&Su"
+ "b_Category_ID=910' , 'url_hash' : 3450626119933116345 , 'last_update' : null , '"
+ "features' : { '$imagePrefetchDate' : '2008Aug30 22:39' , '$image.color.rgb' : '5a7"
+ "574' , 'Price' : '$10.99' , 'Description' : 'Author--s 1st Novel. A Houghton Miffl"
+ "in Literary Fellowship Award novel by the esteemed poet and novelist who has demon"
+ "strated a lifelong commitment to the heritage of black culture. An acclaimed story"
+ "of Vyry, a negro slave during the 19th Century, facing the biggest challenge of h"
+ "er lifetime - that of gaining her freedom, fighting for all the things she had nev"
+ "er known before. The author, great-granddaughter of Vyry, reveals what the Civil W"
+ "ar in America meant to the Negroes. Slavery W' , '$priceHistory-1' : '2008Dec03 $1"
+ "0.99' , 'Brand' : 'Walker' , '$brands_in_title' : 'Walker' , '--path' : '//HTML[1]"
+ "/BODY[1]/TABLE[1]/TR[1]/TD[1]/P[1]/TABLE[1]/TR[1]/TD[1]/TABLE[1]/TR[2]/TD[2]/TABLE"
+ "[1]/TR[1]/TD[1]/P[1]/TABLE[1]/TR[1]' , '~location' : 'en_US' , '$crawled' : '2009J"
+ "an11 03:22' , '$priceHistory-2' : '2008Nov15 $10.99' , '$priceHistory-0' : '2008De"
+ "c24 $10.99'}}";
class Parse {
public:
@@ -332,8 +332,8 @@ namespace BSON {
class All : public RunnerSuite {
public:
- All() : RunnerSuite( "bson" ){}
- void setupTests(){
+ All() : RunnerSuite( "bson" ) {}
+ void setupTests() {
add< Parse >();
add< ShopwikiParse >();
add< Json >();
@@ -402,8 +402,8 @@ namespace Index {
class All : public RunnerSuite {
public:
- All() : RunnerSuite( "index" ){}
- void setupTests(){
+ All() : RunnerSuite( "index" ) {}
+ void setupTests() {
add< Int >();
add< ObjectId >();
add< String >();
@@ -435,7 +435,7 @@ namespace QueryTests {
}
void run() {
client_->findOne( ns_.c_str(),
- QUERY( "a" << "b" ).hint( BSON( "_id" << 1 ) ) );
+ QUERY( "a" << "b" ).hint( BSON( "_id" << 1 ) ) );
}
string ns_;
};
@@ -465,7 +465,7 @@ namespace QueryTests {
}
void run() {
auto_ptr< DBClientCursor > c =
- client_->query( ns_.c_str(), Query( BSONObj() ).sort( BSON( "_id" << 1 ) ) );
+ client_->query( ns_.c_str(), Query( BSONObj() ).sort( BSON( "_id" << 1 ) ) );
int i = 0;
for( ; c->more(); c->nextSafe(), ++i );
ASSERT_EQUALS( 50000, i );
@@ -481,7 +481,7 @@ namespace QueryTests {
}
void run() {
auto_ptr< DBClientCursor > c =
- client_->query( ns_.c_str(), Query( BSONObj() ).sort( BSON( "_id" << 1 ) ) );
+ client_->query( ns_.c_str(), Query( BSONObj() ).sort( BSON( "_id" << 1 ) ) );
int i = 0;
for( ; c->more(); c->nextSafe(), ++i );
ASSERT_EQUALS( 50000, i );
@@ -541,8 +541,8 @@ namespace QueryTests {
class All : public RunnerSuite {
public:
- All() : RunnerSuite( "query" ){}
- void setupTests(){
+ All() : RunnerSuite( "query" ) {}
+ void setupTests() {
add< NoMatch >();
add< NoMatchIndex >();
add< NoMatchLong >();
@@ -602,8 +602,8 @@ namespace Count {
class All : public RunnerSuite {
public:
- All() : RunnerSuite( "count" ){}
- void setupTests(){
+ All() : RunnerSuite( "count" ) {}
+ void setupTests() {
add< Count >();
add< CountIndex >();
add< CountSimpleIndex >();
@@ -677,8 +677,8 @@ namespace Plan {
class All : public RunnerSuite {
public:
- All() : RunnerSuite("plan" ){}
- void setupTests(){
+ All() : RunnerSuite("plan" ) {}
+ void setupTests() {
add< Hint >();
add< Sort >();
add< Query >();
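
The query runners above rely on two client idioms: QUERY(...).hint(...) to pin the plan to an index, and Query(...).sort(...) for an ordered scan. In isolation (address and namespaces are placeholders):

    DBClientConnection c;
    c.connect( "127.0.0.1:27017" );

    // force the _id index, as the Hint runner does
    BSONObj one = c.findOne( "perftest.hint",
                             QUERY( "a" << "b" ).hint( BSON( "_id" << 1 ) ) );

    // ordered full scan, as the Sort runners do
    auto_ptr<DBClientCursor> cur =
        c.query( "perftest.sort", Query( BSONObj() ).sort( BSON( "_id" << 1 ) ) );
    while ( cur->more() )
        cur->nextSafe();
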
diff --git a/dbtests/perftests.cpp b/dbtests/perftests.cpp
index 2b59aca646b..980fab0dbf6 100644
--- a/dbtests/perftests.cpp
+++ b/dbtests/perftests.cpp
@@ -1,6 +1,6 @@
/** @file perftests.cpp.cpp : unit tests relating to performance
- The idea herein is tests that run fast and can be part of the normal CI suite. So no tests herein that take
+ The idea herein is tests that run fast and can be part of the normal CI suite. So no tests herein that take
a long time to run. Obviously we need those too, but they will be separate.
These tests use DBDirectClient; they are a bit white-boxish.
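The header above states the design goal: each perf test must finish within a few seconds so the whole suite can run on every CI pass. The B class defined further down realizes this with a batched self-timing loop. Below is a minimal sketch of that pattern, not the file's actual harness; the names runForBudget and budgetMillis are illustrative:

    // Run op() in small batches until a wall-clock budget is spent, then
    // return the number of completed operations. Batching keeps the clock
    // reads from dominating very cheap operations.
    #include <ctime>
    static unsigned long long runForBudget( void (*op)(), int budgetMillis ) {
        const unsigned Batch = 50;
        unsigned long long n = 0;
        std::clock_t start = std::clock();
        do {
            for( unsigned i = 0; i < Batch; i++ )
                op();
            n += Batch;
        }
        while( (double)( std::clock() - start ) * 1000.0 / CLOCKS_PER_SEC < budgetMillis );
        return n; // caller divides by elapsed ms to get ops/sec
    }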
@@ -65,14 +65,14 @@ namespace PerfTests {
DBClientType ClientBase::_client;
// todo: use a couple threads. not a very good test yet.
- class DefInvoke {
+ class DefInvoke {
static int tot;
- struct V {
+ struct V {
int val;
static void go(const V &v) { tot += v.val; }
};
public:
- void run() {
+ void run() {
tot = 0;
TaskQueue<V> d;
int x = 0;
@@ -92,11 +92,10 @@ namespace PerfTests {
};
int DefInvoke::tot;
- class CappedTest : public ClientBase {
+ class CappedTest : public ClientBase {
};
- class B : public ClientBase
- {
+ class B : public ClientBase {
string _ns;
protected:
const char *ns() { return _ns.c_str(); }
@@ -113,11 +112,11 @@ namespace PerfTests {
virtual unsigned long long expectation() = 0;
virtual int howLongMillis() { return 5000; }
public:
- void say(unsigned long long n, int ms, string s) {
+ void say(unsigned long long n, int ms, string s) {
cout << setw(36) << left << s << ' ' << right << setw(7) << n*1000/ms << "/sec " << setw(4) << ms << "ms" << endl;
cout << dur::stats.curr->_asObj().toString() << endl;
}
- void run() {
+ void run() {
_ns = string("perftest.") + name();
client().dropCollection(ns());
@@ -130,17 +129,18 @@ namespace PerfTests {
Timer t;
unsigned long long n = 0;
const unsigned Batch = 50;
- do {
+ do {
unsigned i;
for( i = 0; i < Batch; i++ )
timed();
n += i;
- } while( t.millis() < hlm );
+ }
+ while( t.millis() < hlm );
client().getLastError(); // block until all ops are finished
int ms = t.millis();
say(n, ms, name());
- if( n < expectation() ) {
+ if( n < expectation() ) {
cout << "\ntest " << name() << " seems slow n:" << n << " ops/sec but expect greater than:" << expectation() << endl;
cout << endl;
}
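One subtlety in the loop above is the getLastError() call before the timer is read. Writes in this wire protocol are fire-and-forget, so without a synchronizing round trip the clock would stop while operations were still queued and throughput would be overstated. A hedged sketch of the barrier idiom, assuming a test mongod on localhost:

    // getLastError() as a write barrier (assumes a local test instance).
    DBClientConnection c;
    c.connect( "localhost" );
    Timer t;
    for( int i = 0; i < 10000; i++ )
        c.insert( "perftest.demo", BSON( "x" << i ) ); // queued, unacknowledged
    c.getLastError(); // barrier: block until all queued ops are applied
    int ms = t.millis(); // only now is the timing honest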
@@ -151,12 +151,12 @@ namespace PerfTests {
dur::stats.curr->reset();
Timer t;
unsigned long long n = 0;
- while( 1 ) {
+ while( 1 ) {
unsigned i;
for( i = 0; i < Batch; i++ )
timed2();
n += i;
- if( t.millis() > hlm )
+ if( t.millis() > hlm )
break;
}
int ms = t.millis();
@@ -166,14 +166,14 @@ namespace PerfTests {
}
};
- class InsertDup : public B {
+ class InsertDup : public B {
const BSONObj o;
public:
InsertDup() : o( BSON("_id" << 1) ) { } // dup keys
- string name() {
- return "insert duplicate _ids";
+ string name() {
+ return "insert duplicate _ids";
}
- void prep() {
+ void prep() {
client().insert( ns(), o );
}
void timed() {
@@ -184,8 +184,8 @@ namespace PerfTests {
}
unsigned long long expectation() { return 1000; }
};
-
- class Insert1 : public InsertDup {
+
+ class Insert1 : public InsertDup {
const BSONObj x;
public:
Insert1() : x( BSON("x" << 99) ) { }
@@ -199,10 +199,10 @@ namespace PerfTests {
unsigned long long expectation() { return 1000; }
};
- class InsertBig : public InsertDup {
+ class InsertBig : public InsertDup {
BSONObj x;
- virtual int howLongMillis() {
- if( sizeof(void*) == 4 )
+ virtual int howLongMillis() {
+ if( sizeof(void*) == 4 )
return 1000; // could exceed mmapping if run too long, as this function adds a lot of data fast
return 5000;
}
@@ -221,10 +221,10 @@ namespace PerfTests {
unsigned long long expectation() { return 20; }
};
- class InsertRandom : public B {
+ class InsertRandom : public B {
public:
string name() { return "random inserts"; }
- void prep() {
+ void prep() {
client().insert( ns(), BSONObj() );
client().ensureIndex(ns(), BSON("x"<<1));
}
@@ -237,17 +237,17 @@ namespace PerfTests {
}
unsigned long long expectation() { return 1000; }
};
-
- /** upserts about 32k records and then keeps updating them
+
+ /** upserts about 32k records and then keeps updating them
2 indexes
*/
- class Update1 : public B {
+ class Update1 : public B {
public:
- static int rand() {
+ static int rand() {
return std::rand() & 0x7fff;
}
string name() { return "random upserts"; }
- void prep() {
+ void prep() {
client().insert( ns(), BSONObj() );
client().ensureIndex(ns(), BSON("x"<<1));
}
@@ -274,12 +274,12 @@ namespace PerfTests {
}
unsigned long long expectation() { return 1000; }
};
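Update1's prep() plus the 15-bit rand() above imply the access pattern: keys are drawn from only about 32k distinct values, so early iterations mostly insert while later ones increasingly update documents in place. The timed() body falls outside this hunk, so the update document below is illustrative; the point is the upsert flag and the match on the indexed field x:

    // Illustrative upsert in the Update1 style: match on the indexed
    // field with a 15-bit random key, creating the document if absent.
    int key = std::rand() & 0x7fff; // ~32k distinct keys
    client().update( ns(),
                     BSON( "x" << key ),
                     BSON( "$inc" << BSON( "y" << 1 ) ), // illustrative mod
                     true /*upsert*/ );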
-
+
template <typename T>
- class MoreIndexes : public T {
+ class MoreIndexes : public T {
public:
string name() { return T::name() + " with more indexes"; }
- void prep() {
+ void prep() {
T::prep();
this->client().ensureIndex(this->ns(), BSON("y"<<1));
this->client().ensureIndex(this->ns(), BSON("z"<<1));
@@ -290,7 +290,7 @@ namespace PerfTests {
public:
All() : Suite( "perf" ) {
}
- void setupTests(){
+ void setupTests() {
add< DefInvoke >();
add< InsertDup >();
add< Insert1 >();
diff --git a/dbtests/queryoptimizertests.cpp b/dbtests/queryoptimizertests.cpp
index 7c411b286a2..acf92174be3 100644
--- a/dbtests/queryoptimizertests.cpp
+++ b/dbtests/queryoptimizertests.cpp
@@ -27,12 +27,12 @@
namespace mongo {
extern BSONObj id_obj;
- void runQuery(Message& m, QueryMessage& q, Message &response ){
+ void runQuery(Message& m, QueryMessage& q, Message &response ) {
CurOp op( &(cc()) );
op.ensureStarted();
runQuery( m , q , op, response );
}
- void runQuery(Message& m, QueryMessage& q ){
+ void runQuery(Message& m, QueryMessage& q ) {
Message response;
runQuery( m, q, response );
}
@@ -64,14 +64,14 @@ namespace QueryOptimizerTests {
}
}
};
-
+
class NumericBase : public Base {
public:
- NumericBase(){
+ NumericBase() {
o = BSON( "min" << -numeric_limits<double>::max() << "max" << numeric_limits<double>::max() );
}
-
+
virtual BSONElement lower() { return o["min"]; }
virtual BSONElement upper() { return o["max"]; }
private:
@@ -81,7 +81,7 @@ namespace QueryOptimizerTests {
class Empty : public Base {
virtual BSONObj query() { return BSONObj(); }
};
-
+
class Eq : public Base {
public:
Eq() : o_( BSON( "a" << 1 ) ) {}
@@ -94,7 +94,7 @@ namespace QueryOptimizerTests {
class DupEq : public Eq {
public:
virtual BSONObj query() { return BSON( "a" << 1 << "b" << 2 << "a" << 1 ); }
- };
+ };
class Lt : public NumericBase {
public:
@@ -103,13 +103,13 @@ namespace QueryOptimizerTests {
virtual BSONElement upper() { return o_.firstElement(); }
virtual bool upperInclusive() { return false; }
BSONObj o_;
- };
+ };
class Lte : public Lt {
- virtual BSONObj query() { return BSON( "a" << LTE << 1 ); }
+ virtual BSONObj query() { return BSON( "a" << LTE << 1 ); }
virtual bool upperInclusive() { return true; }
};
-
+
class Gt : public NumericBase {
public:
Gt() : o_( BSON( "-" << 1 ) ) {}
@@ -117,23 +117,23 @@ namespace QueryOptimizerTests {
virtual BSONElement lower() { return o_.firstElement(); }
virtual bool lowerInclusive() { return false; }
BSONObj o_;
- };
-
+ };
+
class Gte : public Gt {
- virtual BSONObj query() { return BSON( "a" << GTE << 1 ); }
+ virtual BSONObj query() { return BSON( "a" << GTE << 1 ); }
virtual bool lowerInclusive() { return true; }
};
-
+
class TwoLt : public Lt {
- virtual BSONObj query() { return BSON( "a" << LT << 1 << LT << 5 ); }
+ virtual BSONObj query() { return BSON( "a" << LT << 1 << LT << 5 ); }
};
class TwoGt : public Gt {
- virtual BSONObj query() { return BSON( "a" << GT << 0 << GT << 1 ); }
- };
+ virtual BSONObj query() { return BSON( "a" << GT << 0 << GT << 1 ); }
+ };
class EqGte : public Eq {
- virtual BSONObj query() { return BSON( "a" << 1 << "a" << GTE << 1 ); }
+ virtual BSONObj query() { return BSON( "a" << 1 << "a" << GTE << 1 ); }
};
class EqGteInvalid {
@@ -142,7 +142,7 @@ namespace QueryOptimizerTests {
FieldRangeSet fbs( "ns", BSON( "a" << 1 << "a" << GTE << 2 ) );
ASSERT( !fbs.matchPossible() );
}
- };
+ };
struct RegexBase : Base {
void run() { //need to only look at first interval
@@ -166,7 +166,7 @@ namespace QueryOptimizerTests {
virtual BSONElement upper() { return o2_.firstElement(); }
virtual bool upperInclusive() { return false; }
BSONObj o1_, o2_;
- };
+ };
class RegexObj : public RegexBase {
public:
@@ -177,7 +177,7 @@ namespace QueryOptimizerTests {
virtual bool upperInclusive() { return false; }
BSONObj o1_, o2_;
};
-
+
class UnhelpfulRegex : public RegexBase {
public:
UnhelpfulRegex() {
@@ -191,13 +191,13 @@ namespace QueryOptimizerTests {
BSONObjBuilder b;
b.appendRegex( "a", "abc" );
return b.obj();
- }
+ }
virtual BSONElement lower() { return limits["lower"]; }
virtual BSONElement upper() { return limits["upper"]; }
virtual bool upperInclusive() { return false; }
BSONObj limits;
};
-
+
class In : public Base {
public:
In() : o1_( BSON( "-" << -3 ) ), o2_( BSON( "-" << 44 ) ) {}
@@ -219,7 +219,7 @@ namespace QueryOptimizerTests {
virtual BSONElement upper() { return o2_.firstElement(); }
BSONObj o1_, o2_;
};
-
+
class Equality {
public:
void run() {
@@ -237,7 +237,7 @@ namespace QueryOptimizerTests {
ASSERT( !s6.range( "a" ).equality() );
}
};
-
+
class SimplifiedQuery {
public:
void run() {
@@ -251,7 +251,7 @@ namespace QueryOptimizerTests {
ASSERT( !simple.getObjectField( "e" ).woCompare( fromjson( "{$gte:0,$lte:10}" ) ) );
}
};
-
+
class QueryPatternTest {
public:
void run() {
@@ -277,14 +277,14 @@ namespace QueryOptimizerTests {
return FieldRangeSet( "", query ).pattern( sort );
}
};
-
+
class NoWhere {
public:
void run() {
ASSERT_EQUALS( 0, FieldRangeSet( "ns", BSON( "$where" << 1 ) ).nNontrivialRanges() );
}
};
-
+
class Numeric {
public:
void run() {
@@ -311,7 +311,7 @@ namespace QueryOptimizerTests {
ASSERT( f.range( "a" ).max().woCompare( BSON( "a" << 3.0 ).firstElement(), false ) == 0 );
}
};
-
+
class UnionBound {
public:
void run() {
@@ -321,29 +321,29 @@ namespace QueryOptimizerTests {
ASSERT_EQUALS( 2U, ret.intervals().size() );
}
};
-
- class MultiBound {
- public:
- void run() {
+
+ class MultiBound {
+ public:
+ void run() {
FieldRangeSet frs1( "", fromjson( "{a:{$in:[1,3,5,7,9]}}" ) );
FieldRangeSet frs2( "", fromjson( "{a:{$in:[2,3,5,8,9]}}" ) );
- FieldRange fr1 = frs1.range( "a" );
- FieldRange fr2 = frs2.range( "a" );
- fr1 &= fr2;
+ FieldRange fr1 = frs1.range( "a" );
+ FieldRange fr2 = frs2.range( "a" );
+ fr1 &= fr2;
ASSERT( fr1.min().woCompare( BSON( "a" << 3.0 ).firstElement(), false ) == 0 );
ASSERT( fr1.max().woCompare( BSON( "a" << 9.0 ).firstElement(), false ) == 0 );
- vector< FieldInterval > intervals = fr1.intervals();
- vector< FieldInterval >::const_iterator j = intervals.begin();
- double expected[] = { 3, 5, 9 };
- for( int i = 0; i < 3; ++i, ++j ) {
- ASSERT_EQUALS( expected[ i ], j->_lower._bound.number() );
- ASSERT( j->_lower._inclusive );
- ASSERT( j->_lower == j->_upper );
- }
- ASSERT( j == intervals.end() );
- }
- };
-
+ vector< FieldInterval > intervals = fr1.intervals();
+ vector< FieldInterval >::const_iterator j = intervals.begin();
+ double expected[] = { 3, 5, 9 };
+ for( int i = 0; i < 3; ++i, ++j ) {
+ ASSERT_EQUALS( expected[ i ], j->_lower._bound.number() );
+ ASSERT( j->_lower._inclusive );
+ ASSERT( j->_lower == j->_upper );
+ }
+ ASSERT( j == intervals.end() );
+ }
+ };
+
class DiffBase {
public:
virtual ~DiffBase() {}
@@ -351,7 +351,7 @@ namespace QueryOptimizerTests {
FieldRangeSet frs( "", fromjson( obj().toString() ) );
FieldRange ret = frs.range( "a" );
ret -= frs.range( "b" );
- check( ret );
+ check( ret );
}
protected:
void check( const FieldRange &fr ) {
@@ -376,7 +376,7 @@ namespace QueryOptimizerTests {
class TwoRangeBase : public DiffBase {
public:
TwoRangeBase( string obj, int low, int high, bool lowI, bool highI )
- : _obj( obj ) {
+ : _obj( obj ) {
_n[ 0 ] = low;
_n[ 1 ] = high;
_b[ 0 ] = lowI;
@@ -391,7 +391,7 @@ namespace QueryOptimizerTests {
int _n[ 2 ];
bool _b[ 2 ];
};
-
+
struct Diff1 : public TwoRangeBase {
Diff1() : TwoRangeBase( "{a:{$gt:1,$lt:2},b:{$gt:3,$lt:4}}", 1, 2, false, false ) {}
};
@@ -399,7 +399,7 @@ namespace QueryOptimizerTests {
struct Diff2 : public TwoRangeBase {
Diff2() : TwoRangeBase( "{a:{$gt:1,$lt:2},b:{$gt:2,$lt:4}}", 1, 2, false, false ) {}
};
-
+
struct Diff3 : public TwoRangeBase {
Diff3() : TwoRangeBase( "{a:{$gt:1,$lte:2},b:{$gt:2,$lt:4}}", 1, 2, false, true ) {}
};
@@ -407,11 +407,11 @@ namespace QueryOptimizerTests {
struct Diff4 : public TwoRangeBase {
Diff4() : TwoRangeBase( "{a:{$gt:1,$lt:2},b:{$gte:2,$lt:4}}", 1, 2, false, false) {}
};
-
+
struct Diff5 : public TwoRangeBase {
Diff5() : TwoRangeBase( "{a:{$gt:1,$lte:2},b:{$gte:2,$lt:4}}", 1, 2, false, false) {}
};
-
+
struct Diff6 : public TwoRangeBase {
Diff6() : TwoRangeBase( "{a:{$gt:1,$lte:3},b:{$gte:2,$lt:4}}", 1, 2, false, false) {}
};
@@ -419,7 +419,7 @@ namespace QueryOptimizerTests {
struct Diff7 : public TwoRangeBase {
Diff7() : TwoRangeBase( "{a:{$gt:1,$lte:3},b:{$gt:2,$lt:4}}", 1, 2, false, true) {}
};
-
+
struct Diff8 : public TwoRangeBase {
Diff8() : TwoRangeBase( "{a:{$gt:1,$lt:4},b:{$gt:2,$lt:4}}", 1, 2, false, true) {}
};
@@ -430,12 +430,12 @@ namespace QueryOptimizerTests {
struct Diff10 : public TwoRangeBase {
Diff10() : TwoRangeBase( "{a:{$gt:1,$lte:4},b:{$gt:2,$lte:4}}", 1, 2, false, true) {}
- };
-
+ };
+
class SplitRangeBase : public DiffBase {
public:
SplitRangeBase( string obj, int low1, bool low1I, int high1, bool high1I, int low2, bool low2I, int high2, bool high2I )
- : _obj( obj ) {
+ : _obj( obj ) {
_n[ 0 ] = low1;
_n[ 1 ] = high1;
_n[ 2 ] = low2;
@@ -452,9 +452,9 @@ namespace QueryOptimizerTests {
virtual BSONObj obj() const { return fromjson( _obj ); }
string _obj;
int _n[ 4 ];
- bool _b[ 4 ];
+ bool _b[ 4 ];
};
-
+
struct Diff11 : public SplitRangeBase {
Diff11() : SplitRangeBase( "{a:{$gt:1,$lte:4},b:{$gt:2,$lt:4}}", 1, false, 2, true, 4, true, 4, true) {}
};
@@ -462,11 +462,11 @@ namespace QueryOptimizerTests {
struct Diff12 : public SplitRangeBase {
Diff12() : SplitRangeBase( "{a:{$gt:1,$lt:5},b:{$gt:2,$lt:4}}", 1, false, 2, true, 4, true, 5, false) {}
};
-
+
struct Diff13 : public TwoRangeBase {
Diff13() : TwoRangeBase( "{a:{$gt:1,$lt:5},b:{$gt:1,$lt:4}}", 4, 5, true, false) {}
};
-
+
struct Diff14 : public SplitRangeBase {
Diff14() : SplitRangeBase( "{a:{$gte:1,$lt:5},b:{$gt:1,$lt:4}}", 1, true, 1, true, 4, true, 5, false) {}
};
@@ -514,7 +514,7 @@ namespace QueryOptimizerTests {
struct Diff25 : public TwoRangeBase {
Diff25() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:0}", 1, 5, true, true) {}
};
-
+
struct Diff26 : public TwoRangeBase {
Diff26() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:1}", 1, 5, false, true) {}
};
@@ -530,7 +530,7 @@ namespace QueryOptimizerTests {
struct Diff29 : public TwoRangeBase {
Diff29() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:5}", 1, 5, true, false) {}
};
-
+
struct Diff30 : public TwoRangeBase {
Diff30() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:5}", 1, 5, true, false) {}
};
@@ -538,7 +538,7 @@ namespace QueryOptimizerTests {
struct Diff31 : public TwoRangeBase {
Diff31() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:6}", 1, 5, true, false) {}
};
-
+
struct Diff32 : public TwoRangeBase {
Diff32() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:6}", 1, 5, true, true) {}
};
@@ -546,7 +546,7 @@ namespace QueryOptimizerTests {
class EmptyBase : public DiffBase {
public:
EmptyBase( string obj )
- : _obj( obj ) {}
+ : _obj( obj ) {}
private:
virtual unsigned len() const { return 0; }
virtual const int *nums() const { return 0; }
@@ -554,7 +554,7 @@ namespace QueryOptimizerTests {
virtual BSONObj obj() const { return fromjson( _obj ); }
string _obj;
};
-
+
struct Diff33 : public EmptyBase {
Diff33() : EmptyBase( "{a:{$gte:1,$lte:5},b:{$gt:0,$lt:6}}" ) {}
};
@@ -586,7 +586,7 @@ namespace QueryOptimizerTests {
struct Diff40 : public EmptyBase {
Diff40() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gt:0,$lte:5}}" ) {}
};
-
+
struct Diff41 : public TwoRangeBase {
Diff41() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gt:0,$lt:5}}", 5, 5, true, true ) {}
};
@@ -654,7 +654,7 @@ namespace QueryOptimizerTests {
struct Diff57 : public EmptyBase {
Diff57() : EmptyBase( "{a:{$gte:1,$lte:5},b:{$gte:1,$lte:5}}" ) {}
};
-
+
struct Diff58 : public TwoRangeBase {
Diff58() : TwoRangeBase( "{a:1,b:{$gt:1,$lt:5}}", 1, 1, true, true ) {}
};
@@ -678,11 +678,11 @@ namespace QueryOptimizerTests {
struct Diff63 : public EmptyBase {
Diff63() : EmptyBase( "{a:5,b:5}" ) {}
};
-
+
struct Diff64 : public TwoRangeBase {
Diff64() : TwoRangeBase( "{a:{$gte:1,$lte:2},b:{$gt:0,$lte:1}}", 1, 2, false, true ) {}
};
-
+
class DiffMulti1 : public DiffBase {
public:
void run() {
@@ -693,7 +693,7 @@ namespace QueryOptimizerTests {
other |= frs.range( "d" );
other |= frs.range( "e" );
ret -= other;
- check( ret );
+ check( ret );
}
protected:
virtual unsigned len() const { return 3; }
@@ -712,7 +712,7 @@ namespace QueryOptimizerTests {
ret |= frs.range( "d" );
ret |= frs.range( "e" );
ret -= mask;
- check( ret );
+ check( ret );
}
protected:
virtual unsigned len() const { return 2; }
@@ -720,7 +720,7 @@ namespace QueryOptimizerTests {
virtual const bool *incs() const { static bool b[] = { false, true, true, false }; return b; }
virtual BSONObj obj() const { return BSONObj(); }
};
-
+
class SetIntersect {
public:
void run() {
@@ -730,9 +730,9 @@ namespace QueryOptimizerTests {
ASSERT_EQUALS( fromjson( "{a:1,b:5,c:7,d:{$gte:8,$lte:9},e:10}" ), frs1.simplifiedQuery( BSONObj() ) );
}
};
-
+
} // namespace FieldRangeTests
-
+
namespace QueryPlanTests {
class Base {
public:
@@ -779,7 +779,7 @@ namespace QueryOptimizerTests {
static DBDirectClient client_;
};
DBDirectClient Base::client_;
-
+
// There's a limit of 10 indexes total, make sure not to exceed this in a given test.
#define INDEXNO(x) nsd()->idxNo( *this->index( BSON(x) ) )
#define INDEX(x) this->index( BSON(x) )
@@ -787,7 +787,7 @@ namespace QueryOptimizerTests {
#define FBS(x) ( FieldRangeSet_GLOBAL.reset( new FieldRangeSet( ns(), x ) ), *FieldRangeSet_GLOBAL )
auto_ptr< FieldRangeSet > FieldRangeSet_GLOBAL2;
#define FBS2(x) ( FieldRangeSet_GLOBAL2.reset( new FieldRangeSet( ns(), x ) ), *FieldRangeSet_GLOBAL2 )
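QueryPlan takes its FieldRangeSet arguments by reference, so the FBS/FBS2 macros park each freshly built set in a file-scope auto_ptr and hand back a dereference; the object stays alive across the call, which lets a construction appear inline in an argument list. A typical call, as the tests below use it:

    // FBS() owns the new FieldRangeSet via FieldRangeSet_GLOBAL, so the
    // reference remains valid while QueryPlan's constructor runs.
    QueryPlan p( nsd(), INDEXNO( "a" << 1 ),
                 FBS( BSON( "a" << 4 ) ),
                 FBS2( BSON( "a" << 4 ) ),
                 BSON( "a" << 4 ), BSONObj() );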
-
+
class NoIndex : public Base {
public:
void run() {
@@ -797,7 +797,7 @@ namespace QueryOptimizerTests {
ASSERT( !p.exactKeyMatch() );
}
};
-
+
class SimpleOrder : public Base {
public:
void run() {
@@ -807,7 +807,7 @@ namespace QueryOptimizerTests {
BSONObjBuilder b2;
b2.appendMaxKey( "" );
BSONObj end = b2.obj();
-
+
QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
ASSERT( !p.scanAndOrderRequired() );
ASSERT( !startKey( p ).woCompare( start ) );
@@ -820,30 +820,30 @@ namespace QueryOptimizerTests {
ASSERT( !endKey( p3 ).woCompare( end ) );
}
};
-
+
class MoreIndexThanNeeded : public Base {
public:
void run() {
QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
- ASSERT( !p.scanAndOrderRequired() );
+ ASSERT( !p.scanAndOrderRequired() );
}
};
-
+
class IndexSigns : public Base {
public:
void run() {
QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << -1 ) , FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << -1 ) );
- ASSERT( !p.scanAndOrderRequired() );
+ ASSERT( !p.scanAndOrderRequired() );
ASSERT_EQUALS( 1, p.direction() );
QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << -1 ) );
- ASSERT( p2.scanAndOrderRequired() );
+ ASSERT( p2.scanAndOrderRequired() );
ASSERT_EQUALS( 0, p2.direction() );
QueryPlan p3( nsd(), indexno( id_obj ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "_id" << 1 ) );
ASSERT( !p3.scanAndOrderRequired() );
ASSERT_EQUALS( 1, p3.direction() );
- }
+ }
};
-
+
class IndexReverse : public Base {
public:
void run() {
@@ -856,17 +856,17 @@ namespace QueryOptimizerTests {
b2.appendMinKey( "" );
BSONObj end = b2.obj();
QueryPlan p( nsd(), INDEXNO( "a" << -1 << "b" << 1 ),FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << -1 ) );
- ASSERT( !p.scanAndOrderRequired() );
+ ASSERT( !p.scanAndOrderRequired() );
ASSERT_EQUALS( -1, p.direction() );
ASSERT( !startKey( p ).woCompare( start ) );
ASSERT( !endKey( p ).woCompare( end ) );
QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << -1 << "b" << -1 ) );
- ASSERT( !p2.scanAndOrderRequired() );
+ ASSERT( !p2.scanAndOrderRequired() );
ASSERT_EQUALS( -1, p2.direction() );
QueryPlan p3( nsd(), INDEXNO( "a" << 1 << "b" << -1 ), FBS( BSONObj() ), FBS2( BSONObj() ), BSONObj(), BSON( "a" << -1 << "b" << -1 ) );
- ASSERT( p3.scanAndOrderRequired() );
+ ASSERT( p3.scanAndOrderRequired() );
ASSERT_EQUALS( 0, p3.direction() );
- }
+ }
};
class NoOrder : public Base {
@@ -881,28 +881,28 @@ namespace QueryOptimizerTests {
b2.appendMaxKey( "" );
BSONObj end = b2.obj();
QueryPlan p( nsd(), INDEXNO( "a" << -1 << "b" << 1 ), FBS( BSON( "a" << 3 ) ), FBS2( BSON( "a" << 3 ) ), BSON( "a" << 3 ), BSONObj() );
- ASSERT( !p.scanAndOrderRequired() );
+ ASSERT( !p.scanAndOrderRequired() );
ASSERT( !startKey( p ).woCompare( start ) );
ASSERT( !endKey( p ).woCompare( end ) );
QueryPlan p2( nsd(), INDEXNO( "a" << -1 << "b" << 1 ), FBS( BSON( "a" << 3 ) ), FBS2( BSON( "a" << 3 ) ), BSON( "a" << 3 ), BSONObj() );
- ASSERT( !p2.scanAndOrderRequired() );
+ ASSERT( !p2.scanAndOrderRequired() );
ASSERT( !startKey( p ).woCompare( start ) );
ASSERT( !endKey( p ).woCompare( end ) );
- }
+ }
};
-
+
class EqualWithOrder : public Base {
public:
void run() {
QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSON( "a" << 4 ) ), FBS2( BSON( "a" << 4 ) ), BSON( "a" << 4 ), BSON( "b" << 1 ) );
- ASSERT( !p.scanAndOrderRequired() );
+ ASSERT( !p.scanAndOrderRequired() );
QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FBS( BSON( "b" << 4 ) ), FBS2( BSON( "b" << 4 ) ), BSON( "b" << 4 ), BSON( "a" << 1 << "c" << 1 ) );
- ASSERT( !p2.scanAndOrderRequired() );
+ ASSERT( !p2.scanAndOrderRequired() );
QueryPlan p3( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FBS( BSON( "b" << 4 ) ), FBS2( BSON( "b" << 4 ) ), BSON( "b" << 4 ), BSON( "a" << 1 << "c" << 1 ) );
- ASSERT( p3.scanAndOrderRequired() );
+ ASSERT( p3.scanAndOrderRequired() );
}
};
-
+
class Optimal : public Base {
public:
void run() {
@@ -926,21 +926,21 @@ namespace QueryOptimizerTests {
ASSERT( p9.optimal() );
}
};
-
+
class MoreOptimal : public Base {
public:
void run() {
- QueryPlan p10( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FBS( BSON( "a" << 1 ) ), FBS2( BSON( "a" << 1 ) ), BSON( "a" << 1 ), BSONObj() );
- ASSERT( p10.optimal() );
- QueryPlan p11( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FBS( BSON( "a" << 1 << "b" << LT << 1 ) ), FBS2( BSON( "a" << 1 << "b" << LT << 1 ) ), BSON( "a" << 1 << "b" << LT << 1 ), BSONObj() );
- ASSERT( p11.optimal() );
- QueryPlan p12( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FBS( BSON( "a" << LT << 1 ) ), FBS2( BSON( "a" << LT << 1 ) ), BSON( "a" << LT << 1 ), BSONObj() );
- ASSERT( p12.optimal() );
- QueryPlan p13( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FBS( BSON( "a" << LT << 1 ) ), FBS2( BSON( "a" << LT << 1 ) ), BSON( "a" << LT << 1 ), BSON( "a" << 1 ) );
- ASSERT( p13.optimal() );
+ QueryPlan p10( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FBS( BSON( "a" << 1 ) ), FBS2( BSON( "a" << 1 ) ), BSON( "a" << 1 ), BSONObj() );
+ ASSERT( p10.optimal() );
+ QueryPlan p11( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FBS( BSON( "a" << 1 << "b" << LT << 1 ) ), FBS2( BSON( "a" << 1 << "b" << LT << 1 ) ), BSON( "a" << 1 << "b" << LT << 1 ), BSONObj() );
+ ASSERT( p11.optimal() );
+ QueryPlan p12( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FBS( BSON( "a" << LT << 1 ) ), FBS2( BSON( "a" << LT << 1 ) ), BSON( "a" << LT << 1 ), BSONObj() );
+ ASSERT( p12.optimal() );
+ QueryPlan p13( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FBS( BSON( "a" << LT << 1 ) ), FBS2( BSON( "a" << LT << 1 ) ), BSON( "a" << LT << 1 ), BSON( "a" << 1 ) );
+ ASSERT( p13.optimal() );
}
};
-
+
class KeyMatch : public Base {
public:
void run() {
@@ -964,15 +964,15 @@ namespace QueryOptimizerTests {
ASSERT( p9.exactKeyMatch() );
}
};
-
+
class MoreKeyMatch : public Base {
public:
void run() {
QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FBS( BSON( "a" << "r" << "b" << NE << "q" ) ), FBS2( BSON( "a" << "r" << "b" << NE << "q" ) ), BSON( "a" << "r" << "b" << NE << "q" ), BSON( "a" << 1 ) );
- ASSERT( !p.exactKeyMatch() );
+ ASSERT( !p.exactKeyMatch() );
}
};
-
+
class ExactKeyQueryTypes : public Base {
public:
void run() {
@@ -991,7 +991,7 @@ namespace QueryOptimizerTests {
ASSERT( !p5.exactKeyMatch() );
}
};
-
+
class Unhelpful : public Base {
public:
void run() {
@@ -1010,13 +1010,13 @@ namespace QueryOptimizerTests {
ASSERT( p4.unhelpful() );
}
};
-
+
} // namespace QueryPlanTests
namespace QueryPlanSetTests {
class Base {
public:
- Base() : _context( ns() ){
+ Base() : _context( ns() ) {
string err;
userCreateNS( ns(), BSONObj(), err, false );
}
@@ -1039,7 +1039,7 @@ namespace QueryOptimizerTests {
if ( fieldsToReturn )
fieldsToReturn->appendSelfToBufBuilder(b);
toSend.setData(dbQuery, b.buf(), b.len());
- }
+ }
protected:
static const char *ns() { return "unittests.QueryPlanSetTests"; }
static NamespaceDetails *nsd() { return nsdetails( ns() ); }
@@ -1047,7 +1047,7 @@ namespace QueryOptimizerTests {
dblock lk_;
Client::Context _context;
};
-
+
class NoIndexes : public Base {
public:
void run() {
@@ -1057,7 +1057,7 @@ namespace QueryOptimizerTests {
ASSERT_EQUALS( 1, s.nPlans() );
}
};
-
+
class Optimal : public Base {
public:
void run() {
@@ -1066,7 +1066,7 @@ namespace QueryOptimizerTests {
auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 4 ) ) );
auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 4 ), BSONObj() );
- ASSERT_EQUALS( 1, s.nPlans() );
+ ASSERT_EQUALS( 1, s.nPlans() );
}
};
@@ -1093,7 +1093,7 @@ namespace QueryOptimizerTests {
ASSERT_EQUALS( 1, s.nPlans() );
}
};
-
+
class HintSpec : public Base {
public:
void run() {
@@ -1104,7 +1104,7 @@ namespace QueryOptimizerTests {
auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 1 ) ) );
auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), &e );
- ASSERT_EQUALS( 1, s.nPlans() );
+ ASSERT_EQUALS( 1, s.nPlans() );
}
};
@@ -1118,10 +1118,10 @@ namespace QueryOptimizerTests {
auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 1 ) ) );
auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), &e );
- ASSERT_EQUALS( 1, s.nPlans() );
+ ASSERT_EQUALS( 1, s.nPlans() );
}
};
-
+
class NaturalHint : public Base {
public:
void run() {
@@ -1132,7 +1132,7 @@ namespace QueryOptimizerTests {
auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 1 ) ) );
auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), &e );
- ASSERT_EQUALS( 1, s.nPlans() );
+ ASSERT_EQUALS( 1, s.nPlans() );
}
};
@@ -1156,10 +1156,10 @@ namespace QueryOptimizerTests {
auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 1 ) ) );
auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
ASSERT_EXCEPTION( QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), &e ),
- AssertionException );
+ AssertionException );
}
};
-
+
class Count : public Base {
public:
void run() {
@@ -1184,7 +1184,7 @@ namespace QueryOptimizerTests {
ASSERT_EQUALS( 0, runCount( ns(), BSON( "query" << BSON( "a" << GT << 0 << LT << -1 ) ), err ) );
}
};
-
+
class QueryMissingNs : public Base {
public:
QueryMissingNs() { log() << "querymissingns starts" << endl; }
@@ -1202,7 +1202,7 @@ namespace QueryOptimizerTests {
}
};
-
+
class UnhelpfulIndex : public Base {
public:
void run() {
@@ -1211,10 +1211,10 @@ namespace QueryOptimizerTests {
auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 1 << "c" << 2 ) ) );
auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 1 << "c" << 2 ), BSONObj() );
- ASSERT_EQUALS( 2, s.nPlans() );
+ ASSERT_EQUALS( 2, s.nPlans() );
}
- };
-
+ };
+
class SingleException : public Base {
public:
void run() {
@@ -1258,7 +1258,7 @@ namespace QueryOptimizerTests {
mutable bool youThrow_;
};
};
-
+
class AllException : public Base {
public:
void run() {
@@ -1288,7 +1288,7 @@ namespace QueryOptimizerTests {
virtual long long nscanned() { return 0; }
};
};
-
+
class SaveGoodIndex : public Base {
public:
void run() {
@@ -1302,7 +1302,7 @@ namespace QueryOptimizerTests {
nPlans( 3 );
runQuery();
nPlans( 1 );
-
+
{
DBDirectClient client;
for( int i = 0; i < 34; ++i ) {
@@ -1312,7 +1312,7 @@ namespace QueryOptimizerTests {
}
}
nPlans( 3 );
-
+
auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 4 ) ) );
auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
@@ -1334,8 +1334,8 @@ namespace QueryOptimizerTests {
QueryPlanSet s3( ns(), frs3, frsOrig3, BSON( "a" << 4 ), BSON( "b" << 1 << "c" << 1 ) );
TestOp newerOriginal;
s3.runOp( newerOriginal );
- nPlans( 3 );
-
+ nPlans( 3 );
+
runQuery();
nPlans( 1 );
}
@@ -1344,7 +1344,7 @@ namespace QueryOptimizerTests {
auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 4 ) ) );
auto_ptr< FieldRangeSet > frsOrig( new FieldRangeSet( *frs ) );
QueryPlanSet s( ns(), frs, frsOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
- ASSERT_EQUALS( n, s.nPlans() );
+ ASSERT_EQUALS( n, s.nPlans() );
}
void runQuery() {
auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), BSON( "a" << 4 ) ) );
@@ -1369,8 +1369,8 @@ namespace QueryOptimizerTests {
virtual bool mayRecordPlan() const { return false; }
virtual QueryOp *_createChild() const { return new NoRecordTestOp(); }
};
- };
-
+ };
+
class TryAllPlansOnErr : public Base {
public:
void run() {
@@ -1383,7 +1383,7 @@ namespace QueryOptimizerTests {
s.runOp( op );
ASSERT( fromjson( "{$natural:1}" ).woCompare( NamespaceDetailsTransient::_get( ns() ).indexForPattern( s.fbs().pattern( BSON( "b" << 1 ) ) ) ) == 0 );
ASSERT_EQUALS( 1, NamespaceDetailsTransient::_get( ns() ).nScannedForPattern( s.fbs().pattern( BSON( "b" << 1 ) ) ) );
-
+
auto_ptr< FieldRangeSet > frs2( new FieldRangeSet( ns(), BSON( "a" << 4 ) ) );
auto_ptr< FieldRangeSet > frsOrig2( new FieldRangeSet( *frs2 ) );
QueryPlanSet s2( ns(), frs2, frsOrig2, BSON( "a" << 4 ), BSON( "b" << 1 ) );
@@ -1417,7 +1417,7 @@ namespace QueryOptimizerTests {
}
};
};
-
+
class FindOne : public Base {
public:
void run() {
@@ -1425,12 +1425,12 @@ namespace QueryOptimizerTests {
theDataFileMgr.insertWithObjMod( ns(), one );
BSONObj result;
ASSERT( Helpers::findOne( ns(), BSON( "a" << 1 ), result ) );
- ASSERT_EXCEPTION( Helpers::findOne( ns(), BSON( "a" << 1 ), result, true ), AssertionException );
+ ASSERT_EXCEPTION( Helpers::findOne( ns(), BSON( "a" << 1 ), result, true ), AssertionException );
Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
- ASSERT( Helpers::findOne( ns(), BSON( "a" << 1 ), result, true ) );
+ ASSERT( Helpers::findOne( ns(), BSON( "a" << 1 ), result, true ) );
}
};
-
+
class Delete : public Base {
public:
void run() {
@@ -1446,7 +1446,7 @@ namespace QueryOptimizerTests {
ASSERT_EQUALS( 1, NamespaceDetailsTransient::_get( ns() ).nScannedForPattern( FieldRangeSet( ns(), BSON( "a" << 1 ) ).pattern() ) );
}
};
-
+
class DeleteOneScan : public Base {
public:
void run() {
@@ -1499,7 +1499,7 @@ namespace QueryOptimizerTests {
runQuery( m, q);
}
ASSERT( BSON( "$natural" << 1 ).woCompare( NamespaceDetailsTransient::_get( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ) ).pattern() ) ) == 0 );
-
+
Message m2;
assembleRequest( ns(), QUERY( "b" << 99 << "a" << GTE << 0 ).obj, 2, 0, 0, 0, m2 );
{
@@ -1507,11 +1507,11 @@ namespace QueryOptimizerTests {
QueryMessage q(d);
runQuery( m2, q);
}
- ASSERT( BSON( "a" << 1 ).woCompare( NamespaceDetailsTransient::_get( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ) ).pattern() ) ) == 0 );
+ ASSERT( BSON( "a" << 1 ).woCompare( NamespaceDetailsTransient::_get( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ) ).pattern() ) ) == 0 );
ASSERT_EQUALS( 3, NamespaceDetailsTransient::_get( ns() ).nScannedForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ) ).pattern() ) );
}
};
-
+
class InQueryIntervals : public Base {
public:
void run() {
@@ -1532,7 +1532,7 @@ namespace QueryOptimizerTests {
ASSERT_EQUALS( expected[ i ], c->current().getField( "a" ).number() );
}
ASSERT( !c->ok() );
-
+
// now check reverse
{
auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), fromjson( "{a:{$in:[2,3,6,9,11]}}" ) ) );
@@ -1544,11 +1544,11 @@ namespace QueryOptimizerTests {
for( int i = 0; i < 4; ++i, c->advance() ) {
ASSERT_EQUALS( expected[ i ], c->current().getField( "a" ).number() );
}
- ASSERT( !c->ok() );
+ ASSERT( !c->ok() );
}
}
};
-
+
class EqualityThenIn : public Base {
public:
void run() {
@@ -1559,7 +1559,7 @@ namespace QueryOptimizerTests {
}
BSONObj hint = fromjson( "{$hint:{a:1,b:1}}" );
BSONElement hintElt = hint.firstElement();
- auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), fromjson( "{a:5,b:{$in:[2,3,6,9,11]}}" ) ) );
+ auto_ptr< FieldRangeSet > frs( new FieldRangeSet( ns(), fromjson( "{a:5,b:{$in:[2,3,6,9,11]}}" ) ) );
QueryPlan qp( nsd(), 1, *frs, *frs, fromjson( "{a:5,b:{$in:[2,3,6,9,11]}}" ), BSONObj() );
boost::shared_ptr<Cursor> c = qp.newCursor();
double expected[] = { 2, 3, 6, 9 };
@@ -1571,7 +1571,7 @@ namespace QueryOptimizerTests {
ASSERT( !c->ok() );
}
};
-
+
class NotEqualityThenIn : public Base {
public:
void run() {
@@ -1594,7 +1594,7 @@ namespace QueryOptimizerTests {
};
} // namespace QueryPlanSetTests
-
+
class Base {
public:
Base() : _ctx( ns() ) {
@@ -1614,7 +1614,7 @@ namespace QueryOptimizerTests {
dblock lk_;
Client::Context _ctx;
};
-
+
class BestGuess : public Base {
public:
void run() {
@@ -1624,7 +1624,7 @@ namespace QueryOptimizerTests {
theDataFileMgr.insertWithObjMod( ns(), temp );
temp = BSON( "b" << 1 );
theDataFileMgr.insertWithObjMod( ns(), temp );
-
+
boost::shared_ptr< Cursor > c = bestGuessCursor( ns(), BSON( "b" << 1 ), BSON( "a" << 1 ) );
ASSERT_EQUALS( string( "a" ), c->indexKeyPattern().firstElement().fieldName() );
c = bestGuessCursor( ns(), BSON( "a" << 1 ), BSON( "b" << 1 ) );
@@ -1633,22 +1633,22 @@ namespace QueryOptimizerTests {
ASSERT_EQUALS( string( "a" ), m->sub_c()->indexKeyPattern().firstElement().fieldName() );
m = dynamic_pointer_cast< MultiCursor >( bestGuessCursor( ns(), fromjson( "{a:1,$or:[{y:1}]}" ), BSON( "b" << 1 ) ) );
ASSERT_EQUALS( string( "b" ), m->sub_c()->indexKeyPattern().firstElement().fieldName() );
-
+
FieldRangeSet frs( "ns", BSON( "a" << 1 ) );
{
scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
- NamespaceDetailsTransient::get_inlock( ns() ).registerIndexForPattern( frs.pattern( BSON( "b" << 1 ) ), BSON( "a" << 1 ), 0 );
+ NamespaceDetailsTransient::get_inlock( ns() ).registerIndexForPattern( frs.pattern( BSON( "b" << 1 ) ), BSON( "a" << 1 ), 0 );
}
m = dynamic_pointer_cast< MultiCursor >( bestGuessCursor( ns(), fromjson( "{a:1,$or:[{y:1}]}" ), BSON( "b" << 1 ) ) );
ASSERT_EQUALS( string( "b" ), m->sub_c()->indexKeyPattern().firstElement().fieldName() );
}
};
-
+
class All : public Suite {
public:
- All() : Suite( "queryoptimizer" ){}
-
- void setupTests(){
+ All() : Suite( "queryoptimizer" ) {}
+
+ void setupTests() {
add< FieldRangeTests::Empty >();
add< FieldRangeTests::Eq >();
add< FieldRangeTests::DupEq >();
@@ -1780,6 +1780,6 @@ namespace QueryOptimizerTests {
add< BestGuess >();
}
} myall;
-
+
} // namespace QueryOptimizerTests
diff --git a/dbtests/querytests.cpp b/dbtests/querytests.cpp
index e512640bca6..9b86c1c6e90 100644
--- a/dbtests/querytests.cpp
+++ b/dbtests/querytests.cpp
@@ -39,7 +39,7 @@ namespace QueryTests {
dblock lk;
Client::Context _context;
public:
- Base() : _context( ns() ){
+ Base() : _context( ns() ) {
addIndex( fromjson( "{\"a\":1}" ) );
}
~Base() {
@@ -50,7 +50,8 @@ namespace QueryTests {
toDelete.push_back( c->currLoc() );
for( vector< DiskLoc >::iterator i = toDelete.begin(); i != toDelete.end(); ++i )
theDataFileMgr.deleteRecord( ns(), i->rec(), *i, false );
- } catch ( ... ) {
+ }
+ catch ( ... ) {
FAIL( "Exception while cleaning up records" );
}
}
@@ -131,7 +132,7 @@ namespace QueryTests {
ASSERT_EQUALS( 1, runCount( ns(), cmd, err ) );
}
};
-
+
class FindOne : public Base {
public:
void run() {
@@ -214,7 +215,7 @@ namespace QueryTests {
client().dropCollection( ns );
}
- void testLimit(int limit){
+ void testLimit(int limit) {
ASSERT_EQUALS(client().query( ns, BSONObj(), limit )->itcount(), limit);
}
void run() {
@@ -289,7 +290,7 @@ namespace QueryTests {
insert( ns, BSON( "a" << 0 ) );
c = client().query( ns, QUERY( "a" << 1 ).hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
ASSERT( 0 != c->getCursorId() );
- ASSERT( !c->isDead() );
+ ASSERT( !c->isDead() );
}
};
@@ -349,7 +350,7 @@ namespace QueryTests {
ASSERT( !client().getLastError().empty() );
}
};
-
+
class TailableQueryOnId : public ClientBase {
public:
~TailableQueryOnId() {
@@ -515,7 +516,7 @@ namespace QueryTests {
static const char *ns() { return "unittests.querytests.AutoResetIndexCache"; }
static const char *idxNs() { return "unittests.system.indexes"; }
void index() const { ASSERT( !client().findOne( idxNs(), BSON( "name" << NE << "_id_" ) ).isEmpty() ); }
- void noIndex() const {
+ void noIndex() const {
BSONObj o = client().findOne( idxNs(), BSON( "name" << NE << "_id_" ) );
if( !o.isEmpty() ) {
cout << o.toString() << endl;
@@ -608,8 +609,8 @@ namespace QueryTests {
client().insert( ns, fromjson( "{a:[1,2,3]}" ) );
ASSERT( client().query( ns, Query( "{a:[1,2,3]}" ) )->more() );
client().ensureIndex( ns, BSON( "a" << 1 ) );
- ASSERT( client().query( ns, Query( "{a:{$in:[1,[1,2,3]]}}" ).hint( BSON( "a" << 1 ) ) )->more() );
- ASSERT( client().query( ns, Query( "{a:[1,2,3]}" ).hint( BSON( "a" << 1 ) ) )->more() ); // SERVER-146
+ ASSERT( client().query( ns, Query( "{a:{$in:[1,[1,2,3]]}}" ).hint( BSON( "a" << 1 ) ) )->more() );
+ ASSERT( client().query( ns, Query( "{a:[1,2,3]}" ).hint( BSON( "a" << 1 ) ) )->more() ); // SERVER-146
}
};
@@ -623,7 +624,7 @@ namespace QueryTests {
client().insert( ns, fromjson( "{a:[[1],2]}" ) );
check( "$natural" );
client().ensureIndex( ns, BSON( "a" << 1 ) );
- check( "a" ); // SERVER-146
+ check( "a" ); // SERVER-146
}
private:
void check( const string &hintField ) {
@@ -766,12 +767,12 @@ namespace QueryTests {
class DifferentNumbers : public ClientBase {
public:
- ~DifferentNumbers(){
+ ~DifferentNumbers() {
client().dropCollection( "unittests.querytests.DifferentNumbers" );
}
- void t( const char * ns ){
+ void t( const char * ns ) {
auto_ptr< DBClientCursor > cursor = client().query( ns, Query().sort( "7" ) );
- while ( cursor->more() ){
+ while ( cursor->more() ) {
BSONObj o = cursor->next();
assert( o.valid() );
//cout << " foo " << o << endl;
@@ -792,37 +793,37 @@ namespace QueryTests {
t(ns);
}
};
-
+
class CollectionBase : public ClientBase {
public:
-
- CollectionBase( string leaf ){
+
+ CollectionBase( string leaf ) {
_ns = "unittests.querytests.";
_ns += leaf;
client().dropCollection( ns() );
}
-
- virtual ~CollectionBase(){
+
+ virtual ~CollectionBase() {
client().dropCollection( ns() );
}
-
- int count(){
+
+ int count() {
return (int) client().count( ns() );
}
- const char * ns(){
+ const char * ns() {
return _ns.c_str();
}
-
+
private:
string _ns;
};
class SymbolStringSame : public CollectionBase {
public:
- SymbolStringSame() : CollectionBase( "symbolstringsame" ){}
+ SymbolStringSame() : CollectionBase( "symbolstringsame" ) {}
- void run(){
+ void run() {
{ BSONObjBuilder b; b.appendSymbol( "x" , "eliot" ); b.append( "z" , 17 ); client().insert( ns() , b.obj() ); }
ASSERT_EQUALS( 17 , client().findOne( ns() , BSONObj() )["z"].number() );
{
@@ -838,46 +839,46 @@ namespace QueryTests {
class TailableCappedRaceCondition : public CollectionBase {
public:
-
- TailableCappedRaceCondition() : CollectionBase( "tailablecappedrace" ){
+
+ TailableCappedRaceCondition() : CollectionBase( "tailablecappedrace" ) {
client().dropCollection( ns() );
_n = 0;
}
- void run(){
+ void run() {
string err;
- writelock lk("");
+ writelock lk("");
Client::Context ctx( "unittests" );
ASSERT( userCreateNS( ns() , fromjson( "{ capped : true , size : 2000 }" ) , err , false ) );
- for ( int i=0; i<100; i++ ){
+ for ( int i=0; i<100; i++ ) {
insertNext();
ASSERT( count() < 45 );
}
-
+
int a = count();
-
+
auto_ptr< DBClientCursor > c = client().query( ns() , QUERY( "i" << GT << 0 ).hint( BSON( "$natural" << 1 ) ), 0, 0, 0, QueryOption_CursorTailable );
int n=0;
- while ( c->more() ){
+ while ( c->more() ) {
BSONObj z = c->next();
n++;
}
-
+
ASSERT_EQUALS( a , n );
insertNext();
ASSERT( c->more() );
- for ( int i=0; i<50; i++ ){
+ for ( int i=0; i<50; i++ ) {
insertNext();
}
- while ( c->more() ){ c->next(); }
+ while ( c->more() ) { c->next(); }
ASSERT( c->isDead() );
}
-
- void insertNext(){
+
+ void insertNext() {
insert( ns() , BSON( "i" << _n++ ) );
}
@@ -886,71 +887,71 @@ namespace QueryTests {
class HelperTest : public CollectionBase {
public:
-
- HelperTest() : CollectionBase( "helpertest" ){
+
+ HelperTest() : CollectionBase( "helpertest" ) {
}
- void run(){
+ void run() {
writelock lk("");
Client::Context ctx( "unittests" );
-
- for ( int i=0; i<50; i++ ){
+
+ for ( int i=0; i<50; i++ ) {
insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
}
ASSERT_EQUALS( 50 , count() );
-
+
BSONObj res;
ASSERT( Helpers::findOne( ns() , BSON( "_id" << 20 ) , res , true ) );
ASSERT_EQUALS( 40 , res["x"].numberInt() );
-
+
ASSERT( Helpers::findById( cc(), ns() , BSON( "_id" << 20 ) , res ) );
ASSERT_EQUALS( 40 , res["x"].numberInt() );
ASSERT( ! Helpers::findById( cc(), ns() , BSON( "_id" << 200 ) , res ) );
unsigned long long slow , fast;
-
+
int n = 10000;
{
Timer t;
- for ( int i=0; i<n; i++ ){
+ for ( int i=0; i<n; i++ ) {
ASSERT( Helpers::findOne( ns() , BSON( "_id" << 20 ) , res , true ) );
}
slow = t.micros();
}
{
Timer t;
- for ( int i=0; i<n; i++ ){
+ for ( int i=0; i<n; i++ ) {
ASSERT( Helpers::findById( cc(), ns() , BSON( "_id" << 20 ) , res ) );
}
fast = t.micros();
}
-
+
cout << "HelperTest slow:" << slow << " fast:" << fast << endl;
-
+
}
};
class HelperByIdTest : public CollectionBase {
public:
-
- HelperByIdTest() : CollectionBase( "helpertestbyid" ){
+
+ HelperByIdTest() : CollectionBase( "helpertestbyid" ) {
}
- void run(){
+ void run() {
writelock lk("");
Client::Context ctx( "unittests" );
- for ( int i=0; i<1000; i++ ){
+ for ( int i=0; i<1000; i++ ) {
insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
}
- for ( int i=0; i<1000; i+=2 ){
+ for ( int i=0; i<1000; i+=2 ) {
client_.remove( ns() , BSON( "_id" << i ) );
}
- BSONObj res;
- for ( int i=0; i<1000; i++ ){
+ BSONObj res;
+ for ( int i=0; i<1000; i++ ) {
bool found = Helpers::findById( cc(), ns() , BSON( "_id" << i ) , res );
ASSERT_EQUALS( i % 2 , int(found) );
}
@@ -958,19 +959,19 @@ namespace QueryTests {
}
};
- class ClientCursorTest : public CollectionBase{
- ClientCursorTest() : CollectionBase( "clientcursortest" ){
+ class ClientCursorTest : public CollectionBase {
+ ClientCursorTest() : CollectionBase( "clientcursortest" ) {
}
- void run(){
+ void run() {
writelock lk("");
Client::Context ctx( "unittests" );
-
- for ( int i=0; i<1000; i++ ){
+
+ for ( int i=0; i<1000; i++ ) {
insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
}
-
+
}
};
@@ -982,19 +983,19 @@ namespace QueryTests {
~FindingStart() {
__findingStartInitialTimeout = _old;
}
-
+
void run() {
BSONObj info;
ASSERT( client().runCommand( "unittests", BSON( "create" << "querytests.findingstart" << "capped" << true << "size" << 1000 << "$nExtents" << 5 << "autoIndexId" << false ), info ) );
-
+
int i = 0;
for( int oldCount = -1;
- count() != oldCount;
- oldCount = count(), client().insert( ns(), BSON( "ts" << i++ ) ) );
+ count() != oldCount;
+ oldCount = count(), client().insert( ns(), BSON( "ts" << i++ ) ) );
for( int k = 0; k < 5; ++k ) {
client().insert( ns(), BSON( "ts" << i++ ) );
- int min = client().query( ns(), Query().sort( BSON( "$natural" << 1 ) ) )->next()[ "ts" ].numberInt();
+ int min = client().query( ns(), Query().sort( BSON( "$natural" << 1 ) ) )->next()[ "ts" ].numberInt();
for( int j = -1; j < i; ++j ) {
auto_ptr< DBClientCursor > c = client().query( ns(), QUERY( "ts" << GTE << j ), 0, 0, 0, QueryOption_OplogReplay );
ASSERT( c->more() );
@@ -1004,7 +1005,7 @@ namespace QueryTests {
}
}
}
-
+
private:
int _old;
};
@@ -1017,17 +1018,17 @@ namespace QueryTests {
~FindingStartPartiallyFull() {
__findingStartInitialTimeout = _old;
}
-
+
void run() {
BSONObj info;
ASSERT( client().runCommand( "unittests", BSON( "create" << "querytests.findingstart" << "capped" << true << "size" << 10000 << "$nExtents" << 5 << "autoIndexId" << false ), info ) );
-
+
int i = 0;
for( ; i < 150; client().insert( ns(), BSON( "ts" << i++ ) ) );
-
+
for( int k = 0; k < 5; ++k ) {
client().insert( ns(), BSON( "ts" << i++ ) );
- int min = client().query( ns(), Query().sort( BSON( "$natural" << 1 ) ) )->next()[ "ts" ].numberInt();
+ int min = client().query( ns(), Query().sort( BSON( "$natural" << 1 ) ) )->next()[ "ts" ].numberInt();
for( int j = -1; j < i; ++j ) {
auto_ptr< DBClientCursor > c = client().query( ns(), QUERY( "ts" << GTE << j ), 0, 0, 0, QueryOption_OplogReplay );
ASSERT( c->more() );
@@ -1037,12 +1038,12 @@ namespace QueryTests {
}
}
}
-
+
private:
int _old;
};
-
-
+
+
class WhatsMyUri : public CollectionBase {
public:
WhatsMyUri() : CollectionBase( "whatsmyuri" ) {}
@@ -1052,15 +1053,15 @@ namespace QueryTests {
ASSERT_EQUALS( unknownAddress.toString(), result[ "you" ].str() );
}
};
-
+
namespace parsedtests {
class basic1 {
public:
- void _test( const BSONObj& in ){
+ void _test( const BSONObj& in ) {
ParsedQuery q( "a.b" , 5 , 6 , 9 , in , BSONObj() );
ASSERT_EQUALS( BSON( "x" << 5 ) , q.getFilter() );
}
- void run(){
+ void run() {
_test( BSON( "x" << 5 ) );
_test( BSON( "query" << BSON( "x" << 5 ) ) );
_test( BSON( "$query" << BSON( "x" << 5 ) ) );
@@ -1082,23 +1083,23 @@ namespace QueryTests {
namespace queryobjecttests {
class names1 {
public:
- void run(){
+ void run() {
ASSERT_EQUALS( BSON( "x" << 1 ) , QUERY( "query" << BSON( "x" << 1 ) ).getFilter() );
ASSERT_EQUALS( BSON( "x" << 1 ) , QUERY( "$query" << BSON( "x" << 1 ) ).getFilter() );
}
-
+
};
}
class OrderingTest {
public:
- void run(){
+ void run() {
{
Ordering o = Ordering::make( BSON( "a" << 1 << "b" << -1 << "c" << 1 ) );
ASSERT_EQUALS( 1 , o.get(0) );
ASSERT_EQUALS( -1 , o.get(1) );
ASSERT_EQUALS( 1 , o.get(2) );
-
+
ASSERT( ! o.descending( 1 ) );
ASSERT( o.descending( 1 << 1 ) );
ASSERT( ! o.descending( 1 << 2 ) );
@@ -1109,7 +1110,7 @@ namespace QueryTests {
ASSERT_EQUALS( 1 , o.get(0) );
ASSERT_EQUALS( 1 , o.get(1) );
ASSERT_EQUALS( -1 , o.get(2) );
-
+
ASSERT( ! o.descending( 1 ) );
ASSERT( ! o.descending( 1 << 1 ) );
ASSERT( o.descending( 1 << 2 ) );
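These assertions encode the contract: get(i) reports the sign of key i, and descending(mask) tests sign bits by position, which is why key i is checked with 1 << i. A minimal sketch of the representation this implies, assuming one packed sign bit per key; it mirrors, but is not quoted from, the real Ordering class:

    // One bit per key position: bit i set means key i sorts descending.
    struct TinyOrdering {
        unsigned bits;
        int get( int i ) const { return ( bits & ( 1u << i ) ) ? -1 : 1; }
        unsigned descending( unsigned mask ) const { return bits & mask; }
    };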
@@ -1122,8 +1123,8 @@ namespace QueryTests {
class T1 {
public:
- void run(){
-
+ void run() {
+
Projection m;
m.init( BSON( "a" << 1 ) );
ASSERT_EQUALS( BSON( "a" << 5 ) , m.transform( BSON( "x" << 1 << "a" << 5 ) ) );
@@ -1132,18 +1133,18 @@ namespace QueryTests {
class K1 {
public:
- void run(){
-
+ void run() {
+
Projection m;
m.init( BSON( "a" << 1 ) );
- scoped_ptr<Projection::KeyOnly> x( m.checkKey( BSON( "a" << 1 ) ) );
+ scoped_ptr<Projection::KeyOnly> x( m.checkKey( BSON( "a" << 1 ) ) );
ASSERT( ! x );
x.reset( m.checkKey( BSON( "a" << 1 << "_id" << 1 ) ) );
ASSERT( x );
- ASSERT_EQUALS( BSON( "a" << 5 << "_id" << 17 ) ,
+ ASSERT_EQUALS( BSON( "a" << 5 << "_id" << 17 ) ,
x->hydrate( BSON( "" << 5 << "" << 17 ) ) );
x.reset( m.checkKey( BSON( "a" << 1 << "x" << 1 << "_id" << 1 ) ) );
@@ -1157,15 +1158,15 @@ namespace QueryTests {
class K2 {
public:
- void run(){
-
+ void run() {
+
Projection m;
m.init( BSON( "a" << 1 << "_id" << 0 ) );
- scoped_ptr<Projection::KeyOnly> x( m.checkKey( BSON( "a" << 1 ) ) );
+ scoped_ptr<Projection::KeyOnly> x( m.checkKey( BSON( "a" << 1 ) ) );
ASSERT( x );
- ASSERT_EQUALS( BSON( "a" << 17 ) ,
+ ASSERT_EQUALS( BSON( "a" << 17 ) ,
x->hydrate( BSON( "" << 17 ) ) );
x.reset( m.checkKey( BSON( "x" << 1 << "a" << 1 << "_id" << 1 ) ) );
@@ -1180,13 +1181,13 @@ namespace QueryTests {
class K3 {
public:
- void run(){
-
+ void run() {
+
{
Projection m;
m.init( BSON( "a" << 1 << "_id" << 0 ) );
-
- scoped_ptr<Projection::KeyOnly> x( m.checkKey( BSON( "a" << 1 << "x.a" << 1 ) ) );
+
+ scoped_ptr<Projection::KeyOnly> x( m.checkKey( BSON( "a" << 1 << "x.a" << 1 ) ) );
ASSERT( x );
}
@@ -1195,8 +1196,8 @@ namespace QueryTests {
// TODO: this is temporary SERVER-2104
Projection m;
m.init( BSON( "x.a" << 1 << "_id" << 0 ) );
-
- scoped_ptr<Projection::KeyOnly> x( m.checkKey( BSON( "a" << 1 << "x.a" << 1 ) ) );
+
+ scoped_ptr<Projection::KeyOnly> x( m.checkKey( BSON( "a" << 1 << "x.a" << 1 ) ) );
ASSERT( ! x );
}
@@ -1211,7 +1212,7 @@ namespace QueryTests {
All() : Suite( "query" ) {
}
- void setupTests(){
+ void setupTests() {
add< CountBasic >();
add< CountQuery >();
add< CountFields >();
@@ -1256,9 +1257,9 @@ namespace QueryTests {
add< FindingStart >();
add< FindingStartPartiallyFull >();
add< WhatsMyUri >();
-
+
add< parsedtests::basic1 >();
-
+
add< queryobjecttests::names1 >();
add< OrderingTest >();
@@ -1269,6 +1270,6 @@ namespace QueryTests {
add< proj::K3 >();
}
} myall;
-
+
} // namespace QueryTests
diff --git a/dbtests/repltests.cpp b/dbtests/repltests.cpp
index 33d2118f241..c6ffba28646 100644
--- a/dbtests/repltests.cpp
+++ b/dbtests/repltests.cpp
@@ -34,13 +34,13 @@ namespace ReplTests {
BSONObj f( const char *s ) {
return fromjson( s );
- }
-
+ }
+
class Base {
dblock lk;
Client::Context _context;
public:
- Base() : _context( ns() ){
+ Base() : _context( ns() ) {
replSettings.master = true;
createOplog();
ensureHaveIdIndex( ns() );
@@ -50,7 +50,8 @@ namespace ReplTests {
replSettings.master = false;
deleteAll( ns() );
deleteAll( cllNS() );
- } catch ( ... ) {
+ }
+ catch ( ... ) {
FAIL( "Exception while cleaning up test" );
}
}
@@ -63,7 +64,7 @@ namespace ReplTests {
}
DBDirectClient *client() const { return &client_; }
BSONObj one( const BSONObj &query = BSONObj() ) const {
- return client()->findOne( ns(), query );
+ return client()->findOne( ns(), query );
}
void checkOne( const BSONObj &o ) const {
check( o, one( o ) );
@@ -78,11 +79,11 @@ namespace ReplTests {
void check( const BSONObj &expected, const BSONObj &got ) const {
if ( expected.woCompare( got ) ) {
out() << "expected: " << expected.toString()
- << ", got: " << got.toString() << endl;
+ << ", got: " << got.toString() << endl;
}
ASSERT_EQUALS( expected , got );
}
- BSONObj oneOp() const {
+ BSONObj oneOp() const {
return client()->findOne( cllNS(), BSONObj() );
}
int count() const {
@@ -131,7 +132,7 @@ namespace ReplTests {
out() << "all for " << ns << endl;
for(; c->ok(); c->advance() ) {
out() << c->current().toString() << endl;
- }
+ }
}
// These deletes don't get logged.
static void deleteAll( const char *ns ) {
@@ -143,7 +144,7 @@ namespace ReplTests {
toDelete.push_back( c->currLoc() );
}
for( vector< DiskLoc >::iterator i = toDelete.begin(); i != toDelete.end(); ++i ) {
- theDataFileMgr.deleteRecord( ns, i->rec(), *i, true );
+ theDataFileMgr.deleteRecord( ns, i->rec(), *i, true );
}
}
static void insert( const BSONObj &o, bool god = false ) {
@@ -163,7 +164,7 @@ namespace ReplTests {
static DBDirectClient client_;
};
DBDirectClient Base::client_;
-
+
class LogBasic : public Base {
public:
void run() {
@@ -172,9 +173,9 @@ namespace ReplTests {
ASSERT_EQUALS( 2, opCount() );
}
};
-
+
namespace Idempotence {
-
+
class Base : public ReplTests::Base {
public:
virtual ~Base() {}
@@ -186,7 +187,7 @@ namespace ReplTests {
applyAllOperations();
check();
ASSERT_EQUALS( nOps, opCount() );
-
+
reset();
applyAllOperations();
check();
@@ -200,7 +201,7 @@ namespace ReplTests {
virtual void check() const = 0;
virtual void reset() const = 0;
};
-
+
class InsertTimestamp : public Base {
public:
void doIt() const {
@@ -221,7 +222,7 @@ namespace ReplTests {
private:
mutable Date_t date_;
};
-
+
class InsertAutoId : public Base {
public:
InsertAutoId() : o_( fromjson( "{\"a\":\"b\"}" ) ) {}
@@ -248,12 +249,12 @@ namespace ReplTests {
checkOne( o_ );
}
};
-
+
class InsertTwo : public Base {
public:
- InsertTwo() :
- o_( fromjson( "{'_id':1,a:'b'}" ) ),
- t_( fromjson( "{'_id':2,c:'d'}" ) ) {}
+ InsertTwo() :
+ o_( fromjson( "{'_id':1,a:'b'}" ) ),
+ t_( fromjson( "{'_id':2,c:'d'}" ) ) {}
void doIt() const {
vector< BSONObj > v;
v.push_back( o_ );
@@ -287,7 +288,7 @@ namespace ReplTests {
deleteAll( ns() );
}
private:
- BSONObj o_;
+ BSONObj o_;
};
class UpdateTimestamp : public Base {
@@ -311,14 +312,14 @@ namespace ReplTests {
private:
mutable Date_t date_;
};
-
+
class UpdateSameField : public Base {
public:
UpdateSameField() :
- q_( fromjson( "{a:'b'}" ) ),
- o1_( wid( "{a:'b'}" ) ),
- o2_( wid( "{a:'b'}" ) ),
- u_( fromjson( "{a:'c'}" ) ){}
+ q_( fromjson( "{a:'b'}" ) ),
+ o1_( wid( "{a:'b'}" ) ),
+ o2_( wid( "{a:'b'}" ) ),
+ u_( fromjson( "{a:'c'}" ) ) {}
void doIt() const {
client()->update( ns(), q_, u_ );
}
@@ -334,14 +335,14 @@ namespace ReplTests {
}
private:
BSONObj q_, o1_, o2_, u_;
- };
-
+ };
+
class UpdateSameFieldWithId : public Base {
public:
UpdateSameFieldWithId() :
- o_( fromjson( "{'_id':1,a:'b'}" ) ),
- q_( fromjson( "{a:'b'}" ) ),
- u_( fromjson( "{'_id':1,a:'c'}" ) ){}
+ o_( fromjson( "{'_id':1,a:'b'}" ) ),
+ q_( fromjson( "{a:'b'}" ) ),
+ u_( fromjson( "{'_id':1,a:'c'}" ) ) {}
void doIt() const {
client()->update( ns(), q_, u_ );
}
@@ -356,14 +357,14 @@ namespace ReplTests {
insert( fromjson( "{'_id':2,a:'b'}" ) );
}
private:
- BSONObj o_, q_, u_;
- };
+ BSONObj o_, q_, u_;
+ };
class UpdateSameFieldExplicitId : public Base {
public:
UpdateSameFieldExplicitId() :
- o_( fromjson( "{'_id':1,a:'b'}" ) ),
- u_( fromjson( "{'_id':1,a:'c'}" ) ){}
+ o_( fromjson( "{'_id':1,a:'b'}" ) ),
+ u_( fromjson( "{'_id':1,a:'c'}" ) ) {}
void doIt() const {
client()->update( ns(), o_, u_ );
}
@@ -376,15 +377,15 @@ namespace ReplTests {
insert( o_ );
}
protected:
- BSONObj o_, u_;
+ BSONObj o_, u_;
};
-
+
class UpdateDifferentFieldExplicitId : public Base {
public:
UpdateDifferentFieldExplicitId() :
- o_( fromjson( "{'_id':1,a:'b'}" ) ),
- q_( fromjson( "{'_id':1}" ) ),
- u_( fromjson( "{'_id':1,a:'c'}" ) ){}
+ o_( fromjson( "{'_id':1,a:'b'}" ) ),
+ q_( fromjson( "{'_id':1}" ) ),
+ u_( fromjson( "{'_id':1,a:'c'}" ) ) {}
void doIt() const {
client()->update( ns(), q_, u_ );
}
@@ -397,28 +398,28 @@ namespace ReplTests {
insert( o_ );
}
protected:
- BSONObj o_, q_, u_;
- };
-
+ BSONObj o_, q_, u_;
+ };
+
class UpsertUpdateNoMods : public UpdateDifferentFieldExplicitId {
void doIt() const {
client()->update( ns(), q_, u_, true );
}
};
-
+
class UpsertInsertNoMods : public InsertAutoId {
void doIt() const {
client()->update( ns(), fromjson( "{a:'c'}" ), o_, true );
}
};
-
+
class UpdateSet : public Base {
public:
UpdateSet() :
- o_( fromjson( "{'_id':1,a:5}" ) ),
- q_( fromjson( "{a:5}" ) ),
- u_( fromjson( "{$set:{a:7}}" ) ),
- ou_( fromjson( "{'_id':1,a:7}" ) ) {}
+ o_( fromjson( "{'_id':1,a:5}" ) ),
+ q_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{$set:{a:7}}" ) ),
+ ou_( fromjson( "{'_id':1,a:7}" ) ) {}
void doIt() const {
client()->update( ns(), q_, u_ );
}
@@ -431,16 +432,16 @@ namespace ReplTests {
insert( o_ );
}
protected:
- BSONObj o_, q_, u_, ou_;
+ BSONObj o_, q_, u_, ou_;
};
-
+
class UpdateInc : public Base {
public:
UpdateInc() :
- o_( fromjson( "{'_id':1,a:5}" ) ),
- q_( fromjson( "{a:5}" ) ),
- u_( fromjson( "{$inc:{a:3}}" ) ),
- ou_( fromjson( "{'_id':1,a:8}" ) ) {}
+ o_( fromjson( "{'_id':1,a:5}" ) ),
+ q_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{$inc:{a:3}}" ) ),
+ ou_( fromjson( "{'_id':1,a:8}" ) ) {}
void doIt() const {
client()->update( ns(), q_, u_ );
}
@@ -453,16 +454,16 @@ namespace ReplTests {
insert( o_ );
}
protected:
- BSONObj o_, q_, u_, ou_;
+ BSONObj o_, q_, u_, ou_;
};
class UpdateInc2 : public Base {
public:
UpdateInc2() :
- o_( fromjson( "{'_id':1,a:5}" ) ),
- q_( fromjson( "{a:5}" ) ),
- u_( fromjson( "{$inc:{a:3},$set:{x:5}}" ) ),
- ou_( fromjson( "{'_id':1,a:8,x:5}" ) ) {}
+ o_( fromjson( "{'_id':1,a:5}" ) ),
+ q_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{$inc:{a:3},$set:{x:5}}" ) ),
+ ou_( fromjson( "{'_id':1,a:8,x:5}" ) ) {}
void doIt() const {
client()->update( ns(), q_, u_ );
}
@@ -475,16 +476,16 @@ namespace ReplTests {
insert( o_ );
}
protected:
- BSONObj o_, q_, u_, ou_;
+ BSONObj o_, q_, u_, ou_;
};
-
+
class IncEmbedded : public Base {
public:
IncEmbedded() :
- o_( fromjson( "{'_id':1,a:{b:3},b:{b:1}}" ) ),
- q_( fromjson( "{'_id':1}" ) ),
- u_( fromjson( "{$inc:{'a.b':1,'b.b':1}}" ) ),
- ou_( fromjson( "{'_id':1,a:{b:4},b:{b:2}}" ) )
+ o_( fromjson( "{'_id':1,a:{b:3},b:{b:1}}" ) ),
+ q_( fromjson( "{'_id':1}" ) ),
+ u_( fromjson( "{$inc:{'a.b':1,'b.b':1}}" ) ),
+ ou_( fromjson( "{'_id':1,a:{b:4},b:{b:2}}" ) )
{}
void doIt() const {
client()->update( ns(), q_, u_ );
@@ -498,16 +499,16 @@ namespace ReplTests {
insert( o_ );
}
protected:
- BSONObj o_, q_, u_, ou_;
+ BSONObj o_, q_, u_, ou_;
};
class IncCreates : public Base {
public:
IncCreates() :
- o_( fromjson( "{'_id':1}" ) ),
- q_( fromjson( "{'_id':1}" ) ),
- u_( fromjson( "{$inc:{'a':1}}" ) ),
- ou_( fromjson( "{'_id':1,a:1}") )
+ o_( fromjson( "{'_id':1}" ) ),
+ q_( fromjson( "{'_id':1}" ) ),
+ u_( fromjson( "{$inc:{'a':1}}" ) ),
+ ou_( fromjson( "{'_id':1,a:1}") )
{}
void doIt() const {
client()->update( ns(), q_, u_ );
@@ -521,16 +522,16 @@ namespace ReplTests {
insert( o_ );
}
protected:
- BSONObj o_, q_, u_, ou_;
+ BSONObj o_, q_, u_, ou_;
};
class UpsertInsertIdMod : public Base {
public:
UpsertInsertIdMod() :
- q_( fromjson( "{'_id':5,a:4}" ) ),
- u_( fromjson( "{$inc:{a:3}}" ) ),
- ou_( fromjson( "{'_id':5,a:7}" ) ) {}
+ q_( fromjson( "{'_id':5,a:4}" ) ),
+ u_( fromjson( "{$inc:{a:3}}" ) ),
+ ou_( fromjson( "{'_id':5,a:7}" ) ) {}
void doIt() const {
client()->update( ns(), q_, u_, true );
}
@@ -542,15 +543,15 @@ namespace ReplTests {
deleteAll( ns() );
}
protected:
- BSONObj q_, u_, ou_;
+ BSONObj q_, u_, ou_;
};
-
+
class UpsertInsertSet : public Base {
public:
UpsertInsertSet() :
- q_( fromjson( "{a:5}" ) ),
- u_( fromjson( "{$set:{a:7}}" ) ),
- ou_( fromjson( "{a:7}" ) ) {}
+ q_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{$set:{a:7}}" ) ),
+ ou_( fromjson( "{a:7}" ) ) {}
void doIt() const {
client()->update( ns(), q_, u_, true );
}
@@ -563,15 +564,15 @@ namespace ReplTests {
insert( fromjson( "{'_id':7,a:7}" ) );
}
protected:
- BSONObj o_, q_, u_, ou_;
+ BSONObj o_, q_, u_, ou_;
};
-
+
class UpsertInsertInc : public Base {
public:
UpsertInsertInc() :
- q_( fromjson( "{a:5}" ) ),
- u_( fromjson( "{$inc:{a:3}}" ) ),
- ou_( fromjson( "{a:8}" ) ) {}
+ q_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{$inc:{a:3}}" ) ),
+ ou_( fromjson( "{a:8}" ) ) {}
void doIt() const {
client()->update( ns(), q_, u_, true );
}
@@ -583,38 +584,38 @@ namespace ReplTests {
deleteAll( ns() );
}
protected:
- BSONObj o_, q_, u_, ou_;
+ BSONObj o_, q_, u_, ou_;
};
-
+
class MultiInc : public Base {
public:
-
+
string s() const {
stringstream ss;
auto_ptr<DBClientCursor> cc = client()->query( ns() , Query().sort( BSON( "_id" << 1 ) ) );
bool first = true;
- while ( cc->more() ){
+ while ( cc->more() ) {
if ( first ) first = false;
else ss << ",";
-
+
BSONObj o = cc->next();
ss << o["x"].numberInt();
}
return ss.str();
}
-
+
void doIt() const {
client()->insert( ns(), BSON( "_id" << 1 << "x" << 1 ) );
client()->insert( ns(), BSON( "_id" << 2 << "x" << 5 ) );
-
+
ASSERT_EQUALS( "1,5" , s() );
-
+
client()->update( ns() , BSON( "_id" << 1 ) , BSON( "$inc" << BSON( "x" << 1 ) ) );
ASSERT_EQUALS( "2,5" , s() );
-
+
client()->update( ns() , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) );
ASSERT_EQUALS( "3,5" , s() );
-
+
client()->update( ns() , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) , false , true );
check();
}
@@ -622,18 +623,18 @@ namespace ReplTests {
void check() const {
ASSERT_EQUALS( "4,6" , s() );
}
-
+
void reset() const {
deleteAll( ns() );
}
};
-
+
class UpdateWithoutPreexistingId : public Base {
public:
UpdateWithoutPreexistingId() :
- o_( fromjson( "{a:5}" ) ),
- u_( fromjson( "{a:5}" ) ),
- ot_( fromjson( "{b:4}" ) ) {}
+ o_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{a:5}" ) ),
+ ot_( fromjson( "{b:4}" ) ) {}
void doIt() const {
client()->update( ns(), o_, u_ );
}
@@ -648,15 +649,15 @@ namespace ReplTests {
insert( o_, true );
}
protected:
- BSONObj o_, u_, ot_;
- };
-
+ BSONObj o_, u_, ot_;
+ };
+
class Remove : public Base {
public:
Remove() :
- o1_( f( "{\"_id\":\"010101010101010101010101\",\"a\":\"b\"}" ) ),
- o2_( f( "{\"_id\":\"010101010101010101010102\",\"a\":\"b\"}" ) ),
- q_( f( "{\"a\":\"b\"}" ) ) {}
+ o1_( f( "{\"_id\":\"010101010101010101010101\",\"a\":\"b\"}" ) ),
+ o2_( f( "{\"_id\":\"010101010101010101010102\",\"a\":\"b\"}" ) ),
+ q_( f( "{\"a\":\"b\"}" ) ) {}
void doIt() const {
client()->remove( ns(), q_ );
}
@@ -669,23 +670,23 @@ namespace ReplTests {
insert( o2_ );
}
protected:
- BSONObj o1_, o2_, q_;
+ BSONObj o1_, o2_, q_;
};
-
+
class RemoveOne : public Remove {
void doIt() const {
client()->remove( ns(), q_, true );
- }
+ }
void check() const {
ASSERT_EQUALS( 1, count() );
}
};
-
+
class FailingUpdate : public Base {
public:
FailingUpdate() :
- o_( fromjson( "{'_id':1,a:'b'}" ) ),
- u_( fromjson( "{'_id':1,c:'d'}" ) ) {}
+ o_( fromjson( "{'_id':1,a:'b'}" ) ),
+ u_( fromjson( "{'_id':1,c:'d'}" ) ) {}
void doIt() const {
client()->update( ns(), o_, u_ );
client()->insert( ns(), o_ );
@@ -700,7 +701,7 @@ namespace ReplTests {
protected:
BSONObj o_, u_;
};
-
+
class SetNumToStr : public Base {
public:
void doIt() const {
@@ -715,7 +716,7 @@ namespace ReplTests {
insert( BSON( "_id" << 0 << "a" << 4.0 ) );
}
};
-
+
class Push : public Base {
public:
void doIt() const {
@@ -729,9 +730,9 @@ namespace ReplTests {
void reset() const {
deleteAll( ns() );
insert( fromjson( "{'_id':0,a:[4]}" ) );
- }
+ }
};
-
+
class PushUpsert : public Base {
public:
void doIt() const {
@@ -745,7 +746,7 @@ namespace ReplTests {
void reset() const {
deleteAll( ns() );
insert( fromjson( "{'_id':0,a:[4]}" ) );
- }
+ }
};
class MultiPush : public Base {
@@ -761,7 +762,7 @@ namespace ReplTests {
void reset() const {
deleteAll( ns() );
insert( fromjson( "{'_id':0,a:[4]}" ) );
- }
+ }
};
class EmptyPush : public Base {
@@ -777,13 +778,13 @@ namespace ReplTests {
void reset() const {
deleteAll( ns() );
insert( fromjson( "{'_id':0}" ) );
- }
+ }
};
class PushAll : public Base {
public:
void doIt() const {
- client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$pushAll:{a:[5.0,6.0]}}" ) );
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$pushAll:{a:[5.0,6.0]}}" ) );
}
using ReplTests::Base::check;
void check() const {
@@ -793,13 +794,13 @@ namespace ReplTests {
void reset() const {
deleteAll( ns() );
insert( fromjson( "{'_id':0,a:[4]}" ) );
- }
+ }
};
-
+
class PushAllUpsert : public Base {
public:
void doIt() const {
- client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$pushAll:{a:[5.0,6.0]}}" ), true );
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$pushAll:{a:[5.0,6.0]}}" ), true );
}
using ReplTests::Base::check;
void check() const {
@@ -809,7 +810,7 @@ namespace ReplTests {
void reset() const {
deleteAll( ns() );
insert( fromjson( "{'_id':0,a:[4]}" ) );
- }
+ }
};
class EmptyPushAll : public Base {
@@ -825,7 +826,7 @@ namespace ReplTests {
void reset() const {
deleteAll( ns() );
insert( fromjson( "{'_id':0}" ) );
- }
+ }
};
class Pull : public Base {
@@ -841,9 +842,9 @@ namespace ReplTests {
void reset() const {
deleteAll( ns() );
insert( fromjson( "{'_id':0,a:[4,5]}" ) );
- }
+ }
};
-
+
class PullNothing : public Base {
public:
void doIt() const {
@@ -857,13 +858,13 @@ namespace ReplTests {
void reset() const {
deleteAll( ns() );
insert( fromjson( "{'_id':0,a:[4,5]}" ) );
- }
+ }
};
-
+
class PullAll : public Base {
public:
void doIt() const {
- client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$pullAll:{a:[4,5]}}" ) );
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$pullAll:{a:[4,5]}}" ) );
}
using ReplTests::Base::check;
void check() const {
@@ -873,7 +874,7 @@ namespace ReplTests {
void reset() const {
deleteAll( ns() );
insert( fromjson( "{'_id':0,a:[4,5,6]}" ) );
- }
+ }
};
class Pop : public Base {
@@ -889,7 +890,7 @@ namespace ReplTests {
void reset() const {
deleteAll( ns() );
insert( fromjson( "{'_id':0,a:[4,5,6]}" ) );
- }
+ }
};
class PopReverse : public Base {
@@ -905,7 +906,7 @@ namespace ReplTests {
void reset() const {
deleteAll( ns() );
insert( fromjson( "{'_id':0,a:[4,5,6]}" ) );
- }
+ }
};
class BitOp : public Base {
@@ -921,14 +922,14 @@ namespace ReplTests {
void reset() const {
deleteAll( ns() );
insert( fromjson( "{'_id':0,a:3}" ) );
- }
+ }
};
class Rename : public Base {
public:
void doIt() const {
- client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{a:'b'}}" ) );
- client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$set:{a:50}}" ) );
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{a:'b'}}" ) );
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$set:{a:50}}" ) );
}
using ReplTests::Base::check;
void check() const {
@@ -938,14 +939,14 @@ namespace ReplTests {
void reset() const {
deleteAll( ns() );
insert( fromjson( "{'_id':0,a:3}" ) );
- }
+ }
};
class RenameReplace : public Base {
public:
void doIt() const {
- client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{a:'b'}}" ) );
- client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$set:{a:50}}" ) );
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{a:'b'}}" ) );
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$set:{a:50}}" ) );
}
using ReplTests::Base::check;
void check() const {
@@ -955,13 +956,13 @@ namespace ReplTests {
void reset() const {
deleteAll( ns() );
insert( fromjson( "{'_id':0,a:3,b:100}" ) );
- }
+ }
};
class RenameOverwrite : public Base {
public:
void doIt() const {
- client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{a:'b'}}" ) );
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{a:'b'}}" ) );
}
using ReplTests::Base::check;
void check() const {
@@ -971,13 +972,13 @@ namespace ReplTests {
void reset() const {
deleteAll( ns() );
insert( fromjson( "{'_id':0,z:1,a:3}" ) );
- }
+ }
};
-
+
class NoRename : public Base {
public:
void doIt() const {
- client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{c:'b'},$set:{z:1}}" ) );
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{c:'b'},$set:{z:1}}" ) );
}
using ReplTests::Base::check;
void check() const {
@@ -987,12 +988,12 @@ namespace ReplTests {
void reset() const {
deleteAll( ns() );
insert( fromjson( "{'_id':0,a:3}" ) );
- }
+ }
};
-
-
+
+
} // namespace Idempotence
-
+
class DeleteOpIsIdBased : public Base {
public:
void run() {
@@ -1002,21 +1003,21 @@ namespace ReplTests {
client()->remove( ns(), BSON( "a" << 10 ) );
ASSERT_EQUALS( 1U, client()->count( ns(), BSONObj() ) );
insert( BSON( "_id" << 0 << "a" << 11 ) );
- insert( BSON( "_id" << 2 << "a" << 10 ) );
+ insert( BSON( "_id" << 2 << "a" << 10 ) );
insert( BSON( "_id" << 3 << "a" << 10 ) );
-
+
applyAllOperations();
ASSERT_EQUALS( 2U, client()->count( ns(), BSONObj() ) );
ASSERT( !one( BSON( "_id" << 1 ) ).isEmpty() );
ASSERT( !one( BSON( "_id" << 2 ) ).isEmpty() );
}
};
-
+
class DbIdsTest {
public:
void run() {
Client::Context ctx( "unittests.repltest.DbIdsTest" );
-
+
s_.reset( new DbIds( "local.temp.DbIdsTest" ) );
s_->reset();
check( false, false, false );
@@ -1025,7 +1026,7 @@ namespace ReplTests {
check( true, false, false );
s_->set( "a", BSON( "_id" << 4 ), false );
check( false, false, false );
-
+
s_->set( "b", BSON( "_id" << 4 ), true );
check( false, true, false );
s_->set( "b", BSON( "_id" << 4 ), false );
@@ -1043,7 +1044,7 @@ namespace ReplTests {
s_->reset();
check( false, false, false );
-
+
s_->set( "a", BSON( "_id" << 4 ), true );
s_->set( "a", BSON( "_id" << 4 ), true );
check( true, false, false );
@@ -1054,17 +1055,17 @@ namespace ReplTests {
void check( bool one, bool two, bool three ) {
ASSERT_EQUALS( one, s_->get( "a", BSON( "_id" << 4 ) ) );
ASSERT_EQUALS( two, s_->get( "b", BSON( "_id" << 4 ) ) );
- ASSERT_EQUALS( three, s_->get( "a", BSON( "_id" << 5 ) ) );
+ ASSERT_EQUALS( three, s_->get( "a", BSON( "_id" << 5 ) ) );
}
dblock lk_;
auto_ptr< DbIds > s_;
};
-
+
class MemIdsTest {
public:
void run() {
int n = sizeof( BSONObj ) + BSON( "_id" << 4 ).objsize();
-
+
s_.reset();
ASSERT_EQUALS( 0, s_.roughSize() );
ASSERT( !s_.get( "a", BSON( "_id" << 4 ) ) );
@@ -1091,7 +1092,7 @@ namespace ReplTests {
public:
void run() {
Client::Context ctx( "unittests.repltests.IdTrackerTest" );
-
+
ASSERT( s_.inMem() );
s_.reset( 4 * sizeof( BSONObj ) - 1 );
s_.haveId( "a", BSON( "_id" << 0 ), true );
@@ -1103,34 +1104,34 @@ namespace ReplTests {
s_.mayUpgradeStorage();
ASSERT( !s_.inMem() );
check();
-
+
s_.haveId( "a", BSON( "_id" << 1 ), false );
ASSERT( !s_.haveId( "a", BSON( "_id" << 1 ) ) );
s_.haveId( "a", BSON( "_id" << 1 ), true );
check();
- ASSERT( !s_.inMem() );
-
+ ASSERT( !s_.inMem() );
+
s_.reset( 4 * sizeof( BSONObj ) - 1 );
s_.mayUpgradeStorage();
- ASSERT( s_.inMem() );
+ ASSERT( s_.inMem() );
}
private:
void check() {
ASSERT( s_.haveId( "a", BSON( "_id" << 0 ) ) );
ASSERT( s_.haveId( "a", BSON( "_id" << 1 ) ) );
ASSERT( s_.haveId( "b", BSON( "_id" << 0 ) ) );
- ASSERT( s_.haveModId( "b", BSON( "_id" << 0 ) ) );
+ ASSERT( s_.haveModId( "b", BSON( "_id" << 0 ) ) );
}
dblock lk_;
IdTracker s_;
};
-
+
class All : public Suite {
public:
- All() : Suite( "repl" ){
+ All() : Suite( "repl" ) {
}
-
- void setupTests(){
+
+ void setupTests() {
add< LogBasic >();
add< Idempotence::InsertTimestamp >();
add< Idempotence::InsertAutoId >();
@@ -1182,6 +1183,6 @@ namespace ReplTests {
add< IdTrackerTest >();
}
} myall;
-
+
} // namespace ReplTests
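Illustration (not part of the patch): the Idempotence suite above replays each logged operation (via applyAllOperations) and checks the collection is unchanged, which is why the oplog records the post-image of an update rather than the original modifier. A minimal standalone sketch of that property, assuming a toy map-based document model (applyInc/applySet are hypothetical names, not MongoDB API):

#include <cassert>
#include <map>
#include <string>

typedef std::map<std::string, long long> Doc;

// A raw "$inc" is not idempotent: replaying it changes the result again.
void applyInc(Doc& d, const std::string& f, long long n) { d[f] += n; }

// Logging the resulting value as a "$set" makes replay idempotent.
void applySet(Doc& d, const std::string& f, long long v) { d[f] = v; }

int main() {
    Doc d; d["a"] = 5;
    applyInc(d, "a", 3);           // primary applies the increment: a == 8
    long long logged = d["a"];     // the log records the post-image, 8
    applySet(d, "a", logged);      // first replay
    applySet(d, "a", logged);      // second replay still leaves a == 8
    assert(d["a"] == 8);
    return 0;
}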
diff --git a/dbtests/sharding.cpp b/dbtests/sharding.cpp
index 2473366ccda..19edd5537ab 100644
--- a/dbtests/sharding.cpp
+++ b/dbtests/sharding.cpp
@@ -27,17 +27,17 @@ namespace ShardingTests {
namespace serverandquerytests {
class test1 {
public:
- void run(){
+ void run() {
ServerAndQuery a( "foo:1" , BSON( "a" << GT << 0 << LTE << 100 ) );
ServerAndQuery b( "foo:1" , BSON( "a" << GT << 200 << LTE << 1000 ) );
-
+
ASSERT( a < b );
ASSERT( ! ( b < a ) );
set<ServerAndQuery> s;
s.insert( a );
s.insert( b );
-
+
ASSERT_EQUALS( (unsigned int)2 , s.size() );
}
};
@@ -45,12 +45,12 @@ namespace ShardingTests {
class All : public Suite {
public:
- All() : Suite( "sharding" ){
+ All() : Suite( "sharding" ) {
}
- void setupTests(){
+ void setupTests() {
add< serverandquerytests::test1 >();
}
} myall;
-
+
}
diff --git a/dbtests/socktests.cpp b/dbtests/socktests.cpp
index 427e19e7c4b..5cd42f59d45 100644
--- a/dbtests/socktests.cpp
+++ b/dbtests/socktests.cpp
@@ -29,20 +29,20 @@ namespace SockTests {
ASSERT_EQUALS( "127.0.0.1", hostbyname( "localhost" ) );
ASSERT_EQUALS( "127.0.0.1", hostbyname( "127.0.0.1" ) );
// ASSERT_EQUALS( "::1", hostbyname( "::1" ) ); // IPv6 disabled at runtime by default.
-
+
HostAndPort h("asdfasdfasdf_no_such_host");
            // this fails; uncomment when fixed.
ASSERT( !h.isSelf() );
}
};
-
+
class All : public Suite {
public:
- All() : Suite( "sock" ){}
- void setupTests(){
+ All() : Suite( "sock" ) {}
+ void setupTests() {
add< HostByName >();
}
} myall;
-
+
} // namespace SockTests
diff --git a/dbtests/spin_lock_test.cpp b/dbtests/spin_lock_test.cpp
index d053d61cd5b..01eb7b37121 100644
--- a/dbtests/spin_lock_test.cpp
+++ b/dbtests/spin_lock_test.cpp
@@ -26,26 +26,26 @@ namespace {
using mongo::SpinLock;
- class LockTester{
+ class LockTester {
public:
LockTester( SpinLock* spin, int* counter )
- : _spin(spin), _counter(counter), _requests(0){}
+ : _spin(spin), _counter(counter), _requests(0) {}
- ~LockTester(){
+ ~LockTester() {
delete _t;
}
- void start( int increments ){
- _t = new boost::thread( boost::bind(&LockTester::test, this, increments) );
+ void start( int increments ) {
+ _t = new boost::thread( boost::bind(&LockTester::test, this, increments) );
}
- void join(){
+ void join() {
if ( _t ) _t->join();
}
- int requests() const{
- return _requests;
- }
+ int requests() const {
+ return _requests;
+ }
private:
SpinLock* _spin; // not owned here
@@ -53,7 +53,7 @@ namespace {
int _requests;
boost::thread* _t;
- void test( int increments ){
+ void test( int increments ) {
while ( increments-- > 0 ) {
_spin->lock();
++(*_counter);
@@ -61,14 +61,14 @@ namespace {
_spin->unlock();
}
}
-
+
LockTester( LockTester& );
LockTester& operator=( LockTester& );
};
- class ConcurrentIncs{
+ class ConcurrentIncs {
public:
- void run(){
+ void run() {
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
@@ -77,37 +77,37 @@ namespace {
const int threads = 64;
const int incs = 10000;
- LockTester* testers[threads];
-
- for ( int i = 0; i < threads; i++ ){
- testers[i] = new LockTester( &spin, &counter );
- }
- for ( int i = 0; i < threads; i++ ){
- testers[i]->start( incs );
- }
- for ( int i = 0; i < threads; i++ ){
- testers[i]->join();
- ASSERT_EQUALS( testers[i]->requests(), incs );
- delete testers[i];
- }
-
- ASSERT_EQUALS( counter, threads*incs );
+ LockTester* testers[threads];
+
+ for ( int i = 0; i < threads; i++ ) {
+ testers[i] = new LockTester( &spin, &counter );
+ }
+ for ( int i = 0; i < threads; i++ ) {
+ testers[i]->start( incs );
+ }
+ for ( int i = 0; i < threads; i++ ) {
+ testers[i]->join();
+ ASSERT_EQUALS( testers[i]->requests(), incs );
+ delete testers[i];
+ }
+
+ ASSERT_EQUALS( counter, threads*incs );
#else
-            // WARNING "TODO Missing spin lock in this platform."
-            ASSERT( true );
-
+            // WARNING "TODO Missing spin lock in this platform."
+            ASSERT( true );
+
#endif
}
};
- class SpinLockSuite : public Suite{
+ class SpinLockSuite : public Suite {
public:
- SpinLockSuite() : Suite( "spinlock" ){}
+ SpinLockSuite() : Suite( "spinlock" ) {}
- void setupTests(){
+ void setupTests() {
add< ConcurrentIncs >();
}
} spinLockSuite;
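Illustration (not part of the patch): the changes to spin_lock_test.cpp above are whitespace-only; the test itself hammers SpinLock::lock()/unlock() from 64 threads, each doing 10000 guarded increments. A minimal standalone sketch of the same pattern, using std::atomic_flag as a stand-in for mongo::SpinLock (illustrative, not the actual implementation):

#include <atomic>
#include <cassert>
#include <thread>
#include <vector>

std::atomic_flag flag = ATOMIC_FLAG_INIT;
int counter = 0;    // only touched while the flag is held

void incrementMany(int n) {
    for (int i = 0; i < n; ++i) {
        while (flag.test_and_set(std::memory_order_acquire)) {}  // spin until acquired
        ++counter;                                               // critical section
        flag.clear(std::memory_order_release);                   // release
    }
}

int main() {
    const int threads = 8, incs = 10000;
    std::vector<std::thread> ts;
    for (int i = 0; i < threads; ++i)
        ts.push_back(std::thread(incrementMany, incs));
    for (size_t i = 0; i < ts.size(); ++i)
        ts[i].join();
    assert(counter == threads * incs);   // no lost updates under the lock
    return 0;
}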
diff --git a/dbtests/threadedtests.cpp b/dbtests/threadedtests.cpp
index af413cc2fe4..8891594537c 100644
--- a/dbtests/threadedtests.cpp
+++ b/dbtests/threadedtests.cpp
@@ -29,34 +29,34 @@
namespace ThreadedTests {
template <int nthreads_param=10>
- class ThreadedTest{
- public:
- virtual void setup() {} //optional
- virtual void subthread() = 0;
- virtual void validate() = 0;
+ class ThreadedTest {
+ public:
+ virtual void setup() {} //optional
+ virtual void subthread() = 0;
+ virtual void validate() = 0;
- static const int nthreads = nthreads_param;
+ static const int nthreads = nthreads_param;
- void run(){
- setup();
+ void run() {
+ setup();
- launch_subthreads(nthreads);
+ launch_subthreads(nthreads);
- validate();
- }
+ validate();
+ }
- virtual ~ThreadedTest() {}; // not necessary, but makes compilers happy
+ virtual ~ThreadedTest() {}; // not necessary, but makes compilers happy
- private:
- void launch_subthreads(int remaining){
- if (!remaining) return;
+ private:
+ void launch_subthreads(int remaining) {
+ if (!remaining) return;
- boost::thread athread(boost::bind(&ThreadedTest::subthread, this));
+ boost::thread athread(boost::bind(&ThreadedTest::subthread, this));
- launch_subthreads(remaining - 1);
+ launch_subthreads(remaining - 1);
- athread.join();
- }
+ athread.join();
+ }
};
// Tested with up to 30k threads
@@ -64,13 +64,13 @@ namespace ThreadedTests {
static const int iterations = 1000000;
AtomicUInt target;
- void subthread(){
- for(int i=0; i < iterations; i++){
+ void subthread() {
+ for(int i=0; i < iterations; i++) {
//target.x++; // verified to fail with this version
target++;
}
}
- void validate(){
+ void validate() {
ASSERT_EQUALS(target.x , unsigned(nthreads * iterations));
AtomicUInt u;
@@ -87,10 +87,10 @@ namespace ThreadedTests {
static const int iterations = 10000;
MVar<int> target;
- public:
+ public:
MVarTest() : target(0) {}
- void subthread(){
- for(int i=0; i < iterations; i++){
+ void subthread() {
+ for(int i=0; i < iterations; i++) {
int val = target.take();
#if BOOST_VERSION >= 103500
//increase chances of catching failure
@@ -99,30 +99,30 @@ namespace ThreadedTests {
target.put(val+1);
}
}
- void validate(){
+ void validate() {
ASSERT_EQUALS(target.take() , nthreads * iterations);
}
};
- class ThreadPoolTest{
+ class ThreadPoolTest {
static const int iterations = 10000;
static const int nThreads = 8;
AtomicUInt counter;
- void increment(int n){
- for (int i=0; i<n; i++){
+ void increment(int n) {
+ for (int i=0; i<n; i++) {
counter++;
}
}
- public:
- void run(){
+ public:
+ void run() {
ThreadPool tp(nThreads);
- for (int i=0; i < iterations; i++){
+ for (int i=0; i < iterations; i++) {
tp.schedule(&ThreadPoolTest::increment, this, 2);
}
-
+
tp.join();
ASSERT(counter == (unsigned)(iterations * 2));
@@ -131,7 +131,7 @@ namespace ThreadedTests {
class LockTest {
public:
- void run(){
+ void run() {
// quick atomicint wrap test
// MSGID likely assumes this semantic
AtomicUInt counter = 0xffffffff;
@@ -145,10 +145,10 @@ namespace ThreadedTests {
class All : public Suite {
public:
- All() : Suite( "threading" ){
+ All() : Suite( "threading" ) {
}
- void setupTests(){
+ void setupTests() {
add< IsAtomicUIntAtomic >();
add< MVarTest >();
add< ThreadPoolTest >();
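Illustration (not part of the patch): ThreadedTest::launch_subthreads above is the interesting shape in this hunk; it starts one thread per recursion level and joins them as the stack unwinds, so all nthreads subthreads run concurrently before validate(). A standalone sketch of that recursion, with an atomic counter as a hypothetical payload:

#include <atomic>
#include <cassert>
#include <thread>

std::atomic<int> hits(0);

void subthread() { ++hits; }

void launchSubthreads(int remaining) {
    if (!remaining) return;
    std::thread t(subthread);         // start this level's worker
    launchSubthreads(remaining - 1);  // recurse before joining, so workers overlap
    t.join();                         // join on the way back up the stack
}

int main() {
    launchSubthreads(10);
    assert(hits == 10);
    return 0;
}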
diff --git a/dbtests/updatetests.cpp b/dbtests/updatetests.cpp
index 665bdc481ee..0f95a326faa 100644
--- a/dbtests/updatetests.cpp
+++ b/dbtests/updatetests.cpp
@@ -110,14 +110,14 @@ namespace UpdateTests {
class PushAllNonArray : public Fail {
void doIt() {
- insert( ns(), fromjson( "{a:[1]}" ) );
+ insert( ns(), fromjson( "{a:[1]}" ) );
update( ns(), BSONObj(), fromjson( "{$pushAll:{a:'d'}}" ) );
}
};
class PullAllNonArray : public Fail {
void doIt() {
- insert( ns(), fromjson( "{a:[1]}" ) );
+ insert( ns(), fromjson( "{a:[1]}" ) );
update( ns(), BSONObj(), fromjson( "{$pullAll:{a:'d'}}" ) );
}
};
@@ -241,12 +241,12 @@ namespace UpdateTests {
class MultiInc : public SetBase {
public:
-
- string s(){
+
+ string s() {
stringstream ss;
auto_ptr<DBClientCursor> cc = client().query( ns() , Query().sort( BSON( "_id" << 1 ) ) );
bool first = true;
- while ( cc->more() ){
+ while ( cc->more() ) {
if ( first ) first = false;
else ss << ",";
@@ -255,11 +255,11 @@ namespace UpdateTests {
}
return ss.str();
}
-
- void run(){
+
+ void run() {
client().insert( ns(), BSON( "_id" << 1 << "x" << 1 ) );
client().insert( ns(), BSON( "_id" << 2 << "x" << 5 ) );
-
+
ASSERT_EQUALS( "1,5" , s() );
client().update( ns() , BSON( "_id" << 1 ) , BSON( "$inc" << BSON( "x" << 1 ) ) );
@@ -270,7 +270,7 @@ namespace UpdateTests {
client().update( ns() , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) , false , true );
ASSERT_EQUALS( "4,6" , s() );
-
+
}
};
@@ -498,10 +498,10 @@ namespace UpdateTests {
client().insert( ns(), BSON( "_id" << 55 << "i" << 5 ) );
client().update( ns(), BSON( "i" << 5 ), BSON( "i" << 6 ) );
ASSERT( !client().findOne( ns(), Query( BSON( "_id" << 55 ) ).hint
- ( "{\"_id\":ObjectId(\"000000000000000000000000\")}" ) ).isEmpty() );
+ ( "{\"_id\":ObjectId(\"000000000000000000000000\")}" ) ).isEmpty() );
}
};
-
+
class CheckNoMods : public SetBase {
public:
void run() {
@@ -509,7 +509,7 @@ namespace UpdateTests {
ASSERT( error() );
}
};
-
+
class UpdateMissingToNull : public SetBase {
public:
void run() {
@@ -520,10 +520,10 @@ namespace UpdateTests {
};
namespace ModSetTests {
-
+
class internal1 {
public:
- void run(){
+ void run() {
BSONObj b = BSON( "$inc" << BSON( "x" << 1 << "a.b" << 1 ) );
ModSet m(b);
@@ -532,7 +532,7 @@ namespace UpdateTests {
ASSERT( ! m.haveModForField( "y" ) );
ASSERT( ! m.haveModForField( "a.c" ) );
ASSERT( ! m.haveModForField( "a" ) );
-
+
ASSERT( m.haveConflictingMod( "x" ) );
ASSERT( m.haveConflictingMod( "a" ) );
ASSERT( m.haveConflictingMod( "a.b" ) );
@@ -541,14 +541,14 @@ namespace UpdateTests {
ASSERT( ! m.haveConflictingMod( "a.a" ) );
}
};
-
+
class Base {
public:
- virtual ~Base(){}
+ virtual ~Base() {}
-
- void test( BSONObj morig , BSONObj in , BSONObj wanted ){
+
+ void test( BSONObj morig , BSONObj in , BSONObj wanted ) {
BSONObj m = morig.copy();
ModSet set(m);
@@ -556,20 +556,20 @@ namespace UpdateTests {
ASSERT_EQUALS( wanted , out );
}
};
-
+
class inc1 : public Base {
public:
- void run(){
+ void run() {
BSONObj m = BSON( "$inc" << BSON( "x" << 1 ) );
test( m , BSON( "x" << 5 ) , BSON( "x" << 6 ) );
test( m , BSON( "a" << 5 ) , BSON( "a" << 5 << "x" << 1 ) );
test( m , BSON( "z" << 5 ) , BSON( "x" << 1 << "z" << 5 ) );
}
};
-
+
class inc2 : public Base {
public:
- void run(){
+ void run() {
BSONObj m = BSON( "$inc" << BSON( "a.b" << 1 ) );
test( m , BSONObj() , BSON( "a" << BSON( "b" << 1 ) ) );
test( m , BSON( "a" << BSON( "b" << 2 ) ) , BSON( "a" << BSON( "b" << 3 ) ) );
@@ -577,23 +577,23 @@ namespace UpdateTests {
m = BSON( "$inc" << BSON( "a.b" << 1 << "a.c" << 1 ) );
test( m , BSONObj() , BSON( "a" << BSON( "b" << 1 << "c" << 1 ) ) );
-
+
}
};
class set1 : public Base {
public:
- void run(){
+ void run() {
test( BSON( "$set" << BSON( "x" << 17 ) ) , BSONObj() , BSON( "x" << 17 ) );
test( BSON( "$set" << BSON( "x" << 17 ) ) , BSON( "x" << 5 ) , BSON( "x" << 17 ) );
test( BSON( "$set" << BSON( "x.a" << 17 ) ) , BSON( "z" << 5 ) , BSON( "x" << BSON( "a" << 17 )<< "z" << 5 ) );
}
- };
-
+ };
+
class push1 : public Base {
public:
- void run(){
+ void run() {
test( BSON( "$push" << BSON( "a" << 5 ) ) , fromjson( "{a:[1]}" ) , fromjson( "{a:[1,5]}" ) );
}
};
@@ -603,28 +603,28 @@ namespace UpdateTests {
namespace basic {
class Base : public ClientBase {
protected:
-
+
virtual const char * ns() = 0;
virtual void dotest() = 0;
-
- void insert( const BSONObj& o ){
+
+ void insert( const BSONObj& o ) {
client().insert( ns() , o );
}
-
- void update( const BSONObj& m ){
+
+ void update( const BSONObj& m ) {
client().update( ns() , BSONObj() , m );
}
- BSONObj findOne(){
+ BSONObj findOne() {
return client().findOne( ns() , BSONObj() );
}
- void test( const char* initial , const char* mod , const char* after ){
+ void test( const char* initial , const char* mod , const char* after ) {
test( fromjson( initial ) , fromjson( mod ) , fromjson( after ) );
}
- void test( const BSONObj& initial , const BSONObj& mod , const BSONObj& after ){
+ void test( const BSONObj& initial , const BSONObj& mod , const BSONObj& after ) {
client().dropCollection( ns() );
insert( initial );
update( mod );
@@ -633,14 +633,14 @@ namespace UpdateTests {
}
public:
-
- Base(){}
- virtual ~Base(){
+
+ Base() {}
+ virtual ~Base() {
}
- void run(){
+ void run() {
client().dropCollection( ns() );
-
+
dotest();
client().dropCollection( ns() );
@@ -652,87 +652,87 @@ namespace UpdateTests {
virtual BSONObj mod() = 0;
virtual BSONObj after() = 0;
- void dotest(){
+ void dotest() {
test( initial() , mod() , after() );
}
-
+
};
-
+
class inc1 : public SingleTest {
- virtual BSONObj initial(){
+ virtual BSONObj initial() {
return BSON( "_id" << 1 << "x" << 1 );
}
- virtual BSONObj mod(){
+ virtual BSONObj mod() {
return BSON( "$inc" << BSON( "x" << 2 ) );
}
- virtual BSONObj after(){
+ virtual BSONObj after() {
return BSON( "_id" << 1 << "x" << 3 );
}
- virtual const char * ns(){
+ virtual const char * ns() {
return "unittests.inc1";
}
};
class inc2 : public SingleTest {
- virtual BSONObj initial(){
+ virtual BSONObj initial() {
return BSON( "_id" << 1 << "x" << 1 );
}
- virtual BSONObj mod(){
+ virtual BSONObj mod() {
return BSON( "$inc" << BSON( "x" << 2.5 ) );
}
- virtual BSONObj after(){
+ virtual BSONObj after() {
return BSON( "_id" << 1 << "x" << 3.5 );
}
- virtual const char * ns(){
+ virtual const char * ns() {
return "unittests.inc2";
}
};
-
+
class inc3 : public SingleTest {
- virtual BSONObj initial(){
+ virtual BSONObj initial() {
return BSON( "_id" << 1 << "x" << 537142123123LL );
}
- virtual BSONObj mod(){
+ virtual BSONObj mod() {
return BSON( "$inc" << BSON( "x" << 2 ) );
}
- virtual BSONObj after(){
+ virtual BSONObj after() {
return BSON( "_id" << 1 << "x" << 537142123125LL );
}
- virtual const char * ns(){
+ virtual const char * ns() {
return "unittests.inc3";
}
};
class inc4 : public SingleTest {
- virtual BSONObj initial(){
+ virtual BSONObj initial() {
return BSON( "_id" << 1 << "x" << 537142123123LL );
}
- virtual BSONObj mod(){
+ virtual BSONObj mod() {
return BSON( "$inc" << BSON( "x" << 2LL ) );
}
- virtual BSONObj after(){
+ virtual BSONObj after() {
return BSON( "_id" << 1 << "x" << 537142123125LL );
}
- virtual const char * ns(){
+ virtual const char * ns() {
return "unittests.inc4";
}
};
class inc5 : public SingleTest {
- virtual BSONObj initial(){
+ virtual BSONObj initial() {
return BSON( "_id" << 1 << "x" << 537142123123LL );
}
- virtual BSONObj mod(){
+ virtual BSONObj mod() {
return BSON( "$inc" << BSON( "x" << 2.0 ) );
}
- virtual BSONObj after(){
+ virtual BSONObj after() {
return BSON( "_id" << 1 << "x" << 537142123125LL );
}
- virtual const char * ns(){
+ virtual const char * ns() {
return "unittests.inc5";
}
@@ -740,23 +740,23 @@ namespace UpdateTests {
class inc6 : public Base {
- virtual const char * ns(){
+ virtual const char * ns() {
return "unittests.inc6";
}
- virtual BSONObj initial(){ return BSONObj(); }
- virtual BSONObj mod(){ return BSONObj(); }
- virtual BSONObj after(){ return BSONObj(); }
+ virtual BSONObj initial() { return BSONObj(); }
+ virtual BSONObj mod() { return BSONObj(); }
+ virtual BSONObj after() { return BSONObj(); }
- void dotest(){
+ void dotest() {
client().insert( ns() , BSON( "x" << 5 ) );
ASSERT( findOne()["x"].type() == NumberInt );
long long start = 5;
long long max = numeric_limits<int>::max();
max *= 32;
- while ( start < max ){
+ while ( start < max ) {
update( BSON( "$inc" << BSON( "x" << 500000 ) ) );
start += 500000;
ASSERT_EQUALS( start , findOne()["x"].numberLong() ); // SERVER-2005
@@ -764,12 +764,12 @@ namespace UpdateTests {
}
};
-
+
class bit1 : public Base {
- const char * ns(){
+ const char * ns() {
return "unittests.bit1";
}
- void dotest(){
+ void dotest() {
test( BSON( "_id" << 1 << "x" << 3 ) , BSON( "$bit" << BSON( "x" << BSON( "and" << 2 ) ) ) , BSON( "_id" << 1 << "x" << ( 3 & 2 ) ) );
test( BSON( "_id" << 1 << "x" << 1 ) , BSON( "$bit" << BSON( "x" << BSON( "or" << 4 ) ) ) , BSON( "_id" << 1 << "x" << ( 1 | 4 ) ) );
test( BSON( "_id" << 1 << "x" << 3 ) , BSON( "$bit" << BSON( "x" << BSON( "and" << 2 << "or" << 8 ) ) ) , BSON( "_id" << 1 << "x" << ( ( 3 & 2 ) | 8 ) ) );
@@ -777,21 +777,21 @@ namespace UpdateTests {
}
};
-
+
class unset : public Base {
- const char * ns(){
+ const char * ns() {
return "unittests.unset";
}
- void dotest(){
+ void dotest() {
test( "{_id:1,x:1}" , "{$unset:{x:1}}" , "{_id:1}" );
}
};
class setswitchint : public Base {
- const char * ns(){
+ const char * ns() {
return "unittests.int1";
}
- void dotest(){
+ void dotest() {
test( BSON( "_id" << 1 << "x" << 1 ) , BSON( "$set" << BSON( "x" << 5.6 ) ) , BSON( "_id" << 1 << "x" << 5.6 ) );
test( BSON( "_id" << 1 << "x" << 5.6 ) , BSON( "$set" << BSON( "x" << 1 ) ) , BSON( "_id" << 1 << "x" << 1 ) );
}
@@ -799,12 +799,12 @@ namespace UpdateTests {
};
-
+
class All : public Suite {
public:
All() : Suite( "update" ) {
}
- void setupTests(){
+ void setupTests() {
add< ModId >();
add< ModNonmodMix >();
add< InvalidMod >();
@@ -853,13 +853,13 @@ namespace UpdateTests {
add< PreserveIdWithIndex >();
add< CheckNoMods >();
add< UpdateMissingToNull >();
-
+
add< ModSetTests::internal1 >();
add< ModSetTests::inc1 >();
add< ModSetTests::inc2 >();
add< ModSetTests::set1 >();
add< ModSetTests::push1 >();
-
+
add< basic::inc1 >();
add< basic::inc2 >();
add< basic::inc3 >();
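Illustration (not part of the patch): the basic::inc1..inc6 tests above pin down $inc numeric behavior across int, long long, and double operands; inc6 in particular increments an int past INT_MAX and expects the value to stay exact as a 64-bit number (the SERVER-2005 comment). A standalone sketch of the widening rule being tested, using a hypothetical NumberValue type rather than BSON:

#include <cassert>
#include <limits>

struct NumberValue {
    bool isInt;     // 32-bit representation still in use?
    long long v;    // always tracked exactly

    void inc(long long n) {
        v += n;
        // widen once the value no longer fits in a signed 32-bit int
        if (isInt && (v > std::numeric_limits<int>::max() ||
                      v < std::numeric_limits<int>::min()))
            isInt = false;
    }
};

int main() {
    NumberValue x = { true, 5 };
    long long start = 5;
    long long max = std::numeric_limits<int>::max();
    max *= 32;
    while (start < max) {
        x.inc(500000);
        start += 500000;
        assert(x.v == start);   // value stays exact past INT_MAX
    }
    assert(!x.isInt);           // representation was widened along the way
    return 0;
}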
diff --git a/pch.cpp b/pch.cpp
index 7202a90bc44..a81ff610d82 100644
--- a/pch.cpp
+++ b/pch.cpp
@@ -27,4 +27,4 @@
#else
// unknown compiler
-#endif
+#endif
diff --git a/pch.h b/pch.h
index f1812dc9420..be48761500d 100644
--- a/pch.h
+++ b/pch.h
@@ -96,7 +96,7 @@
namespace mongo {
# if defined(_TESTINTENT)
- /** Use _TESTINTENT to test write intent declarations by using a read only view for non-declared operations.
+ /** Use _TESTINTENT to test write intent declarations by using a read-only view for non-declared operations.
We don't do journalling when _TESTINTENT is enabled.
*/
const bool testIntent = true;
@@ -118,8 +118,8 @@ namespace mongo {
const int VERSION_MINOR = 5;
enum ExitCode {
- EXIT_CLEAN = 0 ,
- EXIT_BADOPTIONS = 2 ,
+ EXIT_CLEAN = 0 ,
+ EXIT_BADOPTIONS = 2 ,
EXIT_REPLICATION_ERROR = 3 ,
EXIT_NEED_UPGRADE = 4 ,
EXIT_SHARDING_ERROR = 5 ,
@@ -127,8 +127,8 @@ namespace mongo {
EXIT_ABRUPT = 14 ,
EXIT_NTSERVICE_ERROR = 20 ,
EXIT_JAVA = 21 ,
- EXIT_OOM_MALLOC = 42 ,
- EXIT_OOM_REALLOC = 43 ,
+ EXIT_OOM_MALLOC = 42 ,
+ EXIT_OOM_REALLOC = 43 ,
EXIT_FS = 45 ,
EXIT_CLOCK_SKEW = 47 ,
EXIT_NET_ERROR = 48 ,
@@ -147,7 +147,7 @@ namespace mongo {
*/
void exit( ExitCode returnCode );
bool inShutdown();
-
+
using namespace boost::filesystem;
void asserted(const char *msg, const char *file, unsigned line);
}
diff --git a/s/balance.cpp b/s/balance.cpp
index 186500d91fd..d0199b398b8 100644
--- a/s/balance.cpp
+++ b/s/balance.cpp
@@ -31,10 +31,10 @@
#include "grid.h"
namespace mongo {
-
+
Balancer balancer;
- Balancer::Balancer() : _balancedLastTime(0), _policy( new BalancerPolicy ){}
+ Balancer::Balancer() : _balancedLastTime(0), _policy( new BalancerPolicy ) {}
Balancer::~Balancer() {
delete _policy;
@@ -43,15 +43,15 @@ namespace mongo {
int Balancer::_moveChunks( const vector<CandidateChunkPtr>* candidateChunks ) {
int movedCount = 0;
- for ( vector<CandidateChunkPtr>::const_iterator it = candidateChunks->begin(); it != candidateChunks->end(); ++it ){
+ for ( vector<CandidateChunkPtr>::const_iterator it = candidateChunks->begin(); it != candidateChunks->end(); ++it ) {
const CandidateChunk& chunkInfo = *it->get();
DBConfigPtr cfg = grid.getDBConfig( chunkInfo.ns );
assert( cfg );
-
+
ChunkManagerPtr cm = cfg->getChunkManager( chunkInfo.ns );
assert( cm );
-
+
const BSONObj& chunkToMove = chunkInfo.chunk;
ChunkPtr c = cm->findChunk( chunkToMove["min"].Obj() );
if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) || c->getMax().woCompare( chunkToMove["max"].Obj() ) ) {
@@ -61,50 +61,50 @@ namespace mongo {
c = cm->findChunk( chunkToMove["min"].Obj() );
if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) || c->getMax().woCompare( chunkToMove["max"].Obj() ) ) {
- log() << "chunk mismatch after reload, ignoring will retry issue cm: "
+                    log() << "chunk mismatch after reload, ignoring; will retry. cm: "
<< c->getMin() << " min: " << chunkToMove["min"].Obj() << endl;
continue;
}
}
-
+
BSONObj res;
- if ( c->moveAndCommit( Shard::make( chunkInfo.to ) , Chunk::MaxChunkSize , res ) ){
+ if ( c->moveAndCommit( Shard::make( chunkInfo.to ) , Chunk::MaxChunkSize , res ) ) {
movedCount++;
continue;
}
// the move requires acquiring the collection metadata's lock, which can fail
- log() << "balacer move failed: " << res << " from: " << chunkInfo.from << " to: " << chunkInfo.to
+            log() << "balancer move failed: " << res << " from: " << chunkInfo.from << " to: " << chunkInfo.to
<< " chunk: " << chunkToMove << endl;
}
return movedCount;
}
-
- void Balancer::_ping( DBClientBase& conn ){
+
+ void Balancer::_ping( DBClientBase& conn ) {
WriteConcern w = conn.getWriteConcern();
conn.setWriteConcern( W_NONE );
- conn.update( ShardNS::mongos ,
- BSON( "_id" << _myid ) ,
- BSON( "$set" << BSON( "ping" << DATENOW << "up" << (int)(time(0)-_started) ) ) ,
- true );
+ conn.update( ShardNS::mongos ,
+ BSON( "_id" << _myid ) ,
+ BSON( "$set" << BSON( "ping" << DATENOW << "up" << (int)(time(0)-_started) ) ) ,
+ true );
conn.setWriteConcern( w);
}
-
- bool Balancer::_checkOIDs(){
+
+ bool Balancer::_checkOIDs() {
vector<Shard> all;
Shard::getAllShards( all );
-
+
map<int,Shard> oids;
-
- for ( vector<Shard>::iterator i=all.begin(); i!=all.end(); ++i ){
+
+ for ( vector<Shard>::iterator i=all.begin(); i!=all.end(); ++i ) {
Shard s = *i;
BSONObj f = s.runCommand( "admin" , "features" );
- if ( f["oidMachine"].isNumber() ){
+ if ( f["oidMachine"].isNumber() ) {
int x = f["oidMachine"].numberInt();
- if ( oids.count(x) == 0 ){
+ if ( oids.count(x) == 0 ) {
oids[x] = s;
}
else {
@@ -121,7 +121,7 @@ namespace mongo {
return true;
}
- void Balancer::_doBalanceRound( DBClientBase& conn, vector<CandidateChunkPtr>* candidateChunks ){
+ void Balancer::_doBalanceRound( DBClientBase& conn, vector<CandidateChunkPtr>* candidateChunks ) {
assert( candidateChunks );
//
@@ -131,7 +131,7 @@ namespace mongo {
auto_ptr<DBClientCursor> cursor = conn.query( ShardNS::collection , BSONObj() );
vector< string > collections;
- while ( cursor->more() ){
+ while ( cursor->more() ) {
BSONObj col = cursor->next();
// sharded collections will have a shard "key".
@@ -152,7 +152,7 @@ namespace mongo {
//
// TODO: skip unresponsive shards and mark information as stale.
//
-
+
vector<Shard> allShards;
Shard::getAllShards( allShards );
if ( allShards.size() < 2) {
@@ -160,12 +160,12 @@ namespace mongo {
return;
}
- map< string, BSONObj > shardLimitsMap;
- for ( vector<Shard>::const_iterator it = allShards.begin(); it != allShards.end(); ++it ){
+ map< string, BSONObj > shardLimitsMap;
+ for ( vector<Shard>::const_iterator it = allShards.begin(); it != allShards.end(); ++it ) {
const Shard& s = *it;
ShardStatus status = s.getStatus();
- BSONObj limitsObj = BSON( ShardFields::maxSize( s.getMaxSize() ) <<
+ BSONObj limitsObj = BSON( ShardFields::maxSize( s.getMaxSize() ) <<
LimitsFields::currSize( status.mapped() ) <<
ShardFields::draining( s.isDraining() ) <<
LimitsFields::hasOpsQueued( status.hasOpsQueued() )
@@ -183,7 +183,7 @@ namespace mongo {
map< string,vector<BSONObj> > shardToChunksMap;
cursor = conn.query( ShardNS::chunk , QUERY( "ns" << ns ).sort( "min" ) );
- while ( cursor->more() ){
+ while ( cursor->more() ) {
BSONObj chunk = cursor->next();
vector<BSONObj>& chunks = shardToChunksMap[chunk["shard"].String()];
chunks.push_back( chunk.getOwned() );
@@ -194,8 +194,8 @@ namespace mongo {
log(1) << "skipping empty collection (" << ns << ")";
continue;
}
-
- for ( vector<Shard>::iterator i=allShards.begin(); i!=allShards.end(); ++i ){
+
+ for ( vector<Shard>::iterator i=allShards.begin(); i!=allShards.end(); ++i ) {
// this just makes sure there is an entry in shardToChunksMap for every shard
Shard s = *i;
shardToChunksMap[s.getName()].size();
@@ -208,7 +208,7 @@ namespace mongo {
bool Balancer::_init() {
try {
-
+
log() << "about to contact config servers and shards" << endl;
// contact the config server and refresh shard information
@@ -225,11 +225,12 @@ namespace mongo {
_myid = buf.str();
_started = time(0);
- log() << "balancer id: " << _myid << " started at " << time_t_to_String_short(_started) << endl;
+ log() << "balancer id: " << _myid << " started at " << time_t_to_String_short(_started) << endl;
return true;
- } catch ( std::exception& ) {
+ }
+ catch ( std::exception& ) {
log( LL_WARNING ) << "could not initialize balancer, please check that all shards and config servers are up" << endl;
return false;
@@ -237,7 +238,7 @@ namespace mongo {
}
}
- void Balancer::run(){
+ void Balancer::run() {
// this is the body of a BackgroundJob so if we throw here we're basically ending the balancer thread prematurely
while ( ! inShutdown() ) {
@@ -256,28 +257,28 @@ namespace mongo {
ConnectionString config = configServer.getConnectionString();
DistributedLock balanceLock( config , "balancer" );
- while ( ! inShutdown() ){
-
+ while ( ! inShutdown() ) {
+
try {
ScopedDbConnection conn( config );
- _ping( conn.conn() );
- if ( ! _checkOIDs() ){
+ _ping( conn.conn() );
+ if ( ! _checkOIDs() ) {
uassert( 13258 , "oids broken after resetting!" , _checkOIDs() );
}
-
+
// use fresh shard state
- Shard::reloadShardInfo();
+ Shard::reloadShardInfo();
dist_lock_try lk( &balanceLock , "doing balance round" );
- if ( ! lk.got() ){
+ if ( ! lk.got() ) {
log(1) << "skipping balancing round because another balancer is active" << endl;
conn.done();
sleepsecs( 30 ); // no need to wake up soon
continue;
}
-
+
if ( ! grid.shouldBalance() ) {
log(1) << "skipping balancing round because balancing is disabled" << endl;;
conn.done();
@@ -286,26 +287,27 @@ namespace mongo {
continue;
}
- log(1) << "*** start balancing round" << endl;
+ log(1) << "*** start balancing round" << endl;
vector<CandidateChunkPtr> candidateChunks;
_doBalanceRound( conn.conn() , &candidateChunks );
if ( candidateChunks.size() == 0 ) {
log(1) << "no need to move any chunk" << endl;
- } else {
+ }
+ else {
_balancedLastTime = _moveChunks( &candidateChunks );
}
- log(1) << "*** end of balancing round" << endl;
+ log(1) << "*** end of balancing round" << endl;
conn.done();
sleepsecs( _balancedLastTime ? 5 : 10 );
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
log() << "caught exception while doing balance: " << e.what() << endl;
// Just to match the opening statement if in log level 1
- log(1) << "*** End of balancing round" << endl;
+ log(1) << "*** End of balancing round" << endl;
sleepsecs( 30 ); // sleep a fair amount b/c of error
continue;
diff --git a/s/balance.h b/s/balance.h
index 80662b5c761..0ad2647ea40 100644
--- a/s/balance.h
+++ b/s/balance.h
@@ -24,14 +24,14 @@
#include "balancer_policy.h"
namespace mongo {
-
+
/**
* The balancer is a background task that tries to keep the number of chunks across all servers of the cluster even. Although
 * every mongos will have one balancer running, only one of them will be active at any given point in time. The balancer
* uses a 'DistributedLock' for that coordination.
*
- * The balancer does act continuously but in "rounds". At a given round, it would decide if there is an imbalance by
- * checking the difference in chunks between the most and least loaded shards. It would issue a request for a chunk
+ * The balancer acts continuously, but in "rounds". In a given round, it decides whether there is an imbalance by
+ * checking the difference in chunk counts between the most and least loaded shards. If it finds one, it issues at most one request for a chunk
 * migration per round.
*/
class Balancer : public BackgroundJob {
@@ -43,7 +43,7 @@ namespace mongo {
virtual void run();
- virtual string name() const { return "Balancer"; }
+ virtual string name() const { return "Balancer"; }
private:
typedef BalancerPolicy::ChunkInfo CandidateChunk;
@@ -56,10 +56,10 @@ namespace mongo {
time_t _started;
// number of moved chunks in last round
- int _balancedLastTime;
+ int _balancedLastTime;
// decide which chunks to move; owned here.
- BalancerPolicy* _policy;
+ BalancerPolicy* _policy;
/**
* Checks that the balancer can connect to all servers it needs to do its job.
@@ -69,7 +69,7 @@ namespace mongo {
* This method throws on a network exception
*/
bool _init();
-
+
/**
* Gathers all the necessary information about shards and chunks, and decides whether there are candidate chunks to
* be moved.
@@ -81,7 +81,7 @@ namespace mongo {
/**
* Issues chunk migration request, one at a time.
- *
+ *
* @param candidateChunks possible chunks to move
* @return number of chunks effectively moved
*/
@@ -100,6 +100,6 @@ namespace mongo {
bool _checkOIDs();
};
-
+
extern Balancer balancer;
}
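Illustration (not part of the patch): Balancer::run() in the corresponding .cpp above is a guarded polling loop: ping the config server, verify shard OIDs, try the distributed "balancer" lock, do one round, then sleep 5s if chunks moved and 10s otherwise (30s on lock contention or error). A standalone sketch of just that cadence, with tryLock/doRound as hypothetical stand-ins for dist_lock_try and _doBalanceRound/_moveChunks:

#include <chrono>
#include <thread>

static bool tryLock() { return true; }  // stand-in for dist_lock_try::got()
static int  doRound() { return 0; }     // stand-in: number of chunks moved
static void sleepsecs(int s) { std::this_thread::sleep_for(std::chrono::seconds(s)); }

void balancerLoop(const bool& inShutdown) {
    int balancedLastTime = 0;
    while (!inShutdown) {
        if (!tryLock()) {       // another mongos holds the balancer lock
            sleepsecs(30);      // no need to wake up soon
            continue;
        }
        balancedLastTime = doRound();
        sleepsecs(balancedLastTime ? 5 : 10);  // poll faster while work remains
    }
}

int main() {
    bool shutdown = true;       // exit immediately in this sketch
    balancerLoop(shutdown);
    return 0;
}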
diff --git a/s/balancer_policy.cpp b/s/balancer_policy.cpp
index e78f479efcf..0597a08945f 100644
--- a/s/balancer_policy.cpp
+++ b/s/balancer_policy.cpp
@@ -32,15 +32,15 @@ namespace mongo {
BSONField<long long> LimitsFields::currSize( "currSize" );
BSONField<bool> LimitsFields::hasOpsQueued( "hasOpsQueued" );
- BalancerPolicy::ChunkInfo* BalancerPolicy::balance( const string& ns,
- const ShardToLimitsMap& shardToLimitsMap,
- const ShardToChunksMap& shardToChunksMap,
- int balancedLastTime ){
+ BalancerPolicy::ChunkInfo* BalancerPolicy::balance( const string& ns,
+ const ShardToLimitsMap& shardToLimitsMap,
+ const ShardToChunksMap& shardToChunksMap,
+ int balancedLastTime ) {
pair<string,unsigned> min("",numeric_limits<unsigned>::max());
pair<string,unsigned> max("",0);
vector<string> drainingShards;
-
- for (ShardToChunksIter i = shardToChunksMap.begin(); i!=shardToChunksMap.end(); ++i ){
+
+ for (ShardToChunksIter i = shardToChunksMap.begin(); i!=shardToChunksMap.end(); ++i ) {
// Find whether this shard's capacity or availability are exhausted
const string& shard = i->first;
@@ -53,37 +53,37 @@ namespace mongo {
            // Is this shard a better chunk receiver than the current one?
// Shards that would be bad receiver candidates:
- // + maxed out shards
- // + draining shards
+ // + maxed out shards
+ // + draining shards
// + shards with operations queued for writeback
const unsigned size = i->second.size();
- if ( ! maxedOut && ! draining && ! opsQueued ){
- if ( size < min.second ){
+ if ( ! maxedOut && ! draining && ! opsQueued ) {
+ if ( size < min.second ) {
min = make_pair( shard , size );
}
}
            // Check whether this shard is a better chunk donor than the current one.
// Draining shards take a lower priority than overloaded shards.
- if ( size > max.second ){
- max = make_pair( shard , size );
+ if ( size > max.second ) {
+ max = make_pair( shard , size );
}
- if ( draining && (size > 0)){
+ if ( draining && (size > 0)) {
drainingShards.push_back( shard );
}
}
- // If there is no candidate chunk receiver -- they may have all been maxed out,
- // draining, ... -- there's not much that the policy can do.
- if ( min.second == numeric_limits<unsigned>::max() ){
+ // If there is no candidate chunk receiver -- they may have all been maxed out,
+ // draining, ... -- there's not much that the policy can do.
+ if ( min.second == numeric_limits<unsigned>::max() ) {
log() << "no availalable shards to take chunks" << endl;
return NULL;
}
-
+
log(1) << "collection : " << ns << endl;
log(1) << "donor : " << max.second << " chunks on " << max.first << endl;
log(1) << "receiver : " << min.second << " chunks on " << min.first << endl;
- if ( ! drainingShards.empty() ){
+ if ( ! drainingShards.empty() ) {
string drainingStr;
joinStringDelim( drainingShards, &drainingStr, ',' );
log(1) << "draining : " << ! drainingShards.empty() << "(" << drainingShards.size() << ")" << endl;
@@ -94,34 +94,36 @@ namespace mongo {
const int imbalance = max.second - min.second;
const int threshold = balancedLastTime ? 2 : 8;
string from, to;
- if ( imbalance >= threshold ){
+ if ( imbalance >= threshold ) {
from = max.first;
to = min.first;
- } else if ( ! drainingShards.empty() ){
+ }
+ else if ( ! drainingShards.empty() ) {
from = drainingShards[ rand() % drainingShards.size() ];
to = min.first;
- } else {
- // Everything is balanced here!
+ }
+ else {
+ // Everything is balanced here!
return NULL;
}
const vector<BSONObj>& chunksFrom = shardToChunksMap.find( from )->second;
const vector<BSONObj>& chunksTo = shardToChunksMap.find( to )->second;
BSONObj chunkToMove = pickChunk( chunksFrom , chunksTo );
- log() << "chose [" << from << "] to [" << to << "] " << chunkToMove << endl;
+ log() << "chose [" << from << "] to [" << to << "] " << chunkToMove << endl;
return new ChunkInfo( ns, to, from, chunkToMove );
}
- BSONObj BalancerPolicy::pickChunk( const vector<BSONObj>& from, const vector<BSONObj>& to ){
+ BSONObj BalancerPolicy::pickChunk( const vector<BSONObj>& from, const vector<BSONObj>& to ) {
        // It is possible for a donor ('from') shard to have fewer chunks than a receiver one ('to')
- // if the donor is in draining mode.
-
+ // if the donor is in draining mode.
+
if ( to.size() == 0 )
return from[0];
-
+
if ( from[0]["min"].Obj().woCompare( to[to.size()-1]["max"].Obj() , BSONObj() , false ) == 0 )
return from[0];
@@ -131,38 +133,38 @@ namespace mongo {
return from[0];
}
- bool BalancerPolicy::isSizeMaxed( BSONObj limits ){
- // If there's no limit information for the shard, assume it can be a chunk receiver
+ bool BalancerPolicy::isSizeMaxed( BSONObj limits ) {
+ // If there's no limit information for the shard, assume it can be a chunk receiver
        // (i.e., there's no bound on space utilization)
- if ( limits.isEmpty() ){
+ if ( limits.isEmpty() ) {
return false;
}
long long maxUsage = limits[ ShardFields::maxSize.name() ].Long();
- if ( maxUsage == 0 ){
+ if ( maxUsage == 0 ) {
return false;
}
long long currUsage = limits[ LimitsFields::currSize.name() ].Long();
- if ( currUsage < maxUsage ){
+ if ( currUsage < maxUsage ) {
return false;
}
return true;
}
- bool BalancerPolicy::isDraining( BSONObj limits ){
+ bool BalancerPolicy::isDraining( BSONObj limits ) {
BSONElement draining = limits[ ShardFields::draining.name() ];
- if ( draining.eoo() || ! draining.Bool() ){
+ if ( draining.eoo() || ! draining.Bool() ) {
return false;
}
return true;
}
- bool BalancerPolicy::hasOpsQueued( BSONObj limits ){
+ bool BalancerPolicy::hasOpsQueued( BSONObj limits ) {
BSONElement opsQueued = limits[ LimitsFields::hasOpsQueued.name() ];
- if ( opsQueued.eoo() || ! opsQueued.Bool() ){
+ if ( opsQueued.eoo() || ! opsQueued.Bool() ) {
return false;
}
return true;
diff --git a/s/balancer_policy.h b/s/balancer_policy.h
index ae6dc6f6675..cef5aa64afc 100644
--- a/s/balancer_policy.h
+++ b/s/balancer_policy.h
@@ -29,20 +29,20 @@ namespace mongo {
/**
 * Returns a suggested chunk to move within a collection's shards, given information about
- * space usage and number of chunks for that collection. If the policy doesn't recommend
+ * space usage and number of chunks for that collection. If the policy doesn't recommend
* moving, it returns NULL.
*
 * @param ns is the collection's namespace.
- * @param shardLimitMap is a map from shardId to an object that describes (for now) space
+ * @param shardLimitMap is a map from shardId to an object that describes (for now) space
* cap and usage. E.g.: { "maxSize" : <size_in_MB> , "usedSize" : <size_in_MB> }.
* @param shardToChunksMap is a map from shardId to chunks that live there. A chunk's format
- * is { }.
+ * is { }.
* @param balancedLastTime is the number of chunks effectively moved in the last round.
 * @returns NULL or ChunkInfo of the best move to make towards balancing the collection.
*/
typedef map< string,BSONObj > ShardToLimitsMap;
typedef map< string,vector<BSONObj> > ShardToChunksMap;
- static ChunkInfo* balance( const string& ns, const ShardToLimitsMap& shardToLimitsMap,
+ static ChunkInfo* balance( const string& ns, const ShardToLimitsMap& shardToLimitsMap,
const ShardToChunksMap& shardToChunksMap, int balancedLastTime );
// below exposed for testing purposes only -- treat it as private --
@@ -57,7 +57,7 @@ namespace mongo {
static bool isSizeMaxed( BSONObj shardLimits );
/**
- * Returns true if 'shardLimist' contains a field "draining". Expects the optional field
+ * Returns true if 'shardLimits' contains a field "draining". Expects the optional field
 * "isDraining" on 'shardLimits'.
*/
static bool isDraining( BSONObj shardLimits );
@@ -81,7 +81,7 @@ namespace mongo {
const BSONObj chunk;
ChunkInfo( const string& a_ns , const string& a_to , const string& a_from , const BSONObj& a_chunk )
- : ns( a_ns ) , to( a_to ) , from( a_from ), chunk( a_chunk ){}
+ : ns( a_ns ) , to( a_to ) , from( a_from ), chunk( a_chunk ) {}
};
/**
@@ -92,7 +92,7 @@ namespace mongo {
static BSONField<long long> currSize; // currently used disk space in bytes
static BSONField<bool> hasOpsQueued; // writeback queue is not empty?
};
-
+
} // namespace mongo
#endif // S_BALANCER_POLICY_HEADER
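Illustration (not part of the patch): BalancerPolicy::balance() above reduces to a small decision: receivers exclude maxed-out, draining, and writeback-queued shards; the donor is the most loaded shard; and a move is suggested only when the chunk-count spread reaches a threshold of 2 if the previous round moved chunks, 8 otherwise (or when a draining shard still holds chunks). A standalone sketch of that decision only (Shard and pickMove here are simplified stand-ins, not the real types):

#include <cassert>
#include <string>
#include <vector>

struct Shard {
    std::string name;
    unsigned chunks;
    bool eligibleReceiver;  // not maxed out, not draining, no ops queued
    bool draining;
};

// Fills from/to and returns true when a migration should be suggested.
bool pickMove(const std::vector<Shard>& shards, int balancedLastTime,
              std::string& from, std::string& to) {
    unsigned minC = ~0u, maxC = 0;
    std::string minS, maxS, drainingS;
    for (size_t i = 0; i < shards.size(); ++i) {
        const Shard& s = shards[i];
        if (s.eligibleReceiver && s.chunks < minC) { minC = s.chunks; minS = s.name; }
        if (s.chunks > maxC) { maxC = s.chunks; maxS = s.name; }
        if (s.draining && s.chunks > 0) drainingS = s.name;
    }
    if (minC == ~0u) return false;                   // nobody can receive chunks
    const int threshold = balancedLastTime ? 2 : 8;  // lower bar while mid-rebalance
    if ((int)(maxC - minC) >= threshold) { from = maxS; to = minS; return true; }
    if (!drainingS.empty()) { from = drainingS; to = minS; return true; }
    return false;                                    // everything is balanced
}

int main() {
    std::vector<Shard> v;
    Shard a = { "a", 10, true, false };
    Shard b = { "b", 1, true, false };
    v.push_back(a); v.push_back(b);
    std::string from, to;
    assert(pickMove(v, 0, from, to) && from == "a" && to == "b");  // spread 9 >= 8
    return 0;
}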
diff --git a/s/chunk.cpp b/s/chunk.cpp
index 342ea6ec463..76b883c2a39 100644
--- a/s/chunk.cpp
+++ b/s/chunk.cpp
@@ -30,9 +30,9 @@
namespace mongo {
- inline bool allOfType(BSONType type, const BSONObj& o){
+ inline bool allOfType(BSONType type, const BSONObj& o) {
BSONObjIterator it(o);
- while(it.more()){
+ while(it.more()) {
if (it.next().type() != type)
return false;
}
@@ -42,36 +42,36 @@ namespace mongo {
// ------- Shard --------
string Chunk::chunkMetadataNS = "config.chunks";
-
+
int Chunk::MaxChunkSize = 1024 * 1024 * 64;
-
+
Chunk::Chunk( ChunkManager * manager ) : _manager(manager), _lastmod(0) {
_setDataWritten();
}
Chunk::Chunk(ChunkManager * info , const BSONObj& min, const BSONObj& max, const Shard& shard)
- : _manager(info), _min(min), _max(max), _shard(shard), _lastmod(0) {
+ : _manager(info), _min(min), _max(max), _shard(shard), _lastmod(0) {
_setDataWritten();
}
- void Chunk::_setDataWritten(){
+ void Chunk::_setDataWritten() {
_dataWritten = rand() % ( MaxChunkSize / 5 );
}
string Chunk::getns() const {
assert( _manager );
- return _manager->getns();
+ return _manager->getns();
}
- bool Chunk::contains( const BSONObj& obj ) const{
+ bool Chunk::contains( const BSONObj& obj ) const {
return
_manager->getShardKey().compare( getMin() , obj ) <= 0 &&
_manager->getShardKey().compare( obj , getMax() ) < 0;
}
bool ChunkRange::contains(const BSONObj& obj) const {
- // same as Chunk method
- return
+ // same as Chunk method
+ return
_manager->getShardKey().compare( getMin() , obj ) <= 0 &&
_manager->getShardKey().compare( obj , getMax() ) < 0;
}
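Illustration (not part of the patch): Chunk::contains() and ChunkRange::contains() above both test a half-open range under the shard key's ordering: a document belongs to a chunk when min <= key < max, so adjacent chunks never overlap and every key falls in exactly one chunk. A standalone sketch over plain ints (a hypothetical stand-in for the shard key comparison):

#include <cassert>

struct Range { int min, max; };  // covers [min, max)

bool contains(const Range& r, int key) {
    return r.min <= key && key < r.max;
}

int main() {
    Range lo = { 0, 10 }, hi = { 10, 20 };          // adjacent chunks share a bound
    assert(contains(lo, 0) && !contains(lo, 10));   // max is exclusive...
    assert(contains(hi, 10));                       // ...the boundary key belongs to the next chunk
    return 0;
}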
@@ -96,24 +96,24 @@ namespace mongo {
BSONObj k = _manager->getShardKey().key();
BSONObjBuilder r;
-
+
BSONObjIterator i(k);
while( i.more() ) {
BSONElement e = i.next();
uassert( 10163 , "can only handle numbers here - which i think is correct" , e.isNumber() );
r.append( e.fieldName() , -1 * e.number() );
}
-
+
q.sort( r.obj() );
}
// find the extreme key
BSONObj end = conn->findOne( _manager->getns() , q );
conn.done();
-
+
if ( end.isEmpty() )
return BSONObj();
-
+
return _manager->getShardKey().extractKey( end );
}
@@ -129,22 +129,22 @@ namespace mongo {
cmd.appendBool( "force" , true );
BSONObj cmdObj = cmd.obj();
- if ( ! conn->runCommand( "admin" , cmdObj , result )){
+ if ( ! conn->runCommand( "admin" , cmdObj , result )) {
conn.done();
ostringstream os;
os << "splitVector command (median key) failed: " << result;
uassert( 13503 , os.str() , 0 );
- }
+ }
BSONObjIterator it( result.getObjectField( "splitKeys" ) );
- if ( it.more() ){
+ if ( it.more() ) {
medianKey = it.next().Obj().getOwned();
}
conn.done();
}
- void Chunk::pickSplitVector( vector<BSONObj>& splitPoints , int chunkSize /* bytes */, int maxPoints, int maxObjs ) const {
+ void Chunk::pickSplitVector( vector<BSONObj>& splitPoints , int chunkSize /* bytes */, int maxPoints, int maxObjs ) const {
// Ask the mongod holding this chunk to figure out the split points.
ScopedDbConnection conn( getShard().getConnString() );
BSONObj result;
@@ -158,21 +158,21 @@ namespace mongo {
cmd.append( "maxChunkObjects" , maxObjs );
BSONObj cmdObj = cmd.obj();
- if ( ! conn->runCommand( "admin" , cmdObj , result )){
+ if ( ! conn->runCommand( "admin" , cmdObj , result )) {
conn.done();
ostringstream os;
os << "splitVector command failed: " << result;
uassert( 13345 , os.str() , 0 );
- }
+ }
BSONObjIterator it( result.getObjectField( "splitKeys" ) );
- while ( it.more() ){
+ while ( it.more() ) {
splitPoints.push_back( it.next().Obj().getOwned() );
}
conn.done();
}
- ChunkPtr Chunk::singleSplit( bool force , BSONObj& res ){
+ ChunkPtr Chunk::singleSplit( bool force , BSONObj& res ) {
vector<BSONObj> splitPoint;
// if splitting is not obligatory we may return early if there are not enough data
@@ -181,7 +181,7 @@ namespace mongo {
if ( ! force ) {
vector<BSONObj> candidates;
const int maxPoints = 2;
- const int maxObjs = 250000;
+ const int maxObjs = 250000;
pickSplitVector( candidates , getManager()->getCurrentDesiredChunkSize() , maxPoints , maxObjs );
if ( candidates.size() <= 1 ) {
// no split points means there isn't enough data to split on
@@ -193,7 +193,8 @@ namespace mongo {
splitPoint.push_back( candidates.front() );
- } else {
+ }
+ else {
// if forcing a split, use the chunk's median key
BSONObj medianKey;
pickMedianKey( medianKey );
@@ -211,13 +212,14 @@ namespace mongo {
splitPoint.push_back( key );
}
- } else if ( maxIsInf() ) {
+ }
+ else if ( maxIsInf() ) {
splitPoint.clear();
BSONObj key = _getExtremeKey( -1 );
if ( ! key.isEmpty() ) {
splitPoint.push_back( key );
}
- }
+ }
// Normally, we'd have a sound split point here if the chunk is not empty. It's also a good place to
// sanity check.
@@ -229,14 +231,14 @@ namespace mongo {
return multiSplit( splitPoint , res );
}
-
+
ChunkPtr Chunk::multiSplit( const vector<BSONObj>& m , BSONObj& res ) {
const size_t maxSplitPoints = 256;
uassert( 10165 , "can't split as shard doesn't have a manager" , _manager );
uassert( 13332 , "need a split key to split chunk" , !m.empty() );
uassert( 13333 , "can't split a chunk in that many parts", m.size() < maxSplitPoints );
- uassert( 13003 , "can't split a chunk with only one distinct value" , _min.woCompare(_max) );
+ uassert( 13003 , "can't split a chunk with only one distinct value" , _min.woCompare(_max) );
ScopedDbConnection conn( getShard().getConnString() );
@@ -248,7 +250,7 @@ namespace mongo {
cmd.append( "from" , getShard().getConnString() );
cmd.append( "splitKeys" , m );
cmd.append( "shardId" , genID() );
- cmd.append( "configdb" , configServer.modelServer() );
+ cmd.append( "configdb" , configServer.modelServer() );
BSONObj cmdObj = cmd.obj();
if ( ! conn->runCommand( "admin" , cmdObj , res )) {
@@ -277,30 +279,30 @@ namespace mongo {
// return the second half, if a single split, or the first new chunk, if a multisplit.
return _manager->findChunk( m[0] );
- }
+ }
- bool Chunk::moveAndCommit( const Shard& to , long long chunkSize /* bytes */, BSONObj& res ){
+ bool Chunk::moveAndCommit( const Shard& to , long long chunkSize /* bytes */, BSONObj& res ) {
uassert( 10167 , "can't move shard to its current location!" , getShard() != to );
-
+
log() << "moving chunk ns: " << _manager->getns() << " moving ( " << toString() << ") " << _shard.toString() << " -> " << to.toString() << endl;
-
+
Shard from = _shard;
-
+
ScopedDbConnection fromconn( from);
bool worked = fromconn->runCommand( "admin" ,
- BSON( "moveChunk" << _manager->getns() <<
- "from" << from.getConnString() <<
- "to" << to.getConnString() <<
- "min" << _min <<
- "max" << _max <<
- "maxChunkSizeBytes" << chunkSize <<
- "shardId" << genID() <<
- "configdb" << configServer.modelServer()
- ) ,
+ BSON( "moveChunk" << _manager->getns() <<
+ "from" << from.getConnString() <<
+ "to" << to.getConnString() <<
+ "min" << _min <<
+ "max" << _max <<
+ "maxChunkSizeBytes" << chunkSize <<
+ "shardId" << genID() <<
+ "configdb" << configServer.modelServer()
+ ) ,
res
- );
-
+ );
+
fromconn.done();
// if succeeded, needs to reload to pick up the new location
@@ -310,56 +312,57 @@ namespace mongo {
return worked;
}
-
- bool Chunk::splitIfShould( long dataWritten ){
+
+ bool Chunk::splitIfShould( long dataWritten ) {
LastError::Disabled d( lastError.get() );
try {
- _dataWritten += dataWritten;
+ _dataWritten += dataWritten;
int splitThreshold = getManager()->getCurrentDesiredChunkSize();
- if ( minIsInf() || maxIsInf() ){
+ if ( minIsInf() || maxIsInf() ) {
splitThreshold = (int) ((double)splitThreshold * .9);
}
if ( _dataWritten < splitThreshold / 5 )
return false;
-
+
log(1) << "about to initiate autosplit: " << *this << " dataWritten: " << _dataWritten << endl;
-
+
_dataWritten = 0; // reset so we check often enough
-
- BSONObj res;
+
+ BSONObj res;
ChunkPtr newShard = singleSplit( false /* does not force a split if not enough data */ , res );
- if ( newShard.get() == NULL ){
+ if ( newShard.get() == NULL ) {
// singleSplit would have issued a message if we got here
return false;
}
- log() << "autosplitted " << _manager->getns() << " shard: " << toString()
- << " on: " << newShard->getMax() << "(splitThreshold " << splitThreshold << ")"
+ log() << "autosplitted " << _manager->getns() << " shard: " << toString()
+                   << " on: " << newShard->getMax() << " (splitThreshold " << splitThreshold << ")"
#ifdef _DEBUG
<< " size: " << getPhysicalSize() // slow - but can be usefule when debugging
#endif
<< endl;
-
+
moveIfShould( newShard );
-
+
return true;
- } catch ( std::exception& e ){
+ }
+ catch ( std::exception& e ) {
// if the collection lock is taken (e.g. we're migrating), it is fine for the split to fail.
log( LL_WARNING ) << "could have autosplit on collection: " << _manager->getns() << " but: " << e.what() << endl;
return false;
}
}
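
[editor's note] splitIfShould is deliberately cheap on the fast path: it only accumulates an approximate byte count and bails until a fifth of the desired chunk size has been written, with a 10% lower bar for chunks touching the key-space extremes. A minimal sketch of that bookkeeping with the MongoDB types stripped out (illustration only):

    bool shouldProbeForSplit( long long& dataWritten , long long justWritten ,
                              int desiredChunkSize , bool touchesKeySpaceEdge ) {
        dataWritten += justWritten;
        int splitThreshold = desiredChunkSize;
        if ( touchesKeySpaceEdge )
            splitThreshold = (int)((double)splitThreshold * .9); // edge chunks split sooner
        if ( dataWritten < splitThreshold / 5 )
            return false;        // fast path: not enough traffic yet
        dataWritten = 0;         // reset so we check often enough
        return true;             // caller now pays for the real split probe
    }
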
- bool Chunk::moveIfShould( ChunkPtr newChunk ){
+ bool Chunk::moveIfShould( ChunkPtr newChunk ) {
ChunkPtr toMove;
-
- if ( newChunk->countObjects(2) <= 1 ){
+
+ if ( newChunk->countObjects(2) <= 1 ) {
toMove = newChunk;
}
- else if ( this->countObjects(2) <= 1 ){
+ else if ( this->countObjects(2) <= 1 ) {
DEV assert( shared_from_this() );
toMove = shared_from_this();
}
@@ -369,9 +372,9 @@ namespace mongo {
}
assert( toMove );
-
+
Shard newLocation = Shard::pick( getShard() );
- if ( getShard() == newLocation ){
+ if ( getShard() == newLocation ) {
// if this is the best shard, then we shouldn't do anything (Shard::pick already logged our shard).
log(1) << "recently split chunk: " << toString() << "already in the best shard" << endl;
return 0;
@@ -380,35 +383,35 @@ namespace mongo {
log() << "moving chunk (auto): " << toMove->toString() << " to: " << newLocation.toString() << " #objects: " << toMove->countObjects() << endl;
BSONObj res;
- massert( 10412 ,
- str::stream() << "moveAndCommit failed: " << res ,
+ massert( 10412 ,
+ str::stream() << "moveAndCommit failed: " << res ,
toMove->moveAndCommit( newLocation , MaxChunkSize , res ) );
-
+
return true;
}
- long Chunk::getPhysicalSize() const{
+ long Chunk::getPhysicalSize() const {
ScopedDbConnection conn( getShard().getConnString() );
-
+
BSONObj result;
- uassert( 10169 , "datasize failed!" , conn->runCommand( "admin" ,
- BSON( "datasize" << _manager->getns()
- << "keyPattern" << _manager->getShardKey().key()
- << "min" << getMin()
- << "max" << getMax()
- << "maxSize" << ( MaxChunkSize + 1 )
- << "estimate" << true
- ) , result ) );
-
+ uassert( 10169 , "datasize failed!" , conn->runCommand( "admin" ,
+ BSON( "datasize" << _manager->getns()
+ << "keyPattern" << _manager->getShardKey().key()
+ << "min" << getMin()
+ << "max" << getMax()
+ << "maxSize" << ( MaxChunkSize + 1 )
+ << "estimate" << true
+ ) , result ) );
+
conn.done();
return (long)result["size"].number();
}
- int Chunk::countObjects(int maxCount) const {
+ int Chunk::countObjects(int maxCount) const {
static const BSONObj fields = BSON("_id" << 1 );
ShardConnection conn( getShard() , _manager->getns() );
-
+
// not using regular count as this is more flexible and supports $min/$max
Query q = Query().minKey(_min).maxKey(_max);
int n;
@@ -416,33 +419,33 @@ namespace mongo {
auto_ptr<DBClientCursor> c = conn->query(_manager->getns(), q, maxCount, 0, &fields);
assert( c.get() );
n = c->itcount();
- }
+ }
conn.done();
return n;
}
- void Chunk::appendShortVersion( const char * name , BSONObjBuilder& b ){
+ void Chunk::appendShortVersion( const char * name , BSONObjBuilder& b ) {
BSONObjBuilder bb( b.subobjStart( name ) );
bb.append( "min" , _min );
bb.append( "max" , _max );
bb.done();
}
-
- bool Chunk::operator==( const Chunk& s ) const{
- return
+
+ bool Chunk::operator==( const Chunk& s ) const {
+ return
_manager->getShardKey().compare( _min , s._min ) == 0 &&
_manager->getShardKey().compare( _max , s._max ) == 0
;
}
- void Chunk::serialize(BSONObjBuilder& to,ShardChunkVersion myLastMod){
-
+ void Chunk::serialize(BSONObjBuilder& to,ShardChunkVersion myLastMod) {
+
to.append( "_id" , genID( _manager->getns() , _min ) );
- if ( myLastMod.isSet() ){
+ if ( myLastMod.isSet() ) {
to.appendTimestamp( "lastmod" , myLastMod );
}
- else if ( _lastmod.isSet() ){
+ else if ( _lastmod.isSet() ) {
assert( _lastmod > 0 && _lastmod < 1000 );
to.appendTimestamp( "lastmod" , _lastmod );
}
@@ -461,15 +464,15 @@ namespace mongo {
buf << ns << "-";
BSONObjIterator i(o);
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
buf << e.fieldName() << "_" << e.toString(false, true);
}
return buf.str();
}
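
[editor's note] The loop above fixes the _id format for entries in config.chunks: the namespace, then fieldName_value for every shard-key field of the chunk's min bound. A standalone sketch producing the same shape (the sample values are made up):

    #include <iostream>
    #include <sstream>
    #include <string>

    std::string chunkID( const std::string& ns , const std::string& field , const std::string& minValue ) {
        std::ostringstream buf;
        buf << ns << "-" << field << "_" << minValue;
        return buf.str();
    }

    int main() {
        // e.g. a chunk of test.foo whose min bound is { x : 42 }
        std::cout << chunkID( "test.foo" , "x" , "42" ) << std::endl; // prints: test.foo-x_42
        return 0;
    }
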
-
- void Chunk::unserialize(const BSONObj& from){
+
+ void Chunk::unserialize(const BSONObj& from) {
string ns = from.getStringField( "ns" );
_shard.reset( from.getStringField( "shard" ) );
@@ -478,15 +481,15 @@ namespace mongo {
BSONElement e = from["minDotted"];
- if (e.eoo()){
+ if (e.eoo()) {
_min = from.getObjectField( "min" ).getOwned();
_max = from.getObjectField( "max" ).getOwned();
- }
+ }
else { // TODO delete this case after giving people a chance to migrate
_min = e.embeddedObject().getOwned();
_max = from.getObjectField( "maxDotted" ).getOwned();
}
-
+
uassert( 10170 , "Chunk needs a ns" , ! ns.empty() );
uassert( 13327 , "Chunk ns must match server ns" , ns == _manager->getns() );
@@ -501,8 +504,8 @@ namespace mongo {
ss << "ns:" << _manager->getns() << " at: " << _shard.toString() << " lastmod: " << _lastmod.toString() << " min: " << _min << " max: " << _max;
return ss.str();
}
-
- ShardKeyPattern Chunk::skey() const{
+
+ ShardKeyPattern Chunk::skey() const {
return _manager->getShardKey();
}
@@ -510,44 +513,43 @@ namespace mongo {
AtomicUInt ChunkManager::NextSequenceNumber = 1;
- ChunkManager::ChunkManager( string ns , ShardKeyPattern pattern , bool unique ) :
- _ns( ns ) , _key( pattern ) , _unique( unique ) , _lock("rw:ChunkManager"),
- _nsLock( ConnectionString( configServer.modelServer() , ConnectionString::SYNC ) , ns )
- {
+ ChunkManager::ChunkManager( string ns , ShardKeyPattern pattern , bool unique ) :
+ _ns( ns ) , _key( pattern ) , _unique( unique ) , _lock("rw:ChunkManager"),
+ _nsLock( ConnectionString( configServer.modelServer() , ConnectionString::SYNC ) , ns ) {
_reload_inlock(); // will set _sequenceNumber
}
- ChunkManager::~ChunkManager(){
+ ChunkManager::~ChunkManager() {
_chunkMap.clear();
_chunkRanges.clear();
_shards.clear();
}
-
- void ChunkManager::_reload(){
+
+ void ChunkManager::_reload() {
rwlock lk( _lock , true );
_reload_inlock();
}
- void ChunkManager::_reload_inlock(){
+ void ChunkManager::_reload_inlock() {
int tries = 3;
- while (tries--){
+ while (tries--) {
_chunkMap.clear();
_chunkRanges.clear();
_shards.clear();
_load();
- if (_isValid()){
+ if (_isValid()) {
_chunkRanges.reloadAll(_chunkMap);
                // The shard versioning mechanism hinges on keeping track of the number of times we reloaded ChunkManagers.
// Increasing this number here will prompt checkShardVersion() to refresh the connection-level versions to
// the most up to date value.
- _sequenceNumber = ++NextSequenceNumber;
+ _sequenceNumber = ++NextSequenceNumber;
return;
}
- if (_chunkMap.size() < 10){
+ if (_chunkMap.size() < 10) {
_printChunks();
}
@@ -555,22 +557,22 @@ namespace mongo {
}
msgasserted(13282, "Couldn't load a valid config for " + _ns + " after 3 attempts. Please try again.");
-
+
}
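
[editor's note] Two things are worth noting in _reload_inlock: transient inconsistencies (e.g. a split landing mid-read) are handled by simply retrying the whole load up to three times, and every successful load bumps a process-wide sequence number that checkShardVersion() later compares against. A toy rendering of the pattern — ToyManager and the simulated failures are inventions for illustration:

    #include <stdexcept>

    struct ToyManager {
        static unsigned long long NextSequenceNumber;
        unsigned long long sequenceNumber;
        int transientFailures;                 // simulate loads that race with a split
        ToyManager() : sequenceNumber( 0 ) , transientFailures( 2 ) {}

        bool load() { return transientFailures-- <= 0; }

        void reload() {
            int tries = 3;
            while ( tries-- ) {
                if ( load() ) {
                    // a new number here prompts stale connections to refresh their version
                    sequenceNumber = ++NextSequenceNumber;
                    return;
                }
            }
            throw std::runtime_error( "couldn't load a valid config after 3 attempts" );
        }
    };
    unsigned long long ToyManager::NextSequenceNumber = 1;
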
- void ChunkManager::_load(){
+ void ChunkManager::_load() {
ScopedDbConnection conn( configServer.modelServer() );
// TODO really need the sort?
auto_ptr<DBClientCursor> cursor = conn->query( Chunk::chunkMetadataNS, QUERY("ns" << _ns).sort("lastmod",1), 0, 0, 0, 0,
- (DEBUG_BUILD ? 2 : 1000000)); // batch size. Try to induce potential race conditions in debug builds
+ (DEBUG_BUILD ? 2 : 1000000)); // batch size. Try to induce potential race conditions in debug builds
assert( cursor.get() );
- while ( cursor->more() ){
+ while ( cursor->more() ) {
BSONObj d = cursor->next();
- if ( d["isMaxMarker"].trueValue() ){
+ if ( d["isMaxMarker"].trueValue() ) {
continue;
}
-
+
ChunkPtr c( new Chunk( this ) );
c->unserialize( d );
@@ -592,10 +594,10 @@ namespace mongo {
ENSURE(allOfType(MaxKey, prior(_chunkMap.end())->second->getMax()));
// Make sure there are no gaps or overlaps
- for (ChunkMap::const_iterator it=boost::next(_chunkMap.begin()), end=_chunkMap.end(); it != end; ++it){
+ for (ChunkMap::const_iterator it=boost::next(_chunkMap.begin()), end=_chunkMap.end(); it != end; ++it) {
ChunkMap::const_iterator last = prior(it);
- if (!(it->second->getMin() == last->second->getMax())){
+ if (!(it->second->getMin() == last->second->getMax())) {
PRINT(it->second->toString());
PRINT(it->second->getMin());
PRINT(last->second->getMax());
@@ -614,20 +616,20 @@ namespace mongo {
}
}
- bool ChunkManager::hasShardKey( const BSONObj& obj ){
+ bool ChunkManager::hasShardKey( const BSONObj& obj ) {
return _key.hasShardKey( obj );
}
- void ChunkManager::createFirstChunk( const Shard& shard ){
+ void ChunkManager::createFirstChunk( const Shard& shard ) {
assert( _chunkMap.size() == 0 );
ChunkPtr c( new Chunk(this, _key.globalMin(), _key.globalMax(), shard ) );
-
+
// this is the first chunk; start the versioning from scratch
ShardChunkVersion version;
version.incMajor();
- // build update for the chunk collection
+ // build update for the chunk collection
BSONObjBuilder chunkBuilder;
c->serialize( chunkBuilder , version );
BSONObj chunkCmd = chunkBuilder.obj();
@@ -639,7 +641,7 @@ namespace mongo {
conn->update( Chunk::chunkMetadataNS, QUERY( "_id" << c->genID() ), chunkCmd, true, false );
string errmsg = conn->getLastError();
- if ( errmsg.size() ){
+ if ( errmsg.size() ) {
stringstream ss;
ss << "saving first chunk failed. cmd: " << chunkCmd << " result: " << errmsg;
log( LL_ERROR ) << ss.str() << endl;
@@ -654,61 +656,61 @@ namespace mongo {
_sequenceNumber = ++NextSequenceNumber;
_chunkMap[c->getMax()] = c;
- _chunkRanges.reloadAll(_chunkMap);
+ _chunkRanges.reloadAll(_chunkMap);
_shards.insert(c->getShard());
c->setLastmod(version);
- // the ensure index will have the (desired) indirect effect of creating the collection on the
+ // the ensure index will have the (desired) indirect effect of creating the collection on the
// assigned shard, as it sets up the index over the sharding keys.
ensureIndex_inlock();
log() << "successfully created first chunk for " << c->toString() << endl;
}
- ChunkPtr ChunkManager::findChunk( const BSONObj & obj , bool retry ){
+ ChunkPtr ChunkManager::findChunk( const BSONObj & obj , bool retry ) {
BSONObj key = _key.extractKey(obj);
-
+
{
- rwlock lk( _lock , false );
-
+ rwlock lk( _lock , false );
+
BSONObj foo;
ChunkPtr c;
{
ChunkMap::iterator it = _chunkMap.upper_bound(key);
- if (it != _chunkMap.end()){
+ if (it != _chunkMap.end()) {
foo = it->first;
c = it->second;
}
}
-
- if ( c ){
+
+ if ( c ) {
if ( c->contains( obj ) )
return c;
-
+
PRINT(foo);
PRINT(*c);
PRINT(key);
-
+
_reload_inlock();
massert(13141, "Chunk map pointed to incorrect chunk", false);
}
}
- if ( retry ){
+ if ( retry ) {
stringstream ss;
ss << "couldn't find a chunk aftry retry which should be impossible extracted: " << key;
throw UserException( 8070 , ss.str() );
}
-
+
log() << "ChunkManager: couldn't find chunk for: " << key << " going to retry" << endl;
_reload_inlock();
return findChunk( obj , true );
}
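
[editor's note] findChunk leans on how the map is keyed: _chunkMap maps each chunk's max bound to the chunk, and chunks own half-open [min, max) ranges, so upper_bound(key) — the first entry whose max compares strictly greater than key — is exactly the owning chunk. With integer stand-ins for BSON keys:

    #include <cassert>
    #include <map>
    #include <string>

    int main() {
        std::map<int,std::string> chunkByMax;  // key: chunk's max bound
        chunkByMax[10]   = "chunk [minKey,10)";
        chunkByMax[100]  = "chunk [10,100)";
        chunkByMax[1000] = "chunk [100,1000)";

        // 10 is excluded from [minKey,10) but included in [10,100),
        // which is what upper_bound (strictly greater) gives us:
        assert( chunkByMax.upper_bound( 10 )->second == "chunk [10,100)" );
        assert( chunkByMax.upper_bound( 9 )->second == "chunk [minKey,10)" );
        assert( chunkByMax.upper_bound( 999 )->second == "chunk [100,1000)" );
        return 0;
    }
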
ChunkPtr ChunkManager::findChunkOnServer( const Shard& shard ) const {
- rwlock lk( _lock , false );
-
- for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ){
+ rwlock lk( _lock , false );
+
+ for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ) {
ChunkPtr c = i->second;
if ( c->getShard() == shard )
return c;
@@ -717,8 +719,8 @@ namespace mongo {
return ChunkPtr();
}
- void ChunkManager::getShardsForQuery( set<Shard>& shards , const BSONObj& query ){
- rwlock lk( _lock , false );
+ void ChunkManager::getShardsForQuery( set<Shard>& shards , const BSONObj& query ) {
+ rwlock lk( _lock , false );
DEV PRINT(query);
//TODO look into FieldRangeSetOr
@@ -726,14 +728,15 @@ namespace mongo {
const string special = fros.getSpecial();
if (special == "2d") {
- BSONForEach(field, query){
+ BSONForEach(field, query) {
if (getGtLtOp(field) == BSONObj::opNEAR) {
uassert(13501, "use geoNear command rather than $near query", false);
// TODO: convert to geoNear rather than erroring out
}
// $within queries are fine
}
- } else if (!special.empty()){
+ }
+ else if (!special.empty()) {
uassert(13502, "unrecognized special query type: " + special, false);
}
@@ -742,7 +745,7 @@ namespace mongo {
{
// special case if most-significant field isn't in query
FieldRange range = frs->range(_key.key().firstElement().fieldName());
- if ( !range.nontrivial() ){
+ if ( !range.nontrivial() ) {
DEV PRINT(range.nontrivial());
getAllShards(shards);
return;
@@ -750,7 +753,7 @@ namespace mongo {
}
BoundList ranges = frs->indexBounds(_key.key(), 1);
- for (BoundList::const_iterator it=ranges.begin(), end=ranges.end(); it != end; ++it){
+ for (BoundList::const_iterator it=ranges.begin(), end=ranges.end(); it != end; ++it) {
BSONObj minObj = it->first.replaceFieldNames(_key.key());
BSONObj maxObj = it->second.replaceFieldNames(_key.key());
@@ -760,36 +763,37 @@ namespace mongo {
ChunkRangeMap::const_iterator min, max;
min = _chunkRanges.upper_bound(minObj);
max = _chunkRanges.upper_bound(maxObj);
-
+
massert( 13507 , str::stream() << "invalid chunk config minObj: " << minObj , min != _chunkRanges.ranges().end());
// make max non-inclusive like end iterators
if(max != _chunkRanges.ranges().end())
++max;
- for (ChunkRangeMap::const_iterator it=min; it != max; ++it){
+ for (ChunkRangeMap::const_iterator it=min; it != max; ++it) {
shards.insert(it->second->getShard());
}
// once we know we need to visit all shards no need to keep looping
//if (shards.size() == _shards.size())
- //return;
+ //return;
}
if (fros.moreOrClauses())
fros.popOrClause();
- } while (fros.moreOrClauses());
+ }
+ while (fros.moreOrClauses());
}
- void ChunkManager::getShardsForRange(set<Shard>& shards, const BSONObj& min, const BSONObj& max){
+ void ChunkManager::getShardsForRange(set<Shard>& shards, const BSONObj& min, const BSONObj& max) {
uassert(13405, "min must have shard key", hasShardKey(min));
uassert(13406, "max must have shard key", hasShardKey(max));
ChunkRangeMap::const_iterator it = _chunkRanges.upper_bound(min);
ChunkRangeMap::const_iterator end = _chunkRanges.lower_bound(max);
- for (; it!=end; ++ it){
+ for (; it!=end; ++ it) {
shards.insert(it->second->getShard());
// once we know we need to visit all shards no need to keep looping
@@ -798,63 +802,63 @@ namespace mongo {
}
}
- void ChunkManager::getAllShards( set<Shard>& all ){
- rwlock lk( _lock , false );
+ void ChunkManager::getAllShards( set<Shard>& all ) {
+ rwlock lk( _lock , false );
all.insert(_shards.begin(), _shards.end());
}
-
- void ChunkManager::ensureIndex_inlock(){
+
+ void ChunkManager::ensureIndex_inlock() {
//TODO in parallel?
- for ( set<Shard>::const_iterator i=_shards.begin(); i!=_shards.end(); ++i ){
+ for ( set<Shard>::const_iterator i=_shards.begin(); i!=_shards.end(); ++i ) {
ScopedDbConnection conn( i->getConnString() );
conn->ensureIndex( getns() , getShardKey().key() , _unique , "" , false /* do not cache ensureIndex SERVER-1691 */ );
conn.done();
}
}
-
- void ChunkManager::drop( ChunkManagerPtr me ){
- rwlock lk( _lock , true );
+
+ void ChunkManager::drop( ChunkManagerPtr me ) {
+ rwlock lk( _lock , true );
configServer.logChange( "dropCollection.start" , _ns , BSONObj() );
-
+
dist_lock_try dlk( &_nsLock , "drop" );
uassert( 13331 , "collection's metadata is undergoing changes. Please try again." , dlk.got() );
-
+
uassert( 10174 , "config servers not all up" , configServer.allUp() );
-
+
set<Shard> seen;
-
+
log(1) << "ChunkManager::drop : " << _ns << endl;
// lock all shards so no one can do a split/migrate
- for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ){
+ for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ) {
ChunkPtr c = i->second;
seen.insert( c->getShard() );
}
-
- log(1) << "ChunkManager::drop : " << _ns << "\t all locked" << endl;
+
+ log(1) << "ChunkManager::drop : " << _ns << "\t all locked" << endl;
// wipe my meta-data
_chunkMap.clear();
_chunkRanges.clear();
_shards.clear();
-
+
// delete data from mongod
- for ( set<Shard>::iterator i=seen.begin(); i!=seen.end(); i++ ){
+ for ( set<Shard>::iterator i=seen.begin(); i!=seen.end(); i++ ) {
ScopedDbConnection conn( *i );
conn->dropCollection( _ns );
conn.done();
}
-
- log(1) << "ChunkManager::drop : " << _ns << "\t removed shard data" << endl;
+
+ log(1) << "ChunkManager::drop : " << _ns << "\t removed shard data" << endl;
// remove chunk data
ScopedDbConnection conn( configServer.modelServer() );
conn->remove( Chunk::chunkMetadataNS , BSON( "ns" << _ns ) );
conn.done();
- log(1) << "ChunkManager::drop : " << _ns << "\t removed chunk data" << endl;
-
- for ( set<Shard>::iterator i=seen.begin(); i!=seen.end(); i++ ){
+ log(1) << "ChunkManager::drop : " << _ns << "\t removed chunk data" << endl;
+
+ for ( set<Shard>::iterator i=seen.begin(); i!=seen.end(); i++ ) {
ScopedDbConnection conn( *i );
BSONObj res;
if ( ! setShardVersion( conn.conn() , _ns , 0 , true , res ) )
@@ -862,17 +866,17 @@ namespace mongo {
conn.done();
}
- log(1) << "ChunkManager::drop : " << _ns << "\t DONE" << endl;
+ log(1) << "ChunkManager::drop : " << _ns << "\t DONE" << endl;
configServer.logChange( "dropCollection" , _ns , BSONObj() );
}
-
+
void ChunkManager::maybeChunkCollection() {
        uassert( 13346 , "can't pre-split an already split collection" , (_chunkMap.size() == 1) );
ChunkPtr soleChunk = _chunkMap.begin()->second;
vector<BSONObj> splitPoints;
soleChunk->pickSplitVector( splitPoints , Chunk::MaxChunkSize );
- if ( splitPoints.empty() ){
+ if ( splitPoints.empty() ) {
log(1) << "not enough data to warrant chunking " << getns() << endl;
return;
}
@@ -886,77 +890,77 @@ namespace mongo {
}
}
- ShardChunkVersion ChunkManager::getVersion( const Shard& shard ) const{
- rwlock lk( _lock , false );
+ ShardChunkVersion ChunkManager::getVersion( const Shard& shard ) const {
+ rwlock lk( _lock , false );
// TODO: cache or something?
-
+
ShardChunkVersion max = 0;
- for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ){
+ for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ) {
ChunkPtr c = i->second;
DEV assert( c );
if ( c->getShard() != shard )
continue;
if ( c->getLastmod() > max )
max = c->getLastmod();
- }
+ }
return max;
}
- ShardChunkVersion ChunkManager::getVersion() const{
- rwlock lk( _lock , false );
+ ShardChunkVersion ChunkManager::getVersion() const {
+ rwlock lk( _lock , false );
ShardChunkVersion max = 0;
-
- for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ){
+
+ for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ) {
ChunkPtr c = i->second;
if ( c->getLastmod() > max )
max = c->getLastmod();
- }
+ }
return max;
}
string ChunkManager::toString() const {
- rwlock lk( _lock , false );
+ rwlock lk( _lock , false );
stringstream ss;
ss << "ChunkManager: " << _ns << " key:" << _key.toString() << '\n';
- for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ){
+ for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ) {
const ChunkPtr c = i->second;
ss << "\t" << c->toString() << '\n';
}
return ss.str();
}
-
- void ChunkRangeManager::assertValid() const{
+
+ void ChunkRangeManager::assertValid() const {
if (_ranges.empty())
return;
try {
// No Nulls
- for (ChunkRangeMap::const_iterator it=_ranges.begin(), end=_ranges.end(); it != end; ++it){
+ for (ChunkRangeMap::const_iterator it=_ranges.begin(), end=_ranges.end(); it != end; ++it) {
assert(it->second);
}
-
+
// Check endpoints
assert(allOfType(MinKey, _ranges.begin()->second->getMin()));
assert(allOfType(MaxKey, prior(_ranges.end())->second->getMax()));
// Make sure there are no gaps or overlaps
- for (ChunkRangeMap::const_iterator it=boost::next(_ranges.begin()), end=_ranges.end(); it != end; ++it){
+ for (ChunkRangeMap::const_iterator it=boost::next(_ranges.begin()), end=_ranges.end(); it != end; ++it) {
ChunkRangeMap::const_iterator last = prior(it);
assert(it->second->getMin() == last->second->getMax());
}
// Check Map keys
- for (ChunkRangeMap::const_iterator it=_ranges.begin(), end=_ranges.end(); it != end; ++it){
+ for (ChunkRangeMap::const_iterator it=_ranges.begin(), end=_ranges.end(); it != end; ++it) {
assert(it->first == it->second->getMax());
}
// Make sure we match the original chunks
const ChunkMap chunks = _ranges.begin()->second->getManager()->_chunkMap;
- for ( ChunkMap::const_iterator i=chunks.begin(); i!=chunks.end(); ++i ){
+ for ( ChunkMap::const_iterator i=chunks.begin(); i!=chunks.end(); ++i ) {
const ChunkPtr chunk = i->second;
ChunkRangeMap::const_iterator min = _ranges.upper_bound(chunk->getMin());
@@ -969,8 +973,9 @@ namespace mongo {
assert(min->second->contains( chunk->getMin() ));
assert(min->second->contains( chunk->getMax() ) || (min->second->getMax() == chunk->getMax()));
}
-
- } catch (...) {
+
+ }
+ catch (...) {
log( LL_ERROR ) << "\t invalid ChunkRangeMap! printing ranges:" << endl;
for (ChunkRangeMap::const_iterator it=_ranges.begin(), end=_ranges.end(); it != end; ++it)
@@ -980,15 +985,15 @@ namespace mongo {
}
}
- void ChunkRangeManager::reloadRange(const ChunkMap& chunks, const BSONObj& min, const BSONObj& max){
- if (_ranges.empty()){
+ void ChunkRangeManager::reloadRange(const ChunkMap& chunks, const BSONObj& min, const BSONObj& max) {
+ if (_ranges.empty()) {
reloadAll(chunks);
return;
}
-
+
ChunkRangeMap::iterator low = _ranges.upper_bound(min);
ChunkRangeMap::iterator high = _ranges.lower_bound(max);
-
+
assert(low != _ranges.end());
assert(high != _ranges.end());
assert(low->second);
@@ -1014,10 +1019,10 @@ namespace mongo {
// merge low-end if possible
low = _ranges.upper_bound(min);
assert(low != _ranges.end());
- if (low != _ranges.begin()){
+ if (low != _ranges.begin()) {
shared_ptr<ChunkRange> a = prior(low)->second;
shared_ptr<ChunkRange> b = low->second;
- if (a->getShard() == b->getShard()){
+ if (a->getShard() == b->getShard()) {
shared_ptr<ChunkRange> cr (new ChunkRange(*a, *b));
_ranges.erase(prior(low));
_ranges.erase(low); // invalidates low
@@ -1029,10 +1034,10 @@ namespace mongo {
// merge high-end if possible
high = _ranges.lower_bound(max);
- if (high != prior(_ranges.end())){
+ if (high != prior(_ranges.end())) {
shared_ptr<ChunkRange> a = high->second;
shared_ptr<ChunkRange> b = boost::next(high)->second;
- if (a->getShard() == b->getShard()){
+ if (a->getShard() == b->getShard()) {
shared_ptr<ChunkRange> cr (new ChunkRange(*a, *b));
_ranges.erase(boost::next(high));
_ranges.erase(high); //invalidates high
@@ -1043,15 +1048,15 @@ namespace mongo {
DEV assertValid();
}
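
[editor's note] The two merge passes above keep the range map minimal: whenever neighboring ranges sit on the same shard they collapse into one entry, so query routing walks fewer map nodes. The invariants the ChunkRange merge constructor asserts can be shown with a toy version over integers (names invented for the sketch):

    #include <cassert>

    struct ToyRange {
        int min , max;   // half-open [min, max)
        int shard;
    };

    ToyRange mergeNeighbors( const ToyRange& lo , const ToyRange& hi ) {
        assert( lo.shard == hi.shard );  // only same-shard neighbors may merge
        assert( lo.max == hi.min );      // must abut: no gap, no overlap
        ToyRange merged = { lo.min , hi.max , lo.shard };
        return merged;
    }
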
- void ChunkRangeManager::reloadAll(const ChunkMap& chunks){
+ void ChunkRangeManager::reloadAll(const ChunkMap& chunks) {
_ranges.clear();
_insertRange(chunks.begin(), chunks.end());
DEV assertValid();
}
- void ChunkRangeManager::_insertRange(ChunkMap::const_iterator begin, const ChunkMap::const_iterator end){
- while (begin != end){
+ void ChunkRangeManager::_insertRange(ChunkMap::const_iterator begin, const ChunkMap::const_iterator end) {
+ while (begin != end) {
ChunkMap::const_iterator first = begin;
Shard shard = first->second->getShard();
while (begin != end && (begin->second->getShard() == shard))
@@ -1067,44 +1072,44 @@ namespace mongo {
const int minChunkSize = 1 << 20; // 1 MBytes
int splitThreshold = Chunk::MaxChunkSize;
-
+
int nc = numChunks();
-
- if ( nc < 10 ){
+
+ if ( nc < 10 ) {
splitThreshold = max( splitThreshold / 4 , minChunkSize );
- }
- else if ( nc < 20 ){
+ }
+ else if ( nc < 20 ) {
splitThreshold = max( splitThreshold / 2 , minChunkSize );
}
-
+
return splitThreshold;
}
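
[editor's note] Concretely, with Chunk::MaxChunkSize at its usual 64MB (an assumption here; the constant is set elsewhere), the staircase above yields a 16MB threshold below 10 chunks, 32MB below 20, and the full 64MB after that, never dipping under the 1MB floor. Restated on its own:

    #include <algorithm>

    int desiredChunkSize( int maxChunkSize , int numChunks ) {
        const int minChunkSize = 1 << 20;                       // 1MB floor
        if ( numChunks < 10 )
            return std::max( maxChunkSize / 4 , minChunkSize ); // split young collections early
        if ( numChunks < 20 )
            return std::max( maxChunkSize / 2 , minChunkSize );
        return maxChunkSize;
    }
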
-
+
class ChunkObjUnitTest : public UnitTest {
public:
- void runShard(){
+ void runShard() {
ChunkPtr c;
assert( ! c );
c.reset( new Chunk( 0 ) );
assert( c );
}
-
- void runShardChunkVersion(){
+
+ void runShardChunkVersion() {
vector<ShardChunkVersion> all;
all.push_back( ShardChunkVersion(1,1) );
all.push_back( ShardChunkVersion(1,2) );
all.push_back( ShardChunkVersion(2,1) );
all.push_back( ShardChunkVersion(2,2) );
-
- for ( unsigned i=0; i<all.size(); i++ ){
- for ( unsigned j=i+1; j<all.size(); j++ ){
+
+ for ( unsigned i=0; i<all.size(); i++ ) {
+ for ( unsigned j=i+1; j<all.size(); j++ ) {
assert( all[i] < all[j] );
}
}
}
- void run(){
+ void run() {
runShard();
runShardChunkVersion();
log(1) << "shardObjTest passed" << endl;
@@ -1118,7 +1123,7 @@ namespace mongo {
// NOTE (careful when deprecating)
    // currently, when sharding is enabled because of a write or read (as opposed to a split or migrate), the shard learns
    // its name through the 'setShardVersion' command call
- bool setShardVersion( DBClientBase & conn , const string& ns , ShardChunkVersion version , bool authoritative , BSONObj& result ){
+ bool setShardVersion( DBClientBase & conn , const string& ns , ShardChunkVersion version , bool authoritative , BSONObj& result ) {
BSONObjBuilder cmdBuilder;
cmdBuilder.append( "setShardVersion" , ns.c_str() );
cmdBuilder.append( "configdb" , configServer.modelServer() );
@@ -1131,9 +1136,9 @@ namespace mongo {
cmdBuilder.append( "shard" , s.getName() );
cmdBuilder.append( "shardHost" , s.getConnString() );
BSONObj cmd = cmdBuilder.obj();
-
+
log(1) << " setShardVersion " << s.getName() << " " << conn.getServerAddress() << " " << ns << " " << cmd << " " << &conn << endl;
-
+
return conn.runCommand( "admin" , cmd , result );
}
diff --git a/s/chunk.h b/s/chunk.h
index 57839f7f04e..0001fec1ff1 100644
--- a/s/chunk.h
+++ b/s/chunk.h
@@ -29,7 +29,7 @@
#include "util.h"
namespace mongo {
-
+
class DBConfig;
class Chunk;
class ChunkRange;
@@ -42,16 +42,16 @@ namespace mongo {
// key is max for each Chunk or ChunkRange
typedef map<BSONObj,ChunkPtr,BSONObjCmp> ChunkMap;
typedef map<BSONObj,shared_ptr<ChunkRange>,BSONObjCmp> ChunkRangeMap;
-
+
typedef shared_ptr<ChunkManager> ChunkManagerPtr;
/**
config.chunks
{ ns : "alleyinsider.fs.chunks" , min : {} , max : {} , server : "localhost:30001" }
-
+
x is in a shard iff
min <= x < max
- */
+ */
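
[editor's note] That half-open convention (max exclusive) is what lets adjacent chunks tile the key space without gaps or overlap; as a predicate over toy integer keys (the real check compares BSON keys under the shard key ordering):

    bool chunkContains( int min , int max , int x ) {
        return min <= x && x < max;   // max is exclusive: x == max belongs to the next chunk
    }
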
class Chunk : boost::noncopyable, public boost::enable_shared_from_this<Chunk> {
public:
Chunk( ChunkManager * info );
@@ -63,7 +63,7 @@ namespace mongo {
void serialize(BSONObjBuilder& to, ShardChunkVersion myLastMod=0);
void unserialize(const BSONObj& from);
-
+
//
// chunk boundary support
//
@@ -84,7 +84,7 @@ namespace mongo {
//
// chunk version support
- //
+ //
void appendShortVersion( const char * name , BSONObjBuilder& b );
@@ -100,7 +100,7 @@ namespace mongo {
         * then we check the real size, and if it's too big, we split
*/
bool splitIfShould( long dataWritten );
-
+
/**
         * Splits this chunk at a non-specified split key to be chosen by the mongod holding this chunk.
*
@@ -122,7 +122,7 @@ namespace mongo {
/**
* Asks the mongod holding this chunk to find a key that approximately divides this chunk in two
- *
+ *
* @param medianKey the key that divides this chunk, if there is one, or empty
*/
void pickMedianKey( BSONObj& medianKey ) const;
@@ -137,7 +137,7 @@ namespace mongo {
//
// migration support
- //
+ //
/**
         * moves either this shard or newShard if it makes sense to do so
@@ -164,14 +164,14 @@ namespace mongo {
//
// chunk size support
-
+
int countObjects(int maxcount=0) const;
-
+
//
- // public constants
+ // public constants
//
- static string chunkMetadataNS;
+ static string chunkMetadataNS;
static int MaxChunkSize;
//
@@ -180,10 +180,10 @@ namespace mongo {
string toString() const;
- friend ostream& operator << (ostream& out, const Chunk& c){ return (out << c.toString()); }
+ friend ostream& operator << (ostream& out, const Chunk& c) { return (out << c.toString()); }
bool operator==(const Chunk& s) const;
bool operator!=(const Chunk& s) const { return ! ( *this == s ); }
-
+
string getns() const;
const char * getNS() { return "config.chunks"; }
Shard getShard() const { return _shard; }
@@ -191,7 +191,7 @@ namespace mongo {
private:
// main shard info
-
+
ChunkManager * _manager;
BSONObj _min;
@@ -204,7 +204,7 @@ namespace mongo {
long _dataWritten;
// methods, etc..
-
+
/**
* if sort 1, return lowest key
* if sort -1, return highest key
@@ -218,10 +218,10 @@ namespace mongo {
ShardKeyPattern skey() const;
};
- class ChunkRange{
+ class ChunkRange {
public:
- const ChunkManager* getManager() const{ return _manager; }
- Shard getShard() const{ return _shard; }
+ const ChunkManager* getManager() const { return _manager; }
+ Shard getShard() const { return _shard; }
const BSONObj& getMin() const { return _min; }
const BSONObj& getMax() const { return _max; }
@@ -233,11 +233,10 @@ namespace mongo {
: _manager(begin->second->getManager())
, _shard(begin->second->getShard())
, _min(begin->second->getMin())
- , _max(prior(end)->second->getMax())
- {
+ , _max(prior(end)->second->getMax()) {
assert( begin != end );
- DEV while (begin != end){
+ DEV while (begin != end) {
assert(begin->second->getManager() == _manager);
assert(begin->second->getShard() == _shard);
++begin;
@@ -249,14 +248,13 @@ namespace mongo {
: _manager(min.getManager())
, _shard(min.getShard())
, _min(min.getMin())
- , _max(max.getMax())
- {
+ , _max(max.getMax()) {
assert(min.getShard() == max.getShard());
assert(min.getManager() == max.getManager());
assert(min.getMax() == max.getMin());
}
- friend ostream& operator<<(ostream& out, const ChunkRange& cr){
+ friend ostream& operator<<(ostream& out, const ChunkRange& cr) {
return (out << "ChunkRange(min=" << cr._min << ", max=" << cr._max << ", shard=" << cr._shard <<")");
}
@@ -291,7 +289,7 @@ namespace mongo {
};
/* config.sharding
- { ns: 'alleyinsider.fs.chunks' ,
+ { ns: 'alleyinsider.fs.chunks' ,
key: { ts : 1 } ,
shards: [ { min: 1, max: 100, server: a } , { min: 101, max: 200 , server : b } ]
}
@@ -303,19 +301,19 @@ namespace mongo {
virtual ~ChunkManager();
string getns() const { return _ns; }
-
+
int numChunks() const { rwlock lk( _lock , false ); return _chunkMap.size(); }
bool hasShardKey( const BSONObj& obj );
void createFirstChunk( const Shard& shard );
ChunkPtr findChunk( const BSONObj& obj , bool retry = false );
ChunkPtr findChunkOnServer( const Shard& shard ) const;
-
+
const ShardKeyPattern& getShardKey() const { return _key; }
bool isUnique() const { return _unique; }
void maybeChunkCollection();
-
+
void getShardsForQuery( set<Shard>& shards , const BSONObj& query );
void getAllShards( set<Shard>& all );
void getShardsForRange(set<Shard>& shards, const BSONObj& min, const BSONObj& max); // [min, max)
@@ -329,39 +327,39 @@ namespace mongo {
* this is just an increasing number of how many ChunkManagers we have so we know if something has been updated
*/
unsigned long long getSequenceNumber() const { return _sequenceNumber; }
-
- void getInfo( BSONObjBuilder& b ){
+
+ void getInfo( BSONObjBuilder& b ) {
b.append( "key" , _key.key() );
b.appendBool( "unique" , _unique );
}
-
+
/**
         * @param me - so I don't get deleted before I'm done
*/
void drop( ChunkManagerPtr me );
void _printChunks() const;
-
+
int getCurrentDesiredChunkSize() const;
- private:
+ private:
void _reload();
void _reload_inlock();
void _load();
void ensureIndex_inlock();
-
+
string _ns;
ShardKeyPattern _key;
bool _unique;
-
+
ChunkMap _chunkMap;
ChunkRangeManager _chunkRanges;
set<Shard> _shards;
unsigned long long _sequenceNumber;
-
+
mutable RWLock _lock;
DistributedLock _nsLock;
@@ -398,9 +396,9 @@ namespace mongo {
/*
struct chunk_lock {
chunk_lock( const Chunk* c ){
-
+
}
-
+
Chunk _c;
};
*/
diff --git a/s/client.cpp b/s/client.cpp
index 2e803afbbe0..c1c3da8ea7e 100644
--- a/s/client.cpp
+++ b/s/client.cpp
@@ -35,68 +35,68 @@
#include "s/writeback_listener.h"
namespace mongo {
-
- ClientInfo::ClientInfo( int clientId ) : _id( clientId ){
+
+ ClientInfo::ClientInfo( int clientId ) : _id( clientId ) {
_cur = &_a;
_prev = &_b;
newRequest();
}
-
- ClientInfo::~ClientInfo(){
- if ( _lastAccess ){
+
+ ClientInfo::~ClientInfo() {
+ if ( _lastAccess ) {
scoped_lock lk( _clientsLock );
Cache::iterator i = _clients.find( _id );
- if ( i != _clients.end() ){
+ if ( i != _clients.end() ) {
_clients.erase( i );
}
}
}
-
- void ClientInfo::addShard( const string& shard ){
+
+ void ClientInfo::addShard( const string& shard ) {
_cur->insert( shard );
_sinceLastGetError.insert( shard );
}
-
- void ClientInfo::newRequest( AbstractMessagingPort* p ){
- if ( p ){
+ void ClientInfo::newRequest( AbstractMessagingPort* p ) {
+
+ if ( p ) {
string r = p->remote().toString();
if ( _remote == "" )
_remote = r;
- else if ( _remote != r ){
+ else if ( _remote != r ) {
stringstream ss;
ss << "remotes don't match old [" << _remote << "] new [" << r << "]";
throw UserException( 13134 , ss.str() );
}
}
-
+
_lastAccess = (int) time(0);
-
+
set<string> * temp = _cur;
_cur = _prev;
_prev = temp;
_cur->clear();
}
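
[editor's note] The pointer swap above is a small double-buffering trick: shards touched by the request now starting get recorded in a fresh set while the previous request's set stays intact, which is exactly what a trailing getLastError needs to know where the last operation went. A stripped-down version (ToyClientInfo is an invented name):

    #include <set>
    #include <string>

    struct ToyClientInfo {
        std::set<std::string> a , b;
        std::set<std::string> * cur;   // shards used by the current request
        std::set<std::string> * prev;  // shards used by the previous one

        ToyClientInfo() : cur( &a ) , prev( &b ) {}

        void newRequest() {
            std::set<std::string> * temp = cur;  // flip: current becomes previous
            cur = prev;
            prev = temp;
            cur->clear();                        // start clean for the new request
        }

        void addShard( const std::string& shard ) { cur->insert( shard ); }
    };
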
-
- void ClientInfo::disconnect(){
+
+ void ClientInfo::disconnect() {
_lastAccess = 0;
}
-
- ClientInfo * ClientInfo::get( int clientId , bool create ){
-
+
+ ClientInfo * ClientInfo::get( int clientId , bool create ) {
+
if ( ! clientId )
clientId = getClientId();
-
- if ( ! clientId ){
+
+ if ( ! clientId ) {
ClientInfo * info = _tlInfo.get();
- if ( ! info ){
+ if ( ! info ) {
info = new ClientInfo( 0 );
_tlInfo.reset( info );
}
info->newRequest();
return info;
}
-
+
scoped_lock lk( _clientsLock );
Cache::iterator i = _clients.find( clientId );
if ( i != _clients.end() )
@@ -107,8 +107,8 @@ namespace mongo {
_clients[clientId] = info;
return info;
}
-
- void ClientInfo::disconnect( int clientId ){
+
+ void ClientInfo::disconnect( int clientId ) {
if ( ! clientId )
return;
@@ -123,46 +123,46 @@ namespace mongo {
_clients.erase( i );
}
- void ClientInfo::_addWriteBack( vector<WBInfo>& all , const BSONObj& o ){
+ void ClientInfo::_addWriteBack( vector<WBInfo>& all , const BSONObj& o ) {
BSONElement w = o["writeback"];
-
+
if ( w.type() != jstOID )
return;
-
+
BSONElement cid = o["connectionId"];
cout << "ELIOT : " << cid << endl;
-
- if ( cid.eoo() ){
+
+ if ( cid.eoo() ) {
error() << "getLastError writeback can't work because of version mis-match" << endl;
return;
}
-
+
all.push_back( WBInfo( cid.numberLong() , w.OID() ) );
}
-
- void ClientInfo::_handleWriteBacks( vector<WBInfo>& all ){
+
+ void ClientInfo::_handleWriteBacks( vector<WBInfo>& all ) {
if ( all.size() == 0 )
return;
-
- for ( unsigned i=0; i<all.size(); i++ ){
+
+ for ( unsigned i=0; i<all.size(); i++ ) {
WriteBackListener::waitFor( all[i].connectionId , all[i].id );
}
}
-
-
- bool ClientInfo::getLastError( const BSONObj& options , BSONObjBuilder& result ){
+
+
+ bool ClientInfo::getLastError( const BSONObj& options , BSONObjBuilder& result ) {
set<string> * shards = getPrev();
-
- if ( shards->size() == 0 ){
+
+ if ( shards->size() == 0 ) {
result.appendNull( "err" );
return true;
}
-
+
vector<WBInfo> writebacks;
-
+
// handle single server
- if ( shards->size() == 1 ){
+ if ( shards->size() == 1 ) {
string theShard = *(shards->begin() );
result.append( "theshard" , theShard.c_str() );
ShardConnection conn( theShard , "" );
@@ -173,13 +173,13 @@ namespace mongo {
conn.done();
result.append( "singleShard" , theShard );
_addWriteBack( writebacks , res );
-
+
// hit other machines just to block
- for ( set<string>::const_iterator i=sinceLastGetError().begin(); i!=sinceLastGetError().end(); ++i ){
+ for ( set<string>::const_iterator i=sinceLastGetError().begin(); i!=sinceLastGetError().end(); ++i ) {
string temp = *i;
if ( temp == theShard )
continue;
-
+
ShardConnection conn( temp , "" );
_addWriteBack( writebacks , conn->getLastErrorDetailed() );
conn.done();
@@ -188,15 +188,15 @@ namespace mongo {
_handleWriteBacks( writebacks );
return ok;
}
-
+
BSONArrayBuilder bbb( result.subarrayStart( "shards" ) );
-
+
long long n = 0;
-
+
// hit each shard
vector<string> errors;
vector<BSONObj> errorObjects;
- for ( set<string>::iterator i = shards->begin(); i != shards->end(); i++ ){
+ for ( set<string>::iterator i = shards->begin(); i != shards->end(); i++ ) {
string theShard = *i;
bbb.append( theShard );
ShardConnection conn( theShard , "" );
@@ -204,49 +204,51 @@ namespace mongo {
bool ok = conn->runCommand( "admin" , options , res );
_addWriteBack( writebacks, res );
string temp = DBClientWithCommands::getLastErrorString( res );
- if ( ok == false || temp.size() ){
+ if ( ok == false || temp.size() ) {
errors.push_back( temp );
errorObjects.push_back( res );
}
n += res["n"].numberLong();
conn.done();
}
-
+
bbb.done();
-
+
result.appendNumber( "n" , n );
// hit other machines just to block
- for ( set<string>::const_iterator i=sinceLastGetError().begin(); i!=sinceLastGetError().end(); ++i ){
+ for ( set<string>::const_iterator i=sinceLastGetError().begin(); i!=sinceLastGetError().end(); ++i ) {
string temp = *i;
if ( shards->count( temp ) )
continue;
-
+
ShardConnection conn( temp , "" );
_addWriteBack( writebacks, conn->getLastErrorDetailed() );
conn.done();
}
clearSinceLastGetError();
-
- if ( errors.size() == 0 ){
+
+ if ( errors.size() == 0 ) {
result.appendNull( "err" );
_handleWriteBacks( writebacks );
return true;
}
-
+
result.append( "err" , errors[0].c_str() );
-
- { // errs
+
+ {
+ // errs
BSONArrayBuilder all( result.subarrayStart( "errs" ) );
- for ( unsigned i=0; i<errors.size(); i++ ){
+ for ( unsigned i=0; i<errors.size(); i++ ) {
all.append( errors[i].c_str() );
}
all.done();
}
- { // errObjects
+ {
+ // errObjects
BSONArrayBuilder all( result.subarrayStart( "errObjects" ) );
- for ( unsigned i=0; i<errorObjects.size(); i++ ){
+ for ( unsigned i=0; i<errorObjects.size(); i++ ) {
all.append( errorObjects[i] );
}
all.done();
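
[editor's note] Summarized, the multi-shard branch of getLastError folds the per-shard replies into one: n is summed, the first non-empty error string becomes the top-level err, and every error (plus its full reply object) is still preserved in the errs/errObjects arrays. A sketch of that fold over plain structs — illustrative only, not the real types:

    #include <string>
    #include <vector>

    struct ShardReply { std::string err; long long n; };

    struct CombinedGLE {
        std::string err;                 // first error seen, empty if none
        long long n;
        std::vector<std::string> errs;   // all errors, in shard order
    };

    CombinedGLE combine( const std::vector<ShardReply>& replies ) {
        CombinedGLE out;
        out.n = 0;
        for ( size_t i = 0; i < replies.size(); i++ ) {
            out.n += replies[i].n;
            if ( replies[i].err.size() ) {
                if ( out.err.empty() )
                    out.err = replies[i].err;  // first error wins the top-level slot
                out.errs.push_back( replies[i].err );
            }
        }
        return out;
    }
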
diff --git a/s/client.h b/s/client.h
index 050661b1876..9570bb05131 100644
--- a/s/client.h
+++ b/s/client.h
@@ -19,7 +19,7 @@
#include "../pch.h"
namespace mongo {
-
+
/**
* holds information about a client connected to a mongos
* 1 per client socket
@@ -32,20 +32,20 @@ namespace mongo {
public:
ClientInfo( int clientId );
~ClientInfo();
-
+
/** new request from client, adjusts internal state */
void newRequest( AbstractMessagingPort* p = 0 );
/** client disconnected */
void disconnect();
-
+
/**
* @return remote socket address of the client
*/
string getRemote() const { return _remote; }
-
+
/**
- * notes that this client use this shard
+         * notes that this client uses this shard
* keeps track of all shards accessed this request
*/
void addShard( const string& shard );
@@ -54,23 +54,23 @@ namespace mongo {
* gets shards used on the previous request
*/
set<string> * getPrev() const { return _prev; };
-
+
/**
* gets all shards we've accessed since the last time we called clearSinceLastGetError
*/
const set<string>& sinceLastGetError() const { return _sinceLastGetError; }
-
+
/**
* clears list of shards we've talked to
*/
- void clearSinceLastGetError(){ _sinceLastGetError.clear(); }
-
+ void clearSinceLastGetError() { _sinceLastGetError.clear(); }
+
/**
- * calls getLastError
+ * calls getLastError
* resets shards since get last error
*/
bool getLastError( const BSONObj& options , BSONObjBuilder& result );
-
+
static ClientInfo * get( int clientId = 0 , bool create = true );
static void disconnect( int clientId );
@@ -78,7 +78,7 @@ namespace mongo {
private:
struct WBInfo {
- WBInfo( ConnectionId c , OID o ) : connectionId( c ) , id( o ){}
+ WBInfo( ConnectionId c , OID o ) : connectionId( c ) , id( o ) {}
ConnectionId connectionId;
OID id;
};
@@ -87,10 +87,10 @@ namespace mongo {
void _addWriteBack( vector<WBInfo>& all , const BSONObj& o );
void _handleWriteBacks( vector<WBInfo>& all );
-
+
int _id; // unique client id
string _remote; // server:port of remote socket end
-
+
// we use _a and _b to store shards we've talked to on the current request and the previous
// we use 2 so we can flip for getLastError type operations
@@ -99,12 +99,12 @@ namespace mongo {
set<string> * _cur; // pointer to _a or _b depending on state
set<string> * _prev; // ""
-
-
+
+
set<string> _sinceLastGetError; // all shards accessed since last getLastError
-
+
int _lastAccess;
-
+
static mongo::mutex _clientsLock;
static Cache& _clients;
diff --git a/s/commands_admin.cpp b/s/commands_admin.cpp
index 45deee8d9f8..cf81d66c57c 100644
--- a/s/commands_admin.cpp
+++ b/s/commands_admin.cpp
@@ -50,7 +50,7 @@ namespace mongo {
class GridAdminCmd : public Command {
public:
- GridAdminCmd( const char * n ) : Command( n , false, tolowerString(n).c_str() ){
+ GridAdminCmd( const char * n ) : Command( n , false, tolowerString(n).c_str() ) {
}
virtual bool slaveOk() const {
return true;
@@ -60,7 +60,7 @@ namespace mongo {
}
// all grid commands are designed not to lock
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
};
// --------------- misc commands ----------------------
@@ -71,31 +71,31 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << " shows status/reachability of servers in the cluster";
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
result.append("configserver", configServer.getPrimary().getConnString() );
result.append("isdbgrid", 1);
return true;
}
} netstat;
-
+
class ServerStatusCmd : public Command {
public:
- ServerStatusCmd() : Command( "serverStatus" , true ){
+ ServerStatusCmd() : Command( "serverStatus" , true ) {
_started = time(0);
}
-
+
virtual bool slaveOk() const { return true; }
- virtual LockType locktype() const { return NONE; }
-
+ virtual LockType locktype() const { return NONE; }
+
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
result.append("uptime",(double) (time(0)-_started));
result.appendDate( "localTime" , jsTime() );
{
BSONObjBuilder t( result.subobjStart( "mem" ) );
-
+
ProcessInfo p;
- if ( p.supported() ){
+ if ( p.supported() ) {
t.appendNumber( "resident" , p.getResidentSize() );
t.appendNumber( "virtual" , p.getVirtualMemorySize() );
t.appendBool( "supported" , true );
@@ -104,7 +104,7 @@ namespace mongo {
result.append( "note" , "not all mem info support on this platform" );
t.appendBool( "supported" , false );
}
-
+
t.done();
}
@@ -114,7 +114,7 @@ namespace mongo {
bb.append( "available" , connTicketHolder.available() );
bb.done();
}
-
+
{
BSONObjBuilder bb( result.subobjStart( "extra_info" ) );
bb.append("note", "fields vary by platform");
@@ -122,7 +122,7 @@ namespace mongo {
p.getExtraInfo(bb);
bb.done();
}
-
+
result.append( "opcounters" , globalOpCounters.getObj() );
{
BSONObjBuilder bb( result.subobjStart( "ops" ) );
@@ -132,7 +132,7 @@ namespace mongo {
}
result.append( "shardCursorType" , shardedCursorTypes.getObj() );
-
+
{
BSONObjBuilder asserts( result.subobjStart( "asserts" ) );
asserts.append( "regular" , assertionCount.regular );
@@ -142,7 +142,7 @@ namespace mongo {
asserts.append( "rollovers" , assertionCount.rollovers );
asserts.done();
}
-
+
{
BSONObjBuilder bb( result.subobjStart( "network" ) );
networkCounter.append( bb );
@@ -158,34 +158,34 @@ namespace mongo {
class FsyncCommand : public GridAdminCmd {
public:
- FsyncCommand() : GridAdminCmd( "fsync" ){}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
- if ( cmdObj["lock"].trueValue() ){
+ FsyncCommand() : GridAdminCmd( "fsync" ) {}
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ if ( cmdObj["lock"].trueValue() ) {
errmsg = "can't do lock through mongos";
return false;
}
-
+
BSONObjBuilder sub;
bool ok = true;
int numFiles = 0;
-
+
vector<Shard> shards;
Shard::getAllShards( shards );
- for ( vector<Shard>::iterator i=shards.begin(); i!=shards.end(); i++ ){
+ for ( vector<Shard>::iterator i=shards.begin(); i!=shards.end(); i++ ) {
Shard s = *i;
BSONObj x = s.runCommand( "admin" , "fsync" );
sub.append( s.getName() , x );
- if ( ! x["ok"].trueValue() ){
+ if ( ! x["ok"].trueValue() ) {
ok = false;
errmsg = x["errmsg"].String();
}
-
+
numFiles += x["numFiles"].numberInt();
}
-
+
result.append( "numFiles" , numFiles );
result.append( "all" , sub.obj() );
return ok;
@@ -201,43 +201,43 @@ namespace mongo {
help << " example: { moveprimary : 'foo' , to : 'localhost:9999' }";
// TODO: locking?
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string dbname = cmdObj.firstElement().valuestrsafe();
- if ( dbname.size() == 0 ){
+ if ( dbname.size() == 0 ) {
errmsg = "no db";
return false;
}
- if ( dbname == "config" ){
+ if ( dbname == "config" ) {
errmsg = "can't move config db";
return false;
}
DBConfigPtr config = grid.getDBConfig( dbname , false );
- if ( ! config ){
+ if ( ! config ) {
errmsg = "can't find db!";
return false;
}
string to = cmdObj["to"].valuestrsafe();
- if ( ! to.size() ){
+ if ( ! to.size() ) {
errmsg = "you have to specify where you want to move it";
return false;
}
Shard s = Shard::make( to );
- if ( config->getPrimary() == s.getConnString() ){
+ if ( config->getPrimary() == s.getConnString() ) {
errmsg = "thats already the primary";
return false;
}
- if ( ! grid.knowAboutShard( s.getConnString() ) ){
+ if ( ! grid.knowAboutShard( s.getConnString() ) ) {
errmsg = "that server isn't known to me";
return false;
}
-
- log() << "movePrimary: moving " << dbname << " primary from: " << config->getPrimary().toString()
+
+ log() << "movePrimary: moving " << dbname << " primary from: " << config->getPrimary().toString()
<< " to: " << s.toString() << endl;
// TODO LOCKING: this is not safe with multiple mongos
@@ -250,7 +250,7 @@ namespace mongo {
bool worked = toconn->runCommand( dbname.c_str() , BSON( "clone" << config->getPrimary().getConnString() ) , cloneRes );
toconn.done();
- if ( ! worked ){
+ if ( ! worked ) {
log() << "clone failed" << cloneRes << endl;
errmsg = "clone failed";
return false;
@@ -273,25 +273,25 @@ namespace mongo {
class EnableShardingCmd : public GridAdminCmd {
public:
- EnableShardingCmd() : GridAdminCmd( "enableSharding" ){}
+ EnableShardingCmd() : GridAdminCmd( "enableSharding" ) {}
virtual void help( stringstream& help ) const {
help
- << "Enable sharding for a db. (Use 'shardcollection' command afterwards.)\n"
- << " { enablesharding : \"<dbname>\" }\n";
+ << "Enable sharding for a db. (Use 'shardcollection' command afterwards.)\n"
+ << " { enablesharding : \"<dbname>\" }\n";
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string dbname = cmdObj.firstElement().valuestrsafe();
- if ( dbname.size() == 0 ){
+ if ( dbname.size() == 0 ) {
errmsg = "no db";
return false;
}
DBConfigPtr config = grid.getDBConfig( dbname );
- if ( config->isShardingEnabled() ){
+ if ( config->isShardingEnabled() ) {
errmsg = "already enabled";
return false;
}
-
+
log() << "enabling sharding on: " << dbname << endl;
config->enableSharding();
@@ -304,46 +304,46 @@ namespace mongo {
class ShardCollectionCmd : public GridAdminCmd {
public:
- ShardCollectionCmd() : GridAdminCmd( "shardCollection" ){}
+ ShardCollectionCmd() : GridAdminCmd( "shardCollection" ) {}
virtual void help( stringstream& help ) const {
help
- << "Shard a collection. Requires key. Optional unique. Sharding must already be enabled for the database.\n"
- << " { enablesharding : \"<dbname>\" }\n";
+ << "Shard a collection. Requires key. Optional unique. Sharding must already be enabled for the database.\n"
+                << " { shardcollection : \"<namespace>\" , key : { <field> : 1 } }\n";
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string ns = cmdObj.firstElement().valuestrsafe();
- if ( ns.size() == 0 ){
+ if ( ns.size() == 0 ) {
errmsg = "no ns";
return false;
}
DBConfigPtr config = grid.getDBConfig( ns );
- if ( ! config->isShardingEnabled() ){
+ if ( ! config->isShardingEnabled() ) {
errmsg = "sharding not enabled for db";
return false;
}
- if ( config->isSharded( ns ) ){
+ if ( config->isSharded( ns ) ) {
errmsg = "already sharded";
return false;
}
BSONObj key = cmdObj.getObjectField( "key" );
- if ( key.isEmpty() ){
+ if ( key.isEmpty() ) {
errmsg = "no shard key";
return false;
}
- BSONForEach(e, key){
- if (!e.isNumber() || e.number() != 1.0){
+ BSONForEach(e, key) {
+ if (!e.isNumber() || e.number() != 1.0) {
errmsg = "shard keys must all be ascending";
return false;
}
}
- if ( ns.find( ".system." ) != string::npos ){
+ if ( ns.find( ".system." ) != string::npos ) {
errmsg = "can't shard system namespaces";
return false;
}
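
[editor's note] The BSONForEach loop a few lines up enforces the key-pattern rule: every field of the proposed shard key must be the number 1 (ascending) in this version. The same check over (field, value) pairs instead of BSON, for illustration:

    #include <string>
    #include <utility>
    #include <vector>

    bool keyIsAllAscending( const std::vector< std::pair<std::string,double> >& pattern ) {
        for ( size_t i = 0; i < pattern.size(); i++ ) {
            if ( pattern[i].second != 1.0 )
                return false;   // e.g. { x : 1 , y : -1 } is rejected
        }
        return true;
    }
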
@@ -353,10 +353,10 @@ namespace mongo {
            // 1. A unique index must have the sharding key as its prefix. Otherwise maintaining uniqueness would
// require coordinated access to all shards. Trying to shard a collection with such an index is not
// allowed.
- //
+ //
            // 2. Sharding a collection requires an index over the sharding key. That index must be created upfront.
// The rationale is that sharding a non-empty collection would need to create the index and that could
- // be slow. Requiring the index upfront allows the admin to plan before sharding and perhaps use
+ // be slow. Requiring the index upfront allows the admin to plan before sharding and perhaps use
// background index construction. One exception to the rule: empty collections. It's fairly easy to
// create the index as part of the sharding process.
//
@@ -367,15 +367,15 @@ namespace mongo {
bool hasShardIndex = false;
ScopedDbConnection conn( config->getPrimary() );
- BSONObjBuilder b;
- b.append( "ns" , ns );
+ BSONObjBuilder b;
+ b.append( "ns" , ns );
auto_ptr<DBClientCursor> cursor = conn->query( config->getName() + ".system.indexes" , b.obj() );
- while ( cursor->more() ){
+ while ( cursor->more() ) {
BSONObj idx = cursor->next();
// Is index key over the sharding key? Remember that.
- if ( key.woCompare( idx["key"].embeddedObjectUserCheck() ) == 0 ){
+ if ( key.woCompare( idx["key"].embeddedObjectUserCheck() ) == 0 ) {
hasShardIndex = true;
}
@@ -393,17 +393,17 @@ namespace mongo {
}
BSONObj res = conn->findOne( config->getName() + ".system.namespaces" , BSON( "name" << ns ) );
- if ( res["options"].type() == Object && res["options"].embeddedObject()["capped"].trueValue() ){
+ if ( res["options"].type() == Object && res["options"].embeddedObject()["capped"].trueValue() ) {
errmsg = "can't shard capped collection";
conn.done();
return false;
}
- if ( ! hasShardIndex && ( conn->count( ns ) != 0 ) ){
+ if ( ! hasShardIndex && ( conn->count( ns ) != 0 ) ) {
errmsg = "please create an index over the sharding key before sharding.";
return false;
}
-
+
conn.done();
}
@@ -418,26 +418,26 @@ namespace mongo {
class GetShardVersion : public GridAdminCmd {
public:
- GetShardVersion() : GridAdminCmd( "getShardVersion" ){}
+ GetShardVersion() : GridAdminCmd( "getShardVersion" ) {}
virtual void help( stringstream& help ) const {
help << " example: { getShardVersion : 'alleyinsider.foo' } ";
}
-
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string ns = cmdObj.firstElement().valuestrsafe();
- if ( ns.size() == 0 ){
+ if ( ns.size() == 0 ) {
errmsg = "need to speciy fully namespace";
return false;
}
-
+
DBConfigPtr config = grid.getDBConfig( ns );
- if ( ! config->isSharded( ns ) ){
+ if ( ! config->isSharded( ns ) ) {
errmsg = "ns not sharded.";
return false;
}
-
+
ChunkManagerPtr cm = config->getChunkManager( ns );
- if ( ! cm ){
+ if ( ! cm ) {
errmsg = "no chunk manager?";
return false;
}
@@ -453,39 +453,39 @@ namespace mongo {
SplitCollectionCmd() : GridAdminCmd( "split" ) {}
virtual void help( stringstream& help ) const {
help
- << " example: - split the shard that contains give key \n"
- << " { split : 'alleyinsider.blog.posts' , find : { ts : 1 } }\n"
- << " example: - split the shard that contains the key with this as the middle \n"
- << " { split : 'alleyinsider.blog.posts' , middle : { ts : 1 } }\n"
- << " NOTE: this does not move move the chunks, it merely creates a logical seperation \n"
- ;
+                << " example: - split the shard that contains the given key \n"
+ << " { split : 'alleyinsider.blog.posts' , find : { ts : 1 } }\n"
+ << " example: - split the shard that contains the key with this as the middle \n"
+ << " { split : 'alleyinsider.blog.posts' , middle : { ts : 1 } }\n"
+                << " NOTE: this does not move the chunks, it merely creates a logical separation \n"
+ ;
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
ShardConnection::sync();
string ns = cmdObj.firstElement().valuestrsafe();
- if ( ns.size() == 0 ){
+ if ( ns.size() == 0 ) {
errmsg = "no ns";
return false;
}
DBConfigPtr config = grid.getDBConfig( ns );
- if ( ! config->isSharded( ns ) ){
+ if ( ! config->isSharded( ns ) ) {
errmsg = "ns not sharded. have to shard before can split";
return false;
}
BSONObj find = cmdObj.getObjectField( "find" );
- if ( find.isEmpty() ){
+ if ( find.isEmpty() ) {
find = cmdObj.getObjectField( "middle" );
- if ( find.isEmpty() ){
+ if ( find.isEmpty() ) {
errmsg = "need to specify find or middle";
return false;
}
}
-
+
ChunkManagerPtr info = config->getChunkManager( ns );
ChunkPtr chunk = info->findChunk( find );
BSONObj middle = cmdObj.getObjectField( "middle" );
@@ -498,7 +498,8 @@ namespace mongo {
if ( middle.isEmpty() ) {
p = chunk->singleSplit( true /* force a split even if not enough data */ , res );
- } else {
+ }
+ else {
// sanity check if the key provided is a valid split point
if ( ( middle == chunk->getMin() ) || ( middle == chunk->getMax() ) ) {
errmsg = "cannot split on initial or final chunk's key";
@@ -522,38 +523,38 @@ namespace mongo {
class MoveChunkCmd : public GridAdminCmd {
public:
- MoveChunkCmd() : GridAdminCmd( "moveChunk" ){}
+ MoveChunkCmd() : GridAdminCmd( "moveChunk" ) {}
virtual void help( stringstream& help ) const {
help << "{ movechunk : 'test.foo' , find : { num : 1 } , to : 'localhost:30001' }";
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
ShardConnection::sync();
Timer t;
string ns = cmdObj.firstElement().valuestrsafe();
- if ( ns.size() == 0 ){
+ if ( ns.size() == 0 ) {
errmsg = "no ns";
return false;
}
DBConfigPtr config = grid.getDBConfig( ns );
- if ( ! config->isSharded( ns ) ){
+ if ( ! config->isSharded( ns ) ) {
errmsg = "ns not sharded. have to shard before can move a chunk";
return false;
}
BSONObj find = cmdObj.getObjectField( "find" );
- if ( find.isEmpty() ){
+ if ( find.isEmpty() ) {
errmsg = "need to specify find. see help";
return false;
}
string toString = cmdObj["to"].valuestrsafe();
- if ( ! toString.size() ){
+ if ( ! toString.size() ) {
errmsg = "you have to specify where you want to move the chunk";
return false;
}
-
+
Shard to = Shard::make( toString );
// so far, chunk size serves test purposes; it may or may not become a supported parameter
@@ -568,13 +569,13 @@ namespace mongo {
ChunkPtr c = info->findChunk( find );
const Shard& from = c->getShard();
- if ( from == to ){
+ if ( from == to ) {
errmsg = "that chunk is already on that shard";
return false;
}
-
+
BSONObj res;
- if ( ! c->moveAndCommit( to , maxChunkSizeBytes , res ) ){
+ if ( ! c->moveAndCommit( to , maxChunkSizeBytes , res ) ) {
errmsg = "move failed";
result.append( "cause" , res );
return false;
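
The same pattern drives a manual chunk migration. A sketch, with the target shard address taken from the help string above and everything else illustrative:

```cpp
// Assumes conn is a mongo::DBClientConnection already connected to a mongos.
mongo::BSONObj res;
bool ok = conn.runCommand( "admin" ,
                           BSON( "moveChunk" << "test.foo"
                                 << "find" << BSON( "num" << 1 )
                                 << "to" << "localhost:30001" ) ,
                           res );
if ( ! ok )
    std::cout << "move failed, cause: " << res["cause"] << std::endl;
```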
@@ -593,12 +594,12 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << "list all shards of the system";
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
ScopedDbConnection conn( configServer.getPrimary() );
vector<BSONObj> all;
auto_ptr<DBClientCursor> cursor = conn->query( "config.shards" , BSONObj() );
- while ( cursor->more() ){
+ while ( cursor->more() ) {
BSONObj o = cursor->next();
all.push_back( o );
}
@@ -610,27 +611,27 @@ namespace mongo {
}
} listShardsCmd;
- /* a shard is a single mongod server or a replica pair. add it (them) to the cluster as a storage partition. */
+ /* a shard is a single mongod server or a replica pair. add it (them) to the cluster as a storage partition. */
class AddShard : public GridAdminCmd {
public:
AddShard() : GridAdminCmd("addShard") { }
virtual void help( stringstream& help ) const {
help << "add a new shard to the system";
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
errmsg.clear();
// get replica set component hosts
ConnectionString servers = ConnectionString::parse( cmdObj.firstElement().valuestrsafe() , errmsg );
- if ( ! errmsg.empty() ){
+ if ( ! errmsg.empty() ) {
log() << "addshard request " << cmdObj << " failed:" << errmsg << endl;
return false;
}
// using localhost in server names implies every other process must use localhost addresses too
vector<HostAndPort> serverAddrs = servers.getServers();
- for ( size_t i = 0 ; i < serverAddrs.size() ; i++ ){
- if ( serverAddrs[i].isLocalHost() != grid.allowLocalHost() ){
+ for ( size_t i = 0 ; i < serverAddrs.size() ; i++ ) {
+ if ( serverAddrs[i].isLocalHost() != grid.allowLocalHost() ) {
errmsg = "can't use localhost as a shard since all shards need to communicate. "
"either use all shards and configdbs in localhost or all in actual IPs " ;
log() << "addshard request " << cmdObj << " failed: attempt to mix localhosts and IPs" << endl;
@@ -638,7 +639,7 @@ namespace mongo {
}
// it's fine if mongods of a set all use default port
- if ( ! serverAddrs[i].hasPort() ){
+ if ( ! serverAddrs[i].hasPort() ) {
serverAddrs[i].setPort( CmdLine::ShardServerPort );
}
}
@@ -647,15 +648,15 @@ namespace mongo {
string name = "";
if ( cmdObj["name"].type() == String ) {
name = cmdObj["name"].valuestrsafe();
- }
+ }
// maxSize is the space usage cap in a shard in MBs
long long maxSize = 0;
- if ( cmdObj[ ShardFields::maxSize.name() ].isNumber() ){
+ if ( cmdObj[ ShardFields::maxSize.name() ].isNumber() ) {
maxSize = cmdObj[ ShardFields::maxSize.name() ].numberLong();
}
-
- if ( ! grid.addShard( &name , servers , maxSize , errmsg ) ){
+
+ if ( ! grid.addShard( &name , servers , maxSize , errmsg ) ) {
log() << "addshard request " << cmdObj << " failed: " << errmsg << endl;
return false;
}
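
addShard takes the connection string parsed above plus the optional name and maxSize fields handled in this hunk. A hedged sketch (shard address, name, and cap are illustrative; maxSize is in megabytes, per the comment above):

```cpp
// Assumes conn is a mongo::DBClientConnection already connected to a mongos.
mongo::BSONObj res;
conn.runCommand( "admin" ,
                 BSON( "addShard" << "localhost:30001"   // or "setName/host1,host2" for a replica set
                       << "name" << "shard0001"          // optional
                       << "maxSize" << 1024 ) ,          // optional space-usage cap, in MB
                 res );
```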
@@ -675,10 +676,10 @@ namespace mongo {
virtual void help( stringstream& help ) const {
help << "remove a shard to the system.";
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string target = cmdObj.firstElement().valuestrsafe();
Shard s = Shard::make( target );
- if ( ! grid.knowAboutShard( s.getConnString() ) ){
+ if ( ! grid.knowAboutShard( s.getConnString() ) ) {
errmsg = "unknown shard";
return false;
}
@@ -689,7 +690,7 @@ namespace mongo {
BSONObj searchDoc = BSON( "_id" << s.getName() );
BSONObj drainingDoc = BSON( "_id" << s.getName() << ShardFields::draining(true) );
BSONObj shardDoc = conn->findOne( "config.shards", drainingDoc );
- if ( shardDoc.isEmpty() ){
+ if ( shardDoc.isEmpty() ) {
// TODO: prevent moving chunks to this shard.
@@ -698,7 +699,7 @@ namespace mongo {
conn->update( "config.shards" , searchDoc , newStatus, false /* do no upsert */);
errmsg = conn->getLastError();
- if ( errmsg.size() ){
+ if ( errmsg.size() ) {
log() << "error starting remove shard: " << s.getName() << " err: " << errmsg << endl;
return false;
}
@@ -706,7 +707,7 @@ namespace mongo {
Shard::reloadShardInfo();
result.append( "msg" , "draining started successfully" );
- result.append( "state" , "started" );
+ result.append( "state" , "started" );
result.append( "shard" , s.getName() );
conn.done();
return true;
@@ -718,12 +719,12 @@ namespace mongo {
long long chunkCount = conn->count( "config.chunks" , shardIDDoc );
BSONObj primaryDoc = BSON( "primary" << shardDoc[ "_id" ].str() );
long long dbCount = conn->count( "config.databases" , primaryDoc );
- if ( ( chunkCount == 0 ) && ( dbCount == 0 ) ){
- log() << "going to remove shard: " << s.getName() << endl;
+ if ( ( chunkCount == 0 ) && ( dbCount == 0 ) ) {
+ log() << "going to remove shard: " << s.getName() << endl;
conn->remove( "config.shards" , searchDoc );
errmsg = conn->getLastError();
- if ( errmsg.size() ){
+ if ( errmsg.size() ) {
log() << "error concluding remove shard: " << s.getName() << " err: " << errmsg << endl;
return false;
}
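
Removal is a two-phase operation: the first invocation flips the draining flag and returns state "started"; repeating the command reports progress until the shard holds no chunks or databases and its entry is removed. A sketch (the command name removeShard is assumed here, since the registration line falls outside this hunk):

```cpp
// Assumes conn is a mongo::DBClientConnection already connected to a mongos.
mongo::BSONObj res;
conn.runCommand( "admin" , BSON( "removeShard" << "localhost:30001" ) , res );
std::cout << res["state"] << std::endl;   // "started" on the first call
// ... later: issue the same command again to poll draining progress
conn.runCommand( "admin" , BSON( "removeShard" << "localhost:30001" ) , res );
```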
@@ -757,7 +758,7 @@ namespace mongo {
class IsDbGridCmd : public Command {
public:
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual bool slaveOk() const {
return true;
}
@@ -771,7 +772,7 @@ namespace mongo {
class CmdIsMaster : public Command {
public:
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual bool requiresAuth() { return false; }
virtual bool slaveOk() const {
return true;
@@ -797,23 +798,23 @@ namespace mongo {
virtual bool slaveOk() const {
return true;
}
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual bool requiresAuth() {
return false;
}
virtual void help( stringstream &help ) const {
help << "{whatsmyuri:1}";
- }
+ }
virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
result << "you" << ClientInfo::get()->getRemote();
return true;
}
} cmdWhatsMyUri;
-
+
class CmdShardingGetPrevError : public Command {
public:
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual bool requiresAuth() { return false; }
virtual bool slaveOk() const {
return true;
@@ -830,7 +831,7 @@ namespace mongo {
class CmdShardingGetLastError : public Command {
public:
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual bool requiresAuth() { return false; }
virtual bool slaveOk() const {
return true;
@@ -839,34 +840,34 @@ namespace mongo {
help << "check for an error on the last command executed";
}
CmdShardingGetLastError() : Command("getLastError" , false , "getlasterror") { }
-
+
virtual bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
LastError *le = lastError.disableForCommand();
{
assert( le );
- if ( le->msg.size() && le->nPrev == 1 ){
+ if ( le->msg.size() && le->nPrev == 1 ) {
le->appendSelf( result );
return true;
}
}
-
+
ClientInfo * client = ClientInfo::get();
return client->getLastError( cmdObj , result );
}
} cmdGetLastError;
-
+
}
class CmdShardingResetError : public Command {
public:
- CmdShardingResetError() : Command( "resetError" , false , "reseterror" ){}
-
- virtual LockType locktype() const { return NONE; }
+ CmdShardingResetError() : Command( "resetError" , false , "reseterror" ) {}
+
+ virtual LockType locktype() const { return NONE; }
virtual bool requiresAuth() { return false; }
virtual bool slaveOk() const {
return true;
}
-
+
bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
LastError *le = lastError.get();
if ( le )
@@ -875,18 +876,18 @@ namespace mongo {
ClientInfo * client = ClientInfo::get();
set<string> * shards = client->getPrev();
- for ( set<string>::iterator i = shards->begin(); i != shards->end(); i++ ){
+ for ( set<string>::iterator i = shards->begin(); i != shards->end(); i++ ) {
string theShard = *i;
ShardConnection conn( theShard , "" );
BSONObj res;
conn->runCommand( dbName , cmdObj , res );
conn.done();
}
-
+
return true;
}
} cmdShardingResetError;
-
+
class CmdListDatabases : public Command {
public:
CmdListDatabases() : Command("listDatabases", true , "listdatabases" ) {}
@@ -895,64 +896,64 @@ namespace mongo {
virtual bool slaveOk() const { return true; }
virtual bool slaveOverrideOk() { return true; }
virtual bool adminOnly() const { return true; }
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual void help( stringstream& help ) const { help << "list databases on cluster"; }
-
+
bool run(const string& , BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
vector<Shard> shards;
Shard::getAllShards( shards );
-
+
map<string,long long> sizes;
map< string,shared_ptr<BSONObjBuilder> > dbShardInfo;
- for ( vector<Shard>::iterator i=shards.begin(); i!=shards.end(); i++ ){
+ for ( vector<Shard>::iterator i=shards.begin(); i!=shards.end(); i++ ) {
Shard s = *i;
BSONObj x = s.runCommand( "admin" , "listDatabases" );
BSONObjIterator j( x["databases"].Obj() );
- while ( j.more() ){
+ while ( j.more() ) {
BSONObj theDB = j.next().Obj();
-
+
string name = theDB["name"].String();
long long size = theDB["sizeOnDisk"].numberLong();
long long& totalSize = sizes[name];
- if ( size == 1 ){
+ if ( size == 1 ) {
if ( totalSize <= 1 )
totalSize = 1;
}
else
totalSize += size;
-
+
shared_ptr<BSONObjBuilder>& bb = dbShardInfo[name];
if ( ! bb.get() )
bb.reset( new BSONObjBuilder() );
bb->appendNumber( s.getName() , size );
}
-
+
}
-
+
long long totalSize = 0;
BSONArrayBuilder bb( result.subarrayStart( "databases" ) );
- for ( map<string,long long>::iterator i=sizes.begin(); i!=sizes.end(); ++i ){
+ for ( map<string,long long>::iterator i=sizes.begin(); i!=sizes.end(); ++i ) {
string name = i->first;
long long size = i->second;
totalSize += size;
-
+
BSONObjBuilder temp;
temp.append( "name" , name );
temp.appendNumber( "size" , size );
temp.appendBool( "empty" , size == 1 );
temp.append( "shards" , dbShardInfo[name]->obj() );
-
+
bb.append( temp.obj() );
}
bb.done();
result.appendNumber( "totalSize" , totalSize );
result.appendNumber( "totalSizeMb" , totalSize / ( 1024 * 1024 ) );
-
+
return 1;
}
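
From the client side the per-shard aggregation above is invisible; mongos returns one databases array with the merged sizes, plus the per-shard breakdown nested under shards. A sketch of reading it:

```cpp
// Assumes conn is a mongo::DBClientConnection already connected to a mongos.
mongo::BSONObj res;
conn.runCommand( "admin" , BSON( "listDatabases" << 1 ) , res );
mongo::BSONObjIterator it( res["databases"].Obj() );
while ( it.more() ) {
    mongo::BSONObj db = it.next().Obj();
    std::cout << db["name"].String() << " " << db["size"].numberLong()
              << ( db["empty"].trueValue() ? " (empty)" : "" ) << std::endl;
}
```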
@@ -965,9 +966,9 @@ namespace mongo {
virtual bool slaveOk() const { return true; }
virtual bool slaveOverrideOk() { return true; }
virtual bool adminOnly() const { return true; }
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual void help( stringstream& help ) const { help << "Not supported sharded"; }
-
+
bool run(const string& , BSONObj& jsobj, string& errmsg, BSONObjBuilder& /*result*/, bool /*fromRepl*/) {
errmsg = "closeAllDatabases isn't supported through mongos";
return false;
diff --git a/s/commands_public.cpp b/s/commands_public.cpp
index de3f37c67d8..d40886073fd 100644
--- a/s/commands_public.cpp
+++ b/s/commands_public.cpp
@@ -33,10 +33,10 @@
namespace mongo {
namespace dbgrid_pub_cmds {
-
+
class PublicGridCommand : public Command {
public:
- PublicGridCommand( const char* n, const char* oldname=NULL ) : Command( n, false, oldname ){
+ PublicGridCommand( const char* n, const char* oldname=NULL ) : Command( n, false, oldname ) {
}
virtual bool slaveOk() const {
return true;
@@ -46,18 +46,18 @@ namespace mongo {
}
// all grid commands are designed not to lock
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
protected:
- bool passthrough( DBConfigPtr conf, const BSONObj& cmdObj , BSONObjBuilder& result ){
+ bool passthrough( DBConfigPtr conf, const BSONObj& cmdObj , BSONObjBuilder& result ) {
return _passthrough(conf->getName(), conf, cmdObj, result);
}
- bool adminPassthrough( DBConfigPtr conf, const BSONObj& cmdObj , BSONObjBuilder& result ){
+ bool adminPassthrough( DBConfigPtr conf, const BSONObj& cmdObj , BSONObjBuilder& result ) {
return _passthrough("admin", conf, cmdObj, result);
}
-
+
private:
- bool _passthrough(const string& db, DBConfigPtr conf, const BSONObj& cmdObj , BSONObjBuilder& result ){
+ bool _passthrough(const string& db, DBConfigPtr conf, const BSONObj& cmdObj , BSONObjBuilder& result ) {
ShardConnection conn( conf->getPrimary() , "" );
BSONObj res;
bool ok = conn->runCommand( db , cmdObj , res );
@@ -75,33 +75,33 @@ namespace mongo {
virtual bool adminOnly() const { return false; }
// all grid commands are designed not to lock
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
// default impl uses all shards for DB
- virtual void getShards(const string& dbName , BSONObj& cmdObj, set<Shard>& shards){
+ virtual void getShards(const string& dbName , BSONObj& cmdObj, set<Shard>& shards) {
DBConfigPtr conf = grid.getDBConfig( dbName , false );
conf->getAllShards(shards);
}
-
+
virtual void aggregateResults(const vector<BSONObj>& results, BSONObjBuilder& output) {}
// don't override
- virtual bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& output, bool){
+ virtual bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& output, bool) {
set<Shard> shards;
getShards(dbName, cmdObj, shards);
list< shared_ptr<Future::CommandResult> > futures;
- for ( set<Shard>::const_iterator i=shards.begin(), end=shards.end() ; i != end ; i++ ){
+ for ( set<Shard>::const_iterator i=shards.begin(), end=shards.end() ; i != end ; i++ ) {
futures.push_back( Future::spawnCommand( i->getConnString() , dbName , cmdObj ) );
}
-
+
vector<BSONObj> results;
BSONObjBuilder subobj (output.subobjStart("raw"));
BSONObjBuilder errors;
- for ( list< shared_ptr<Future::CommandResult> >::iterator i=futures.begin(); i!=futures.end(); i++ ){
+ for ( list< shared_ptr<Future::CommandResult> >::iterator i=futures.begin(); i!=futures.end(); i++ ) {
shared_ptr<Future::CommandResult> res = *i;
- if ( ! res->join() ){
+ if ( ! res->join() ) {
errors.appendAs(res->result()["errmsg"], res->getServer());
}
results.push_back( res->result() );
@@ -111,11 +111,11 @@ namespace mongo {
subobj.done();
BSONObj errobj = errors.done();
- if (! errobj.isEmpty()){
+ if (! errobj.isEmpty()) {
errmsg = errobj.toString(false, true);
return false;
}
-
+
aggregateResults(results, output);
return true;
}
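
That scatter-gather loop is the whole extension surface of RunOnAllShardsCommand: a subclass picks a name, optionally narrows getShards, and folds the per-shard replies in aggregateResults. A hypothetical subclass sketch, based only on the virtuals visible in this hunk (the command name and counted field are illustrative, not part of the tree):

```cpp
// Hypothetical subclass; not part of this patch.
class SumOkCmd : public RunOnAllShardsCommand {
public:
    SumOkCmd() : RunOnAllShardsCommand( "sumOk" ) {}
    // count how many shards reported ok; the raw replies are still
    // attached under "raw" by the base class run() above
    virtual void aggregateResults( const vector<BSONObj>& results , BSONObjBuilder& output ) {
        long long shardsOk = 0;
        for ( vector<BSONObj>::const_iterator it = results.begin(); it != results.end(); ++it )
            if ( (*it)["ok"].trueValue() )
                shardsOk++;
        output.appendNumber( "shardsOk" , shardsOk );
    }
} sumOkCmd;
```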
@@ -126,39 +126,40 @@ namespace mongo {
public:
AllShardsCollectionCommand(const char* n, const char* oldname=NULL) : RunOnAllShardsCommand(n, oldname) {}
- virtual void getShards(const string& dbName , BSONObj& cmdObj, set<Shard>& shards){
+ virtual void getShards(const string& dbName , BSONObj& cmdObj, set<Shard>& shards) {
string fullns = dbName + '.' + cmdObj.firstElement().valuestrsafe();
-
+
DBConfigPtr conf = grid.getDBConfig( dbName , false );
-
- if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ){
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
shards.insert(conf->getShard(fullns));
- } else {
+ }
+ else {
conf->getChunkManager(fullns)->getAllShards(shards);
}
}
};
-
+
class NotAllowedOnShardedCollectionCmd : public PublicGridCommand {
public:
- NotAllowedOnShardedCollectionCmd( const char * n ) : PublicGridCommand( n ){}
+ NotAllowedOnShardedCollectionCmd( const char * n ) : PublicGridCommand( n ) {}
virtual string getFullNS( const string& dbName , const BSONObj& cmdObj ) = 0;
-
- virtual bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+
+ virtual bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string fullns = getFullNS( dbName , cmdObj );
-
+
DBConfigPtr conf = grid.getDBConfig( dbName , false );
-
- if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ){
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
return passthrough( conf , cmdObj , result );
}
errmsg = "can't do command: " + name + " on sharded collection";
return false;
}
};
-
+
// ----
class DropIndexesCmd : public AllShardsCollectionCommand {
@@ -194,7 +195,7 @@ namespace mongo {
long long indexSize = 0;
long long fileSize = 0;
- for (vector<BSONObj>::const_iterator it(results.begin()), end(results.end()); it != end; ++it){
+ for (vector<BSONObj>::const_iterator it(results.begin()), end(results.end()); it != end; ++it) {
const BSONObj& b = *it;
objects += b["objects"].numberLong();
dataSize += b["dataSize"].numberLong();
@@ -219,22 +220,22 @@ namespace mongo {
class DropCmd : public PublicGridCommand {
public:
- DropCmd() : PublicGridCommand( "drop" ){}
- bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ DropCmd() : PublicGridCommand( "drop" ) {}
+ bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string collection = cmdObj.firstElement().valuestrsafe();
string fullns = dbName + "." + collection;
-
+
DBConfigPtr conf = grid.getDBConfig( dbName , false );
-
+
log() << "DROP: " << fullns << endl;
-
- if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ){
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
return passthrough( conf , cmdObj , result );
}
-
+
ChunkManagerPtr cm = conf->getChunkManager( fullns );
massert( 10418 , "how could chunk manager be null!" , cm );
-
+
cm->drop( cm );
uassert( 13512 , "drop collection attempted on non-sharded collection" , conf->removeSharding( fullns ) );
@@ -244,25 +245,25 @@ namespace mongo {
class DropDBCmd : public PublicGridCommand {
public:
- DropDBCmd() : PublicGridCommand( "dropDatabase" ){}
- bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
-
+ DropDBCmd() : PublicGridCommand( "dropDatabase" ) {}
+ bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+
BSONElement e = cmdObj.firstElement();
-
- if ( ! e.isNumber() || e.number() != 1 ){
+
+ if ( ! e.isNumber() || e.number() != 1 ) {
errmsg = "invalid params";
return 0;
}
-
+
DBConfigPtr conf = grid.getDBConfig( dbName , false );
-
+
log() << "DROP DATABASE: " << dbName << endl;
- if ( ! conf ){
+ if ( ! conf ) {
result.append( "info" , "database didn't exist" );
return true;
}
-
+
if ( ! conf->dropDatabase( errmsg ) )
return false;
@@ -273,8 +274,8 @@ namespace mongo {
class RenameCollectionCmd : public PublicGridCommand {
public:
- RenameCollectionCmd() : PublicGridCommand( "renameCollection" ){}
- bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ RenameCollectionCmd() : PublicGridCommand( "renameCollection" ) {}
+ bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string fullnsFrom = cmdObj.firstElement().valuestrsafe();
string dbNameFrom = nsToDatabase( fullnsFrom.c_str() );
DBConfigPtr confFrom = grid.getDBConfig( dbNameFrom , false );
@@ -298,18 +299,19 @@ namespace mongo {
class CopyDBCmd : public PublicGridCommand {
public:
- CopyDBCmd() : PublicGridCommand( "copydb" ){}
- bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ CopyDBCmd() : PublicGridCommand( "copydb" ) {}
+ bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string todb = cmdObj.getStringField("todb");
uassert(13402, "need a todb argument", !todb.empty());
-
+
DBConfigPtr confTo = grid.getDBConfig( todb );
uassert(13398, "cant copy to sharded DB", !confTo->isShardingEnabled());
string fromhost = cmdObj.getStringField("fromhost");
- if (!fromhost.empty()){
+ if (!fromhost.empty()) {
return adminPassthrough( confTo , cmdObj , result );
- } else {
+ }
+ else {
string fromdb = cmdObj.getStringField("fromdb");
uassert(13399, "need a fromdb argument", !fromdb.empty());
@@ -318,7 +320,7 @@ namespace mongo {
uassert(13401, "cant copy from sharded DB", !confFrom->isShardingEnabled());
BSONObjBuilder b;
- BSONForEach(e, cmdObj){
+ BSONForEach(e, cmdObj) {
if (strcmp(e.fieldName(), "fromhost") != 0)
b.append(e);
}
@@ -329,67 +331,67 @@ namespace mongo {
}
}
- }copyDBCmd;
+ } copyDBCmd;
class CountCmd : public PublicGridCommand {
public:
CountCmd() : PublicGridCommand("count") { }
- bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool l){
+ bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool l) {
string collection = cmdObj.firstElement().valuestrsafe();
string fullns = dbName + "." + collection;
-
+
BSONObj filter;
if ( cmdObj["query"].isABSONObj() )
filter = cmdObj["query"].Obj();
-
+
DBConfigPtr conf = grid.getDBConfig( dbName , false );
-
- if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ){
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
ShardConnection conn( conf->getPrimary() , fullns );
BSONObj temp;
bool ok = conn->runCommand( dbName , cmdObj , temp );
conn.done();
-
- if ( ok ){
+
+ if ( ok ) {
result.append( temp["n"] );
return true;
}
-
- if ( temp["code"].numberInt() != StaleConfigInContextCode ){
+
+ if ( temp["code"].numberInt() != StaleConfigInContextCode ) {
errmsg = temp["errmsg"].String();
result.appendElements( temp );
return false;
}
-
+
// this collection got sharded
ChunkManagerPtr cm = conf->getChunkManager( fullns , true );
- if ( ! cm ){
+ if ( ! cm ) {
errmsg = "should be sharded now";
result.append( "root" , temp );
return false;
}
}
-
+
long long total = 0;
map<string,long long> shardCounts;
-
+
ChunkManagerPtr cm = conf->getChunkManager( fullns );
- while ( true ){
- if ( ! cm ){
+ while ( true ) {
+ if ( ! cm ) {
// probably unsharded now
return run( dbName , cmdObj , errmsg , result , l );
}
-
+
set<Shard> shards;
cm->getShardsForQuery( shards , filter );
assert( shards.size() );
-
+
bool hadToBreak = false;
- for (set<Shard>::iterator it=shards.begin(), end=shards.end(); it != end; ++it){
+ for (set<Shard>::iterator it=shards.begin(), end=shards.end(); it != end; ++it) {
ShardConnection conn(*it, fullns);
- if ( conn.setVersion() ){
+ if ( conn.setVersion() ) {
total = 0;
shardCounts.clear();
cm = conf->getChunkManager( fullns );
@@ -397,19 +399,19 @@ namespace mongo {
hadToBreak = true;
break;
}
-
+
BSONObj temp;
bool ok = conn->runCommand( dbName , BSON( "count" << collection << "query" << filter ) , temp );
conn.done();
-
- if ( ok ){
+
+ if ( ok ) {
long long mine = temp["n"].numberLong();
total += mine;
shardCounts[it->getName()] = mine;
continue;
}
-
- if ( StaleConfigInContextCode == temp["code"].numberInt() ){
+
+ if ( StaleConfigInContextCode == temp["code"].numberInt() ) {
// my version is old
total = 0;
shardCounts.clear();
@@ -426,7 +428,7 @@ namespace mongo {
if ( ! hadToBreak )
break;
}
-
+
total = applySkipLimit( total , cmdObj );
result.appendNumber( "n" , total );
BSONObjBuilder temp( result.subobjStart( "shards" ) );
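
Seen from a driver, all of that stale-config retry machinery collapses into a single command whose reply carries the merged n plus the per-shard counts built just above. A sketch (collection and filter are illustrative):

```cpp
// Assumes conn is a mongo::DBClientConnection already connected to a mongos.
mongo::BSONObj res;
conn.runCommand( "test" ,
                 BSON( "count" << "foo" << "query" << BSON( "active" << true ) ) ,
                 res );
std::cout << "n = " << res["n"].numberLong()
          << " shards: " << res["shards"] << std::endl;
```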
@@ -440,13 +442,13 @@ namespace mongo {
class CollectionStats : public PublicGridCommand {
public:
CollectionStats() : PublicGridCommand("collStats", "collstats") { }
- bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string collection = cmdObj.firstElement().valuestrsafe();
string fullns = dbName + "." + collection;
-
+
DBConfigPtr conf = grid.getDBConfig( dbName , false );
-
- if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ){
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
result.append( "ns" , fullns );
result.appendBool("sharded", false);
result.append( "primary" , conf->getPrimary().getName() );
@@ -459,17 +461,17 @@ namespace mongo {
set<Shard> servers;
cm->getAllShards(servers);
-
+
BSONObjBuilder shardStats;
long long count=0;
long long size=0;
long long storageSize=0;
int nindexes=0;
bool warnedAboutIndexes = false;
- for ( set<Shard>::iterator i=servers.begin(); i!=servers.end(); i++ ){
+ for ( set<Shard>::iterator i=servers.begin(); i!=servers.end(); i++ ) {
ScopedDbConnection conn( *i );
BSONObj res;
- if ( ! conn->runCommand( dbName , cmdObj , res ) ){
+ if ( ! conn->runCommand( dbName , cmdObj , res ) ) {
errmsg = "failed on shard: " + res.toString();
return false;
}
@@ -481,19 +483,19 @@ namespace mongo {
int myIndexes = res["nindexes"].numberInt();
- if ( nindexes == 0 ){
+ if ( nindexes == 0 ) {
nindexes = myIndexes;
}
- else if ( nindexes == myIndexes ){
+ else if ( nindexes == myIndexes ) {
// no-op
}
else {
// hopefully this means we're building an index
-
+
if ( myIndexes > nindexes )
nindexes = myIndexes;
-
- if ( ! warnedAboutIndexes ){
+
+ if ( ! warnedAboutIndexes ) {
result.append( "warning" , "indexes don't all match - ok if ensureIndex is running" );
warnedAboutIndexes = true;
}
@@ -511,7 +513,7 @@ namespace mongo {
result.append("nchunks", cm->numChunks());
result.append("shards", shardStats.obj());
-
+
return true;
}
} collectionStatsCmd;
@@ -519,19 +521,19 @@ namespace mongo {
class FindAndModifyCmd : public PublicGridCommand {
public:
FindAndModifyCmd() : PublicGridCommand("findAndModify", "findandmodify") { }
- bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string collection = cmdObj.firstElement().valuestrsafe();
string fullns = dbName + "." + collection;
-
+
DBConfigPtr conf = grid.getDBConfig( dbName , false );
-
- if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ){
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
return passthrough( conf , cmdObj , result);
}
-
+
ChunkManagerPtr cm = conf->getChunkManager( fullns );
massert( 13002 , "how could chunk manager be null!" , cm );
-
+
BSONObj filter = cmdObj.getObjectField("query");
uassert(13343, "query for sharded findAndModify must have shardkey", cm->hasShardKey(filter));
@@ -543,11 +545,11 @@ namespace mongo {
bool ok = conn->runCommand( conf->getName() , cmdObj , res );
conn.done();
- if (ok || (strcmp(res["errmsg"].valuestrsafe(), "No matching object found") != 0)){
+ if (ok || (strcmp(res["errmsg"].valuestrsafe(), "No matching object found") != 0)) {
result.appendElements(res);
return ok;
}
-
+
return true;
}
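
The uassert above is the one sharding-specific constraint: the query must pin a single chunk via the shard key. A sketch, assuming user_id is the collection's shard key (purely illustrative):

```cpp
// Assumes conn is a mongo::DBClientConnection already connected to a mongos.
mongo::BSONObj res;
conn.runCommand( "test" ,
                 BSON( "findAndModify" << "accounts"
                       << "query" << BSON( "user_id" << 42 )   // must contain the shard key
                       << "update" << BSON( "$inc" << BSON( "logins" << 1 ) ) ) ,
                 res );
```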
@@ -556,18 +558,18 @@ namespace mongo {
class DataSizeCmd : public PublicGridCommand {
public:
DataSizeCmd() : PublicGridCommand("dataSize", "datasize") { }
- bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string fullns = cmdObj.firstElement().String();
-
+
DBConfigPtr conf = grid.getDBConfig( dbName , false );
-
- if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ){
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
return passthrough( conf , cmdObj , result);
}
-
+
ChunkManagerPtr cm = conf->getChunkManager( fullns );
massert( 13407 , "how could chunk manager be null!" , cm );
-
+
BSONObj min = cmdObj.getObjectField( "min" );
BSONObj max = cmdObj.getObjectField( "max" );
BSONObj keyPattern = cmdObj.getObjectField( "keyPattern" );
@@ -581,13 +583,13 @@ namespace mongo {
set<Shard> shards;
cm->getShardsForRange(shards, min, max);
- for ( set<Shard>::iterator i=shards.begin(), end=shards.end() ; i != end; ++i ){
+ for ( set<Shard>::iterator i=shards.begin(), end=shards.end() ; i != end; ++i ) {
ScopedDbConnection conn( *i );
BSONObj res;
bool ok = conn->runCommand( conf->getName() , cmdObj , res );
conn.done();
-
- if ( ! ok ){
+
+ if ( ! ok ) {
result.appendElements( res );
return false;
}
@@ -608,64 +610,64 @@ namespace mongo {
class ConvertToCappedCmd : public NotAllowedOnShardedCollectionCmd {
public:
- ConvertToCappedCmd() : NotAllowedOnShardedCollectionCmd("convertToCapped"){}
-
- virtual string getFullNS( const string& dbName , const BSONObj& cmdObj ){
+ ConvertToCappedCmd() : NotAllowedOnShardedCollectionCmd("convertToCapped") {}
+
+ virtual string getFullNS( const string& dbName , const BSONObj& cmdObj ) {
return dbName + "." + cmdObj.firstElement().valuestrsafe();
}
-
+
} convertToCappedCmd;
class GroupCmd : public NotAllowedOnShardedCollectionCmd {
public:
- GroupCmd() : NotAllowedOnShardedCollectionCmd("group"){}
-
- virtual string getFullNS( const string& dbName , const BSONObj& cmdObj ){
+ GroupCmd() : NotAllowedOnShardedCollectionCmd("group") {}
+
+ virtual string getFullNS( const string& dbName , const BSONObj& cmdObj ) {
return dbName + "." + cmdObj.firstElement().embeddedObjectUserCheck()["ns"].valuestrsafe();
}
-
+
} groupCmd;
class DistinctCmd : public PublicGridCommand {
public:
- DistinctCmd() : PublicGridCommand("distinct"){}
+ DistinctCmd() : PublicGridCommand("distinct") {}
virtual void help( stringstream &help ) const {
help << "{ distinct : 'collection name' , key : 'a.b' , query : {} }";
}
- bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string collection = cmdObj.firstElement().valuestrsafe();
string fullns = dbName + "." + collection;
DBConfigPtr conf = grid.getDBConfig( dbName , false );
-
- if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ){
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
return passthrough( conf , cmdObj , result );
}
-
+
ChunkManagerPtr cm = conf->getChunkManager( fullns );
massert( 10420 , "how could chunk manager be null!" , cm );
BSONObj query = getQuery(cmdObj);
set<Shard> shards;
cm->getShardsForQuery(shards, query);
-
+
set<BSONObj,BSONObjCmp> all;
int size = 32;
-
- for ( set<Shard>::iterator i=shards.begin(), end=shards.end() ; i != end; ++i ){
+
+ for ( set<Shard>::iterator i=shards.begin(), end=shards.end() ; i != end; ++i ) {
ShardConnection conn( *i , fullns );
BSONObj res;
bool ok = conn->runCommand( conf->getName() , cmdObj , res );
conn.done();
-
- if ( ! ok ){
+
+ if ( ! ok ) {
result.appendElements( res );
return false;
}
-
+
BSONObjIterator it( res["values"].embeddedObject() );
- while ( it.more() ){
+ while ( it.more() ) {
BSONElement nxt = it.next();
BSONObjBuilder temp(32);
temp.appendAs( nxt , "" );
@@ -673,13 +675,13 @@ namespace mongo {
}
}
-
+
BSONObjBuilder b( size );
int n=0;
- for ( set<BSONObj,BSONObjCmp>::iterator i = all.begin() ; i != all.end(); i++ ){
+ for ( set<BSONObj,BSONObjCmp>::iterator i = all.begin() ; i != all.end(); i++ ) {
b.appendAs( i->firstElement() , b.numStr( n++ ) );
}
-
+
result.appendArray( "values" , b.obj() );
return true;
}
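
Client-side, the per-shard de-duplication through the BSONObjCmp set is transparent; the reply's values array is already merged. A sketch mirroring the help string above:

```cpp
// Assumes conn is a mongo::DBClientConnection already connected to a mongos.
mongo::BSONObj res;
conn.runCommand( "test" ,
                 BSON( "distinct" << "posts" << "key" << "a.b" << "query" << mongo::BSONObj() ) ,
                 res );
mongo::BSONObjIterator it( res["values"].embeddedObject() );
while ( it.more() )
    std::cout << it.next() << std::endl;
```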
@@ -687,11 +689,11 @@ namespace mongo {
class FileMD5Cmd : public PublicGridCommand {
public:
- FileMD5Cmd() : PublicGridCommand("filemd5"){}
+ FileMD5Cmd() : PublicGridCommand("filemd5") {}
virtual void help( stringstream &help ) const {
help << " example: { filemd5 : ObjectId(aaaaaaa) , root : \"fs\" }";
}
- bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string fullns = dbName;
fullns += ".";
{
@@ -703,17 +705,17 @@ namespace mongo {
fullns += ".chunks";
DBConfigPtr conf = grid.getDBConfig( dbName , false );
-
- if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ){
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
return passthrough( conf , cmdObj , result );
}
-
+
ChunkManagerPtr cm = conf->getChunkManager( fullns );
massert( 13091 , "how could chunk manager be null!" , cm );
uassert( 13092 , "GridFS chunks collection can only be sharded on files_id", cm->getShardKey().key() == BSON("files_id" << 1));
ChunkPtr chunk = cm->findChunk( BSON("files_id" << cmdObj.firstElement()) );
-
+
ShardConnection conn( chunk->getShard() , fullns );
BSONObj res;
bool ok = conn->runCommand( conf->getName() , cmdObj , res );
@@ -726,57 +728,57 @@ namespace mongo {
class Geo2dFindNearCmd : public PublicGridCommand {
public:
- Geo2dFindNearCmd() : PublicGridCommand( "geoNear" ){}
+ Geo2dFindNearCmd() : PublicGridCommand( "geoNear" ) {}
void help(stringstream& h) const { h << "http://www.mongodb.org/display/DOCS/Geospatial+Indexing#GeospatialIndexing-geoNearCommand"; }
- bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string collection = cmdObj.firstElement().valuestrsafe();
string fullns = dbName + "." + collection;
DBConfigPtr conf = grid.getDBConfig( dbName , false );
-
- if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ){
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
return passthrough( conf , cmdObj , result );
}
-
+
ChunkManagerPtr cm = conf->getChunkManager( fullns );
massert( 13500 , "how could chunk manager be null!" , cm );
BSONObj query = getQuery(cmdObj);
set<Shard> shards;
cm->getShardsForQuery(shards, query);
-
+
int limit = 100;
if (cmdObj["num"].isNumber())
limit = cmdObj["num"].numberInt();
list< shared_ptr<Future::CommandResult> > futures;
BSONArrayBuilder shardArray;
- for ( set<Shard>::const_iterator i=shards.begin(), end=shards.end() ; i != end ; i++ ){
+ for ( set<Shard>::const_iterator i=shards.begin(), end=shards.end() ; i != end ; i++ ) {
futures.push_back( Future::spawnCommand( i->getConnString() , dbName , cmdObj ) );
shardArray.append(i->getName());
}
-
+
multimap<double, BSONObj> results; // TODO: maybe use merge-sort instead
string nearStr;
double time = 0;
double btreelocs = 0;
double nscanned = 0;
double objectsLoaded = 0;
- for ( list< shared_ptr<Future::CommandResult> >::iterator i=futures.begin(); i!=futures.end(); i++ ){
+ for ( list< shared_ptr<Future::CommandResult> >::iterator i=futures.begin(); i!=futures.end(); i++ ) {
shared_ptr<Future::CommandResult> res = *i;
- if ( ! res->join() ){
+ if ( ! res->join() ) {
errmsg = res->result()["errmsg"].String();
return false;
}
nearStr = res->result()["near"].String();
- time += res->result()["stats"]["time"].Number();
- btreelocs += res->result()["stats"]["btreelocs"].Number();
- nscanned += res->result()["stats"]["nscanned"].Number();
- objectsLoaded += res->result()["stats"]["objectsLoaded"].Number();
+ time += res->result()["stats"]["time"].Number();
+ btreelocs += res->result()["stats"]["btreelocs"].Number();
+ nscanned += res->result()["stats"]["nscanned"].Number();
+ objectsLoaded += res->result()["stats"]["objectsLoaded"].Number();
- BSONForEach(obj, res->result()["results"].embeddedObject()){
+ BSONForEach(obj, res->result()["results"].embeddedObject()) {
results.insert(make_pair(obj["dis"].Number(), obj.embeddedObject().getOwned()));
}
@@ -785,13 +787,13 @@ namespace mongo {
result.append("ns" , fullns);
result.append("near", nearStr);
-
+
int outCount = 0;
double totalDistance = 0;
double maxDistance = 0;
{
BSONArrayBuilder sub (result.subarrayStart("results"));
- for (multimap<double, BSONObj>::const_iterator it(results.begin()), end(results.end()); it!= end && outCount < limit; ++it, ++outCount){
+ for (multimap<double, BSONObj>::const_iterator it(results.begin()), end(results.end()); it!= end && outCount < limit; ++it, ++outCount) {
totalDistance += it->first;
maxDistance = it->first; // guaranteed to be highest so far
@@ -818,32 +820,32 @@ namespace mongo {
class MRCmd : public PublicGridCommand {
public:
- MRCmd() : PublicGridCommand( "mapreduce" ){}
-
- string getTmpName( const string& coll ){
+ MRCmd() : PublicGridCommand( "mapreduce" ) {}
+
+ string getTmpName( const string& coll ) {
static int inc = 1;
stringstream ss;
ss << "tmp.mrs." << coll << "_" << time(0) << "_" << inc++;
return ss.str();
}
- BSONObj fixForShards( const BSONObj& orig , const string& output ){
+ BSONObj fixForShards( const BSONObj& orig , const string& output ) {
BSONObjBuilder b;
BSONObjIterator i( orig );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
string fn = e.fieldName();
- if ( fn == "map" ||
- fn == "mapreduce" ||
- fn == "reduce" ||
- fn == "query" ||
- fn == "sort" ||
- fn == "scope" ||
- fn == "verbose" ){
+ if ( fn == "map" ||
+ fn == "mapreduce" ||
+ fn == "reduce" ||
+ fn == "query" ||
+ fn == "sort" ||
+ fn == "scope" ||
+ fn == "verbose" ) {
b.append( e );
}
else if ( fn == "out" ||
- fn == "finalize" ){
+ fn == "finalize" ) {
// we don't want to copy these
}
else {
@@ -853,8 +855,8 @@ namespace mongo {
b.append( "out" , output );
return b.obj();
}
-
- bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+
+ bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
Timer t;
string collection = cmdObj.firstElement().valuestrsafe();
@@ -862,47 +864,47 @@ namespace mongo {
DBConfigPtr conf = grid.getDBConfig( dbName , false );
- if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ){
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
return passthrough( conf , cmdObj , result );
}
-
+
BSONObjBuilder timingBuilder;
ChunkManagerPtr cm = conf->getChunkManager( fullns );
BSONObj q;
- if ( cmdObj["query"].type() == Object ){
+ if ( cmdObj["query"].type() == Object ) {
q = cmdObj["query"].embeddedObjectUserCheck();
}
-
+
set<Shard> shards;
cm->getShardsForQuery( shards , q );
-
+
const string shardedOutputCollection = getTmpName( collection );
-
+
BSONObj shardedCommand = fixForShards( cmdObj , shardedOutputCollection );
-
+
BSONObjBuilder finalCmd;
finalCmd.append( "mapreduce.shardedfinish" , cmdObj );
finalCmd.append( "shardedOutputCollection" , shardedOutputCollection );
-
+
list< shared_ptr<Future::CommandResult> > futures;
-
- for ( set<Shard>::iterator i=shards.begin(), end=shards.end() ; i != end ; i++ ){
+
+ for ( set<Shard>::iterator i=shards.begin(), end=shards.end() ; i != end ; i++ ) {
futures.push_back( Future::spawnCommand( i->getConnString() , dbName , shardedCommand ) );
}
-
+
BSONObjBuilder shardresults;
- for ( list< shared_ptr<Future::CommandResult> >::iterator i=futures.begin(); i!=futures.end(); i++ ){
+ for ( list< shared_ptr<Future::CommandResult> >::iterator i=futures.begin(); i!=futures.end(); i++ ) {
shared_ptr<Future::CommandResult> res = *i;
- if ( ! res->join() ){
+ if ( ! res->join() ) {
errmsg = "mongod mr failed: ";
errmsg += res->result().toString();
return 0;
}
shardresults.append( res->getServer() , res->result() );
}
-
+
finalCmd.append( "shards" , shardresults.obj() );
timingBuilder.append( "shards" , t.millis() );
@@ -912,7 +914,7 @@ namespace mongo {
bool ok = conn->runCommand( dbName , finalCmd.obj() , finalResult );
conn.done();
- if ( ! ok ){
+ if ( ! ok ) {
errmsg = "final reduce failed: ";
errmsg += finalResult.toString();
return 0;
@@ -922,22 +924,22 @@ namespace mongo {
result.appendElements( finalResult );
result.append( "timeMillis" , t.millis() );
result.append( "timing" , timingBuilder.obj() );
-
+
return 1;
}
} mrCmd;
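
So a single mapreduce against mongos fans out to the shards, collects each shard's temporary output collection, and runs one final reduce on the primary shard, as the code above shows. A client-side sketch (collection, functions, and output name are illustrative):

```cpp
// Assumes conn is a mongo::DBClientConnection already connected to a mongos.
mongo::BSONObjBuilder cmd;
cmd.append( "mapreduce" , "events" );
cmd.appendCode( "map" , "function() { emit( this.user , 1 ); }" );
cmd.appendCode( "reduce" , "function( k , vals ) { var n = 0; vals.forEach( function( v ) { n += v; } ); return n; }" );
cmd.append( "out" , "events_per_user" );
mongo::BSONObj res;
conn.runCommand( "test" , cmd.obj() , res );
std::cout << res["timeMillis"] << " " << res["timing"] << std::endl;
```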
-
+
class ApplyOpsCmd : public PublicGridCommand {
public:
- ApplyOpsCmd() : PublicGridCommand( "applyOps" ){}
-
- virtual bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ ApplyOpsCmd() : PublicGridCommand( "applyOps" ) {}
+
+ virtual bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
errmsg = "applyOps not allowed through mongos";
return false;
}
-
+
} applyOpsCmd;
-
+
}
}
diff --git a/s/config.cpp b/s/config.cpp
index 5ff761e2056..36a4304afe3 100644
--- a/s/config.cpp
+++ b/s/config.cpp
@@ -35,7 +35,7 @@ namespace mongo {
int ConfigServer::VERSION = 3;
Shard Shard::EMPTY;
- string ShardNS::shard = "config.shards";
+ string ShardNS::shard = "config.shards";
string ShardNS::database = "config.databases";
string ShardNS::collection = "config.collections";
string ShardNS::chunk = "config.chunks";
@@ -50,7 +50,7 @@ namespace mongo {
/* --- DBConfig --- */
- DBConfig::CollectionInfo::CollectionInfo( const BSONObj& in ){
+ DBConfig::CollectionInfo::CollectionInfo( const BSONObj& in ) {
_dirty = false;
_dropped = in["dropped"].trueValue();
if ( in["key"].isABSONObj() )
@@ -58,28 +58,28 @@ namespace mongo {
}
- void DBConfig::CollectionInfo::shard( const string& ns , const ShardKeyPattern& key , bool unique ){
+ void DBConfig::CollectionInfo::shard( const string& ns , const ShardKeyPattern& key , bool unique ) {
_cm.reset( new ChunkManager( ns , key , unique ) );
_dirty = true;
_dropped = false;
}
- void DBConfig::CollectionInfo::unshard(){
+ void DBConfig::CollectionInfo::unshard() {
_cm.reset();
_dropped = true;
_dirty = true;
}
-
- void DBConfig::CollectionInfo::save( const string& ns , DBClientBase* conn ){
+
+ void DBConfig::CollectionInfo::save( const string& ns , DBClientBase* conn ) {
BSONObj key = BSON( "_id" << ns );
-
+
BSONObjBuilder val;
val.append( "_id" , ns );
val.appendDate( "lastmod" , time(0) );
val.appendBool( "dropped" , _dropped );
if ( _cm )
_cm->getInfo( val );
-
+
conn->update( ShardNS::collection , key , val.obj() , true );
string err = conn->getLastError();
uassert( 13473 , (string)"failed to save collection (" + ns + "): " + err , err.size() == 0 );
@@ -87,14 +87,14 @@ namespace mongo {
_dirty = false;
}
- bool DBConfig::isSharded( const string& ns ){
+ bool DBConfig::isSharded( const string& ns ) {
if ( ! _shardingEnabled )
return false;
scoped_lock lk( _lock );
return _isSharded( ns );
}
- bool DBConfig::_isSharded( const string& ns ){
+ bool DBConfig::_isSharded( const string& ns ) {
if ( ! _shardingEnabled )
return false;
Collections::iterator i = _collections.find( ns );
@@ -104,25 +104,25 @@ namespace mongo {
}
- const Shard& DBConfig::getShard( const string& ns ){
+ const Shard& DBConfig::getShard( const string& ns ) {
if ( isSharded( ns ) )
return Shard::EMPTY;
-
+
uassert( 10178 , "no primary!" , _primary.ok() );
return _primary;
}
-
- void DBConfig::enableSharding(){
+
+ void DBConfig::enableSharding() {
if ( _shardingEnabled )
return;
scoped_lock lk( _lock );
- _shardingEnabled = true;
+ _shardingEnabled = true;
_save();
}
-
- ChunkManagerPtr DBConfig::shardCollection( const string& ns , ShardKeyPattern fieldsAndOrder , bool unique ){
+
+ ChunkManagerPtr DBConfig::shardCollection( const string& ns , ShardKeyPattern fieldsAndOrder , bool unique ) {
uassert( 8042 , "db doesn't have sharding enabled" , _shardingEnabled );
-
+
scoped_lock lk( _lock );
CollectionInfo& ci = _collections[ns];
@@ -130,19 +130,19 @@ namespace mongo {
log() << "enable sharding on: " << ns << " with shard key: " << fieldsAndOrder << endl;
- // From this point on, 'ns' is going to be treated as a sharded collection. We assume this is the first
+ // From this point on, 'ns' is going to be treated as a sharded collection. We assume this is the first
// time it is seen by the sharded system and thus create the first chunk for the collection. All the remaining
// chunks will be created as a by-product of splitting.
- ci.shard( ns , fieldsAndOrder , unique );
+ ci.shard( ns , fieldsAndOrder , unique );
ChunkManagerPtr cm = ci.getCM();
uassert( 13449 , "collections already sharded" , (cm->numChunks() == 0) );
cm->createFirstChunk( getPrimary() );
- _save();
-
+ _save();
+
try {
cm->maybeChunkCollection();
}
- catch ( UserException& e ){
+ catch ( UserException& e ) {
// failure to chunk is not critical enough to abort the command (and undo the _save()'d configDB state)
log() << "couldn't chunk recently created collection: " << ns << " " << e << endl;
}
@@ -150,28 +150,28 @@ namespace mongo {
return cm;
}
- bool DBConfig::removeSharding( const string& ns ){
- if ( ! _shardingEnabled ){
+ bool DBConfig::removeSharding( const string& ns ) {
+ if ( ! _shardingEnabled ) {
return false;
}
-
+
scoped_lock lk( _lock );
-
+
Collections::iterator i = _collections.find( ns );
if ( i == _collections.end() )
return false;
-
+
CollectionInfo& ci = _collections[ns];
if ( ! ci.isSharded() )
return false;
-
+
ci.unshard();
_save();
return true;
}
-
- ChunkManagerPtr DBConfig::getChunkManager( const string& ns , bool shouldReload ){
+
+ ChunkManagerPtr DBConfig::getChunkManager( const string& ns , bool shouldReload ) {
scoped_lock lk( _lock );
if ( shouldReload )
@@ -182,19 +182,19 @@ namespace mongo {
return ci.getCM();
}
- void DBConfig::setPrimary( string s ){
+ void DBConfig::setPrimary( string s ) {
scoped_lock lk( _lock );
_primary.reset( s );
_save();
}
-
- void DBConfig::serialize(BSONObjBuilder& to){
+
+ void DBConfig::serialize(BSONObjBuilder& to) {
to.append("_id", _name);
to.appendBool("partitioned", _shardingEnabled );
to.append("primary", _primary.getName() );
}
-
- void DBConfig::unserialize(const BSONObj& from){
+
+ void DBConfig::unserialize(const BSONObj& from) {
log(1) << "DBConfig unserialize: " << _name << " " << from << endl;
assert( _name == from["_id"].String() );
@@ -203,59 +203,59 @@ namespace mongo {
// In the 1.5.x series, we used to have collection metadata nested in the database entry. The 1.6.x series
// had migration code that ported that info to where it belongs now: the 'collections' collection. We now
- // just assert that we're not migrating from a 1.5.x directly into a 1.7.x without first converting.
+ // just assert that we're not migrating from a 1.5.x directly into a 1.7.x without first converting.
BSONObj sharded = from.getObjectField( "sharded" );
if ( ! sharded.isEmpty() )
uasserted( 13509 , "can't migrate from 1.5.x release to the current one; need to upgrade to 1.6.x first");
}
- bool DBConfig::load(){
+ bool DBConfig::load() {
scoped_lock lk( _lock );
return _load();
}
- bool DBConfig::_load(){
+ bool DBConfig::_load() {
ScopedDbConnection conn( configServer.modelServer() );
-
+
BSONObj o = conn->findOne( ShardNS::database , BSON( "_id" << _name ) );
- if ( o.isEmpty() ){
+ if ( o.isEmpty() ) {
conn.done();
return false;
}
-
+
unserialize( o );
-
+
BSONObjBuilder b;
b.appendRegex( "_id" , (string)"^" + _name + "." );
auto_ptr<DBClientCursor> cursor = conn->query( ShardNS::collection ,b.obj() );
assert( cursor.get() );
- while ( cursor->more() ){
+ while ( cursor->more() ) {
BSONObj o = cursor->next();
_collections[o["_id"].String()] = CollectionInfo( o );
}
-
- conn.done();
+
+ conn.done();
return true;
}
- void DBConfig::_save(){
+ void DBConfig::_save() {
ScopedDbConnection conn( configServer.modelServer() );
-
+
BSONObj n;
{
BSONObjBuilder b;
serialize(b);
n = b.obj();
}
-
+
conn->update( ShardNS::database , BSON( "_id" << _name ) , n , true );
string err = conn->getLastError();
uassert( 13396 , (string)"DBConfig save failed: " + err , err.size() == 0 );
-
- for ( Collections::iterator i=_collections.begin(); i!=_collections.end(); ++i ){
+
+ for ( Collections::iterator i=_collections.begin(); i!=_collections.end(); ++i ) {
if ( ! i->second.isDirty() )
continue;
i->second.save( i->first , conn.get() );
@@ -263,18 +263,18 @@ namespace mongo {
conn.done();
}
-
- bool DBConfig::reload(){
+
+ bool DBConfig::reload() {
scoped_lock lk( _lock );
return _reload();
}
-
- bool DBConfig::_reload(){
+
+ bool DBConfig::_reload() {
// TODO: I don't think this is 100% correct
return _load();
}
-
- bool DBConfig::dropDatabase( string& errmsg ){
+
+ bool DBConfig::dropDatabase( string& errmsg ) {
/**
* 1) make sure everything is up
* 2) update config server
@@ -285,20 +285,20 @@ namespace mongo {
log() << "DBConfig::dropDatabase: " << _name << endl;
configServer.logChange( "dropDatabase.start" , _name , BSONObj() );
-
+
// 1
- if ( ! configServer.allUp( errmsg ) ){
+ if ( ! configServer.allUp( errmsg ) ) {
log(1) << "\t DBConfig::dropDatabase not all up" << endl;
return 0;
}
-
+
// 2
grid.removeDB( _name );
{
ScopedDbConnection conn( configServer.modelServer() );
conn->remove( ShardNS::database , BSON( "_id" << _name ) );
errmsg = conn->getLastError();
- if ( ! errmsg.empty() ){
+ if ( ! errmsg.empty() ) {
log() << "could not drop '" << _name << "': " << errmsg << endl;
conn.done();
return false;
@@ -307,16 +307,16 @@ namespace mongo {
conn.done();
}
- if ( ! configServer.allUp( errmsg ) ){
+ if ( ! configServer.allUp( errmsg ) ) {
log() << "error removing from config server even after checking!" << endl;
return 0;
}
log(1) << "\t removed entry from config server for: " << _name << endl;
-
+
set<Shard> allServers;
// 3
- while ( true ){
+ while ( true ) {
int num = 0;
if ( ! _dropShardedCollections( num , allServers , errmsg ) )
return 0;
@@ -324,41 +324,41 @@ namespace mongo {
if ( num == 0 )
break;
}
-
+
// 4
{
ScopedDbConnection conn( _primary );
BSONObj res;
- if ( ! conn->dropDatabase( _name , &res ) ){
+ if ( ! conn->dropDatabase( _name , &res ) ) {
errmsg = res.toString();
return 0;
}
conn.done();
}
-
+
// 5
- for ( set<Shard>::iterator i=allServers.begin(); i!=allServers.end(); i++ ){
+ for ( set<Shard>::iterator i=allServers.begin(); i!=allServers.end(); i++ ) {
ScopedDbConnection conn( *i );
BSONObj res;
- if ( ! conn->dropDatabase( _name , &res ) ){
+ if ( ! conn->dropDatabase( _name , &res ) ) {
errmsg = res.toString();
return 0;
}
- conn.done();
+ conn.done();
}
-
+
log(1) << "\t dropped primary db for: " << _name << endl;
configServer.logChange( "dropDatabase" , _name , BSONObj() );
return true;
}
- bool DBConfig::_dropShardedCollections( int& num, set<Shard>& allServers , string& errmsg ){
+ bool DBConfig::_dropShardedCollections( int& num, set<Shard>& allServers , string& errmsg ) {
num = 0;
set<string> seen;
- while ( true ){
+ while ( true ) {
Collections::iterator i = _collections.begin();
- for ( ; i != _collections.end(); ++i ){
+ for ( ; i != _collections.end(); ++i ) {
if ( i->second.isSharded() )
break;
}
@@ -366,7 +366,7 @@ namespace mongo {
if ( i == _collections.end() )
break;
- if ( seen.count( i->first ) ){
+ if ( seen.count( i->first ) ) {
errmsg = "seen a collection twice!";
return false;
}
@@ -386,10 +386,10 @@ namespace mongo {
return true;
}
- void DBConfig::getAllShards(set<Shard>& shards) const{
+ void DBConfig::getAllShards(set<Shard>& shards) const {
shards.insert(getPrimary());
- for (Collections::const_iterator it(_collections.begin()), end(_collections.end()); it != end; ++it){
- if (it->second.isSharded()){
+ for (Collections::const_iterator it(_collections.begin()), end(_collections.end()); it != end; ++it) {
+ if (it->second.isSharded()) {
it->second.getCM()->getAllShards(shards);
} // TODO: handle collections on non-primary shard
}
@@ -397,20 +397,20 @@ namespace mongo {
/* --- ConfigServer ---- */
- ConfigServer::ConfigServer() : DBConfig( "config" ){
+ ConfigServer::ConfigServer() : DBConfig( "config" ) {
_shardingEnabled = false;
}
-
+
ConfigServer::~ConfigServer() {
}
- bool ConfigServer::init( string s ){
+ bool ConfigServer::init( string s ) {
vector<string> configdbs;
splitStringDelim( s, &configdbs, ',' );
return init( configdbs );
}
- bool ConfigServer::init( vector<string> configHosts ){
+ bool ConfigServer::init( vector<string> configHosts ) {
uassert( 10187 , "need configdbs" , configHosts.size() );
string hn = getHostName();
@@ -418,19 +418,19 @@ namespace mongo {
sleepsecs(5);
dbexit( EXIT_BADOPTIONS );
}
-
+
set<string> hosts;
- for ( size_t i=0; i<configHosts.size(); i++ ){
+ for ( size_t i=0; i<configHosts.size(); i++ ) {
string host = configHosts[i];
hosts.insert( getHost( host , false ) );
configHosts[i] = getHost( host , true );
}
-
- for ( set<string>::iterator i=hosts.begin(); i!=hosts.end(); i++ ){
+
+ for ( set<string>::iterator i=hosts.begin(); i!=hosts.end(); i++ ) {
string host = *i;
bool ok = false;
- for ( int x=10; x>0; x-- ){
- if ( ! hostbyname( host.c_str() ).empty() ){
+ for ( int x=10; x>0; x-- ) {
+ if ( ! hostbyname( host.c_str() ).empty() ) {
ok = true;
break;
}
@@ -442,7 +442,7 @@ namespace mongo {
}
_config = configHosts;
-
+
string fullString;
joinStringDelim( configHosts, &fullString, ',' );
_primary.setAddress( fullString , true );
@@ -454,14 +454,14 @@ namespace mongo {
bool ConfigServer::checkConfigServersConsistent( string& errmsg , int tries ) const {
if ( _config.size() == 1 )
return true;
-
+
if ( tries <= 0 )
return false;
-
+
unsigned firstGood = 0;
int up = 0;
vector<BSONObj> res;
- for ( unsigned i=0; i<_config.size(); i++ ){
+ for ( unsigned i=0; i<_config.size(); i++ ) {
BSONObj x;
try {
ScopedDbConnection conn( _config[i] );
@@ -475,125 +475,125 @@ namespace mongo {
}
conn.done();
}
- catch ( std::exception& ){
+ catch ( std::exception& ) {
log(LL_WARNING) << " couldn't check on config server:" << _config[i] << " ok for now" << endl;
}
res.push_back(x);
}
- if ( up == 0 ){
+ if ( up == 0 ) {
errmsg = "no config servers reachable";
return false;
}
- if ( up == 1 ){
+ if ( up == 1 ) {
log( LL_WARNING ) << "only 1 config server reachable, continuing" << endl;
return true;
}
BSONObj base = res[firstGood];
- for ( unsigned i=firstGood+1; i<res.size(); i++ ){
+ for ( unsigned i=firstGood+1; i<res.size(); i++ ) {
if ( res[i].isEmpty() )
continue;
string c1 = base.getFieldDotted( "collections.chunks" );
string c2 = res[i].getFieldDotted( "collections.chunks" );
-
+
string d1 = base.getFieldDotted( "collections.databases" );
string d2 = res[i].getFieldDotted( "collections.databases" );
if ( c1 == c2 && d1 == d2 )
continue;
-
+
stringstream ss;
ss << "config servers " << _config[firstGood] << " and " << _config[i] << " differ";
log( LL_WARNING ) << ss.str();
- if ( tries <= 1 ){
+ if ( tries <= 1 ) {
ss << "\n" << c1 << "\t" << c2 << "\n" << d1 << "\t" << d2;
errmsg = ss.str();
return false;
}
-
+
return checkConfigServersConsistent( errmsg , tries - 1 );
}
-
+
return true;
}
- bool ConfigServer::ok( bool checkConsistency ){
+ bool ConfigServer::ok( bool checkConsistency ) {
if ( ! _primary.ok() )
return false;
-
- if ( checkConsistency ){
+
+ if ( checkConsistency ) {
string errmsg;
- if ( ! checkConfigServersConsistent( errmsg ) ){
+ if ( ! checkConfigServersConsistent( errmsg ) ) {
log( LL_ERROR ) << "config servers not in sync! " << errmsg << endl;
return false;
}
}
-
+
return true;
}
- bool ConfigServer::allUp(){
+ bool ConfigServer::allUp() {
string errmsg;
return allUp( errmsg );
}
-
- bool ConfigServer::allUp( string& errmsg ){
+
+ bool ConfigServer::allUp( string& errmsg ) {
try {
ScopedDbConnection conn( _primary );
conn->getLastError();
conn.done();
return true;
}
- catch ( DBException& ){
+ catch ( DBException& ) {
log() << "ConfigServer::allUp : " << _primary.toString() << " seems down!" << endl;
errmsg = _primary.toString() + " seems down";
return false;
}
-
+
}
-
- int ConfigServer::dbConfigVersion(){
+
+ int ConfigServer::dbConfigVersion() {
ScopedDbConnection conn( _primary );
int version = dbConfigVersion( conn.conn() );
conn.done();
return version;
}
-
- int ConfigServer::dbConfigVersion( DBClientBase& conn ){
+
+ int ConfigServer::dbConfigVersion( DBClientBase& conn ) {
auto_ptr<DBClientCursor> c = conn.query( "config.version" , BSONObj() );
int version = 0;
- if ( c->more() ){
+ if ( c->more() ) {
BSONObj o = c->next();
version = o["version"].numberInt();
uassert( 10189 , "should only have 1 thing in config.version" , ! c->more() );
}
else {
- if ( conn.count( ShardNS::shard ) || conn.count( ShardNS::database ) ){
+ if ( conn.count( ShardNS::shard ) || conn.count( ShardNS::database ) ) {
version = 1;
}
}
-
+
return version;
}
-
- void ConfigServer::reloadSettings(){
+
+ void ConfigServer::reloadSettings() {
set<string> got;
-
+
ScopedDbConnection conn( _primary );
auto_ptr<DBClientCursor> c = conn->query( ShardNS::settings , BSONObj() );
assert( c.get() );
- while ( c->more() ){
+ while ( c->more() ) {
BSONObj o = c->next();
string name = o["_id"].valuestrsafe();
got.insert( name );
- if ( name == "chunksize" ){
+ if ( name == "chunksize" ) {
log(1) << "MaxChunkSize: " << o["value"] << endl;
Chunk::MaxChunkSize = o["value"].numberInt() * 1024 * 1024;
}
- else if ( name == "balancer" ){
+ else if ( name == "balancer" ) {
// ones we ignore here
}
else {
@@ -601,12 +601,12 @@ namespace mongo {
}
}
- if ( ! got.count( "chunksize" ) ){
+ if ( ! got.count( "chunksize" ) ) {
conn->insert( ShardNS::settings , BSON( "_id" << "chunksize" <<
"value" << (Chunk::MaxChunkSize / ( 1024 * 1024 ) ) ) );
}
-
-
+
+
// indexes
try {
conn->ensureIndex( ShardNS::chunk , BSON( "ns" << 1 << "min" << 1 ) , true );
@@ -614,31 +614,31 @@ namespace mongo {
conn->ensureIndex( ShardNS::chunk , BSON( "ns" << 1 << "lastmod" << 1 ) , true );
conn->ensureIndex( ShardNS::shard , BSON( "host" << 1 ) , true );
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
log( LL_WARNING ) << "couldn't create indexes on config db: " << e.what() << endl;
}
conn.done();
}
- string ConfigServer::getHost( string name , bool withPort ){
- if ( name.find( ":" ) != string::npos ){
+ string ConfigServer::getHost( string name , bool withPort ) {
+ if ( name.find( ":" ) != string::npos ) {
if ( withPort )
return name;
return name.substr( 0 , name.find( ":" ) );
}
- if ( withPort ){
+ if ( withPort ) {
stringstream ss;
ss << name << ":" << CmdLine::ConfigServerPort;
return ss.str();
}
-
+
return name;
}
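
getHost() above does one small job: strip or supply the port depending on withPort, falling back to CmdLine::ConfigServerPort when the name carries none. The same normalization as a freestanding function (sketch; the default port is passed in rather than assumed):

    #include <sstream>
    #include <string>

    // Sketch: strip or supply the port depending on 'withPort'; the caller
    // passes the default (CmdLine::ConfigServerPort in the tree).
    std::string normalizeHost( const std::string& name , bool withPort , int defaultPort ) {
        std::string::size_type colon = name.find( ':' );
        if ( colon != std::string::npos )
            return withPort ? name : name.substr( 0 , colon );
        if ( ! withPort )
            return name;
        std::ostringstream ss;
        ss << name << ":" << defaultPort;
        return ss.str();
    }
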
/* must never throw */
- void ConfigServer::logChange( const string& what , const string& ns , const BSONObj& detail ){
+ void ConfigServer::logChange( const string& what , const string& ns , const BSONObj& detail ) {
string changeID;
try {
@@ -651,25 +651,25 @@ namespace mongo {
// send a copy of the message to the log in case it doesn't manage to reach config.changelog
Client& c = cc();
BSONObj msg = BSON( "_id" << changeID << "server" << getHostNameCached() << "clientAddr" << c.clientAddress(true)
- << "time" << DATENOW << "what" << what << "ns" << ns << "details" << detail );
+ << "time" << DATENOW << "what" << what << "ns" << ns << "details" << detail );
log() << "about to log metadata event: " << msg << endl;
assert( _primary.ok() );
ScopedDbConnection conn( _primary );
-
+
static bool createdCapped = false;
- if ( ! createdCapped ){
+ if ( ! createdCapped ) {
try {
conn->createCollection( "config.changelog" , 1024 * 1024 * 10 , true );
}
- catch ( UserException& e ){
+ catch ( UserException& e ) {
log(1) << "couldn't create changelog (like race condition): " << e << endl;
// don't care
}
createdCapped = true;
}
-
+
conn->insert( "config.changelog" , msg );
conn.done();
@@ -682,18 +682,18 @@ namespace mongo {
}
}
- void ConfigServer::replicaSetChange( const ReplicaSetMonitor * monitor ){
+ void ConfigServer::replicaSetChange( const ReplicaSetMonitor * monitor ) {
try {
ScopedDbConnection conn( configServer.getConnectionString() );
conn->update( ShardNS::shard , BSON( "_id" << monitor->getName() ) , BSON( "$set" << BSON( "host" << monitor->getServerAddress() ) ) );
conn.done();
}
- catch ( DBException & ){
+ catch ( DBException & ) {
error() << "RSChangeWatcher: could not update config db for set: " << monitor->getName() << " to: " << monitor->getServerAddress() << endl;
}
}
- DBConfigPtr configServerPtr (new ConfigServer());
- ConfigServer& configServer = dynamic_cast<ConfigServer&>(*configServerPtr);
+ DBConfigPtr configServerPtr (new ConfigServer());
+ ConfigServer& configServer = dynamic_cast<ConfigServer&>(*configServerPtr);
-}
+}
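
logChange() is written to never throw at the caller and to lazily create the capped config.changelog collection (10MB) once per process, treating a racing create by another mongos as harmless. A sketch of that create-once idiom using the driver calls seen in the patch (error handling trimmed):

    // Sketch of the create-once idiom behind config.changelog: a racing
    // create from another mongos just throws and is ignored. Assumes the
    // driver types used in the patch (DBClientBase, BSONObj, UserException).
    void appendChangelog( DBClientBase& conn , const BSONObj& msg ) {
        static bool createdCapped = false;     // benign race, see catch below
        if ( ! createdCapped ) {
            try {
                conn.createCollection( "config.changelog" , 1024 * 1024 * 10 , true /*capped*/ );
            }
            catch ( UserException& ) {
                // someone else created it first; fine
            }
            createdCapped = true;
        }
        conn.insert( "config.changelog" , msg );
    }
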
diff --git a/s/config.h b/s/config.h
index f1e8c62e0ae..063683575ae 100644
--- a/s/config.h
+++ b/s/config.h
@@ -35,7 +35,7 @@ namespace mongo {
struct ShardNS {
static string shard;
-
+
static string database;
static string collection;
static string chunk;
@@ -51,7 +51,7 @@ namespace mongo {
static BSONField<bool> draining; // is it draining chunks?
static BSONField<long long> maxSize; // max allowed disk space usage
};
-
+
class ConfigServer;
class DBConfig;
@@ -66,17 +66,17 @@ namespace mongo {
class DBConfig {
struct CollectionInfo {
- CollectionInfo(){
+ CollectionInfo() {
_dirty = false;
_dropped = false;
}
-
+
CollectionInfo( const BSONObj& in );
-
+
bool isSharded() const {
return _cm.get();
}
-
+
ChunkManagerPtr getCM() const {
return _cm;
}
@@ -86,41 +86,41 @@ namespace mongo {
bool isDirty() const { return _dirty; }
bool wasDropped() const { return _dropped; }
-
+
void save( const string& ns , DBClientBase* conn );
-
+
private:
ChunkManagerPtr _cm;
bool _dirty;
bool _dropped;
};
-
+
typedef map<string,CollectionInfo> Collections;
-
+
public:
- DBConfig( string name )
- : _name( name ) ,
- _primary("config","") ,
- _shardingEnabled(false),
- _lock("DBConfig"){
+ DBConfig( string name )
+ : _name( name ) ,
+ _primary("config","") ,
+ _shardingEnabled(false),
+ _lock("DBConfig") {
assert( name.size() );
}
- virtual ~DBConfig(){}
-
- string getName(){ return _name; };
+ virtual ~DBConfig() {}
+
+ string getName() { return _name; };
/**
* @return if anything in this db is partitioned or not
*/
- bool isShardingEnabled(){
+ bool isShardingEnabled() {
return _shardingEnabled;
}
-
+
void enableSharding();
ChunkManagerPtr shardCollection( const string& ns , ShardKeyPattern fieldsAndOrder , bool unique );
-
+
/**
@return true if there was sharding info to remove
*/
@@ -130,25 +130,25 @@ namespace mongo {
* @return whether or not the 'ns' collection is partitioned
*/
bool isSharded( const string& ns );
-
+
ChunkManagerPtr getChunkManager( const string& ns , bool reload = false );
-
+
/**
* @return the correct shard for the ns
* if the namespace is sharded, will return NULL
*/
const Shard& getShard( const string& ns );
-
+
const Shard& getPrimary() const {
uassert( 8041 , (string)"no primary shard configured for db: " + _name , _primary.ok() );
return _primary;
}
-
+
void setPrimary( string s );
bool load();
bool reload();
-
+
bool dropDatabase( string& errmsg );
// model stuff
@@ -162,7 +162,7 @@ namespace mongo {
protected:
- /**
+ /**
lockless
*/
bool _isSharded( const string& ns );
@@ -176,7 +176,7 @@ namespace mongo {
string _name; // e.g. "alleyinsider"
Shard _primary; // e.g. localhost , mongo.foo.com:9999
bool _shardingEnabled;
-
+
//map<string,CollectionInfo> _sharded; // { "alleyinsider.blog.posts" : { ts : 1 } , ... ] - all ns that are sharded
//map<string,ChunkManagerPtr> _shards; // this will only have entries for things that have been looked at
@@ -190,34 +190,34 @@ namespace mongo {
ConfigServer();
~ConfigServer();
-
+
bool ok( bool checkConsistency = false );
-
- virtual string modelServer(){
+
+ virtual string modelServer() {
uassert( 10190 , "ConfigServer not setup" , _primary.ok() );
return _primary.getConnString();
}
-
+
/**
- call at startup, this will initiate connection to the grid db
+ call at startup, this will initiate connection to the grid db
*/
bool init( vector<string> configHosts );
-
+
bool init( string s );
bool allUp();
bool allUp( string& errmsg );
-
+
int dbConfigVersion();
int dbConfigVersion( DBClientBase& conn );
-
+
void reloadSettings();
/**
* @return 0 = ok, otherwise error #
*/
int checkConfigVersion( bool upgrade );
-
+
/**
* Create a metadata change log entry in the config.changelog collection.
*
@@ -236,7 +236,7 @@ namespace mongo {
void replicaSetChange( const ReplicaSetMonitor * monitor );
static int VERSION;
-
+
/**
* check to see if all config servers have the same state
diff --git a/s/config_migrate.cpp b/s/config_migrate.cpp
index 1a4214416f2..57890a01531 100644
--- a/s/config_migrate.cpp
+++ b/s/config_migrate.cpp
@@ -30,12 +30,12 @@
namespace mongo {
- int ConfigServer::checkConfigVersion( bool upgrade ){
+ int ConfigServer::checkConfigVersion( bool upgrade ) {
int cur = dbConfigVersion();
if ( cur == VERSION )
return 0;
-
- if ( cur == 0 ){
+
+ if ( cur == 0 ) {
ScopedDbConnection conn( _primary );
conn->insert( "config.version" , BSON( "_id" << 1 << "version" << VERSION ) );
pool.flush();
@@ -43,20 +43,20 @@ namespace mongo {
conn.done();
return 0;
}
-
- if ( cur == 2 ){
+
+ if ( cur == 2 ) {
// need to upgrade
assert( VERSION == 3 );
- if ( ! upgrade ){
+ if ( ! upgrade ) {
log() << "newer version of mongo meta data\n"
<< "need to --upgrade after shutting all mongos down"
<< endl;
return -9;
}
-
+
ScopedDbConnection conn( _primary );
-
+
// do a backup
string backupName;
{
@@ -67,20 +67,20 @@ namespace mongo {
log() << "backing up config to: " << backupName << endl;
conn->copyDatabase( "config" , backupName );
- map<string,string> hostToShard;
+ map<string,string> hostToShard;
set<string> shards;
// shards
{
unsigned n = 0;
auto_ptr<DBClientCursor> c = conn->query( ShardNS::shard , BSONObj() );
- while ( c->more() ){
+ while ( c->more() ) {
BSONObj o = c->next();
string host = o["host"].String();
string name = "";
-
+
BSONElement id = o["_id"];
- if ( id.type() == String ){
+ if ( id.type() == String ) {
name = id.String();
}
else {
@@ -88,18 +88,18 @@ namespace mongo {
ss << "shard" << hostToShard.size();
name = ss.str();
}
-
+
hostToShard[host] = name;
shards.insert( name );
n++;
}
-
+
assert( n == hostToShard.size() );
assert( n == shards.size() );
-
+
conn->remove( ShardNS::shard , BSONObj() );
-
- for ( map<string,string>::iterator i=hostToShard.begin(); i != hostToShard.end(); i++ ){
+
+ for ( map<string,string>::iterator i=hostToShard.begin(); i != hostToShard.end(); i++ ) {
conn->insert( ShardNS::shard , BSON( "_id" << i->second << "host" << i->first ) );
}
}
@@ -109,27 +109,27 @@ namespace mongo {
auto_ptr<DBClientCursor> c = conn->query( ShardNS::database , BSONObj() );
map<string,BSONObj> newDBs;
unsigned n = 0;
- while ( c->more() ){
+ while ( c->more() ) {
BSONObj old = c->next();
n++;
-
- if ( old["name"].eoo() ){
+
+ if ( old["name"].eoo() ) {
// already done
newDBs[old["_id"].String()] = old;
continue;
}
-
+
BSONObjBuilder b(old.objsize());
b.appendAs( old["name"] , "_id" );
-
+
BSONObjIterator i(old);
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
if ( strcmp( "_id" , e.fieldName() ) == 0 ||
- strcmp( "name" , e.fieldName() ) == 0 ){
+ strcmp( "name" , e.fieldName() ) == 0 ) {
continue;
}
-
+
b.append( e );
}
@@ -139,45 +139,45 @@ namespace mongo {
}
assert( n == newDBs.size() );
-
+
conn->remove( ShardNS::database , BSONObj() );
-
- for ( map<string,BSONObj>::iterator i=newDBs.begin(); i!=newDBs.end(); i++ ){
+
+ for ( map<string,BSONObj>::iterator i=newDBs.begin(); i!=newDBs.end(); i++ ) {
conn->insert( ShardNS::database , i->second );
}
-
+
}
-
+
// chunks
{
unsigned num = 0;
map<string,BSONObj> chunks;
auto_ptr<DBClientCursor> c = conn->query( ShardNS::chunk , BSONObj() );
- while ( c->more() ){
+ while ( c->more() ) {
BSONObj x = c->next();
BSONObjBuilder b;
string id = Chunk::genID( x["ns"].String() , x["min"].Obj() );
b.append( "_id" , id );
-
+
BSONObjIterator i(x);
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
if ( strcmp( e.fieldName() , "_id" ) == 0 )
continue;
b.append( e );
}
-
+
BSONObj n = b.obj();
log() << x << "\n\t" << n << endl;
chunks[id] = n;
num++;
}
-
+
assert( num == chunks.size() );
-
+
conn->remove( ShardNS::chunk , BSONObj() );
- for ( map<string,BSONObj>::iterator i=chunks.begin(); i!=chunks.end(); i++ ){
+ for ( map<string,BSONObj>::iterator i=chunks.begin(); i!=chunks.end(); i++ ) {
conn->insert( ShardNS::chunk , i->second );
}
@@ -188,7 +188,7 @@ namespace mongo {
pool.flush();
return 1;
}
-
+
log() << "don't know how to upgrade " << cur << " to " << VERSION << endl;
return -8;
}
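
All three upgrade loops follow one recipe: build the new _id (a promoted field or a generated name), copy every other element verbatim, then drop and re-insert the collection's documents. The rekeying step in isolation, assuming a document whose 'name' field becomes the key (mirrors the databases loop above):

    // Sketch: promote old["name"] to _id and copy every other field, as the
    // databases loop above does (BSON builder types from this tree).
    BSONObj rekeyByName( const BSONObj& old ) {
        BSONObjBuilder b( old.objsize() );
        b.appendAs( old["name"] , "_id" );        // the promoted key
        BSONObjIterator i( old );
        while ( i.more() ) {
            BSONElement e = i.next();
            if ( strcmp( "_id" , e.fieldName() ) == 0 ||
                 strcmp( "name" , e.fieldName() ) == 0 )
                continue;                          // drop the old keys
            b.append( e );
        }
        return b.obj();
    }
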
diff --git a/s/cursors.cpp b/s/cursors.cpp
index 0d748500f6a..c28832ca60d 100644
--- a/s/cursors.cpp
+++ b/s/cursors.cpp
@@ -24,87 +24,87 @@
#include "../util/concurrency/task.h"
namespace mongo {
-
+
// -------- ShardedCursor -----------
- ShardedClientCursor::ShardedClientCursor( QueryMessage& q , ClusteredCursor * cursor ){
+ ShardedClientCursor::ShardedClientCursor( QueryMessage& q , ClusteredCursor * cursor ) {
assert( cursor );
_cursor = cursor;
-
+
_skip = q.ntoskip;
_ntoreturn = q.ntoreturn;
-
+
_totalSent = 0;
_done = false;
_id = 0;
-
- if ( q.queryOptions & QueryOption_NoCursorTimeout ){
+
+ if ( q.queryOptions & QueryOption_NoCursorTimeout ) {
_lastAccessMillis = 0;
}
- else
+ else
_lastAccessMillis = Listener::getElapsedTimeMillis();
}
- ShardedClientCursor::~ShardedClientCursor(){
+ ShardedClientCursor::~ShardedClientCursor() {
assert( _cursor );
delete _cursor;
_cursor = 0;
}
- long long ShardedClientCursor::getId(){
- if ( _id <= 0 ){
+ long long ShardedClientCursor::getId() {
+ if ( _id <= 0 ) {
_id = cursorCache.genId();
assert( _id >= 0 );
}
return _id;
}
- void ShardedClientCursor::accessed(){
+ void ShardedClientCursor::accessed() {
if ( _lastAccessMillis > 0 )
_lastAccessMillis = Listener::getElapsedTimeMillis();
}
- long long ShardedClientCursor::idleTime( long long now ){
+ long long ShardedClientCursor::idleTime( long long now ) {
if ( _lastAccessMillis == 0 )
return 0;
return now - _lastAccessMillis;
}
- bool ShardedClientCursor::sendNextBatch( Request& r , int ntoreturn ){
+ bool ShardedClientCursor::sendNextBatch( Request& r , int ntoreturn ) {
uassert( 10191 , "cursor already done" , ! _done );
-
+
int maxSize = 1024 * 1024;
if ( _totalSent > 0 )
maxSize *= 3;
-
+
BufBuilder b(32768);
-
+
int num = 0;
bool sendMore = true;
- while ( _cursor->more() ){
+ while ( _cursor->more() ) {
BSONObj o = _cursor->next();
b.appendBuf( (void*)o.objdata() , o.objsize() );
num++;
-
- if ( b.len() > maxSize ){
+
+ if ( b.len() > maxSize ) {
break;
}
- if ( num == ntoreturn ){
+ if ( num == ntoreturn ) {
// soft limit aka batch size
break;
}
- if ( ntoreturn != 0 && ( -1 * num + _totalSent ) == ntoreturn ){
+ if ( ntoreturn != 0 && ( -1 * num + _totalSent ) == ntoreturn ) {
// hard limit - total to send
sendMore = false;
break;
}
- if ( ntoreturn == 0 && _totalSent == 0 && num > 100 ){
+ if ( ntoreturn == 0 && _totalSent == 0 && num > 100 ) {
// first batch should be max 100 unless batch size specified
break;
}
@@ -112,123 +112,123 @@ namespace mongo {
bool hasMore = sendMore && _cursor->more();
log(6) << "\t hasMore:" << hasMore << " wouldSendMoreIfHad: " << sendMore << " id:" << getId() << " totalSent: " << _totalSent << endl;
-
+
replyToQuery( 0 , r.p() , r.m() , b.buf() , b.len() , num , _totalSent , hasMore ? getId() : 0 );
_totalSent += num;
_done = ! hasMore;
-
+
return hasMore;
}
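
sendNextBatch() stops appending documents for any of four reasons: the reply buffer passed maxSize (1MB, relaxed to 3MB after the first batch), the soft batch-size limit was hit, the hard limit encoded as a negative ntoreturn was reached, or an unsized first batch grew past 100 documents. Those cut-offs distilled into one predicate (sketch, illustrative names):

    // Sketch: the four batch cut-offs. 'maxBytes' is 1MB for a first batch
    // and 3MB afterwards in the code above.
    bool batchFull( int bytes , int maxBytes , int num , int totalSent , int ntoreturn ) {
        if ( bytes > maxBytes )
            return true;                                   // reply buffer cap
        if ( num == ntoreturn )
            return true;                                   // soft limit (batch size)
        if ( ntoreturn != 0 && ( totalSent - num ) == ntoreturn )
            return true;                                   // hard limit (negative ntoreturn)
        if ( ntoreturn == 0 && totalSent == 0 && num > 100 )
            return true;                                   // unsized first batch caps at ~100
        return false;
    }
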
// ---- CursorCache -----
-
+
long long CursorCache::TIMEOUT = 600000;
CursorCache::CursorCache()
- :_mutex( "CursorCache" ), _shardedTotal(0){
+ :_mutex( "CursorCache" ), _shardedTotal(0) {
}
- CursorCache::~CursorCache(){
+ CursorCache::~CursorCache() {
// TODO: delete old cursors?
int logLevel = 1;
if ( _cursors.size() || _refs.size() )
logLevel = 0;
log( logLevel ) << " CursorCache at shutdown - "
- << " sharded: " << _cursors.size()
+ << " sharded: " << _cursors.size()
<< " passthrough: " << _refs.size()
<< endl;
}
- ShardedClientCursorPtr CursorCache::get( long long id ){
+ ShardedClientCursorPtr CursorCache::get( long long id ) {
scoped_lock lk( _mutex );
MapSharded::iterator i = _cursors.find( id );
- if ( i == _cursors.end() ){
+ if ( i == _cursors.end() ) {
OCCASIONALLY log() << "Sharded CursorCache missing cursor id: " << id << endl;
return ShardedClientCursorPtr();
}
i->second->accessed();
return i->second;
}
-
- void CursorCache::store( ShardedClientCursorPtr cursor ){
+
+ void CursorCache::store( ShardedClientCursorPtr cursor ) {
assert( cursor->getId() );
scoped_lock lk( _mutex );
_cursors[cursor->getId()] = cursor;
_shardedTotal++;
}
- void CursorCache::remove( long long id ){
+ void CursorCache::remove( long long id ) {
assert( id );
scoped_lock lk( _mutex );
_cursors.erase( id );
}
- void CursorCache::storeRef( const string& server , long long id ){
+ void CursorCache::storeRef( const string& server , long long id ) {
assert( id );
scoped_lock lk( _mutex );
_refs[id] = server;
}
-
- long long CursorCache::genId(){
- while ( true ){
+
+ long long CursorCache::genId() {
+ while ( true ) {
long long x = security.getNonce();
if ( x == 0 )
continue;
if ( x < 0 )
x *= -1;
-
+
scoped_lock lk( _mutex );
MapSharded::iterator i = _cursors.find( x );
if ( i != _cursors.end() )
continue;
-
+
MapNormal::iterator j = _refs.find( x );
if ( j != _refs.end() )
continue;
-
+
return x;
}
}
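
genId() mints cursor ids by drawing a random 64-bit value, rejecting zero, flipping negatives positive, and looping until the candidate collides with neither the sharded-cursor map nor the passthrough refs. The same loop against plain std::map, with a stand-in RNG instead of security.getNonce() (sketch):

    #include <cstdlib>
    #include <map>

    // Sketch: mint a positive id unused by either table; std::rand() stands
    // in for the server's security.getNonce().
    long long genUniqueId( const std::map<long long,int>& a ,
                           const std::map<long long,int>& b ) {
        while ( true ) {
            long long x = ( (long long)std::rand() << 32 ) | std::rand();
            if ( x == 0 )
                continue;
            if ( x < 0 )
                x *= -1;
            if ( a.count( x ) || b.count( x ) )
                continue;                         // already taken
            return x;
        }
    }
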
- void CursorCache::gotKillCursors(Message& m ){
+ void CursorCache::gotKillCursors(Message& m ) {
int *x = (int *) m.singleData()->_data;
x++; // reserved
int n = *x++;
- if ( n > 2000 ){
+ if ( n > 2000 ) {
log( n < 30000 ? LL_WARNING : LL_ERROR ) << "receivedKillCursors, n=" << n << endl;
}
uassert( 13286 , "sent 0 cursors to kill" , n >= 1 );
uassert( 13287 , "too many cursors to kill" , n < 30000 );
-
+
long long * cursors = (long long *)x;
- for ( int i=0; i<n; i++ ){
+ for ( int i=0; i<n; i++ ) {
long long id = cursors[i];
- if ( ! id ){
+ if ( ! id ) {
log( LL_WARNING ) << " got cursor id of 0 to kill" << endl;
continue;
}
-
- string server;
+
+ string server;
{
scoped_lock lk( _mutex );
MapSharded::iterator i = _cursors.find( id );
- if ( i != _cursors.end() ){
+ if ( i != _cursors.end() ) {
_cursors.erase( i );
continue;
}
-
+
MapNormal::iterator j = _refs.find( id );
- if ( j == _refs.end() ){
+ if ( j == _refs.end() ) {
log( LL_WARNING ) << "can't find cursor: " << id << endl;
continue;
}
server = j->second;
_refs.erase( j );
}
-
+
assert( server.size() );
ScopedDbConnection conn( server );
conn->killCursor( id );
@@ -236,7 +236,7 @@ namespace mongo {
}
}
- void CursorCache::appendInfo( BSONObjBuilder& result ){
+ void CursorCache::appendInfo( BSONObjBuilder& result ) {
scoped_lock lk( _mutex );
result.append( "sharded" , (int)_cursors.size() );
result.appendNumber( "shardedEver" , _shardedTotal );
@@ -244,12 +244,12 @@ namespace mongo {
result.append( "totalOpen" , (int)(_cursors.size() + _refs.size() ) );
}
- void CursorCache::doTimeouts(){
+ void CursorCache::doTimeouts() {
long long now = Listener::getElapsedTimeMillis();
scoped_lock lk( _mutex );
- for ( MapSharded::iterator i=_cursors.begin(); i!=_cursors.end(); ++i ){
+ for ( MapSharded::iterator i=_cursors.begin(); i!=_cursors.end(); ++i ) {
long long idleFor = i->second->idleTime( now );
- if ( idleFor < TIMEOUT ){
+ if ( idleFor < TIMEOUT ) {
continue;
}
log() << "killing old cursor " << i->second->getId() << " idle for: " << idleFor << "ms" << endl; // TODO: make log(1)
@@ -258,16 +258,16 @@ namespace mongo {
}
CursorCache cursorCache;
-
+
class CursorTimeoutTask : public task::Task {
public:
virtual string name() const { return "cursorTimeout"; }
- virtual void doWork(){
+ virtual void doWork() {
cursorCache.doTimeouts();
}
} cursorTimeoutTask;
- void CursorCache::startTimeoutThread(){
+ void CursorCache::startTimeoutThread() {
task::repeat( &cursorTimeoutTask , 400 );
}
@@ -279,7 +279,7 @@ namespace mongo {
help << " example: { cursorInfo : 1 }";
}
virtual LockType locktype() const { return NONE; }
- bool run(const string&, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ bool run(const string&, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
cursorCache.appendInfo( result );
if ( jsobj["setTimeout"].isNumber() )
CursorCache::TIMEOUT = jsobj["setTimeout"].numberLong();
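
Timeouts are cooperative: a CursorTimeoutTask repeats every 400ms, and doTimeouts() erases any cursor idle longer than TIMEOUT (600000ms, i.e. ten minutes), while cursors opened with QueryOption_NoCursorTimeout report an idleTime of 0 and are never reaped. A freestanding sketch of the reaper over a map of last-access stamps:

    #include <map>

    // Sketch: erase entries idle past 'timeout' ms; a stamp of 0 marks a
    // no-timeout cursor and is never reaped, matching idleTime() above.
    void reapIdle( std::map<long long,long long>& lastAccess ,
                   long long now , long long timeout ) {
        std::map<long long,long long>::iterator i = lastAccess.begin();
        while ( i != lastAccess.end() ) {
            long long idleFor = ( i->second == 0 ) ? 0 : now - i->second;
            if ( idleFor < timeout ) {
                ++i;
                continue;
            }
            lastAccess.erase( i++ );              // C++03-safe erase while iterating
        }
    }
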
diff --git a/s/cursors.h b/s/cursors.h
index 4e3db8cc239..d1654680057 100644
--- a/s/cursors.h
+++ b/s/cursors.h
@@ -16,7 +16,7 @@
*/
-#pragma once
+#pragma once
#include "../pch.h"
@@ -35,21 +35,21 @@ namespace mongo {
virtual ~ShardedClientCursor();
long long getId();
-
+
/**
* @return whether there is more data left
*/
- bool sendNextBatch( Request& r ){ return sendNextBatch( r , _ntoreturn ); }
+ bool sendNextBatch( Request& r ) { return sendNextBatch( r , _ntoreturn ); }
bool sendNextBatch( Request& r , int ntoreturn );
-
+
void accessed();
/** @return idle time in ms */
long long idleTime( long long now );
protected:
-
+
ClusteredCursor * _cursor;
-
+
int _skip;
int _ntoreturn;
@@ -62,10 +62,10 @@ namespace mongo {
};
typedef boost::shared_ptr<ShardedClientCursor> ShardedClientCursorPtr;
-
+
class CursorCache {
public:
-
+
static long long TIMEOUT;
typedef map<long long,ShardedClientCursorPtr> MapSharded;
@@ -73,7 +73,7 @@ namespace mongo {
CursorCache();
~CursorCache();
-
+
ShardedClientCursorPtr get( long long id );
void store( ShardedClientCursorPtr cursor );
void remove( long long id );
@@ -81,9 +81,9 @@ namespace mongo {
void storeRef( const string& server , long long id );
void gotKillCursors(Message& m );
-
+
void appendInfo( BSONObjBuilder& result );
-
+
long long genId();
void doTimeouts();
@@ -93,9 +93,9 @@ namespace mongo {
MapSharded _cursors;
MapNormal _refs;
-
+
long long _shardedTotal;
};
-
+
extern CursorCache cursorCache;
}
diff --git a/s/d_background_splitter.cpp b/s/d_background_splitter.cpp
index 630821199c7..355be1c375f 100644
--- a/s/d_background_splitter.cpp
+++ b/s/d_background_splitter.cpp
@@ -24,13 +24,13 @@
namespace mongo {
-void Splitter::run() {
- while ( ! inShutdown() ) {
+ void Splitter::run() {
+ while ( ! inShutdown() ) {
- log() << "splitter not implemented yet" << endl;
+ log() << "splitter not implemented yet" << endl;
- sleepsecs(60);
+ sleepsecs(60);
+ }
}
-}
} // namespace mongo
diff --git a/s/d_background_splitter.h b/s/d_background_splitter.h
index 63e3ccdbd4d..6ea72dcdb95 100644
--- a/s/d_background_splitter.h
+++ b/s/d_background_splitter.h
@@ -25,7 +25,7 @@
namespace mongo {
/**
- * Continuously traverses this shard's chunks and splits the ones that are above the
+ * Continuously traverses this shard's chunks and splits the ones that are above the
* maximum desired size
*/
class Splitter : public BackgroundJob {
@@ -40,5 +40,5 @@ namespace mongo {
virtual string name() const { return "BackgroundSplitter"; }
};
-} // namespace mongo
-
+} // namespace mongo
+
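
The splitter itself is still a stub, but it shows the minimal shape of a BackgroundJob in this tree: a named thread that loops until inShutdown() with a sleep between passes. Sketched against that interface (the body is illustrative):

    // Sketch against the BackgroundJob interface in util/background.h;
    // the shutdown/sleep loop is the point, the work is a placeholder.
    class PeriodicJob : public BackgroundJob {
    public:
        virtual string name() const { return "periodicJob"; }
        virtual void run() {
            while ( ! inShutdown() ) {
                // real work would go here
                sleepsecs( 60 );
            }
        }
    };
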
diff --git a/s/d_chunk_manager.cpp b/s/d_chunk_manager.cpp
index 9fd85d130a0..e5dd3c1d575 100644
--- a/s/d_chunk_manager.cpp
+++ b/s/d_chunk_manager.cpp
@@ -26,7 +26,7 @@
namespace mongo {
- ShardChunkManager::ShardChunkManager( const string& configServer , const string& ns , const string& shardName ) {
+ ShardChunkManager::ShardChunkManager( const string& configServer , const string& ns , const string& shardName ) {
// have to get a connection to the config db
// special case if i'm the configdb since i'm locked and if i connect to myself it's a deadlock
@@ -34,15 +34,16 @@ namespace mongo {
scoped_ptr<ScopedDbConnection> scoped;
scoped_ptr<DBDirectClient> direct;
DBClientBase * conn;
- if ( configServer.empty() ){
+ if ( configServer.empty() ) {
direct.reset( new DBDirectClient() );
conn = direct.get();
- } else {
+ }
+ else {
scoped.reset( new ScopedDbConnection( configServer ) );
conn = scoped->get();
}
- // get this collection's sharding key
+ // get this collection's sharding key
BSONObj collectionDoc = conn->findOne( "config.collections", BSON( "_id" << ns ) );
uassert( 13539 , str::stream() << ns << " does not exist" , !collectionDoc.isEmpty() );
uassert( 13540 , str::stream() << ns << " collection config entry corrupted" , collectionDoc["dropped"].type() );
@@ -80,13 +81,13 @@ namespace mongo {
b.append( key.fieldName() , 1 );
}
_key = b.obj();
- }
+ }
void ShardChunkManager::_fillChunks( DBClientCursorInterface* cursor ) {
assert( cursor );
ShardChunkVersion version;
- while ( cursor->more() ){
+ while ( cursor->more() ) {
BSONObj d = cursor->next();
_chunksMap.insert( make_pair( d["min"].Obj().getOwned() , d["max"].Obj().getOwned() ) );
@@ -106,13 +107,13 @@ namespace mongo {
// the version for this shard would be the highest version for any of the chunks
RangeMap::const_iterator it = _chunksMap.begin();
BSONObj min,max;
- while ( it != _chunksMap.end() ){
+ while ( it != _chunksMap.end() ) {
BSONObj currMin = it->first;
BSONObj currMax = it->second;
++it;
- // coalesce the chunk's bounds in ranges if they are adjacent chunks
- if ( min.isEmpty() ){
+ // coalesce the chunk's bounds in ranges if they are adjacent chunks
+ if ( min.isEmpty() ) {
min = currMin;
max = currMax;
continue;
@@ -128,7 +129,7 @@ namespace mongo {
max = currMax;
}
assert( ! min.isEmpty() );
-
+
_rangesMap.insert( make_pair( min , max ) );
}
@@ -145,17 +146,17 @@ namespace mongo {
RangeMap::const_iterator it = _rangesMap.upper_bound( x );
if ( it != _rangesMap.begin() )
it--;
-
+
bool good = contains( it->first , it->second , x );
- #if 0
- if ( ! good ){
+#if 0
+ if ( ! good ) {
log() << "bad: " << x << " " << it->first << " " << x.woCompare( it->first ) << " " << x.woCompare( it->second ) << endl;
- for ( RangeMap::const_iterator i=_rangesMap.begin(); i!=_rangesMap.end(); ++i ){
+ for ( RangeMap::const_iterator i=_rangesMap.begin(); i!=_rangesMap.end(); ++i ) {
log() << "\t" << i->first << "\t" << i->second << "\t" << endl;
}
}
- #endif
+#endif
return good;
}
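
belongsToMe() uses the classic ordered-map range lookup: upper_bound on the key, step back one entry, and test the candidate, so membership costs O(log n) over the coalesced ranges. The same trick on a std::map with integer bounds (sketch):

    #include <map>

    // Sketch: is 'x' inside any [lo, hi) range stored as map<lo, hi>?
    bool inSomeRange( const std::map<int,int>& ranges , int x ) {
        if ( ranges.empty() )
            return false;
        std::map<int,int>::const_iterator it = ranges.upper_bound( x );
        if ( it == ranges.begin() )
            return false;                 // x precedes every range
        --it;                             // candidate: greatest lo <= x
        return x >= it->first && x < it->second;
    }
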
@@ -184,7 +185,7 @@ namespace mongo {
*foundMax = it->second;
return false;
}
-
+
return true;
}
@@ -197,14 +198,14 @@ namespace mongo {
if ( it->second.woCompare( max ) != 0 ) {
ostringstream os;
os << "ranges differ, "
- << "requested: " << min << " -> " << max << " "
+ << "requested: " << min << " -> " << max << " "
<< "existing: " << (it == _chunksMap.end()) ? "<empty>" : it->first.toString() + " -> " + it->second.toString();
uasserted( 13587 , os.str() );
}
}
ShardChunkManager* ShardChunkManager::cloneMinus( const BSONObj& min, const BSONObj& max, const ShardChunkVersion& version ) {
-
+
// check that we have the exact chunk that'll be subtracted
_assertChunkExists( min , max );
@@ -217,7 +218,8 @@ namespace mongo {
p->_version = 0;
- } else {
+ }
+ else {
// can't move version backwards when subtracting chunks
// this is what guarantees that no read or write would be taken once we subtract data from the current shard
if ( version <= _version ) {
@@ -232,16 +234,16 @@ namespace mongo {
return p.release();
}
-
+
static bool overlap( const BSONObj& l1 , const BSONObj& h1 , const BSONObj& l2 , const BSONObj& h2 ) {
return ! ( ( h1.woCompare( l2 ) <= 0 ) || ( h2.woCompare( l1 ) <= 0 ) );
}
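
overlap() is the textbook half-open interval test: [l1,h1) and [l2,h2) intersect exactly when neither range ends at or before the other begins, so [0,5) and [5,9) touch but do not overlap. With plain integers instead of BSONObj keys (sketch):

    // Sketch: the same half-open test on integers. overlapInt(0,5,5,9) is
    // false (the ranges only touch); overlapInt(0,6,5,9) is true.
    static bool overlapInt( int l1 , int h1 , int l2 , int h2 ) {
        return ! ( ( h1 <= l2 ) || ( h2 <= l1 ) );
    }
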
-
+
ShardChunkManager* ShardChunkManager::clonePlus( const BSONObj& min , const BSONObj& max , const ShardChunkVersion& version ) {
// it is acceptable to move version backwards (e.g., undoing a migration that went bad during commit)
// but only cloning away the last chunk may reset the version to 0
- uassert( 13591 , "version can't be set to zero" , version > 0 );
+ uassert( 13591 , "version can't be set to zero" , version > 0 );
if ( ! _chunksMap.empty() ) {
@@ -253,11 +255,11 @@ namespace mongo {
if ( overlap( min , max , it->first , it->second ) ) {
ostringstream os;
os << "ranges overlap, "
- << "requested: " << min << " -> " << max << " "
+ << "requested: " << min << " -> " << max << " "
<< "existing: " << it->first.toString() + " -> " + it->second.toString();
uasserted( 13588 , os.str() );
}
- }
+ }
auto_ptr<ShardChunkManager> p( new ShardChunkManager );
@@ -270,17 +272,17 @@ namespace mongo {
return p.release();
}
- ShardChunkManager* ShardChunkManager::cloneSplit( const BSONObj& min , const BSONObj& max , const vector<BSONObj>& splitKeys ,
- const ShardChunkVersion& version ) {
+ ShardChunkManager* ShardChunkManager::cloneSplit( const BSONObj& min , const BSONObj& max , const vector<BSONObj>& splitKeys ,
+ const ShardChunkVersion& version ) {
// the version required in both resulting chunks could be simply an increment in the minor portion of the current version
- // however, we are enforcing uniqueness over the attributes <ns, lastmod> of the configdb collection 'chunks'
+ // however, we are enforcing uniqueness over the attributes <ns, lastmod> of the configdb collection 'chunks'
// so in practice, a migrate somewhere may force this split to pick up a version that has the major portion higher
// than the one that this shard has been using
//
// TODO drop the uniqueness constraint and tighten the check below so that only the minor portion of version changes
if ( version <= _version ) {
- uasserted( 13592 , str::stream() << "version " << version.toString() << " not greater than " << _version.toString() );
+ uasserted( 13592 , str::stream() << "version " << version.toString() << " not greater than " << _version.toString() );
}
// check that we have the exact chunk that'll be split and that the split point is valid
@@ -292,7 +294,7 @@ namespace mongo {
}
auto_ptr<ShardChunkManager> p( new ShardChunkManager );
-
+
p->_key = this->_key;
p->_chunksMap = this->_chunksMap;
p->_version = version; // will increment second, third, ... chunks below
diff --git a/s/d_chunk_manager.h b/s/d_chunk_manager.h
index e0c03f400c6..32762bfe995 100644
--- a/s/d_chunk_manager.h
+++ b/s/d_chunk_manager.h
@@ -28,13 +28,13 @@ namespace mongo {
/**
* Controls the boundaries of all the chunks for a given collection that live in this shard.
*
- * ShardChunkManager instances never change after construction. There are methods provided that would generate a
+ * ShardChunkManager instances never change after construction. There are methods provided that would generate a
* new manager if new chunks are added, subtracted, or split.
*
* TODO
* The responsibility of maintaining the version for a shard is still shared between this class and its caller. The
* manager does check corner cases (e.g. cloning out the last chunk generates a manager with version 0) but ultimately
- * still cannot be responsible to set all versions. Currently, they are a function of the global state as opposed to
+ * still cannot be responsible to set all versions. Currently, they are a function of the global state as opposed to
* the per-shard one.
*/
class ShardChunkManager : public boost::noncopyable {
@@ -60,18 +60,18 @@ namespace mongo {
* @param chunksDocs simulates config.chunks' entries for one collection's shard
*/
ShardChunkManager( const BSONObj& collectionDoc , const BSONArray& chunksDoc );
-
+
~ShardChunkManager() {}
/**
- * Generates a new manager based on 'this's state minus a given chunk.
+ * Generates a new manager based on 'this's state minus a given chunk.
*
* @param min max chunk boundaries for the chunk to subtract
* @param version that the resulting manager should be at. The version has to be higher than the current one.
* When cloning away the last chunk, version must be 0.
* @return a new ShardChunkManager, to be owned by the caller
*/
- ShardChunkManager* cloneMinus( const BSONObj& min , const BSONObj& max , const ShardChunkVersion& version );
+ ShardChunkManager* cloneMinus( const BSONObj& min , const BSONObj& max , const ShardChunkVersion& version );
/**
* Generates a new manager based on 'this's state plus a given chunk.
@@ -80,7 +80,7 @@ namespace mongo {
* @param version that the resulting manager should be at. It can never be 0, though (see CloneMinus).
* @return a new ShardChunkManager, to be owned by the caller
*/
- ShardChunkManager* clonePlus( const BSONObj& min , const BSONObj& max , const ShardChunkVersion& version );
+ ShardChunkManager* clonePlus( const BSONObj& min , const BSONObj& max , const ShardChunkVersion& version );
/**
* Generates a new manager by splitting an existing chunk at one or more points.
@@ -146,5 +146,5 @@ namespace mongo {
};
typedef shared_ptr<ShardChunkManager> ShardChunkManagerPtr;
-
+
} // namespace mongo
diff --git a/s/d_logic.cpp b/s/d_logic.cpp
index 7de7a1ef7e6..1df05ac6732 100644
--- a/s/d_logic.cpp
+++ b/s/d_logic.cpp
@@ -43,27 +43,27 @@ using namespace std;
namespace mongo {
- bool handlePossibleShardedMessage( Message &m, DbResponse* dbresponse ){
+ bool handlePossibleShardedMessage( Message &m, DbResponse* dbresponse ) {
if ( ! shardingState.enabled() )
return false;
int op = m.operation();
- if ( op < 2000
- || op >= 3000
- || op == dbGetMore // cursors are weird
- )
+ if ( op < 2000
+ || op >= 3000
+ || op == dbGetMore // cursors are weird
+ )
return false;
-
- DbMessage d(m);
+
+ DbMessage d(m);
const char *ns = d.getns();
string errmsg;
- if ( shardVersionOk( ns , opIsWrite( op ) , errmsg ) ){
+ if ( shardVersionOk( ns , opIsWrite( op ) , errmsg ) ) {
return false;
}
log(1) << "connection meta data too old - will retry ns:(" << ns << ") op:(" << opToString(op) << ") " << errmsg << endl;
-
- if ( doesOpGetAResponse( op ) ){
+
+ if ( doesOpGetAResponse( op ) ) {
assert( dbresponse );
BufBuilder b( 32768 );
b.skip( sizeof( QueryResult ) );
@@ -71,7 +71,7 @@ namespace mongo {
BSONObj obj = BSON( "$err" << errmsg );
b.appendBuf( obj.objdata() , obj.objsize() );
}
-
+
QueryResult *qr = (QueryResult*)b.buf();
qr->_resultFlags() = ResultFlag_ErrSet | ResultFlag_ShardConfigStale;
qr->len = b.len();
@@ -83,19 +83,19 @@ namespace mongo {
Message * resp = new Message();
resp->setData( qr , true );
-
+
dbresponse->response = resp;
dbresponse->responseTo = m.header()->id;
return true;
}
-
+
OID writebackID;
writebackID.init();
lastError.getSafe()->writeback( writebackID );
const OID& clientID = ShardedConnectionInfo::get(false)->getID();
massert( 10422 , "write with bad shard config and no server id!" , clientID.isSet() );
-
+
log(1) << "got write with an old config - writing back ns: " << ns << endl;
if ( logLevel ) log(1) << debugString( m ) << endl;
diff --git a/s/d_logic.h b/s/d_logic.h
index 0e2223b6bde..3e70365ce1a 100644
--- a/s/d_logic.h
+++ b/s/d_logic.h
@@ -26,7 +26,7 @@
#include "util.h"
namespace mongo {
-
+
typedef ShardChunkVersion ConfigVersion;
typedef map<string,ConfigVersion> NSVersionMap;
@@ -37,23 +37,23 @@ namespace mongo {
class ShardingState {
public:
ShardingState();
-
+
bool enabled() const { return _enabled; }
const string& getConfigServer() const { return _configServer; }
void enable( const string& server );
void gotShardName( const string& name );
void gotShardHost( string host );
-
+
// versioning support
bool hasVersion( const string& ns );
bool hasVersion( const string& ns , ConfigVersion& version );
const ConfigVersion getVersion( const string& ns ) const;
-
+
/**
- * Uninstalls the manager for a given collection. This should be used when the collection is dropped.
- *
+ * Uninstalls the manager for a given collection. This should be used when the collection is dropped.
+ *
* NOTE:
* An existing collection with no chunks on this shard will have a manager on version 0, which is different than a
* dropped collection, which will not have a manager.
@@ -77,9 +77,9 @@ namespace mongo {
* @return true if the access can be allowed at the provided version
*/
bool trySetVersion( const string& ns , ConfigVersion& version );
-
+
void appendInfo( BSONObjBuilder& b );
-
+
// querying support
bool needShardChunkManager( const string& ns ) const;
@@ -87,7 +87,7 @@ namespace mongo {
// chunk migrate and split support
- /**
+ /**
* Creates and installs a new chunk manager for a given collection by "forgetting" about one of its chunks.
* The new manager uses the provided version, which has to be higher than the current manager's.
* One exception: if the forgotten chunk is the last one in this shard for the collection, version has to be 0.
@@ -104,7 +104,7 @@ namespace mongo {
* Creates and installs a new chunk manager for a given collection by reclaiming a previously donated chunk.
* The previous manager's version has to be provided.
*
- * If it runs successfully, clients that became stale by the previous donateChunk will be able to access the
+ * If it runs successfully, clients that became stale by the previous donateChunk will be able to access the
* collection again.
*
* @param ns the collection
@@ -123,19 +123,19 @@ namespace mongo {
*
* @param ns the collection
* @param min max the chunk that should be split
- * @param splitKeys points at which to split
+ * @param splitKeys points at which to split
* @param version at which the new manager should be at
*/
void splitChunk( const string& ns , const BSONObj& min , const BSONObj& max , const vector<BSONObj>& splitKeys ,
ShardChunkVersion version );
bool inCriticalMigrateSection();
-
- private:
+
+ private:
bool _enabled;
-
+
string _configServer;
-
+
string _shardName;
string _shardHost;
@@ -147,7 +147,7 @@ namespace mongo {
typedef map<string,ShardChunkManagerPtr> ChunkManagersMap;
ChunkManagersMap _chunks;
};
-
+
extern ShardingState shardingState;
/**
@@ -157,26 +157,26 @@ namespace mongo {
class ShardedConnectionInfo {
public:
ShardedConnectionInfo();
-
+
const OID& getID() const { return _id; }
bool hasID() const { return _id.isSet(); }
void setID( const OID& id );
-
+
const ConfigVersion getVersion( const string& ns ) const;
void setVersion( const string& ns , const ConfigVersion& version );
-
+
static ShardedConnectionInfo* get( bool create );
static void reset();
-
- bool inForceVersionOkMode() const {
+
+ bool inForceVersionOkMode() const {
return _forceVersionOk;
}
-
- void enterForceVersionOkMode(){ _forceVersionOk = true; }
- void leaveForceVersionOkMode(){ _forceVersionOk = false; }
+
+ void enterForceVersionOkMode() { _forceVersionOk = true; }
+ void leaveForceVersionOkMode() { _forceVersionOk = false; }
private:
-
+
OID _id;
NSVersionMap _versions;
bool _forceVersionOk; // if this is true, then chunk version #s aren't checked, and all ops are allowed
@@ -185,31 +185,31 @@ namespace mongo {
};
struct ShardForceVersionOkModeBlock {
- ShardForceVersionOkModeBlock(){
+ ShardForceVersionOkModeBlock() {
info = ShardedConnectionInfo::get( false );
if ( info )
info->enterForceVersionOkMode();
}
- ~ShardForceVersionOkModeBlock(){
+ ~ShardForceVersionOkModeBlock() {
if ( info )
info->leaveForceVersionOkMode();
}
ShardedConnectionInfo * info;
};
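
ShardForceVersionOkModeBlock is a scope guard: construction flips the connection into force-version-ok mode and destruction restores it, even when the guarded operation throws. The pattern reduced to a plain flag (sketch):

    // Sketch: RAII toggle that restores the flag on scope exit, exceptions
    // included, which is what makes the force-version-ok window safe.
    class FlagGuard {
    public:
        explicit FlagGuard( bool& flag ) : _flag( flag ) { _flag = true; }
        ~FlagGuard() { _flag = false; }
    private:
        bool& _flag;
    };
    // usage: { FlagGuard g( forceVersionOk ); /* ops skip version checks */ }
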
-
+
// -----------------
// --- core ---
// -----------------
unsigned long long extractVersion( BSONElement e , string& errmsg );
-
+
/**
* @return true if we have any shard info for the ns
*/
bool haveLocalShardingInfo( const string& ns );
-
+
/**
* @return true if the current threads shard version is ok, or not in sharded version
*/
diff --git a/s/d_migrate.cpp b/s/d_migrate.cpp
index feeb006777e..e6fd7d4ce7a 100644
--- a/s/d_migrate.cpp
+++ b/s/d_migrate.cpp
@@ -62,55 +62,56 @@ namespace mongo {
_b.append( "max" , max );
}
- ~MoveTimingHelper(){
+ ~MoveTimingHelper() {
// even if logChange doesn't throw, bson does
// sigh
- try {
+ try {
if ( _next != _total ) {
note( "aborted" );
}
configServer.logChange( (string)"moveChunk." + _where , _ns, _b.obj() );
- } catch ( const std::exception& e ) {
+ }
+ catch ( const std::exception& e ) {
log( LL_WARNING ) << "couldn't record timing for moveChunk '" << _where << "': " << e.what() << endl;
}
}
-
- void done( int step ){
+
+ void done( int step ) {
assert( step == ++_next );
assert( step <= _total );
-
+
stringstream ss;
ss << "step" << step;
string s = ss.str();
-
+
CurOp * op = cc().curop();
if ( op )
op->setMessage( s.c_str() );
- else
+ else
log( LL_WARNING ) << "op is null in MoveTimingHelper::done" << endl;
-
+
_b.appendNumber( s , _t.millis() );
_t.reset();
#if 0
// debugging for memory leak?
ProcessInfo pi;
- ss << " v:" << pi.getVirtualMemorySize()
+ ss << " v:" << pi.getVirtualMemorySize()
<< " r:" << pi.getResidentSize();
log() << ss.str() << endl;
#endif
}
-
-
- void note( const string& s ){
+
+
+ void note( const string& s ) {
string field = "note";
- if ( _nextNote > 0 ){
+ if ( _nextNote > 0 ) {
StringBuilder buf;
buf << "note" << _nextNote;
field = buf.str();
}
_nextNote++;
-
+
_b.append( field , s );
}
@@ -119,11 +120,11 @@ namespace mongo {
string _where;
string _ns;
-
+
int _next;
int _total; // expected # of steps
int _nextNote;
-
+
BSONObjBuilder _b;
};
@@ -133,7 +134,7 @@ namespace mongo {
BSONObj min;
BSONObj max;
set<CursorId> initial;
- void doRemove(){
+ void doRemove() {
ShardForceVersionOkModeBlock sf;
writelock lk(ns);
RemoveSaver rs("moveChunk",ns,"post-cleanup");
@@ -143,76 +144,76 @@ namespace mongo {
};
static const char * const cleanUpThreadName = "cleanupOldData";
-
- void _cleanupOldData( OldDataCleanup cleanup ){
+
+ void _cleanupOldData( OldDataCleanup cleanup ) {
Client::initThread( cleanUpThreadName );
log() << " (start) waiting to cleanup " << cleanup.ns << " from " << cleanup.min << " -> " << cleanup.max << " # cursors:" << cleanup.initial.size() << endl;
int loops = 0;
Timer t;
- while ( t.seconds() < 900 ){ // 15 minutes
+ while ( t.seconds() < 900 ) { // 15 minutes
assert( dbMutex.getState() == 0 );
sleepmillis( 20 );
-
+
set<CursorId> now;
- ClientCursor::find( cleanup.ns , now );
-
+ ClientCursor::find( cleanup.ns , now );
+
set<CursorId> left;
- for ( set<CursorId>::iterator i=cleanup.initial.begin(); i!=cleanup.initial.end(); ++i ){
+ for ( set<CursorId>::iterator i=cleanup.initial.begin(); i!=cleanup.initial.end(); ++i ) {
CursorId id = *i;
if ( now.count(id) )
left.insert( id );
}
-
+
if ( left.size() == 0 )
break;
cleanup.initial = left;
-
- if ( ( loops++ % 200 ) == 0 ){
+
+ if ( ( loops++ % 200 ) == 0 ) {
log() << " (looping " << loops << ") waiting to cleanup " << cleanup.ns << " from " << cleanup.min << " -> " << cleanup.max << " # cursors:" << cleanup.initial.size() << endl;
-
+
stringstream ss;
- for ( set<CursorId>::iterator i=cleanup.initial.begin(); i!=cleanup.initial.end(); ++i ){
+ for ( set<CursorId>::iterator i=cleanup.initial.begin(); i!=cleanup.initial.end(); ++i ) {
CursorId id = *i;
ss << id << " ";
}
log() << " cursors: " << ss.str() << endl;
}
}
-
+
cleanup.doRemove();
cc().shutdown();
}
- void cleanupOldData( OldDataCleanup cleanup ){
+ void cleanupOldData( OldDataCleanup cleanup ) {
try {
_cleanupOldData( cleanup );
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
log() << " error cleaning old data:" << e.what() << endl;
}
- catch ( ... ){
+ catch ( ... ) {
log() << " unknown error cleaning old data" << endl;
}
}
class ChunkCommandHelper : public Command {
public:
- ChunkCommandHelper( const char * name )
- : Command( name ){
+ ChunkCommandHelper( const char * name )
+ : Command( name ) {
}
-
+
virtual void help( stringstream& help ) const {
help << "internal - should not be called directly" << endl;
}
virtual bool slaveOk() const { return false; }
virtual bool adminOnly() const { return true; }
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
};
- bool isInRange( const BSONObj& obj , const BSONObj& min , const BSONObj& max ){
+ bool isInRange( const BSONObj& obj , const BSONObj& min , const BSONObj& max ) {
BSONObj k = obj.extractFields( min, true );
return k.woCompare( min ) >= 0 && k.woCompare( max ) < 0;
@@ -221,26 +222,26 @@ namespace mongo {
class MigrateFromStatus {
public:
-
+
MigrateFromStatus() : _m("MigrateFromStatus") {
_active = false;
_inCriticalSection = false;
_memoryUsed = 0;
}
- void start( string ns , const BSONObj& min , const BSONObj& max ){
+ void start( string ns , const BSONObj& min , const BSONObj& max ) {
scoped_lock l(_m); // reads and writes _active
assert( ! _active );
-
+
assert( ! min.isEmpty() );
assert( ! max.isEmpty() );
assert( ns.size() );
-
+
_ns = ns;
_min = min;
_max = max;
-
+
assert( _cloneLocs.size() == 0 );
assert( _deleted.size() == 0 );
assert( _reload.size() == 0 );
@@ -248,8 +249,8 @@ namespace mongo {
_active = true;
}
-
- void done(){
+
+ void done() {
_deleted.clear();
_reload.clear();
_cloneLocs.clear();
@@ -259,8 +260,8 @@ namespace mongo {
_active = false;
_inCriticalSection = false;
}
-
- void logOp( const char * opstr , const char * ns , const BSONObj& obj , BSONObj * patt ){
+
+ void logOp( const char * opstr , const char * ns , const BSONObj& obj , BSONObj * patt ) {
if ( ! _getActive() )
return;
@@ -277,68 +278,68 @@ namespace mongo {
BSONElement ide;
if ( patt )
ide = patt->getField( "_id" );
- else
+ else
ide = obj["_id"];
-
- if ( ide.eoo() ){
+
+ if ( ide.eoo() ) {
log( LL_WARNING ) << "logOpForSharding got mod with no _id, ignoring obj: " << obj << endl;
return;
}
-
+
BSONObj it;
- switch ( opstr[0] ){
-
+ switch ( opstr[0] ) {
+
case 'd': {
-
- if ( getThreadName() == cleanUpThreadName ){
+
+ if ( getThreadName() == cleanUpThreadName ) {
// we don't want to xfer things we're cleaning
// as then they'll be deleted on TO
// which is bad
return;
}
-
+
// can't filter deletes :(
_deleted.push_back( ide.wrap() );
_memoryUsed += ide.size() + 5;
return;
}
-
- case 'i':
+
+ case 'i':
it = obj;
break;
-
- case 'u':
- if ( ! Helpers::findById( cc() , _ns.c_str() , ide.wrap() , it ) ){
+
+ case 'u':
+ if ( ! Helpers::findById( cc() , _ns.c_str() , ide.wrap() , it ) ) {
log( LL_WARNING ) << "logOpForSharding couldn't find: " << ide << " even though should have" << endl;
return;
}
break;
-
+
}
-
+
if ( ! isInRange( it , _min , _max ) )
return;
-
+
_reload.push_back( ide.wrap() );
_memoryUsed += ide.size() + 5;
}
- void xfer( list<BSONObj> * l , BSONObjBuilder& b , const char * name , long long& size , bool explode ){
+ void xfer( list<BSONObj> * l , BSONObjBuilder& b , const char * name , long long& size , bool explode ) {
const long long maxSize = 1024 * 1024;
-
+
if ( l->size() == 0 || size > maxSize )
return;
-
+
BSONArrayBuilder arr(b.subarrayStart(name));
-
- list<BSONObj>::iterator i = l->begin();
-
- while ( i != l->end() && size < maxSize ){
+
+ list<BSONObj>::iterator i = l->begin();
+
+ while ( i != l->end() && size < maxSize ) {
BSONObj t = *i;
- if ( explode ){
+ if ( explode ) {
BSONObj it;
- if ( Helpers::findById( cc() , _ns.c_str() , t, it ) ){
+ if ( Helpers::findById( cc() , _ns.c_str() , t, it ) ) {
arr.append( it );
size += it.objsize();
}
@@ -349,7 +350,7 @@ namespace mongo {
i = l->erase( i );
size += t.objsize();
}
-
+
arr.done();
}
@@ -357,8 +358,8 @@ namespace mongo {
* called from the dest of a migrate
* transfers mods from src to dest
*/
- bool transferMods( string& errmsg , BSONObjBuilder& b ){
- if ( ! _getActive() ){
+ bool transferMods( string& errmsg , BSONObjBuilder& b ) {
+ if ( ! _getActive() ) {
errmsg = "no active migration!";
return false;
}
@@ -378,18 +379,18 @@ namespace mongo {
return true;
}
- /**
+ /**
* Get the disklocs that belong to the chunk migrated and sort them in _cloneLocs (to avoid seeking disk later)
*
* @param maxChunkSize number of bytes beyond which a chunk's base data (no indices) is considered too large to move
* @param errmsg filled with textual description of error if this call return false
* @return false if approximate chunk size is too big to move or true otherwise
*/
- bool storeCurrentLocs( long long maxChunkSize , string& errmsg ){
- readlock l( _ns );
+ bool storeCurrentLocs( long long maxChunkSize , string& errmsg ) {
+ readlock l( _ns );
Client::Context ctx( _ns );
NamespaceDetails *d = nsdetails( _ns.c_str() );
- if ( ! d ){
+ if ( ! d ) {
errmsg = "ns not found, should be impossible";
return false;
}
@@ -398,15 +399,15 @@ namespace mongo {
// the copies are needed because the indexDetailsForRange destroys the input
BSONObj min = _min.copy();
BSONObj max = _max.copy();
- IndexDetails *idx = indexDetailsForRange( _ns.c_str() , errmsg , min , max , keyPattern );
- if ( idx == NULL ){
+ IndexDetails *idx = indexDetailsForRange( _ns.c_str() , errmsg , min , max , keyPattern );
+ if ( idx == NULL ) {
errmsg = "can't find index in storeCurrentLocs";
return false;
}
- scoped_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout ,
- shared_ptr<Cursor>( new BtreeCursor( d , d->idxNo(*idx) , *idx , min , max , false , 1 ) ) ,
- _ns ) );
+ scoped_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout ,
+ shared_ptr<Cursor>( new BtreeCursor( d , d->idxNo(*idx) , *idx , min , max , false , 1 ) ) ,
+ _ns ) );
// use the average object size to estimate how many objects a full chunk would carry
// do that while traversing the chunk's range using the sharding index, below
@@ -414,11 +415,12 @@ namespace mongo {
unsigned long long maxRecsWhenFull;
long long avgRecSize;
const long long totalRecs = d->stats.nrecords;
- if ( totalRecs > 0 ){
+ if ( totalRecs > 0 ) {
avgRecSize = d->stats.datasize / totalRecs;
maxRecsWhenFull = maxChunkSize / avgRecSize;
maxRecsWhenFull = 130 * maxRecsWhenFull / 100; // slack
- } else {
+ }
+ else {
avgRecSize = 0;
maxRecsWhenFull = numeric_limits<long long>::max();
}
@@ -427,57 +429,57 @@ namespace mongo {
// we want the number of records to better report, in that case
bool isLargeChunk = false;
unsigned long long recCount = 0;
- while ( cc->ok() ){
+ while ( cc->ok() ) {
DiskLoc dl = cc->currLoc();
if ( ! isLargeChunk ) {
- _cloneLocs.insert( dl );
+ _cloneLocs.insert( dl );
}
cc->advance();
- // we can afford to yield here because any change to the base data that we might miss is already being
+ // we can afford to yield here because any change to the base data that we might miss is already being
// queued and will be migrated in the 'transferMods' stage
if ( ! cc->yieldSometimes() ) {
- break;
+ break;
}
if ( ++recCount > maxRecsWhenFull ) {
isLargeChunk = true;
- }
+ }
}
if ( isLargeChunk ) {
- errmsg = str::stream() << "can't move chunk of size (approx) " << recCount * avgRecSize
- << " because maximum size allowed to move is " << maxChunkSize;
+ errmsg = str::stream() << "can't move chunk of size (approx) " << recCount * avgRecSize
+ << " because maximum size allowed to move is " << maxChunkSize;
log( LL_WARNING ) << errmsg << endl;
return false;
}
-
+
log() << "moveChunk number of documents: " << _cloneLocs.size() << endl;
return true;
}
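
Note how storeCurrentLocs() sizes the chunk without reading document lengths: it derives an average record size from the collection stats, converts maxChunkSize into a record budget padded by 30% slack, and declares the chunk too large once the index scan counts past that budget. The arithmetic on its own (sketch; degenerate stats guarded):

    #include <limits>

    // Sketch: translate a byte budget into a record budget with 30% slack,
    // using collection-level averages rather than per-document sizes.
    unsigned long long recordBudget( long long datasize , long long nrecords ,
                                     long long maxChunkSizeBytes ) {
        if ( nrecords <= 0 )
            return std::numeric_limits<unsigned long long>::max();  // empty: no limit
        long long avgRecSize = datasize / nrecords;
        if ( avgRecSize <= 0 )
            return std::numeric_limits<unsigned long long>::max();  // degenerate stats
        unsigned long long maxRecs = (unsigned long long)( maxChunkSizeBytes / avgRecSize );
        return 130 * maxRecs / 100;                                 // 30% slack, as above
    }
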
- bool clone( string& errmsg , BSONObjBuilder& result ){
- if ( ! _getActive() ){
+ bool clone( string& errmsg , BSONObjBuilder& result ) {
+ if ( ! _getActive() ) {
errmsg = "not active";
return false;
}
- readlock l( _ns );
+ readlock l( _ns );
Client::Context ctx( _ns );
-
+
NamespaceDetails *d = nsdetails( _ns.c_str() );
assert( d );
BSONArrayBuilder a( std::min( BSONObjMaxUserSize , (int)( ( 12 + d->averageObjectSize() )* _cloneLocs.size() ) ) );
-
+
set<DiskLoc>::iterator i = _cloneLocs.begin();
- for ( ; i!=_cloneLocs.end(); ++i ){
+ for ( ; i!=_cloneLocs.end(); ++i ) {
DiskLoc dl = *i;
BSONObj o = dl.obj();
// use the builder size instead of accumulating 'o's size so that we take into consideration
// the overhead of BSONArray indices
- if ( a.len() + o.objsize() + 1024 > BSONObjMaxUserSize ){
+ if ( a.len() + o.objsize() + 1024 > BSONObjMaxUserSize ) {
i--;
break;
}
@@ -489,18 +491,18 @@ namespace mongo {
return true;
}
- void aboutToDelete( const Database* db , const DiskLoc& dl ){
+ void aboutToDelete( const Database* db , const DiskLoc& dl ) {
dbMutex.assertWriteLocked();
if ( ! _getActive() )
return;
-
+
if ( ! db->ownsNS( _ns ) )
return;
_cloneLocs.erase( dl );
}
-
+
long long mbUsed() const { return _memoryUsed / ( 1024 * 1024 ); }
bool getInCriticalSection() const { scoped_lock l(_m); return _inCriticalSection; }
@@ -521,7 +523,7 @@ namespace mongo {
// updates applied by 1 thread in a write lock
set<DiskLoc> _cloneLocs;
- list<BSONObj> _reload; // objects that were modified that must be recloned
+ list<BSONObj> _reload; // objects that were modified that must be recloned
list<BSONObj> _deleted; // objects deleted during clone that should be deleted later
long long _memoryUsed; // bytes in _reload + _deleted
@@ -529,39 +531,39 @@ namespace mongo {
void _setActive( bool b ) { scoped_lock l(_m); _active = b; }
} migrateFromStatus;
-
+
struct MigrateStatusHolder {
- MigrateStatusHolder( string ns , const BSONObj& min , const BSONObj& max ){
+ MigrateStatusHolder( string ns , const BSONObj& min , const BSONObj& max ) {
migrateFromStatus.start( ns , min , max );
}
- ~MigrateStatusHolder(){
+ ~MigrateStatusHolder() {
migrateFromStatus.done();
}
};
- void logOpForSharding( const char * opstr , const char * ns , const BSONObj& obj , BSONObj * patt ){
+ void logOpForSharding( const char * opstr , const char * ns , const BSONObj& obj , BSONObj * patt ) {
migrateFromStatus.logOp( opstr , ns , obj , patt );
}
- void aboutToDeleteForSharding( const Database* db , const DiskLoc& dl ){
+ void aboutToDeleteForSharding( const Database* db , const DiskLoc& dl ) {
migrateFromStatus.aboutToDelete( db , dl );
}
- class TransferModsCommand : public ChunkCommandHelper{
+ class TransferModsCommand : public ChunkCommandHelper {
public:
- TransferModsCommand() : ChunkCommandHelper( "_transferMods" ){}
+ TransferModsCommand() : ChunkCommandHelper( "_transferMods" ) {}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
return migrateFromStatus.transferMods( errmsg, result );
}
} transferModsCommand;
- class InitialCloneCommand : public ChunkCommandHelper{
+ class InitialCloneCommand : public ChunkCommandHelper {
public:
- InitialCloneCommand() : ChunkCommandHelper( "_migrateClone" ){}
+ InitialCloneCommand() : ChunkCommandHelper( "_migrateClone" ) {}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
return migrateFromStatus.clone( errmsg, result );
}
} initialCloneCommand;
@@ -575,17 +577,17 @@ namespace mongo {
*/
class MoveChunkCommand : public Command {
public:
- MoveChunkCommand() : Command( "moveChunk" ){}
+ MoveChunkCommand() : Command( "moveChunk" ) {}
virtual void help( stringstream& help ) const {
help << "should not be calling this directly" << endl;
}
virtual bool slaveOk() const { return false; }
virtual bool adminOnly() const { return true; }
- virtual LockType locktype() const { return NONE; }
-
-
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ virtual LockType locktype() const { return NONE; }
+
+
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
// 1. parse options
// 2. make sure my view is complete and lock
// 3. start migrate
@@ -599,9 +601,9 @@ namespace mongo {
// d) logChange to config server
// 6. wait for all current cursors to expire
// 7. remove data locally
-
+
// -------------------------------
-
+
// 1.
string ns = cmdObj.firstElement().str();
string to = cmdObj["to"].str();
@@ -610,44 +612,44 @@ namespace mongo {
BSONObj max = cmdObj["max"].Obj();
BSONElement shardId = cmdObj["shardId"];
BSONElement maxSizeElem = cmdObj["maxChunkSizeBytes"];
-
- if ( ns.empty() ){
+
+ if ( ns.empty() ) {
errmsg = "need to specify namespace in command";
return false;
}
-
- if ( to.empty() ){
+
+ if ( to.empty() ) {
errmsg = "need to specify server to move chunk to";
return false;
}
- if ( from.empty() ){
+ if ( from.empty() ) {
errmsg = "need to specify server to move chunk from";
return false;
}
-
- if ( min.isEmpty() ){
+
+ if ( min.isEmpty() ) {
errmsg = "need to specify a min";
return false;
}
- if ( max.isEmpty() ){
+ if ( max.isEmpty() ) {
errmsg = "need to specify a max";
return false;
}
-
- if ( shardId.eoo() ){
+
+ if ( shardId.eoo() ) {
errmsg = "need shardId";
return false;
}
- if ( maxSizeElem.eoo() || ! maxSizeElem.isNumber() ){
+ if ( maxSizeElem.eoo() || ! maxSizeElem.isNumber() ) {
errmsg = "need to specify maxChunkSizeBytes";
return false;
- }
+ }
const long long maxChunkSize = maxSizeElem.numberLong(); // in bytes
-
- if ( ! shardingState.enabled() ){
- if ( cmdObj["configdb"].type() != String ){
+
+ if ( ! shardingState.enabled() ) {
+ if ( cmdObj["configdb"].type() != String ) {
errmsg = "sharding not enabled";
return false;
}
@@ -660,15 +662,15 @@ namespace mongo {
Shard fromShard( from );
Shard toShard( to );
-
+
log() << "received moveChunk request: " << cmdObj << endl;
timing.done(1);
- // 2.
+ // 2.
DistributedLock lockSetup( ConnectionString( shardingState.getConfigServer() , ConnectionString::SYNC ) , ns );
dist_lock_try dlk( &lockSetup , (string)"migrate-" + min.toString() );
- if ( ! dlk.got() ){
+ if ( ! dlk.got() ) {
errmsg = "the collection's metadata lock is taken";
result.append( "who" , dlk.other() );
return false;
@@ -681,7 +683,7 @@ namespace mongo {
string myOldShard;
{
ScopedDbConnection conn( shardingState.getConfigServer() );
-
+
BSONObj x = conn->findOne( ShardNS::chunk , Query( BSON( "ns" << ns ) ).sort( BSON( "lastmod" << -1 ) ) );
maxVersion = x["lastmod"];
@@ -691,7 +693,7 @@ namespace mongo {
assert( currChunk["max"].type() );
myOldShard = currChunk["shard"].String();
conn.done();
-
+
BSONObj currMin = currChunk["min"].Obj();
BSONObj currMax = currChunk["max"].Obj();
if ( currMin.woCompare( min ) || currMax.woCompare( max ) ) {
@@ -701,27 +703,27 @@ namespace mongo {
result.append( "requestedMin" , min );
result.append( "requestedMax" , max );
- log( LL_WARNING ) << "aborted moveChunk because" << errmsg << ": " << min << "->" << max
+ log( LL_WARNING ) << "aborted moveChunk because" << errmsg << ": " << min << "->" << max
<< " is now " << currMin << "->" << currMax << endl;
return false;
}
- if ( myOldShard != fromShard.getName() ){
+ if ( myOldShard != fromShard.getName() ) {
errmsg = "location is outdated (likely balance or migrate occurred)";
result.append( "from" , fromShard.getName() );
result.append( "official" , myOldShard );
- log( LL_WARNING ) << "aborted moveChunk because " << errmsg << ": chunk is at " << myOldShard
+ log( LL_WARNING ) << "aborted moveChunk because " << errmsg << ": chunk is at " << myOldShard
<< " and not at " << fromShard.getName() << endl;
return false;
- }
+ }
- if ( maxVersion < shardingState.getVersion( ns ) ){
+ if ( maxVersion < shardingState.getVersion( ns ) ) {
errmsg = "official version less than mine?";
result.appendTimestamp( "officialVersion" , maxVersion );
result.appendTimestamp( "myVersion" , shardingState.getVersion( ns ) );
- log( LL_WARNING ) << "aborted moveChunk because " << errmsg << ": official " << maxVersion
+ log( LL_WARNING ) << "aborted moveChunk because " << errmsg << ": official " << maxVersion
<< " mine: " << shardingState.getVersion(ns) << endl;
return false;
}
@@ -733,29 +735,29 @@ namespace mongo {
log() << "moveChunk request accepted at version " << shardVersion << endl;
}
-
+
timing.done(2);
-
+
// 3.
MigrateStatusHolder statusHolder( ns , min , max );
- {
+ {
// this gets a read lock, so we know we have a checkpoint for mods
if ( ! migrateFromStatus.storeCurrentLocs( maxChunkSize , errmsg ) )
return false;
ScopedDbConnection connTo( to );
BSONObj res;
- bool ok = connTo->runCommand( "admin" ,
+ bool ok = connTo->runCommand( "admin" ,
BSON( "_recvChunkStart" << ns <<
"from" << from <<
"min" << min <<
"max" << max <<
"configServer" << configServer.modelServer()
- ) ,
+ ) ,
res );
connTo.done();
- if ( ! ok ){
+ if ( ! ok ) {
errmsg = "moveChunk failed to engage TO-shard in the data transfer: ";
assert( res["errmsg"].type() );
errmsg += res["errmsg"].String();
@@ -765,20 +767,20 @@ namespace mongo {
}
timing.done( 3 );
-
- // 4.
- for ( int i=0; i<86400; i++ ){ // don't want a single chunk move to take more than a day
+
+ // 4.
+ for ( int i=0; i<86400; i++ ) { // don't want a single chunk move to take more than a day
assert( dbMutex.getState() == 0 );
- sleepsecs( 1 );
+ sleepsecs( 1 );
ScopedDbConnection conn( to );
BSONObj res;
bool ok = conn->runCommand( "admin" , BSON( "_recvChunkStatus" << 1 ) , res );
res = res.getOwned();
conn.done();
-
+
log(0) << "moveChunk data transfer progress: " << res << " my mem used: " << migrateFromStatus.mbUsed() << endl;
-
- if ( ! ok || res["state"].String() == "fail" ){
+
+ if ( ! ok || res["state"].String() == "fail" ) {
log( LL_WARNING ) << "moveChunk error transfering data caused migration abort: " << res << endl;
errmsg = "data transfer error";
result.append( "cause" , res );
@@ -788,7 +790,7 @@ namespace mongo {
if ( res["state"].String() == "steady" )
break;
- if ( migrateFromStatus.mbUsed() > (500 * 1024 * 1024) ){
+ if ( migrateFromStatus.mbUsed() > (500 * 1024 * 1024) ) {
// this is too much memory for us to use for this
// so we're going to abort the migrate
ScopedDbConnection conn( to );
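
Step 4 above is a bounded polling loop: ask the TO-shard for its status once a second, for at most a day, and abort if the FROM side buffers too many pending mods. A compressed sketch in standard C++, with stubs standing in for the shard calls (recvChunkStatus and mbUsed are hypothetical, not mongo's API):

    #include <chrono>
    #include <thread>

    enum class Progress { Steady, Fail, InProgress };
    Progress recvChunkStatus() { return Progress::Steady; } // stub: ask the TO-shard
    long long mbUsed() { return 0; }                        // stub: pending-mod memory, in MB

    bool waitForTransfer() {
        for ( int i = 0; i < 86400; i++ ) {                 // cap a single move at one day
            std::this_thread::sleep_for( std::chrono::seconds(1) );
            Progress p = recvChunkStatus();
            if ( p == Progress::Fail ) return false;        // data transfer error
            if ( p == Progress::Steady ) return true;       // ready to enter commit
            if ( mbUsed() > 500 ) return false;             // past the 500MB cap: abort
        }
        return false;                                       // timed out
    }
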
@@ -807,7 +809,7 @@ namespace mongo {
timing.done(4);
// 5.
- {
+ {
// 5.a
// we're under the collection lock here, so no other migrate can change maxVersion or ShardChunkManager state
migrateFromStatus.setInCriticalSection( true );
@@ -824,20 +826,20 @@ namespace mongo {
shardingState.donateChunk( ns , min , max , myVersion );
}
- log() << "moveChunk setting version to: " << myVersion << endl;
-
+ log() << "moveChunk setting version to: " << myVersion << endl;
+
// 5.b
// we're under the collection lock here, too, so we can undo the chunk donation because no other state change
// could be ongoing
{
BSONObj res;
ScopedDbConnection connTo( to );
- bool ok = connTo->runCommand( "admin" ,
+ bool ok = connTo->runCommand( "admin" ,
BSON( "_recvChunkCommit" << 1 ) ,
res );
connTo.done();
- if ( ! ok ){
+ if ( ! ok ) {
{
writelock lk( ns );
@@ -845,7 +847,7 @@ namespace mongo {
shardingState.undoDonateChunk( ns , min , max , currVersion );
}
- log() << "movChunk migrate commit not accepted by TO-shard: " << res
+ log() << "movChunk migrate commit not accepted by TO-shard: " << res
<< " resetting shard version to: " << currVersion << endl;
errmsg = "_recvChunkCommit failed!";
@@ -870,7 +872,7 @@ namespace mongo {
BSONObjBuilder cmdBuilder;
- BSONArrayBuilder updates( cmdBuilder.subarrayStart( "applyOps" ) );
+ BSONArrayBuilder updates( cmdBuilder.subarrayStart( "applyOps" ) );
{
// update for the chunk being moved
BSONObjBuilder op;
@@ -886,7 +888,7 @@ namespace mongo {
n.append( "max" , max );
n.append( "shard" , toShard.getName() );
n.done();
-
+
BSONObjBuilder q( op.subobjStart( "o2" ) );
q.append( "_id" , Chunk::genID( ns , min ) );
q.done();
@@ -906,11 +908,12 @@ namespace mongo {
// get another chunk on that shard
BSONObj lookupKey;
BSONObj bumpMin, bumpMax;
- do {
+ do {
chunkManager->getNextChunk( lookupKey , &bumpMin , &bumpMax );
lookupKey = bumpMin;
- } while( bumpMin == min );
-
+ }
+ while( bumpMin == min );
+
BSONObjBuilder op;
op.append( "op" , "u" );
op.appendBool( "b" , false );
@@ -929,13 +932,14 @@ namespace mongo {
BSONObjBuilder q( op.subobjStart( "o2" ) );
q.append( "_id" , Chunk::genID( ns , bumpMin ) );
q.done();
-
+
updates.append( op.obj() );
log() << "moveChunk updating self version to: " << nextVersion << " through "
<< bumpMin << " -> " << bumpMax << " for collection '" << ns << "'" << endl;
- } else {
+ }
+ else {
log() << "moveChunk moved last chunk out for collection '" << ns << "'" << endl;
}
@@ -968,20 +972,20 @@ namespace mongo {
conn.done();
}
- if ( ! ok ){
+ if ( ! ok ) {
// this could be a blip in the connectivity
// wait out a few seconds and check if the commit request made it
- //
+ //
// if the commit made it to the config, we'll see the chunk in the new shard and there's no action
// if the commit did not make it, currently the only way to fix this state is to bounce the mongod so
// that the old state (before migrating) is brought back in
log( LL_WARNING ) << "moveChunk commit outcome ongoing: " << cmd << " for command :" << cmdResult << endl;
- sleepsecs( 10 );
+ sleepsecs( 10 );
try {
- ScopedDbConnection conn( shardingState.getConfigServer() );
+ ScopedDbConnection conn( shardingState.getConfigServer() );
// look for the chunk in this shard whose version got bumped
// we assume that if that mod made it to the config, the applyOps was successful
@@ -991,7 +995,8 @@ namespace mongo {
if ( checkVersion == nextVersion ) {
log() << "moveChunk commit confirmed" << endl;
- } else {
+ }
+ else {
log( LL_ERROR ) << "moveChunk commit failed: version is at"
<< checkVersion << " instead of " << nextVersion << endl;
log( LL_ERROR ) << "TERMINATING" << endl;
@@ -1000,7 +1005,8 @@ namespace mongo {
conn.done();
- } catch ( ... ) {
+ }
+ catch ( ... ) {
log( LL_ERROR ) << "moveChunk failed to get confirmation of commit" << endl;
log( LL_ERROR ) << "TERMINATING" << endl;
dbexit( EXIT_SHARDING_ERROR );
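
The recovery path above handles a lost applyOps reply: sleep out the connectivity blip, re-read the chunk's version from config, and treat a matching version bump as proof the commit landed. Sketched with a hypothetical readConfigVersion callback in place of the real config query:

    #include <chrono>
    #include <thread>

    enum class Commit { Confirmed, Failed, Unknown };

    Commit confirmCommit( unsigned long long nextVersion,
                          unsigned long long (*readConfigVersion)() ) {
        std::this_thread::sleep_for( std::chrono::seconds(10) ); // wait out the blip
        try {
            return readConfigVersion() == nextVersion ? Commit::Confirmed
                                                      : Commit::Failed;
        }
        catch ( ... ) {
            return Commit::Unknown;  // couldn't reach config; the caller terminates
        }
    }
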
@@ -1012,17 +1018,18 @@ namespace mongo {
// 5.d
configServer.logChange( "moveChunk.commit" , ns , chunkInfo );
}
-
+
migrateFromStatus.done();
timing.done(5);
-
- { // 6.
+
+ {
+ // 6.
OldDataCleanup c;
c.ns = ns;
c.min = min.getOwned();
c.max = max.getOwned();
ClientCursor::find( ns , c.initial );
- if ( c.initial.size() ){
+ if ( c.initial.size() ) {
log() << "forking for cleaning up chunk data" << endl;
boost::thread t( boost::bind( &cleanupOldData , c ) );
}
@@ -1031,24 +1038,24 @@ namespace mongo {
// 7.
c.doRemove();
}
-
-
+
+
}
- timing.done(6);
+ timing.done(6);
return true;
-
+
}
-
+
} moveChunkCmd;
- bool ShardingState::inCriticalMigrateSection(){
+ bool ShardingState::inCriticalMigrateSection() {
return migrateFromStatus.getInCriticalSection();
}
/* -----
below this are the "to" side commands
-
+
command to initiate
worker thread
does initial clone
@@ -1061,10 +1068,10 @@ namespace mongo {
class MigrateStatus {
public:
-
+
MigrateStatus() : m_active("MigrateStatus") { active = false; }
- void prepare(){
+ void prepare() {
scoped_lock l(m_active); // reading and writing 'active'
assert( ! active );
@@ -1079,54 +1086,56 @@ namespace mongo {
active = true;
}
- void go(){
+ void go() {
try {
_go();
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
state = FAIL;
errmsg = e.what();
log( LL_ERROR ) << "migrate failed: " << e.what() << endl;
}
- catch ( ... ){
+ catch ( ... ) {
state = FAIL;
errmsg = "UNKNOWN ERROR";
log( LL_ERROR ) << "migrate failed with unknown exception" << endl;
}
setActive( false );
}
-
- void _go(){
+
+ void _go() {
assert( getActive() );
assert( state == READY );
assert( ! min.isEmpty() );
assert( ! max.isEmpty() );
-
+
MoveTimingHelper timing( "to" , ns , min , max , 5 /* steps */ );
-
+
ScopedDbConnection conn( from );
conn->getLastError(); // just test connection
- { // 1. copy indexes
+ {
+ // 1. copy indexes
auto_ptr<DBClientCursor> indexes = conn->getIndexes( ns );
vector<BSONObj> all;
- while ( indexes->more() ){
+ while ( indexes->more() ) {
all.push_back( indexes->next().getOwned() );
}
-
+
writelock lk( ns );
Client::Context ct( ns );
-
+
string system_indexes = cc().database()->name + ".system.indexes";
- for ( unsigned i=0; i<all.size(); i++ ){
+ for ( unsigned i=0; i<all.size(); i++ ) {
BSONObj idx = all[i];
theDataFileMgr.insert( system_indexes.c_str() , idx.objdata() , idx.objsize() );
}
-
+
timing.done(1);
}
-
- { // 2. delete any data already in range
+
+ {
+ // 2. delete any data already in range
writelock lk( ns );
RemoveSaver rs( "moveChunk" , ns , "preCleanup" );
long long num = Helpers::removeRange( ns , min , max , true , false , cmdLine.moveParanoia ? &rs : 0 );
@@ -1135,14 +1144,15 @@ namespace mongo {
timing.done(2);
}
-
-
- { // 3. initial bulk clone
+
+
+ {
+ // 3. initial bulk clone
state = CLONE;
-
- while ( true ){
+
+ while ( true ) {
BSONObj res;
- if ( ! conn->runCommand( "admin" , BSON( "_migrateClone" << 1 ) , res ) ){
+ if ( ! conn->runCommand( "admin" , BSON( "_migrateClone" << 1 ) , res ) ) {
state = FAIL;
errmsg = "_migrateClone failed: ";
errmsg += res.toString();
@@ -1150,12 +1160,12 @@ namespace mongo {
conn.done();
return;
}
-
+
BSONObj arr = res["objects"].Obj();
int thisTime = 0;
BSONObjIterator i( arr );
- while( i.more() ){
+ while( i.more() ) {
BSONObj o = i.next().Obj();
{
writelock lk( ns );
@@ -1165,22 +1175,23 @@ namespace mongo {
numCloned++;
clonedBytes += o.objsize();
}
-
+
if ( thisTime == 0 )
break;
}
timing.done(3);
}
-
+
// if running on a replicated system, we'll need to flush the docs we cloned to the secondaries
ReplTime lastOpApplied;
- { // 4. do bulk of mods
+ {
+ // 4. do bulk of mods
state = CATCHUP;
- while ( true ){
+ while ( true ) {
BSONObj res;
- if ( ! conn->runCommand( "admin" , BSON( "_transferMods" << 1 ) , res ) ){
+ if ( ! conn->runCommand( "admin" , BSON( "_transferMods" << 1 ) , res ) ) {
state = FAIL;
errmsg = "_transferMods failed: ";
errmsg += res.toString();
@@ -1190,10 +1201,10 @@ namespace mongo {
}
if ( res["size"].number() == 0 )
break;
-
+
apply( res , &lastOpApplied );
- if ( state == ABORT ){
+ if ( state == ABORT ) {
timing.note( "aborted" );
return;
}
@@ -1201,14 +1212,15 @@ namespace mongo {
timing.done(4);
}
-
- { // 5. wait for commit
+
+ {
+ // 5. wait for commit
Timer timeWaitingForCommit;
state = STEADY;
- while ( state == STEADY || state == COMMIT_START ){
+ while ( state == STEADY || state == COMMIT_START ) {
BSONObj res;
- if ( ! conn->runCommand( "admin" , BSON( "_transferMods" << 1 ) , res ) ){
+ if ( ! conn->runCommand( "admin" , BSON( "_transferMods" << 1 ) , res ) ) {
log() << "_transferMods failed in STEADY state: " << res << endl;
errmsg = res.toString();
state = FAIL;
@@ -1218,19 +1230,19 @@ namespace mongo {
if ( res["size"].number() > 0 && apply( res , &lastOpApplied ) )
continue;
-
+
if ( state == COMMIT_START && flushPendingWrites( lastOpApplied ) )
break;
sleepmillis( 10 );
}
- if ( state == ABORT ){
+ if ( state == ABORT ) {
timing.note( "aborted" );
return;
}
-
- if ( timeWaitingForCommit.seconds() > 86400 ){
+
+ if ( timeWaitingForCommit.seconds() > 86400 ) {
state = FAIL;
errmsg = "timed out waiting for commit";
return;
@@ -1238,19 +1250,19 @@ namespace mongo {
timing.done(5);
}
-
+
state = DONE;
conn.done();
}
- void status( BSONObjBuilder& b ){
+ void status( BSONObjBuilder& b ) {
b.appendBool( "active" , getActive() );
b.append( "ns" , ns );
b.append( "from" , from );
b.append( "min" , min );
b.append( "max" , max );
-
+
b.append( "state" , stateString() );
if ( state == FAIL )
b.append( "errmsg" , errmsg );
@@ -1266,22 +1278,22 @@ namespace mongo {
}
- bool apply( const BSONObj& xfer , ReplTime* lastOpApplied ){
+ bool apply( const BSONObj& xfer , ReplTime* lastOpApplied ) {
ReplTime dummy;
if ( lastOpApplied == NULL ) {
lastOpApplied = &dummy;
}
bool didAnything = false;
-
- if ( xfer["deleted"].isABSONObj() ){
+
+ if ( xfer["deleted"].isABSONObj() ) {
writelock lk(ns);
Client::Context cx(ns);
-
+
RemoveSaver rs( "moveChunk" , ns , "removedDuring" );
BSONObjIterator i( xfer["deleted"].Obj() );
- while ( i.more() ){
+ while ( i.more() ) {
BSONObj id = i.next().Obj();
// do not apply deletes if they do not belong to the chunk being migrated
@@ -1300,13 +1312,13 @@ namespace mongo {
didAnything = true;
}
}
-
- if ( xfer["reload"].isABSONObj() ){
+
+ if ( xfer["reload"].isABSONObj() ) {
writelock lk(ns);
Client::Context cx(ns);
BSONObjIterator i( xfer["reload"].Obj() );
- while ( i.more() ){
+ while ( i.more() ) {
BSONObj it = i.next().Obj();
Helpers::upsert( ns , it );
@@ -1318,14 +1330,14 @@ namespace mongo {
return didAnything;
}
-
+
bool flushPendingWrites( const ReplTime& lastOpApplied ) {
// if replication is on, try to force enough secondaries to catch up
// TODO opReplicatedEnough should eventually honor priorities and geo-awareness
// for now, we try to replicate to a sensible number of secondaries
const int slaveCount = getSlaveCount() / 2 + 1;
if ( ! opReplicatedEnough( lastOpApplied , slaveCount ) ) {
- log( LL_WARNING ) << "migrate commit attempt timed out contacting " << slaveCount
+ log( LL_WARNING ) << "migrate commit attempt timed out contacting " << slaveCount
<< " slaves for '" << ns << "' " << min << " -> " << max << endl;
return false;
}
@@ -1339,8 +1351,8 @@ namespace mongo {
return true;
}
- string stateString(){
- switch ( state ){
+ string stateString() {
+ switch ( state ) {
case READY: return "ready";
case CLONE: return "clone";
case CATCHUP: return "catchup";
@@ -1354,12 +1366,12 @@ namespace mongo {
return "";
}
- bool startCommit(){
+ bool startCommit() {
if ( state != STEADY )
return false;
state = COMMIT_START;
-
- for ( int i=0; i<86400; i++ ){
+
+ for ( int i=0; i<86400; i++ ) {
sleepmillis(1);
if ( state == DONE )
return true;
@@ -1368,7 +1380,7 @@ namespace mongo {
return false;
}
- void abort(){
+ void abort() {
state = ABORT;
errmsg = "aborted";
}
@@ -1378,13 +1390,13 @@ namespace mongo {
mutable mongo::mutex m_active;
bool active;
-
+
string ns;
string from;
-
+
BSONObj min;
BSONObj max;
-
+
long long numCloned;
long long clonedBytes;
long long numCatchup;
@@ -1392,28 +1404,28 @@ namespace mongo {
enum State { READY , CLONE , CATCHUP , STEADY , COMMIT_START , DONE , FAIL , ABORT } state;
string errmsg;
-
+
} migrateStatus;
-
- void migrateThread(){
+
+ void migrateThread() {
Client::initThread( "migrateThread" );
migrateStatus.go();
cc().shutdown();
}
-
+
class RecvChunkStartCommand : public ChunkCommandHelper {
public:
- RecvChunkStartCommand() : ChunkCommandHelper( "_recvChunkStart" ){}
+ RecvChunkStartCommand() : ChunkCommandHelper( "_recvChunkStart" ) {}
virtual LockType locktype() const { return WRITE; } // this is so don't have to do locking internally
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
-
- if ( migrateStatus.getActive() ){
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+
+ if ( migrateStatus.getActive() ) {
errmsg = "migrate already in progress";
return false;
}
-
+
if ( ! configServer.ok() )
configServer.init( cmdObj["configServer"].String() );
@@ -1423,9 +1435,9 @@ namespace mongo {
migrateStatus.from = cmdObj["from"].String();
migrateStatus.min = cmdObj["min"].Obj().getOwned();
migrateStatus.max = cmdObj["max"].Obj().getOwned();
-
+
boost::thread m( migrateThread );
-
+
result.appendBool( "started" , true );
return true;
}
@@ -1434,20 +1446,20 @@ namespace mongo {
class RecvChunkStatusCommand : public ChunkCommandHelper {
public:
- RecvChunkStatusCommand() : ChunkCommandHelper( "_recvChunkStatus" ){}
+ RecvChunkStatusCommand() : ChunkCommandHelper( "_recvChunkStatus" ) {}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
migrateStatus.status( result );
return 1;
}
-
+
} recvChunkStatusCommand;
class RecvChunkCommitCommand : public ChunkCommandHelper {
public:
- RecvChunkCommitCommand() : ChunkCommandHelper( "_recvChunkCommit" ){}
-
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ RecvChunkCommitCommand() : ChunkCommandHelper( "_recvChunkCommit" ) {}
+
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
bool ok = migrateStatus.startCommit();
migrateStatus.status( result );
return ok;
@@ -1457,9 +1469,9 @@ namespace mongo {
class RecvChunkAbortCommand : public ChunkCommandHelper {
public:
- RecvChunkAbortCommand() : ChunkCommandHelper( "_recvChunkAbort" ){}
-
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ RecvChunkAbortCommand() : ChunkCommandHelper( "_recvChunkAbort" ) {}
+
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
migrateStatus.abort();
migrateStatus.status( result );
return true;
@@ -1470,7 +1482,7 @@ namespace mongo {
class IsInRangeTest : public UnitTest {
public:
- void run(){
+ void run() {
BSONObj min = BSON( "x" << 1 );
BSONObj max = BSON( "x" << 5 );
diff --git a/s/d_split.cpp b/s/d_split.cpp
index 02224013bb3..490c469f3e3 100644
--- a/s/d_split.cpp
+++ b/s/d_split.cpp
@@ -50,19 +50,19 @@ namespace mongo {
public:
CmdMedianKey() : Command( "medianKey" ) {}
virtual bool slaveOk() const { return true; }
- virtual LockType locktype() const { return READ; }
+ virtual LockType locktype() const { return READ; }
virtual void help( stringstream &help ) const {
- help <<
- "Internal command.\n"
- "example: { medianKey:\"blog.posts\", keyPattern:{x:1}, min:{x:10}, max:{x:55} }\n"
- "NOTE: This command may take a while to run";
+ help <<
+ "Internal command.\n"
+ "example: { medianKey:\"blog.posts\", keyPattern:{x:1}, min:{x:10}, max:{x:55} }\n"
+ "NOTE: This command may take a while to run";
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
const char *ns = jsobj.getStringField( "medianKey" );
BSONObj min = jsobj.getObjectField( "min" );
BSONObj max = jsobj.getObjectField( "max" );
BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );
-
+
Client::Context ctx( ns );
IndexDetails *id = cmdIndexDetailsForRange( ns, errmsg, min, max, keyPattern );
@@ -73,22 +73,22 @@ namespace mongo {
int num = 0;
NamespaceDetails *d = nsdetails(ns);
int idxNo = d->idxNo(*id);
-
+
// only yielding on first half for now
// after this it should be in ram, so 2nd should be fast
{
shared_ptr<Cursor> c( new BtreeCursor( d, idxNo, *id, min, max, false, 1 ) );
scoped_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
- while ( c->ok() ){
+ while ( c->ok() ) {
num++;
c->advance();
if ( ! cc->yieldSometimes() )
break;
}
}
-
+
num /= 2;
-
+
BtreeCursor c( d, idxNo, *id, min, max, false, 1 );
for( ; num; c.advance(), --num );
@@ -106,15 +106,15 @@ namespace mongo {
int x = median.woCompare( min , BSONObj() , false );
int y = median.woCompare( max , BSONObj() , false );
- if ( x == 0 || y == 0 ){
+ if ( x == 0 || y == 0 ) {
// it's on an edge, ok
}
- else if ( x < 0 && y < 0 ){
+ else if ( x < 0 && y < 0 ) {
log( LL_ERROR ) << "median error (1) min: " << min << " max: " << max << " median: " << median << endl;
errmsg = "median error 1";
return false;
}
- else if ( x > 0 && y > 0 ){
+ else if ( x > 0 && y > 0 ) {
log( LL_ERROR ) << "median error (2) min: " << min << " max: " << max << " median: " << median << endl;
errmsg = "median error 2";
return false;
@@ -124,25 +124,25 @@ namespace mongo {
}
} cmdMedianKey;
- class SplitVector : public Command {
- public:
- SplitVector() : Command( "splitVector" , false ){}
+ class SplitVector : public Command {
+ public:
+ SplitVector() : Command( "splitVector" , false ) {}
virtual bool slaveOk() const { return false; }
virtual LockType locktype() const { return READ; }
virtual void help( stringstream &help ) const {
help <<
- "Internal command.\n"
- "examples:\n"
- " { splitVector : \"blog.post\" , keyPattern:{x:1} , min:{x:10} , max:{x:20}, maxChunkSize:200 }\n"
- " maxChunkSize unit in MBs\n"
- " May optionally specify 'maxSplitPoints' and 'maxChunkObjects' to avoid traversing the whole chunk\n"
- " \n"
- " { splitVector : \"blog.post\" , keyPattern:{x:1} , min:{x:10} , max:{x:20}, force: true }\n"
- " 'force' will produce one split point even if data is small; defaults to false\n"
- "NOTE: This command may take a while to run";
+ "Internal command.\n"
+ "examples:\n"
+ " { splitVector : \"blog.post\" , keyPattern:{x:1} , min:{x:10} , max:{x:20}, maxChunkSize:200 }\n"
+ " maxChunkSize unit in MBs\n"
+ " May optionally specify 'maxSplitPoints' and 'maxChunkObjects' to avoid traversing the whole chunk\n"
+ " \n"
+ " { splitVector : \"blog.post\" , keyPattern:{x:1} , min:{x:10} , max:{x:20}, force: true }\n"
+ " 'force' will produce one split point even if data is small; defaults to false\n"
+ "NOTE: This command may take a while to run";
}
- bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
//
// 1.a We'll parse the parameters in two steps. First, make sure we can use the split index to get
@@ -155,16 +155,17 @@ namespace mongo {
// If min and max are not provided use the "minKey" and "maxKey" for the sharding key pattern.
BSONObj min = jsobj.getObjectField( "min" );
BSONObj max = jsobj.getObjectField( "max" );
- if ( min.isEmpty() && max.isEmpty() ){
+ if ( min.isEmpty() && max.isEmpty() ) {
BSONObjBuilder minBuilder;
BSONObjBuilder maxBuilder;
- BSONForEach(key, keyPattern){
+ BSONForEach(key, keyPattern) {
minBuilder.appendMinKey( key.fieldName() );
maxBuilder.appendMaxKey( key.fieldName() );
}
min = minBuilder.obj();
max = maxBuilder.obj();
- } else if ( min.isEmpty() || max.isEmpty() ){
+ }
+ else if ( min.isEmpty() || max.isEmpty() ) {
errmsg = "either provide both min and max or leave both empty";
return false;
}
@@ -172,13 +173,13 @@ namespace mongo {
// Get the size estimate for this namespace
Client::Context ctx( ns );
NamespaceDetails *d = nsdetails( ns );
- if ( ! d ){
+ if ( ! d ) {
errmsg = "ns not found";
return false;
}
-
+
IndexDetails *idx = cmdIndexDetailsForRange( ns , errmsg , min , max , keyPattern );
- if ( idx == NULL ){
+ if ( idx == NULL ) {
errmsg = "couldn't find index over splitting key";
return false;
}
@@ -190,8 +191,8 @@ namespace mongo {
// 1.b Now that we have the size estimate, go over the remaining parameters and apply any maximum size
// restrictions specified there.
//
-
- // 'force'-ing a split is equivalent to having maxChunkSize be the size of the current chunk, i.e., the
+
+ // 'force'-ing a split is equivalent to having maxChunkSize be the size of the current chunk, i.e., the
// logic below will split that chunk in half
long long maxChunkSize = 0;
bool force = false;
@@ -203,17 +204,19 @@ namespace mongo {
force = true;
maxChunkSize = dataSize;
- } else if ( maxSizeElem.isNumber() ){
- maxChunkSize = maxSizeElem.numberLong() * 1<<20;
+ }
+ else if ( maxSizeElem.isNumber() ) {
+ maxChunkSize = maxSizeElem.numberLong() * 1<<20;
- } else {
+ }
+ else {
maxSizeElem = jsobj["maxChunkSizeBytes"];
- if ( maxSizeElem.isNumber() ){
+ if ( maxSizeElem.isNumber() ) {
maxChunkSize = maxSizeElem.numberLong();
}
}
-
- if ( maxChunkSize <= 0 ){
+
+ if ( maxChunkSize <= 0 ) {
errmsg = "need to specify the desired max chunk size (maxChunkSize or maxChunkSizeBytes)";
return false;
}
@@ -221,13 +224,13 @@ namespace mongo {
long long maxSplitPoints = 0;
BSONElement maxSplitPointsElem = jsobj[ "maxSplitPoints" ];
- if ( maxSplitPointsElem.isNumber() ){
+ if ( maxSplitPointsElem.isNumber() ) {
maxSplitPoints = maxSplitPointsElem.numberLong();
}
long long maxChunkObjects = 0;
BSONElement MaxChunkObjectsElem = jsobj[ "maxChunkObjects" ];
- if ( MaxChunkObjectsElem.isNumber() ){
+ if ( MaxChunkObjectsElem.isNumber() ) {
maxChunkObjects = MaxChunkObjectsElem.numberLong();
}
@@ -239,9 +242,9 @@ namespace mongo {
}
log() << "request split points lookup for chunk " << ns << " " << min << " -->> " << max << endl;
-
+
// We'll use the average object size and number of objects to find approximately how many keys
- // each chunk should have. We'll split at half the maxChunkSize or maxChunkObjects, if
+ // each chunk should have. We'll split at half the maxChunkSize or maxChunkObjects, if
// provided.
const long long avgRecSize = dataSize / recCount;
long long keyCount = maxChunkSize / (2 * avgRecSize);
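
A worked instance of that arithmetic with made-up numbers: 4MB of data over 8192 records gives a 512-byte average record, so a 1MB maxChunkSize yields a split point roughly every 1024 keys.

    long long dataSize     = 4LL * 1024 * 1024;   // hypothetical collection size
    long long recCount     = 8192;                // hypothetical record count
    long long maxChunkSize = 1LL * 1024 * 1024;   // desired max chunk size
    long long avgRecSize   = dataSize / recCount;              // = 512 bytes
    long long keyCount     = maxChunkSize / (2 * avgRecSize);  // = 1024 keys
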
@@ -252,38 +255,39 @@ namespace mongo {
//
// 2. Traverse the index and add the keyCount-th key to the result vector. If that key
- // appeared in the vector before, we omit it. The invariant here is that all the
+ // appeared in the vector before, we omit it. The invariant here is that all the
// instances of a given key value live in the same chunk.
//
Timer timer;
long long currCount = 0;
long long numChunks = 0;
-
+
BtreeCursor * bc = new BtreeCursor( d , d->idxNo(*idx) , *idx , min , max , false , 1 );
shared_ptr<Cursor> c( bc );
scoped_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
- if ( ! cc->ok() ){
+ if ( ! cc->ok() ) {
errmsg = "can't open a cursor for splitting (desired range is possibly empty)";
return false;
}
// Use every 'keyCount'-th key as a split point. We add the initial key as a sentinel, to be removed
- // at the end. If a key appears more times than entries allowed on a chunk, we issue a warning and
+ // at the end. If a key appears more times than entries allowed on a chunk, we issue a warning and
// split on the following key.
vector<BSONObj> splitKeys;
set<BSONObj> tooFrequentKeys;
splitKeys.push_back( c->currKey() );
- while ( cc->ok() ){
+ while ( cc->ok() ) {
currCount++;
- if ( currCount > keyCount ){
+ if ( currCount > keyCount ) {
BSONObj currKey = c->currKey();
// Do not use this split key if it is the same used in the previous split point.
- if ( currKey.woCompare( splitKeys.back() ) == 0 ){
+ if ( currKey.woCompare( splitKeys.back() ) == 0 ) {
tooFrequentKeys.insert( currKey );
- } else {
+ }
+ else {
splitKeys.push_back( currKey );
currCount = 0;
numChunks++;
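
That loop in isolation: walk the keys in index order, emit every keyCount-th one as a split point, but never repeat the previous point, since every copy of a given key value must stay in a single chunk. A standalone sketch over plain strings:

    #include <set>
    #include <string>
    #include <vector>

    std::vector<std::string> pickSplitKeys( const std::vector<std::string>& keys,
                                            long long keyCount,
                                            std::set<std::string>& tooFrequent ) {
        std::vector<std::string> splits;
        if ( keys.empty() ) return splits;
        splits.push_back( keys[0] );            // sentinel, removed by the caller
        long long curr = 0;
        for ( const std::string& k : keys ) {
            if ( ++curr > keyCount ) {
                if ( k == splits.back() ) {
                    tooFrequent.insert( k );    // same key as the last split: skip it
                }
                else {
                    splits.push_back( k );
                    curr = 0;
                }
            }
        }
        return splits;
    }
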
@@ -295,14 +299,14 @@ namespace mongo {
cc->advance();
// Stop if we have enough split points.
- if ( maxSplitPoints && ( numChunks >= maxSplitPoints ) ){
- log() << "max number of requested split points reached (" << numChunks
- << ") before the end of chunk " << ns << " " << min << " -->> " << max
- << endl;
+ if ( maxSplitPoints && ( numChunks >= maxSplitPoints ) ) {
+ log() << "max number of requested split points reached (" << numChunks
+ << ") before the end of chunk " << ns << " " << min << " -->> " << max
+ << endl;
break;
}
-
- if ( ! cc->yieldSometimes() ){
+
+ if ( ! cc->yieldSometimes() ) {
// we were near the end and got pushed to it
// i think returning the splits we've already found is fine
@@ -319,23 +323,23 @@ namespace mongo {
//
// Warn for keys that are more numerous than maxChunkSize allows.
- for ( set<BSONObj>::const_iterator it = tooFrequentKeys.begin(); it != tooFrequentKeys.end(); ++it ){
- log( LL_WARNING ) << "chunk is larger than " << maxChunkSize
+ for ( set<BSONObj>::const_iterator it = tooFrequentKeys.begin(); it != tooFrequentKeys.end(); ++it ) {
+ log( LL_WARNING ) << "chunk is larger than " << maxChunkSize
<< " bytes because of key " << bc->prettyKey( *it ) << endl;
}
// Remove the sentinel at the beginning before returning and add fieldnames.
splitKeys.erase( splitKeys.begin() );
- for ( vector<BSONObj>::iterator it = splitKeys.begin(); it != splitKeys.end() ; ++it ){
+ for ( vector<BSONObj>::iterator it = splitKeys.begin(); it != splitKeys.end() ; ++it ) {
*it = bc->prettyKey( *it );
}
ostringstream os;
- os << "Finding the split vector for " << ns << " over "<< keyPattern
+ os << "Finding the split vector for " << ns << " over "<< keyPattern
<< " keyCount: " << keyCount << " numSplits: " << splitKeys.size();
logIfSlow( timer , os.str() );
- // Warning: we are sending back an array of keys but are currently limited to
+ // Warning: we are sending back an array of keys but are currently limited to
// 4MB worth of 'result' size. This should be okay for now.
result.append( "splitKeys" , splitKeys );
@@ -348,25 +352,25 @@ namespace mongo {
// ** temporary ** 2010-10-22
// chunkInfo is a helper to collect and log information about the chunks generated in splitChunk.
// It should hold the chunk state for this module only, while we don't have min/max key info per chunk on the
- // mongod side. Do not build on this; it will go away.
- struct ChunkInfo {
+ // mongod side. Do not build on this; it will go away.
+ struct ChunkInfo {
BSONObj min;
BSONObj max;
ShardChunkVersion lastmod;
- ChunkInfo() { }
+ ChunkInfo() { }
ChunkInfo( BSONObj aMin , BSONObj aMax , ShardChunkVersion aVersion ) : min(aMin) , max(aMax) , lastmod(aVersion) {}
void appendShortVersion( const char* name, BSONObjBuilder& b ) const;
string toString() const;
};
- void ChunkInfo::appendShortVersion( const char * name , BSONObjBuilder& b ) const {
- BSONObjBuilder bb( b.subobjStart( name ) );
- bb.append( "min" , min );
+ void ChunkInfo::appendShortVersion( const char * name , BSONObjBuilder& b ) const {
+ BSONObjBuilder bb( b.subobjStart( name ) );
+ bb.append( "min" , min );
bb.append( "max" , max );
bb.appendTimestamp( "lastmod" , lastmod );
- bb.done();
- }
+ bb.done();
+ }
string ChunkInfo::toString() const {
ostringstream os;
@@ -377,56 +381,56 @@ namespace mongo {
class SplitChunkCommand : public Command {
public:
- SplitChunkCommand() : Command( "splitChunk" ){}
+ SplitChunkCommand() : Command( "splitChunk" ) {}
virtual void help( stringstream& help ) const {
- help <<
- "internal command usage only\n"
- "example:\n"
- " { splitChunk:\"db.foo\" , keyPattern: {a:1} , min : {a:100} , max: {a:200} { splitKeys : [ {a:150} , ... ]}";
+ help <<
+ "internal command usage only\n"
+ "example:\n"
+ " { splitChunk:\"db.foo\" , keyPattern: {a:1} , min : {a:100} , max: {a:200} { splitKeys : [ {a:150} , ... ]}";
}
virtual bool slaveOk() const { return false; }
virtual bool adminOnly() const { return true; }
virtual LockType locktype() const { return NONE; }
- bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
+ bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
//
// 1. check whether parameters passed to splitChunk are sound
//
const string ns = cmdObj.firstElement().str();
- if ( ns.empty() ){
+ if ( ns.empty() ) {
errmsg = "need to specify namespace in command";
return false;
}
BSONObj keyPattern = cmdObj["keyPattern"].Obj();
- if ( keyPattern.isEmpty() ){
+ if ( keyPattern.isEmpty() ) {
errmsg = "need to specify the key pattern the collection is sharded over";
return false;
}
BSONObj min = cmdObj["min"].Obj();
- if ( min.isEmpty() ){
+ if ( min.isEmpty() ) {
errmsg = "neet to specify the min key for the chunk";
return false;
}
BSONObj max = cmdObj["max"].Obj();
- if ( max.isEmpty() ){
+ if ( max.isEmpty() ) {
errmsg = "neet to specify the max key for the chunk";
return false;
}
string from = cmdObj["from"].str();
- if ( from.empty() ){
+ if ( from.empty() ) {
errmsg = "need specify server to split chunk at";
return false;
}
BSONObj splitKeysElem = cmdObj["splitKeys"].Obj();
- if ( splitKeysElem.isEmpty() ){
+ if ( splitKeysElem.isEmpty() ) {
errmsg = "need to provide the split points to chunk over";
return false;
}
@@ -443,10 +447,10 @@ namespace mongo {
}
// It is possible that this is the first sharded command this mongod is asked to perform. If so,
- // start sharding apparatus. We'd still be missing some more shard-related info but we'll get it
+ // start sharding apparatus. We'd still be missing some more shard-related info but we'll get it
// in step 2. below.
- if ( ! shardingState.enabled() ){
- if ( cmdObj["configdb"].type() != String ){
+ if ( ! shardingState.enabled() ) {
+ if ( cmdObj["configdb"].type() != String ) {
errmsg = "sharding not enabled";
return false;
}
@@ -465,7 +469,7 @@ namespace mongo {
DistributedLock lockSetup( ConnectionString( shardingState.getConfigServer() , ConnectionString::SYNC) , ns );
dist_lock_try dlk( &lockSetup, string("split-") + min.toString() );
- if ( ! dlk.got() ){
+ if ( ! dlk.got() ) {
errmsg = "the collection's metadata lock is taken";
result.append( "who" , dlk.other() );
return false;
@@ -478,7 +482,7 @@ namespace mongo {
ChunkInfo origChunk;
{
ScopedDbConnection conn( shardingState.getConfigServer() );
-
+
BSONObj x = conn->findOne( ShardNS::chunk , Query( BSON( "ns" << ns ) ).sort( BSON( "lastmod" << -1 ) ) );
maxVersion = x["lastmod"];
@@ -488,7 +492,7 @@ namespace mongo {
assert( currChunk["max"].type() );
shard = currChunk["shard"].String();
conn.done();
-
+
BSONObj currMin = currChunk["min"].Obj();
BSONObj currMax = currChunk["max"].Obj();
if ( currMin.woCompare( min ) || currMax.woCompare( max ) ) {
@@ -498,7 +502,7 @@ namespace mongo {
result.append( "requestedMin" , min );
result.append( "requestedMax" , max );
- log( LL_WARNING ) << "aborted split because " << errmsg << ": " << min << "->" << max
+ log( LL_WARNING ) << "aborted split because " << errmsg << ": " << min << "->" << max
<< " is now " << currMin << "->" << currMax << endl;
return false;
}
@@ -513,12 +517,12 @@ namespace mongo {
return false;
}
- if ( maxVersion < shardingState.getVersion( ns ) ){
+ if ( maxVersion < shardingState.getVersion( ns ) ) {
errmsg = "official version less than mine?";
result.appendTimestamp( "officialVersion" , maxVersion );
result.appendTimestamp( "myVersion" , shardingState.getVersion( ns ) );
- log( LL_WARNING ) << "aborted split because " << errmsg << ": official " << maxVersion
+ log( LL_WARNING ) << "aborted split because " << errmsg << ": official " << maxVersion
<< " mine: " << shardingState.getVersion(ns) << endl;
return false;
}
@@ -536,7 +540,7 @@ namespace mongo {
}
- //
+ //
// 3. create the batch of updates to metadata ( the new chunks ) to be applied via 'applyOps' command
//
@@ -552,13 +556,13 @@ namespace mongo {
BSONObjBuilder cmdBuilder;
BSONArrayBuilder updates( cmdBuilder.subarrayStart( "applyOps" ) );
- for ( vector<BSONObj>::const_iterator it = splitKeys.begin(); it != splitKeys.end(); ++it ){
+ for ( vector<BSONObj>::const_iterator it = splitKeys.begin(); it != splitKeys.end(); ++it ) {
BSONObj endKey = *it;
// splits only update the 'minor' portion of version
myVersion.incMinor();
- // build an update operation against the chunks collection of the config database with
+ // build an update operation against the chunks collection of the config database with
// upsert true
BSONObjBuilder op;
op.append( "op" , "u" );
@@ -586,10 +590,10 @@ namespace mongo {
newChunks.push_back( ChunkInfo( startKey , endKey, myVersion ) );
startKey = endKey;
- }
+ }
updates.done();
-
+
{
BSONArrayBuilder preCond( cmdBuilder.subarrayStart( "preCondition" ) );
BSONObjBuilder b;
@@ -604,7 +608,7 @@ namespace mongo {
preCond.done();
}
- //
+ //
// 4. apply the batch of updates to metadata and to the chunk manager
//
@@ -620,7 +624,7 @@ namespace mongo {
conn.done();
}
- if ( ! ok ){
+ if ( ! ok ) {
stringstream ss;
ss << "saving chunks failed. cmd: " << cmd << " result: " << cmdResult;
error() << ss.str() << endl;
@@ -630,7 +634,7 @@ namespace mongo {
// install a chunk manager with knowledge about newly split chunks in this shard's state
splitKeys.pop_back(); // 'max' was used as sentinel
maxVersion.incMinor();
- shardingState.splitChunk( ns , min , max , splitKeys , maxVersion );
+ shardingState.splitChunk( ns , min , max , splitKeys , maxVersion );
//
// 5. logChanges
@@ -642,12 +646,13 @@ namespace mongo {
newChunks[1].appendShortVersion( "right" , logDetail );
configServer.logChange( "split" , ns , logDetail.obj() );
- } else {
+ }
+ else {
BSONObj beforeDetailObj = logDetail.obj();
BSONObj firstDetailObj = beforeDetailObj.getOwned();
const int newChunksSize = newChunks.size();
- for ( int i=0; i < newChunksSize; i++ ){
+ for ( int i=0; i < newChunksSize; i++ ) {
BSONObjBuilder chunkDetail;
chunkDetail.appendElements( beforeDetailObj );
chunkDetail.append( "number", i );
diff --git a/s/d_state.cpp b/s/d_state.cpp
index a8d6f9ed4c1..49ceb132c88 100644
--- a/s/d_state.cpp
+++ b/s/d_state.cpp
@@ -44,12 +44,12 @@ using namespace std;
namespace mongo {
// -----ShardingState START ----
-
+
ShardingState::ShardingState()
- : _enabled(false) , _mutex( "ShardingState" ){
+ : _enabled(false) , _mutex( "ShardingState" ) {
}
-
- void ShardingState::enable( const string& server ){
+
+ void ShardingState::enable( const string& server ) {
_enabled = true;
assert( server.size() );
if ( _configServer.size() == 0 )
@@ -58,56 +58,56 @@ namespace mongo {
assert( server == _configServer );
}
}
-
- void ShardingState::gotShardName( const string& name ){
- if ( _shardName.size() == 0 ){
+
+ void ShardingState::gotShardName( const string& name ) {
+ if ( _shardName.size() == 0 ) {
// TODO SERVER-2299 verify the name is sound w.r.t IPs
_shardName = name;
return;
}
-
+
if ( _shardName == name )
return;
stringstream ss;
- ss << "gotShardName different than what i had before "
- << " before [" << _shardName << "] "
- << " got [" << name << "] "
- ;
+ ss << "gotShardName different than what i had before "
+ << " before [" << _shardName << "] "
+ << " got [" << name << "] "
+ ;
uasserted( 13298 , ss.str() );
}
-
- void ShardingState::gotShardHost( string host ){
-
+
+ void ShardingState::gotShardHost( string host ) {
+
size_t slash = host.find( '/' );
if ( slash != string::npos )
host = host.substr( 0 , slash );
- if ( _shardHost.size() == 0 ){
+ if ( _shardHost.size() == 0 ) {
_shardHost = host;
return;
}
-
+
if ( _shardHost == host )
return;
stringstream ss;
- ss << "gotShardHost different than what i had before "
- << " before [" << _shardHost << "] "
- << " got [" << host << "] "
- ;
+ ss << "gotShardHost different than what i had before "
+ << " before [" << _shardHost << "] "
+ << " got [" << host << "] "
+ ;
uasserted( 13299 , ss.str() );
}
-
+
// TODO we shouldn't need three ways for checking the version. Fix this.
- bool ShardingState::hasVersion( const string& ns ){
+ bool ShardingState::hasVersion( const string& ns ) {
scoped_lock lk(_mutex);
ChunkManagersMap::const_iterator it = _chunks.find(ns);
return it != _chunks.end();
}
-
- bool ShardingState::hasVersion( const string& ns , ConfigVersion& version ){
+
+ bool ShardingState::hasVersion( const string& ns , ConfigVersion& version ) {
scoped_lock lk(_mutex);
ChunkManagersMap::const_iterator it = _chunks.find(ns);
@@ -118,7 +118,7 @@ namespace mongo {
version = p->getVersion();
return true;
}
-
+
const ConfigVersion ShardingState::getVersion( const string& ns ) const {
scoped_lock lk(_mutex);
@@ -126,11 +126,12 @@ namespace mongo {
if ( it != _chunks.end() ) {
ShardChunkManagerPtr p = it->second;
return p->getVersion();
- } else {
+ }
+ else {
return 0;
}
}
-
+
void ShardingState::donateChunk( const string& ns , const BSONObj& min , const BSONObj& max , ShardChunkVersion version ) {
scoped_lock lk( _mutex );
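
getVersion above is a find-or-default map lookup under the mutex; the same shape as a tiny generic helper (a sketch, not something the tree provides):

    #include <map>

    template <typename K, typename V>
    V getOrDefault( const std::map<K, V>& m, const K& key, V dflt ) {
        typename std::map<K, V>::const_iterator it = m.find( key );
        if ( it != m.end() ) {
            return it->second;
        }
        else {
            return dflt;
        }
    }

    // usage: unsigned long long v = getOrDefault( versions, ns, 0ULL );
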
@@ -157,28 +158,28 @@ namespace mongo {
void ShardingState::splitChunk( const string& ns , const BSONObj& min , const BSONObj& max , const vector<BSONObj>& splitKeys ,
ShardChunkVersion version ) {
scoped_lock lk( _mutex );
-
+
ChunkManagersMap::const_iterator it = _chunks.find( ns );
assert( it != _chunks.end() ) ;
ShardChunkManagerPtr p( it->second->cloneSplit( min , max , splitKeys , version ) );
_chunks[ns] = p;
}
- void ShardingState::resetVersion( const string& ns ) {
+ void ShardingState::resetVersion( const string& ns ) {
scoped_lock lk( _mutex );
_chunks.erase( ns );
}
-
+
bool ShardingState::trySetVersion( const string& ns , ConfigVersion& version /* IN-OUT */ ) {
// fast path - requested version is at the same version as this chunk manager
//
- // cases:
+ // cases:
// + this shard updated the version for a migrate's commit (FROM side)
// a client reloaded chunk state from config and picked the newest version
// + two clients reloaded
- // one triggered the 'slow path' (below)
+ // one triggered the 'slow path' (below)
// when the second's request gets here, the version is already current
{
scoped_lock lk( _mutex );
@@ -187,11 +188,11 @@ namespace mongo {
return true;
}
- // slow path - requested version is different than the current chunk manager's, if one exists, so must check for
+ // slow path - requested version is different than the current chunk manager's, if one exists, so must check for
// newest version in the config server
//
// cases:
- // + a chunk moved TO here
+ // + a chunk moved TO here
// (we don't bump up the version on the TO side but the commit to config does use higher version)
// a client reloads from config an issued the request
// + there was a take over from a secondary
@@ -216,7 +217,7 @@ namespace mongo {
}
}
- void ShardingState::appendInfo( BSONObjBuilder& b ){
+ void ShardingState::appendInfo( BSONObjBuilder& b ) {
b.appendBool( "enabled" , _enabled );
if ( ! _enabled )
return;
@@ -227,10 +228,10 @@ namespace mongo {
{
BSONObjBuilder bb( b.subobjStart( "versions" ) );
-
+
scoped_lock lk(_mutex);
- for ( ChunkManagersMap::iterator it = _chunks.begin(); it != _chunks.end(); ++it ){
+ for ( ChunkManagersMap::iterator it = _chunks.begin(); it != _chunks.end(); ++it ) {
ShardChunkManagerPtr p = it->second;
bb.appendTimestamp( it->first , p->getVersion() );
}
@@ -242,20 +243,21 @@ namespace mongo {
bool ShardingState::needShardChunkManager( const string& ns ) const {
if ( ! _enabled )
return false;
-
+
if ( ! ShardedConnectionInfo::get( false ) )
return false;
return true;
}
- ShardChunkManagerPtr ShardingState::getShardChunkManager( const string& ns ){
+ ShardChunkManagerPtr ShardingState::getShardChunkManager( const string& ns ) {
scoped_lock lk( _mutex );
ChunkManagersMap::const_iterator it = _chunks.find( ns );
if ( it == _chunks.end() ) {
return ShardChunkManagerPtr();
- } else {
+ }
+ else {
return it->second;
}
}
@@ -263,19 +265,19 @@ namespace mongo {
ShardingState shardingState;
// -----ShardingState END ----
-
+
// -----ShardedConnectionInfo START ----
boost::thread_specific_ptr<ShardedConnectionInfo> ShardedConnectionInfo::_tl;
- ShardedConnectionInfo::ShardedConnectionInfo(){
+ ShardedConnectionInfo::ShardedConnectionInfo() {
_forceVersionOk = false;
_id.clear();
}
-
- ShardedConnectionInfo* ShardedConnectionInfo::get( bool create ){
+
+ ShardedConnectionInfo* ShardedConnectionInfo::get( bool create ) {
ShardedConnectionInfo* info = _tl.get();
- if ( ! info && create ){
+ if ( ! info && create ) {
log(1) << "entering shard mode for connection" << endl;
info = new ShardedConnectionInfo();
_tl.reset( info );
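
ShardedConnectionInfo::get is lazy per-thread construction: each connection thread owns its own slot, created on the first sharded operation. The same pattern with C++11 thread_local in place of boost::thread_specific_ptr (ConnInfo is a made-up stand-in for the real class):

    #include <memory>

    struct ConnInfo {
        bool forceVersionOk;
        ConnInfo() : forceVersionOk(false) {}
    };

    ConnInfo* getConnInfo( bool create ) {
        thread_local std::unique_ptr<ConnInfo> info;  // one slot per thread
        if ( ! info && create ) {
            info.reset( new ConnInfo() );             // first sharded op on this thread
        }
        return info.get();
    }
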
@@ -283,7 +285,7 @@ namespace mongo {
return info;
}
- void ShardedConnectionInfo::reset(){
+ void ShardedConnectionInfo::reset() {
_tl.reset();
}
@@ -291,41 +293,42 @@ namespace mongo {
NSVersionMap::const_iterator it = _versions.find( ns );
if ( it != _versions.end() ) {
return it->second;
- } else {
+ }
+ else {
return 0;
}
}
-
- void ShardedConnectionInfo::setVersion( const string& ns , const ConfigVersion& version ){
+
+ void ShardedConnectionInfo::setVersion( const string& ns , const ConfigVersion& version ) {
_versions[ns] = version;
}
- void ShardedConnectionInfo::setID( const OID& id ){
+ void ShardedConnectionInfo::setID( const OID& id ) {
_id = id;
}
// -----ShardedConnectionInfo END ----
- unsigned long long extractVersion( BSONElement e , string& errmsg ){
- if ( e.eoo() ){
+ unsigned long long extractVersion( BSONElement e , string& errmsg ) {
+ if ( e.eoo() ) {
errmsg = "no version";
return 0;
}
-
+
if ( e.isNumber() )
return (unsigned long long)e.number();
-
+
if ( e.type() == Date || e.type() == Timestamp )
return e._numberLong();
-
+
errmsg = "version is not a numeric type";
return 0;
}
class MongodShardCommand : public Command {
public:
- MongodShardCommand( const char * n ) : Command( n ){
+ MongodShardCommand( const char * n ) : Command( n ) {
}
virtual bool slaveOk() const {
return false;
@@ -334,12 +337,12 @@ namespace mongo {
return true;
}
};
-
-
- bool haveLocalShardingInfo( const string& ns ){
+
+
+ bool haveLocalShardingInfo( const string& ns ) {
if ( ! shardingState.enabled() )
return false;
-
+
if ( ! shardingState.hasVersion( ns ) )
return false;
@@ -348,32 +351,32 @@ namespace mongo {
class UnsetShardingCommand : public MongodShardCommand {
public:
- UnsetShardingCommand() : MongodShardCommand("unsetSharding"){}
+ UnsetShardingCommand() : MongodShardCommand("unsetSharding") {}
virtual void help( stringstream& help ) const {
help << " example: { unsetSharding : 1 } ";
}
-
- virtual LockType locktype() const { return NONE; }
-
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+
+ virtual LockType locktype() const { return NONE; }
+
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
ShardedConnectionInfo::reset();
return true;
- }
-
+ }
+
} unsetShardingCommand;
-
+
class SetShardVersion : public MongodShardCommand {
public:
- SetShardVersion() : MongodShardCommand("setShardVersion"){}
+ SetShardVersion() : MongodShardCommand("setShardVersion") {}
virtual void help( stringstream& help ) const {
help << " example: { setShardVersion : 'alleyinsider.foo' , version : 1 , configdb : '' } ";
}
-
+
virtual LockType locktype() const { return WRITE; } // TODO: figure out how to make this not need to lock
-
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
lastError.disableForCommand();
ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );
@@ -381,20 +384,21 @@ namespace mongo {
bool authoritative = cmdObj.getBoolField( "authoritative" );
string configdb = cmdObj["configdb"].valuestrsafe();
- { // configdb checking
- if ( configdb.size() == 0 ){
+ {
+ // configdb checking
+ if ( configdb.size() == 0 ) {
errmsg = "no configdb";
return false;
}
-
- if ( shardingState.enabled() ){
- if ( configdb != shardingState.getConfigServer() ){
+
+ if ( shardingState.enabled() ) {
+ if ( configdb != shardingState.getConfigServer() ) {
errmsg = "specified a different configdb!";
return false;
}
}
else {
- if ( ! authoritative ){
+ if ( ! authoritative ) {
result.appendBool( "need_authoritative" , true );
errmsg = "first setShardVersion";
return false;
@@ -404,23 +408,24 @@ namespace mongo {
}
}
- if ( cmdObj["shard"].type() == String ){
+ if ( cmdObj["shard"].type() == String ) {
shardingState.gotShardName( cmdObj["shard"].String() );
shardingState.gotShardHost( cmdObj["shardHost"].String() );
}
- { // setting up ids
- if ( cmdObj["serverID"].type() != jstOID ){
+ {
+ // setting up ids
+ if ( cmdObj["serverID"].type() != jstOID ) {
// TODO: fix this
//errmsg = "need serverID to be an OID";
//return 0;
}
else {
OID clientId = cmdObj["serverID"].__oid();
- if ( ! info->hasID() ){
+ if ( ! info->hasID() ) {
info->setID( clientId );
}
- else if ( clientId != info->getID() ){
+ else if ( clientId != info->getID() ) {
errmsg = "server id has changed!";
return 0;
}
@@ -429,32 +434,32 @@ namespace mongo {
unsigned long long version = extractVersion( cmdObj["version"] , errmsg );
- if ( errmsg.size() ){
+ if ( errmsg.size() ) {
return false;
}
-
+
string ns = cmdObj["setShardVersion"].valuestrsafe();
- if ( ns.size() == 0 ){
+ if ( ns.size() == 0 ) {
errmsg = "need to speciy fully namespace";
return false;
}
-
+
const ConfigVersion oldVersion = info->getVersion(ns);
const ConfigVersion globalVersion = shardingState.getVersion(ns);
-
- if ( oldVersion > 0 && globalVersion == 0 ){
+
+ if ( oldVersion > 0 && globalVersion == 0 ) {
// this had been reset
info->setVersion( ns , 0 );
}
- if ( version == 0 && globalVersion == 0 ){
+ if ( version == 0 && globalVersion == 0 ) {
// this connection is cleaning itself
info->setVersion( ns , 0 );
return true;
}
- if ( version == 0 && globalVersion > 0 ){
- if ( ! authoritative ){
+ if ( version == 0 && globalVersion > 0 ) {
+ if ( ! authoritative ) {
result.appendBool( "need_authoritative" , true );
result.append( "ns" , ns );
result.appendTimestamp( "globalVersion" , globalVersion );
@@ -471,7 +476,7 @@ namespace mongo {
return true;
}
- if ( version < oldVersion ){
+ if ( version < oldVersion ) {
errmsg = "you already have a newer version of collection '" + ns + "'";
result.append( "ns" , ns );
result.appendTimestamp( "oldVersion" , oldVersion );
@@ -479,9 +484,9 @@ namespace mongo {
result.appendTimestamp( "globalVersion" , globalVersion );
return false;
}
-
- if ( version < globalVersion ){
- while ( shardingState.inCriticalMigrateSection() ){
+
+ if ( version < globalVersion ) {
+ while ( shardingState.inCriticalMigrateSection() ) {
dbtemprelease r;
sleepmillis(2);
log() << "waiting till out of critical section" << endl;
@@ -492,8 +497,8 @@ namespace mongo {
result.appendTimestamp( "globalVersion" , globalVersion );
return false;
}
-
- if ( globalVersion == 0 && ! cmdObj.getBoolField( "authoritative" ) ){
+
+ if ( globalVersion == 0 && ! cmdObj.getBoolField( "authoritative" ) ) {
// need authoritative for first look
result.append( "ns" , ns );
result.appendBool( "need_authoritative" , true );
@@ -505,7 +510,7 @@ namespace mongo {
dbtemprelease unlock;
ShardChunkVersion currVersion = version;
- if ( ! shardingState.trySetVersion( ns , currVersion ) ){
+ if ( ! shardingState.trySetVersion( ns , currVersion ) ) {
errmsg = str::stream() << "client version differs from config's for colleciton '" << ns << "'";
result.append( "ns" , ns );
result.appendTimestamp( "version" , version );
@@ -520,72 +525,72 @@ namespace mongo {
return true;
}
-
+
} setShardVersionCmd;
-
+
class GetShardVersion : public MongodShardCommand {
public:
- GetShardVersion() : MongodShardCommand("getShardVersion"){}
+ GetShardVersion() : MongodShardCommand("getShardVersion") {}
virtual void help( stringstream& help ) const {
help << " example: { getShardVersion : 'alleyinsider.foo' } ";
}
-
- virtual LockType locktype() const { return NONE; }
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ virtual LockType locktype() const { return NONE; }
+
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string ns = cmdObj["getShardVersion"].valuestrsafe();
- if ( ns.size() == 0 ){
+ if ( ns.size() == 0 ) {
errmsg = "need to speciy fully namespace";
return false;
}
-
+
result.append( "configServer" , shardingState.getConfigServer() );
result.appendTimestamp( "global" , shardingState.getVersion(ns) );
-
+
ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
if ( info )
result.appendTimestamp( "mine" , info->getVersion(ns) );
- else
+ else
result.appendTimestamp( "mine" , 0 );
-
+
return true;
}
-
+
} getShardVersion;
class ShardingStateCmd : public MongodShardCommand {
public:
- ShardingStateCmd() : MongodShardCommand( "shardingState" ){}
+ ShardingStateCmd() : MongodShardCommand( "shardingState" ) {}
virtual LockType locktype() const { return WRITE; } // TODO: figure out how to make this not need to lock
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
shardingState.appendInfo( result );
return true;
}
-
+
} shardingStateCmd;
/**
* @ return true if not in sharded mode
or if version for this client is ok
*/
- bool shardVersionOk( const string& ns , bool isWriteOp , string& errmsg ){
+ bool shardVersionOk( const string& ns , bool isWriteOp , string& errmsg ) {
if ( ! shardingState.enabled() )
return true;
ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
- if ( ! info ){
+ if ( ! info ) {
// this means the client has nothing sharded
// so this allows direct connections to do whatever they want
// which i think is the correct behavior
return true;
}
-
- if ( info->inForceVersionOkMode() ){
+
+ if ( info->inForceVersionOkMode() ) {
return true;
}
@@ -593,33 +598,33 @@ namespace mongo {
// all collections at some point, be sharded or not, will have a version (and a ShardChunkManager)
// for now, we remove the sharding state of dropped collection
// so delayed request may come in. This has to be fixed.
- ConfigVersion version;
- if ( ! shardingState.hasVersion( ns , version ) ){
+ ConfigVersion version;
+ if ( ! shardingState.hasVersion( ns , version ) ) {
return true;
}
ConfigVersion clientVersion = info->getVersion(ns);
- if ( version == 0 && clientVersion > 0 ){
+ if ( version == 0 && clientVersion > 0 ) {
stringstream ss;
ss << "collection was dropped or this shard no longer valied version: " << version << " clientVersion: " << clientVersion;
errmsg = ss.str();
return false;
}
-
+
if ( clientVersion >= version )
return true;
-
- if ( clientVersion == 0 ){
+
+ if ( clientVersion == 0 ) {
stringstream ss;
ss << "client in sharded mode, but doesn't have version set for this collection: " << ns << " myVersion: " << version;
errmsg = ss.str();
return false;
}
- if ( isWriteOp && version.majorVersion() == clientVersion.majorVersion() ){
- // this means there was just a split
+ if ( isWriteOp && version.majorVersion() == clientVersion.majorVersion() ) {
+ // this means there was just a split
// since on a split without a migrate this server is still ok,
// so we're going to accept the write
return true;
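
The checks above form a short decision ladder; paraphrased below with plain integers standing in for ConfigVersion timestamps (sameMajor abbreviates the majorVersion comparison):

    bool versionOk( unsigned long long clientVersion , unsigned long long globalVersion ,
                    bool isWriteOp , bool sameMajor ) {
        if ( globalVersion == 0 && clientVersion > 0 )
            return false;                 // dropped, or shard no longer valid
        if ( clientVersion >= globalVersion )
            return true;                  // client is current or ahead
        if ( clientVersion == 0 )
            return false;                 // sharded mode but no version set
        if ( isWriteOp && sameMajor )
            return true;                  // just a split: writes still land here
        return false;                     // stale: client must reload
    }
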
diff --git a/s/d_writeback.cpp b/s/d_writeback.cpp
index 46b7893cc3b..401e0aa1638 100644
--- a/s/d_writeback.cpp
+++ b/s/d_writeback.cpp
@@ -32,29 +32,29 @@ namespace mongo {
// TODO init at mongod startup
WriteBackManager writeBackManager;
- WriteBackManager::WriteBackManager() : _writebackQueueLock("sharding:writebackQueueLock"){
+ WriteBackManager::WriteBackManager() : _writebackQueueLock("sharding:writebackQueueLock") {
}
- WriteBackManager::~WriteBackManager(){
+ WriteBackManager::~WriteBackManager() {
}
- void WriteBackManager::queueWriteBack( const string& remote , const BSONObj& o ){
+ void WriteBackManager::queueWriteBack( const string& remote , const BSONObj& o ) {
getWritebackQueue( remote )->push( o );
}
- BlockingQueue<BSONObj>* WriteBackManager::getWritebackQueue( const string& remote ){
+ BlockingQueue<BSONObj>* WriteBackManager::getWritebackQueue( const string& remote ) {
scoped_lock lk ( _writebackQueueLock );
BlockingQueue<BSONObj>*& q = _writebackQueues[remote];
if ( ! q )
q = new BlockingQueue<BSONObj>();
return q;
- }
+ }
- bool WriteBackManager::queuesEmpty() const{
+ bool WriteBackManager::queuesEmpty() const {
scoped_lock lk( _writebackQueueLock );
- for ( WriteBackQueuesMap::const_iterator it = _writebackQueues.begin(); it != _writebackQueues.end(); ++it ){
+ for ( WriteBackQueuesMap::const_iterator it = _writebackQueues.begin(); it != _writebackQueues.end(); ++it ) {
const BlockingQueue<BSONObj>* queue = it->second;
- if (! queue->empty() ){
+ if (! queue->empty() ) {
return false;
}
}
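
queuesEmpty above, restated over a plain map of queues; this assumes only that each queue exposes empty(), as the BlockingQueue in the diff does:

    #include <map>
    #include <queue>
    #include <string>

    bool queuesEmpty( const std::map< std::string , std::queue<int>* >& queues ) {
        for ( std::map< std::string , std::queue<int>* >::const_iterator it = queues.begin();
              it != queues.end(); ++it ) {
            if ( ! it->second->empty() ) {
                return false;             // at least one remote still has writebacks queued
            }
        }
        return true;
    }
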
@@ -66,25 +66,25 @@ namespace mongo {
// Note, this command will block until there is something to WriteBack
class WriteBackCommand : public Command {
public:
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual bool slaveOk() const { return true; }
virtual bool adminOnly() const { return true; }
-
- WriteBackCommand() : Command( "writebacklisten" ){}
+
+ WriteBackCommand() : Command( "writebacklisten" ) {}
void help(stringstream& h) const { h<<"internal"; }
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
BSONElement e = cmdObj.firstElement();
- if ( e.type() != jstOID ){
+ if ( e.type() != jstOID ) {
errmsg = "need oid as first value";
return 0;
}
// get the command issuer's (a mongos) serverID
const OID id = e.__oid();
-
+
// the command issuer is blocked awaiting a response
// we want to do return at least at every 5 minutes so sockets don't timeout
BSONObj z;
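
The listener's contract, per the comment above, is "block until there is a writeback, but return at least every few minutes so the socket stays warm". A minimal sketch with a standard condition_variable in place of mongo's BlockingQueue (names are hypothetical):

    #include <chrono>
    #include <condition_variable>
    #include <deque>
    #include <mutex>

    std::mutex qm;
    std::condition_variable qcv;
    std::deque<int> writebacks;            // stand-in for the per-mongos queue

    bool popOrTimeout( int& out ) {
        std::unique_lock<std::mutex> lk( qm );
        if ( qcv.wait_for( lk , std::chrono::minutes(5) ,
                           [] { return ! writebacks.empty(); } ) ) {
            out = writebacks.front();
            writebacks.pop_front();
            return true;                   // a real writeback to send
        }
        return false;                      // timed out: the caller returns a noop
    }
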
@@ -95,25 +95,25 @@ namespace mongo {
else {
result.appendBool( "noop" , true );
}
-
+
return true;
}
} writeBackCommand;
class WriteBacksQueuedCommand : public Command {
public:
- virtual LockType locktype() const { return NONE; }
+ virtual LockType locktype() const { return NONE; }
virtual bool slaveOk() const { return true; }
virtual bool adminOnly() const { return true; }
-
- WriteBacksQueuedCommand() : Command( "writeBacksQueued" ){}
- void help(stringstream& help) const {
+ WriteBacksQueuedCommand() : Command( "writeBacksQueued" ) {}
+
+ void help(stringstream& help) const {
help << "Returns whether there are operations in the writeback queue at the time the command was called. "
- << "This is an internal comand";
+ << "This is an internal comand";
}
- bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
result.appendBool( "hasOpsQueued" , ! writeBackManager.queuesEmpty() );
return true;
}
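
The shape above — one lazily created blocking queue per mongos serverID, with writebacklisten blocking until data arrives — can be sketched without the mongo classes. A minimal illustration, assuming std::string stands in for BSONObj and SimpleBlockingQueue for mongo's BlockingQueue; none of these names come from the source:

#include <condition_variable>
#include <map>
#include <mutex>
#include <queue>
#include <string>

template <typename T>
class SimpleBlockingQueue {
public:
    void push(T v) {
        std::lock_guard<std::mutex> lk(_m);
        _q.push(std::move(v));
        _cv.notify_one();
    }
    // blocks until an element is available, like the writebacklisten command
    T pop() {
        std::unique_lock<std::mutex> lk(_m);
        _cv.wait(lk, [this] { return !_q.empty(); });
        T v = std::move(_q.front());
        _q.pop();
        return v;
    }
    bool empty() const {
        std::lock_guard<std::mutex> lk(_m);
        return _q.empty();
    }
private:
    mutable std::mutex _m;
    std::condition_variable _cv;
    std::queue<T> _q;
};

// per-remote queues created on demand under a lock, the same shape as
// WriteBackManager::getWritebackQueue (keyed by the mongos serverID)
class WriteBackSketch {
public:
    SimpleBlockingQueue<std::string>* queueFor(const std::string& remote) {
        std::lock_guard<std::mutex> lk(_lock);
        SimpleBlockingQueue<std::string>*& q = _queues[remote];
        if (!q)
            q = new SimpleBlockingQueue<std::string>();
        return q;
    }
private:
    std::mutex _lock;
    std::map<std::string, SimpleBlockingQueue<std::string>*> _queues;
};

The real command additionally wakes up every few minutes so the blocked mongos socket doesn't time out; a timed wait (std::condition_variable::wait_for) that returns a "noop" on timeout would model that part.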
diff --git a/s/d_writeback.h b/s/d_writeback.h
index 365a3e6aadc..32f5b1ca7da 100644
--- a/s/d_writeback.h
+++ b/s/d_writeback.h
@@ -31,7 +31,7 @@ namespace mongo {
*
* The class is thread safe.
*/
- class WriteBackManager{
+ class WriteBackManager {
public:
WriteBackManager();
~WriteBackManager();
diff --git a/s/grid.cpp b/s/grid.cpp
index 969f4448f85..5d337d38ce4 100644
--- a/s/grid.cpp
+++ b/s/grid.cpp
@@ -27,38 +27,39 @@
#include "shard.h"
namespace mongo {
-
- DBConfigPtr Grid::getDBConfig( string database , bool create , const string& shardNameHint ){
+
+ DBConfigPtr Grid::getDBConfig( string database , bool create , const string& shardNameHint ) {
{
string::size_type i = database.find( "." );
if ( i != string::npos )
database = database.substr( 0 , i );
}
-
+
if ( database == "config" )
return configServerPtr;
scoped_lock l( _lock );
DBConfigPtr& cc = _databases[database];
- if ( !cc ){
+ if ( !cc ) {
cc.reset(new DBConfig( database ));
- if ( ! cc->load() ){
- if ( create ){
+ if ( ! cc->load() ) {
+ if ( create ) {
// note here that cc->primary == 0.
log() << "couldn't find database [" << database << "] in config db" << endl;
-
- { // lets check case
+
+ {
+ // lets check case
ScopedDbConnection conn( configServer.modelServer() );
BSONObjBuilder b;
b.appendRegex( "_id" , (string)"^" + database + "$" , "i" );
BSONObj d = conn->findOne( ShardNS::database , b.obj() );
conn.done();
- if ( ! d.isEmpty() ){
+ if ( ! d.isEmpty() ) {
cc.reset();
stringstream ss;
- ss << "can't have 2 databases that just differ on case "
+ ss << "can't have 2 databases that just differ on case "
<< " have: " << d["_id"].String()
<< " want to add: " << database;
@@ -67,20 +68,22 @@ namespace mongo {
}
Shard primary;
- if ( database == "admin" ){
+ if ( database == "admin" ) {
primary = configServer.getPrimary();
- } else if ( shardNameHint.empty() ){
+ }
+ else if ( shardNameHint.empty() ) {
primary = Shard::pick();
- } else {
+ }
+ else {
// use the shard name if provided
Shard shard;
shard.reset( shardNameHint );
primary = shard;
}
- if ( primary.ok() ){
+ if ( primary.ok() ) {
cc->setPrimary( primary.getName() ); // saves 'cc' to configDB
log() << "\t put [" << database << "] on: " << primary << endl;
}
@@ -94,45 +97,45 @@ namespace mongo {
cc.reset();
}
}
-
+
}
-
+
return cc;
}
- void Grid::removeDB( string database ){
+ void Grid::removeDB( string database ) {
uassert( 10186 , "removeDB expects db name" , database.find( '.' ) == string::npos );
scoped_lock l( _lock );
_databases.erase( database );
-
+
}
bool Grid::allowLocalHost() const {
return _allowLocalShard;
}
- void Grid::setAllowLocalHost( bool allow ){
+ void Grid::setAllowLocalHost( bool allow ) {
_allowLocalShard = allow;
}
- bool Grid::addShard( string* name , const ConnectionString& servers , long long maxSize , string& errMsg ){
+ bool Grid::addShard( string* name , const ConnectionString& servers , long long maxSize , string& errMsg ) {
// name can be NULL, so provide a dummy one here to avoid testing it elsewhere
string nameInternal;
if ( ! name ) {
name = &nameInternal;
}
- // Check whether the host (or set) exists and run several sanity checks on this request.
+ // Check whether the host (or set) exists and run several sanity checks on this request.
// There are two sets of sanity checks: making sure adding this particular shard is consistent
- // with the replica set state (if it exists) and making sure this shards databases can be
+ // with the replica set state (if it exists) and making sure this shard's databases can be
// brought into the grid without conflict.
vector<string> dbNames;
try {
ScopedDbConnection newShardConn( servers );
newShardConn->getLastError();
-
- if ( newShardConn->type() == ConnectionString::SYNC ){
+
+ if ( newShardConn->type() == ConnectionString::SYNC ) {
newShardConn.done();
errMsg = "can't use sync cluster as a shard. for replica set, have to use <setname>/<server1>,<server2>,...";
return false;
@@ -147,10 +150,10 @@ namespace mongo {
newShardConn.done();
return false;
}
-
+
BSONObj resIsMaster;
ok = newShardConn->runCommand( "admin" , BSON( "isMaster" << 1 ) , resIsMaster );
- if ( !ok ){
+ if ( !ok ) {
ostringstream ss;
ss << "failed running isMaster: " << resIsMaster;
errMsg = ss.str();
@@ -161,7 +164,7 @@ namespace mongo {
// if the shard has only one host, make sure it is not part of a replica set
string setName = resIsMaster["setName"].str();
string commandSetName = servers.getSetName();
- if ( commandSetName.empty() && ! setName.empty() ){
+ if ( commandSetName.empty() && ! setName.empty() ) {
ostringstream ss;
ss << "host is part of set: " << setName << " use replica set url format <setname>/<server1>,<server2>,....";
errMsg = ss.str();
@@ -170,7 +173,7 @@ namespace mongo {
}
// if the shard is part of replica set, make sure it is the right one
- if ( ! commandSetName.empty() && ( commandSetName != setName ) ){
+ if ( ! commandSetName.empty() && ( commandSetName != setName ) ) {
ostringstream ss;
ss << "host is part of a different set: " << setName;
errMsg = ss.str();
@@ -178,34 +181,34 @@ namespace mongo {
return false;
}
- // if the shard is part of a replica set, make sure all the hosts mentioned in 'servers' are part of
+ // if the shard is part of a replica set, make sure all the hosts mentioned in 'servers' are part of
// the set. It is fine if not all members of the set are present in 'servers'.
bool foundAll = true;
string offendingHost;
- if ( ! commandSetName.empty() ){
+ if ( ! commandSetName.empty() ) {
set<string> hostSet;
BSONObjIterator iter( resIsMaster["hosts"].Obj() );
- while ( iter.more() ){
+ while ( iter.more() ) {
hostSet.insert( iter.next().String() ); // host:port
}
- if ( resIsMaster["passives"].isABSONObj() ){
+ if ( resIsMaster["passives"].isABSONObj() ) {
BSONObjIterator piter( resIsMaster["passives"].Obj() );
- while ( piter.more() ){
+ while ( piter.more() ) {
hostSet.insert( piter.next().String() ); // host:port
}
}
vector<HostAndPort> hosts = servers.getServers();
- for ( size_t i = 0 ; i < hosts.size() ; i++ ){
+ for ( size_t i = 0 ; i < hosts.size() ; i++ ) {
string host = hosts[i].toString(); // host:port
- if ( hostSet.find( host ) == hostSet.end() ){
+ if ( hostSet.find( host ) == hostSet.end() ) {
offendingHost = host;
foundAll = false;
break;
}
}
}
- if ( ! foundAll ){
+ if ( ! foundAll ) {
ostringstream ss;
ss << "host " << offendingHost << " does not belong to replica set as a non-passive member" << setName;;
errMsg = ss.str();
@@ -215,15 +218,15 @@ namespace mongo {
// shard name defaults to the name of the replica set
if ( name->empty() && ! setName.empty() )
- *name = setName;
+ *name = setName;
- // In order to be accepted as a new shard, that mongod must not have any database name that exists already
- // in any other shards. If that test passes, the new shard's databases are going to be entered as
+ // In order to be accepted as a new shard, that mongod must not have any database name that exists already
+ // in any other shards. If that test passes, the new shard's databases are going to be entered as
// non-sharded db's whose primary is the newly added shard.
BSONObj resListDB;
ok = newShardConn->runCommand( "admin" , BSON( "listDatabases" << 1 ) , resListDB );
- if ( !ok ){
+ if ( !ok ) {
ostringstream ss;
ss << "failed listing " << servers.toString() << "'s databases:" << resListDB;
errMsg = ss.str();
@@ -232,20 +235,21 @@ namespace mongo {
}
BSONObjIterator i( resListDB["databases"].Obj() );
- while ( i.more() ){
+ while ( i.more() ) {
BSONObj dbEntry = i.next().Obj();
const string& dbName = dbEntry["name"].String();
- if ( _isSpecialLocalDB( dbName ) ){
+ if ( _isSpecialLocalDB( dbName ) ) {
// 'local', 'admin', and 'config' are system DBs and should be excluded here
continue;
- } else {
+ }
+ else {
dbNames.push_back( dbName );
}
}
newShardConn.done();
}
- catch ( DBException& e ){
+ catch ( DBException& e ) {
ostringstream ss;
ss << "couldn't connect to new shard ";
ss << e.what();
@@ -254,9 +258,9 @@ namespace mongo {
}
// check that none of the existing shard candidate's db's exist elsewhere
- for ( vector<string>::const_iterator it = dbNames.begin(); it != dbNames.end(); ++it ){
+ for ( vector<string>::const_iterator it = dbNames.begin(); it != dbNames.end(); ++it ) {
DBConfigPtr config = getDBConfig( *it , false );
- if ( config.get() != NULL ){
+ if ( config.get() != NULL ) {
ostringstream ss;
ss << "can't add shard " << servers.toString() << " because a local database '" << *it;
ss << "' exists in another " << config->getPrimary().toString();
@@ -266,26 +270,26 @@ namespace mongo {
}
// if a name for a shard wasn't provided, pick one.
- if ( name->empty() && ! _getNewShardName( name ) ){
+ if ( name->empty() && ! _getNewShardName( name ) ) {
errMsg = "error generating new shard name";
return false;
}
-
+
// build the ConfigDB shard document
BSONObjBuilder b;
b.append( "_id" , *name );
b.append( "host" , servers.toString() );
- if ( maxSize > 0 ){
+ if ( maxSize > 0 ) {
b.append( ShardFields::maxSize.name() , maxSize );
}
BSONObj shardDoc = b.obj();
{
ScopedDbConnection conn( configServer.getPrimary() );
-
+
// check whether the set of hosts (or single host) is not already a known shard
BSONObj old = conn->findOne( ShardNS::shard , BSON( "host" << servers.toString() ) );
- if ( ! old.isEmpty() ){
+ if ( ! old.isEmpty() ) {
errMsg = "host already used";
conn.done();
return false;
@@ -295,7 +299,7 @@ namespace mongo {
conn->insert( ShardNS::shard , shardDoc );
errMsg = conn->getLastError();
- if ( ! errMsg.empty() ){
+ if ( ! errMsg.empty() ) {
log() << "error adding shard: " << shardDoc << " err: " << errMsg << endl;
conn.done();
return false;
@@ -307,37 +311,37 @@ namespace mongo {
Shard::reloadShardInfo();
// add all databases of the new shard
- for ( vector<string>::const_iterator it = dbNames.begin(); it != dbNames.end(); ++it ){
+ for ( vector<string>::const_iterator it = dbNames.begin(); it != dbNames.end(); ++it ) {
DBConfigPtr config = getDBConfig( *it , true , *name );
- if ( ! config ){
- log() << "adding shard " << servers << " even though could not add database " << *it << endl;
+ if ( ! config ) {
+ log() << "adding shard " << servers << " even though could not add database " << *it << endl;
}
}
return true;
}
-
- bool Grid::knowAboutShard( const string& name ) const{
+
+ bool Grid::knowAboutShard( const string& name ) const {
ShardConnection conn( configServer.getPrimary() , "" );
BSONObj shard = conn->findOne( ShardNS::shard , BSON( "host" << name ) );
conn.done();
return ! shard.isEmpty();
}
- bool Grid::_getNewShardName( string* name ) const{
+ bool Grid::_getNewShardName( string* name ) const {
DEV assert( name );
bool ok = false;
- int count = 0;
+ int count = 0;
ShardConnection conn( configServer.getPrimary() , "" );
- BSONObj o = conn->findOne( ShardNS::shard , Query( fromjson ( "{_id: /^shard/}" ) ).sort( BSON( "_id" << -1 ) ) );
+ BSONObj o = conn->findOne( ShardNS::shard , Query( fromjson ( "{_id: /^shard/}" ) ).sort( BSON( "_id" << -1 ) ) );
if ( ! o.isEmpty() ) {
string last = o["_id"].String();
istringstream is( last.substr( 5 ) );
is >> count;
count++;
- }
+ }
if (count < 9999) {
stringstream ss;
ss << "shard" << setfill('0') << setw(4) << count;
@@ -364,7 +368,7 @@ namespace mongo {
return true;
}
- bool Grid::_balancerStopped( const BSONObj& balancerDoc ) {
+ bool Grid::_balancerStopped( const BSONObj& balancerDoc ) {
// check the 'stopped' marker
// if present, it is a simple bool
BSONElement stoppedElem = balancerDoc["stopped"];
@@ -374,10 +378,10 @@ namespace mongo {
return false;
}
- bool Grid::_inBalancingWindow( const BSONObj& balancerDoc , const boost::posix_time::ptime& now ) {
+ bool Grid::_inBalancingWindow( const BSONObj& balancerDoc , const boost::posix_time::ptime& now ) {
// check the 'activeWindow' marker
// if present, it is an interval during the day when the balancer should be active
- // { start: "08:00" , stop: "19:30" }, strftime format is %H:%M
+ // { start: "08:00" , stop: "19:30" }, strftime format is %H:%M
BSONElement windowElem = balancerDoc["activeWindow"];
if ( windowElem.eoo() ) {
return true;
@@ -402,14 +406,15 @@ namespace mongo {
log(1) << "cannot parse active window (use hh:mm 24hs format): " << intervalDoc << endl;
return true;
}
-
+
// allow balancing if during the activeWindow
// note that a window may be open during the night
if ( stopTime > startTime ) {
if ( ( now >= startTime ) && ( now <= stopTime ) ) {
return true;
}
- } else if ( startTime > stopTime ) {
+ }
+ else if ( startTime > stopTime ) {
if ( ( now >= startTime ) || ( now <= stopTime ) ) {
return true;
}
@@ -420,7 +425,7 @@ namespace mongo {
unsigned long long Grid::getNextOpTime() const {
ScopedDbConnection conn( configServer.getPrimary() );
-
+
BSONObj result;
massert( 10421 , "getoptime failed" , conn->simpleCommand( "admin" , &result , "getoptime" ) );
conn.done();
@@ -428,7 +433,7 @@ namespace mongo {
return result["optime"]._numberLong();
}
- bool Grid::_isSpecialLocalDB( const string& dbName ){
+ bool Grid::_isSpecialLocalDB( const string& dbName ) {
return ( dbName == "local" ) || ( dbName == "admin" ) || ( dbName == "config" );
}
@@ -438,7 +443,7 @@ namespace mongo {
class BalancingWindowUnitTest : public UnitTest {
public:
- void run(){
+ void run() {
// T0 < T1 < now < T2 < T3 and Error
const string T0 = "9:00";
const string T1 = "11:00";
@@ -446,17 +451,17 @@ namespace mongo {
const string T2 = "17:00";
const string T3 = "21:30";
const string E = "28:35";
-
+
BSONObj w1 = BSON( "activeWindow" << BSON( "start" << T0 << "stop" << T1 ) ); // closed in the past
BSONObj w2 = BSON( "activeWindow" << BSON( "start" << T2 << "stop" << T3 ) ); // not opened until the future
BSONObj w3 = BSON( "activeWindow" << BSON( "start" << T1 << "stop" << T2 ) ); // open now
BSONObj w4 = BSON( "activeWindow" << BSON( "start" << T3 << "stop" << T2 ) ); // open since last day
-
+
assert( ! Grid::_inBalancingWindow( w1 , now ) );
assert( ! Grid::_inBalancingWindow( w2 , now ) );
assert( Grid::_inBalancingWindow( w3 , now ) );
assert( Grid::_inBalancingWindow( w4 , now ) );
-
+
// bad input should not stop the balancer
BSONObj w5; // empty window
@@ -464,7 +469,7 @@ namespace mongo {
BSONObj w7 = BSON( "activeWindow" << BSON( "stop" << 1 ) ); // missing start
BSONObj w8 = BSON( "wrongMarker" << 1 << "start" << 1 << "stop" << 1 ); // active window marker missing
BSONObj w9 = BSON( "activeWindow" << BSON( "start" << T3 << "stop" << E ) ); // garbage in window
-
+
assert( Grid::_inBalancingWindow( w5 , now ) );
assert( Grid::_inBalancingWindow( w6 , now ) );
assert( Grid::_inBalancingWindow( w7 , now ) );
@@ -475,4 +480,4 @@ namespace mongo {
}
} BalancingWindowObjTest;
-}
+}
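
The activeWindow check in _inBalancingWindow above reduces to comparing hh:mm times with a possible wrap past midnight. A small self-contained sketch using minutes-since-midnight integers; parseHHMM and inWindow are illustrative names, and the handling of equal start/stop bounds is an assumption, since the source only shows the two inequality branches:

#include <cstdio>

// returns minutes since midnight, or -1 if the string is not hh:mm
int parseHHMM(const char* s) {
    int h = 0, m = 0;
    if (std::sscanf(s, "%d:%d", &h, &m) != 2) return -1;
    if (h < 0 || h > 23 || m < 0 || m > 59) return -1;
    return h * 60 + m;
}

// mirrors Grid::_inBalancingWindow: a window may wrap past midnight, so
// start > stop means "open overnight"; unparseable input keeps the balancer on
bool inWindow(int now, int start, int stop) {
    if (start < 0 || stop < 0) return true;   // garbage should not stop the balancer
    if (stop > start)                         // same-day window
        return now >= start && now <= stop;
    if (start > stop)                         // window wraps past midnight
        return now >= start || now <= stop;
    return false;                             // equal bounds: treated as closed (assumption)
}

With the unit test's values, inWindow(parseHHMM("13:00"), parseHHMM("11:00"), parseHHMM("17:00")) holds, and the w4 window ("21:30" to "17:00") stays open overnight, matching the asserts in BalancingWindowUnitTest.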
diff --git a/s/grid.h b/s/grid.h
index fe8b04d3652..5692a82dab1 100644
--- a/s/grid.h
+++ b/s/grid.h
@@ -40,7 +40,7 @@ namespace mongo {
* will return an empty DBConfig if not in db already
*/
DBConfigPtr getDBConfig( string ns , bool create=true , const string& shardNameHint="" );
-
+
/**
* removes db entry.
* on next getDBConfig call will fetch from db
@@ -60,14 +60,14 @@ namespace mongo {
/**
*
* addShard will create a new shard in the grid. It expects a mongod process to be running
- * on the provided address. Adding a shard that is a replica set is supported.
+ * on the provided address. Adding a shard that is a replica set is supported.
*
* @param name is an optional string with the name of the shard. if omitted, grid will
* generate one and update the parameter.
* @param servers is the connection string of the shard being added
* @param maxSize is the optional space quota in bytes. Zeros means there's no limitation to
* space usage
- * @param errMsg is the error description in case the operation failed.
+ * @param errMsg is the error description in case the operation failed.
* @return true if shard was successfully added.
*/
bool addShard( string* name , const ConnectionString& servers , long long maxSize , string& errMsg );
@@ -76,7 +76,7 @@ namespace mongo {
* @return true if the config database knows about a host 'name'
*/
bool knowAboutShard( const string& name ) const;
-
+
/**
* @return true if the chunk balancing functionality is enabled
*/
@@ -88,7 +88,7 @@ namespace mongo {
/**
* @param balancerDoc bson that may contain a window of time for the balancer to work
- * format { ... , activeWindow: { start: "8:30" , stop: "19:00" } , ... }
+ * format { ... , activeWindow: { start: "8:30" , stop: "19:00" } , ... }
* @return true if there is no window of time specified for the balancer or we're currently in it
*/
static bool _inBalancingWindow( const BSONObj& balancerDoc , const boost::posix_time::ptime& now );
@@ -101,7 +101,7 @@ namespace mongo {
/**
* @param name is the chosen name for the shard. Parameter is mandatory.
* @return true if it managed to generate a shard name. May return false if (currently)
- * 10000 shard
+ * 10000 shards already exist
*/
bool _getNewShardName( string* name ) const;
diff --git a/s/request.cpp b/s/request.cpp
index 3eebf24ccdd..d5aabd1c6d9 100644
--- a/s/request.cpp
+++ b/s/request.cpp
@@ -35,50 +35,50 @@
namespace mongo {
- Request::Request( Message& m, AbstractMessagingPort* p ) :
- _m(m) , _d( m ) , _p(p) , _didInit(false){
-
+ Request::Request( Message& m, AbstractMessagingPort* p ) :
+ _m(m) , _d( m ) , _p(p) , _didInit(false) {
+
assert( _d.getns() );
_id = _m.header()->id;
-
+
_clientId = p ? p->getClientId() : 0;
_clientInfo = ClientInfo::get( _clientId );
_clientInfo->newRequest( p );
-
+
}
-
- void Request::init(){
+
+ void Request::init() {
if ( _didInit )
return;
_didInit = true;
reset();
}
-
- void Request::reset( bool reload ){
- if ( _m.operation() == dbKillCursors ){
+
+ void Request::reset( bool reload ) {
+ if ( _m.operation() == dbKillCursors ) {
return;
}
-
+
_config = grid.getDBConfig( getns() );
if ( reload )
uassert( 10192 , "db config reload failed!" , _config->reload() );
- if ( _config->isSharded( getns() ) ){
+ if ( _config->isSharded( getns() ) ) {
_chunkManager = _config->getChunkManager( getns() , reload );
uassert( 10193 , (string)"no shard info for: " + getns() , _chunkManager );
}
else {
_chunkManager.reset();
- }
+ }
_m.header()->id = _id;
-
+
}
-
+
Shard Request::primaryShard() const {
assert( _didInit );
-
- if ( _chunkManager ){
+
+ if ( _chunkManager ) {
if ( _chunkManager->numChunks() > 1 )
throw UserException( 8060 , "can't call primaryShard on a sharded collection" );
return _chunkManager->findChunk( _chunkManager->getShardKey().globalMin() )->getShard();
@@ -87,26 +87,26 @@ namespace mongo {
uassert( 10194 , "can't call primaryShard on a sharded collection!" , s.ok() );
return s;
}
-
- void Request::process( int attempt ){
+
+ void Request::process( int attempt ) {
init();
int op = _m.operation();
assert( op > dbMsg );
-
- if ( op == dbKillCursors ){
+
+ if ( op == dbKillCursors ) {
cursorCache.gotKillCursors( _m );
return;
}
-
+
log(3) << "Request::process ns: " << getns() << " msg id:" << (int)(_m.header()->id) << " attempt: " << attempt << endl;
-
+
Strategy * s = SINGLE;
_counter = &opsNonSharded;
-
+
_d.markSet();
-
- if ( _chunkManager ){
+
+ if ( _chunkManager ) {
s = SHARDED;
_counter = &opsSharded;
}
@@ -117,7 +117,7 @@ namespace mongo {
try {
s->queryOp( *this );
}
- catch ( StaleConfigException& staleConfig ){
+ catch ( StaleConfigException& staleConfig ) {
log() << staleConfig.what() << " attempt: " << attempt << endl;
uassert( 10195 , "too many attempts to update config, failing" , attempt < 5 );
ShardConnection::checkMyConnectionVersions( getns() );
@@ -139,24 +139,24 @@ namespace mongo {
globalOpCounters.gotOp( op , iscmd );
_counter->gotOp( op , iscmd );
}
-
+
bool Request::isCommand() const {
int x = _d.getQueryNToReturn();
return ( x == 1 || x == -1 ) && strstr( getns() , ".$cmd" );
}
- void Request::gotInsert(){
+ void Request::gotInsert() {
globalOpCounters.gotInsert();
_counter->gotInsert();
}
- void Request::reply( Message & response , const string& fromServer ){
+ void Request::reply( Message & response , const string& fromServer ) {
assert( _didInit );
long long cursor =response.header()->getCursor();
- if ( cursor ){
+ if ( cursor ) {
cursorCache.storeRef( fromServer , cursor );
}
_p->reply( _m , response , _id );
}
-
+
} // namespace mongo
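
Request::process retries on StaleConfigException: it refreshes the chunk/config state and re-runs the op, giving up after five attempts (uassert 10195). A compact sketch of that control flow, with StaleConfig, refreshConfig, and runOp as placeholders rather than mongo types:

#include <stdexcept>

struct StaleConfig : std::runtime_error {
    using std::runtime_error::runtime_error;
};

// paraphrases Request::process's recovery path: on stale config, refresh
// and recurse with attempt+1, the same way process(attempt) calls itself
void processWithRetry(void (*runOp)(), void (*refreshConfig)(), int attempt = 0) {
    try {
        runOp();
    }
    catch (StaleConfig&) {
        if (attempt >= 5)              // same cap as uassert 10195
            throw;
        refreshConfig();               // reload db config / chunk manager
        processWithRetry(runOp, refreshConfig, attempt + 1);
    }
}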
diff --git a/s/request.h b/s/request.h
index e02c12218ac..5b4c228588b 100644
--- a/s/request.h
+++ b/s/request.h
@@ -26,16 +26,16 @@
namespace mongo {
-
+
class OpCounters;
class ClientInfo;
-
+
class Request : boost::noncopyable {
public:
Request( Message& m, AbstractMessagingPort* p );
// ---- message info -----
-
+
const char * getns() const {
return _d.getns();
@@ -60,12 +60,12 @@ namespace mongo {
assert( _didInit );
return _config->isShardingEnabled();
}
-
+
ChunkManagerPtr getChunkManager() const {
assert( _didInit );
return _chunkManager;
}
-
+
int getClientId() const {
return _clientId;
}
@@ -74,14 +74,14 @@ namespace mongo {
}
// ---- remote location info -----
-
-
+
+
Shard primaryShard() const ;
-
+
// ---- low level access ----
void reply( Message & response , const string& fromServer );
-
+
Message& m() { return _m; }
DbMessage& d() { return _d; }
AbstractMessagingPort* p() const { return _p; }
@@ -93,16 +93,16 @@ namespace mongo {
void init();
void reset( bool reload=false );
-
+
private:
Message& _m;
DbMessage _d;
AbstractMessagingPort* _p;
-
+
MSGID _id;
DBConfigPtr _config;
ChunkManagerPtr _chunkManager;
-
+
int _clientId;
ClientInfo * _clientInfo;
diff --git a/s/s_only.cpp b/s/s_only.cpp
index 460135fb0e2..83bceace37c 100644
--- a/s/s_only.cpp
+++ b/s/s_only.cpp
@@ -31,17 +31,16 @@ namespace mongo {
boost::thread_specific_ptr<Client> currentClient;
- Client::Client(const char *desc , MessagingPort *p) :
- _context(0),
- _shutdown(false),
- _desc(desc),
- _god(0),
- _lastOp(0),
- _mp(p)
- {
+ Client::Client(const char *desc , MessagingPort *p) :
+ _context(0),
+ _shutdown(false),
+ _desc(desc),
+ _god(0),
+ _lastOp(0),
+ _mp(p) {
}
- Client::~Client(){}
- bool Client::shutdown(){ return true; }
+ Client::~Client() {}
+ bool Client::shutdown() { return true; }
Client& Client::initThread(const char *desc, MessagingPort *mp) {
setThreadName(desc);
@@ -60,24 +59,24 @@ namespace mongo {
}
bool execCommand( Command * c ,
- Client& client , int queryOptions ,
- const char *ns, BSONObj& cmdObj ,
- BSONObjBuilder& result,
- bool fromRepl ){
+ Client& client , int queryOptions ,
+ const char *ns, BSONObj& cmdObj ,
+ BSONObjBuilder& result,
+ bool fromRepl ) {
assert(c);
-
+
string dbname = nsToDatabase( ns );
-
- if ( cmdObj["help"].trueValue() ){
+
+ if ( cmdObj["help"].trueValue() ) {
stringstream ss;
ss << "help for: " << c->name << " ";
c->help( ss );
result.append( "help" , ss.str() );
result.append( "lockType" , c->locktype() );
return true;
- }
+ }
- if ( c->adminOnly() ){
+ if ( c->adminOnly() ) {
if ( dbname != "admin" ) {
result.append( "errmsg" , "access denied- use admin db" );
log() << "command denied: " << cmdObj.toString() << endl;
diff --git a/s/server.cpp b/s/server.cpp
index 8775d5c4241..93d33165b31 100644
--- a/s/server.cpp
+++ b/s/server.cpp
@@ -38,26 +38,26 @@
#include "shard_version.h"
namespace mongo {
-
- CmdLine cmdLine;
+
+ CmdLine cmdLine;
Database *database = 0;
string mongosCommand;
bool dbexitCalled = false;
- bool inShutdown(){
+ bool inShutdown() {
return dbexitCalled;
}
-
+
string getDbContext() {
return "?";
}
- bool haveLocalShardingInfo( const string& ns ){
+ bool haveLocalShardingInfo( const string& ns ) {
assert( 0 );
return false;
}
-
- void usage( char * argv[] ){
+
+ void usage( char * argv[] ) {
out() << argv[0] << " usage:\n\n";
out() << " -v+ verbose 1: general 2: more 3: per request 4: more\n";
out() << " --port <portno>\n";
@@ -68,23 +68,23 @@ namespace mongo {
class ShardingConnectionHook : public DBConnectionHook {
public:
- virtual void onHandedOut( DBClientBase * conn ){
+ virtual void onHandedOut( DBClientBase * conn ) {
ClientInfo::get()->addShard( conn->getServerAddress() );
}
} shardingConnectionHook;
-
+
class ShardedMessageHandler : public MessageHandler {
public:
- virtual ~ShardedMessageHandler(){}
+ virtual ~ShardedMessageHandler() {}
- virtual void process( Message& m , AbstractMessagingPort* p ){
+ virtual void process( Message& m , AbstractMessagingPort* p ) {
assert( p );
Request r( m , p );
LastError * le = lastError.startRequest( m , r.getClientId() );
assert( le );
-
- if ( logLevel > 5 ){
+
+ if ( logLevel > 5 ) {
log(5) << "client id: " << hex << r.getClientId() << "\t" << r.getns() << "\t" << dec << r.op() << endl;
}
try {
@@ -92,43 +92,43 @@ namespace mongo {
setClientId( r.getClientId() );
r.process();
}
- catch ( AssertionException & e ){
+ catch ( AssertionException & e ) {
log( e.isUserAssertion() ? 1 : 0 ) << "AssertionException in process: " << e.what() << endl;
le->raiseError( e.getCode() , e.what() );
-
+
m.header()->id = r.id();
-
- if ( r.expectResponse() ){
+
+ if ( r.expectResponse() ) {
BSONObj err = BSON( "$err" << e.what() << "code" << e.getCode() );
replyToQuery( ResultFlag_ErrSet, p , m , err );
}
}
- catch ( DBException& e ){
+ catch ( DBException& e ) {
log() << "DBException in process: " << e.what() << endl;
-
+
le->raiseError( e.getCode() , e.what() );
-
+
m.header()->id = r.id();
-
- if ( r.expectResponse() ){
+
+ if ( r.expectResponse() ) {
BSONObj err = BSON( "$err" << e.what() << "code" << e.getCode() );
replyToQuery( ResultFlag_ErrSet, p , m , err );
}
}
}
- virtual void disconnected( AbstractMessagingPort* p ){
+ virtual void disconnected( AbstractMessagingPort* p ) {
ClientInfo::disconnect( p->getClientId() );
lastError.disconnect( p->getClientId() );
}
};
- void sighandler(int sig){
+ void sighandler(int sig) {
dbexit(EXIT_CLEAN, (string("received signal ") + BSONObjBuilder::numStr(sig)).c_str());
}
-
- void setupSignals( bool inFork ){
+
+ void setupSignals( bool inFork ) {
signal(SIGTERM, sighandler);
signal(SIGINT, sighandler);
@@ -143,14 +143,14 @@ namespace mongo {
#endif
}
- void init(){
+ void init() {
serverID.init();
setupSIGTRAPforGDB();
setupCoreSignals();
setupSignals( false );
}
- void start( const MessageServer::Options& opts ){
+ void start( const MessageServer::Options& opts ) {
setThreadName( "mongosMain" );
installChunkShardVersioning();
balancer.go();
@@ -165,12 +165,12 @@ namespace mongo {
server->run();
}
- DBClientBase *createDirectClient(){
+ DBClientBase *createDirectClient() {
uassert( 10197 , "createDirectClient not implemented for sharding yet" , 0 );
return 0;
}
- void printShardingVersionInfo(){
+ void printShardingVersionInfo() {
log() << mongosCommand << " " << mongodVersion() << " starting (--help for usage)" << endl;
printGitVersion();
printSysInfo();
@@ -192,87 +192,87 @@ int main(int argc, char* argv[]) {
po::options_description sharding_options("Sharding options");
po::options_description hidden("Hidden options");
po::positional_options_description positional;
-
+
CmdLine::addGlobalOptions( options , hidden );
-
+
sharding_options.add_options()
- ( "configdb" , po::value<string>() , "1 or 3 comma separated config servers" )
- ( "test" , "just run unit tests" )
- ( "upgrade" , "upgrade meta data version" )
- ( "chunkSize" , po::value<int>(), "maximum amount of data per chunk" )
- ( "ipv6", "enable IPv6 support (disabled by default)" )
- ( "jsonp","allow JSONP access via http (has security implications)" )
- ;
+ ( "configdb" , po::value<string>() , "1 or 3 comma separated config servers" )
+ ( "test" , "just run unit tests" )
+ ( "upgrade" , "upgrade meta data version" )
+ ( "chunkSize" , po::value<int>(), "maximum amount of data per chunk" )
+ ( "ipv6", "enable IPv6 support (disabled by default)" )
+ ( "jsonp","allow JSONP access via http (has security implications)" )
+ ;
options.add(sharding_options);
// parse options
po::variables_map params;
if ( ! CmdLine::store( argc , argv , options , hidden , positional , params ) )
return 0;
-
- if ( params.count( "help" ) ){
+
+ if ( params.count( "help" ) ) {
cout << options << endl;
return 0;
}
- if ( params.count( "version" ) ){
+ if ( params.count( "version" ) ) {
printShardingVersionInfo();
return 0;
}
- if ( params.count( "chunkSize" ) ){
+ if ( params.count( "chunkSize" ) ) {
Chunk::MaxChunkSize = params["chunkSize"].as<int>() * 1024 * 1024;
}
- if ( params.count( "ipv6" ) ){
+ if ( params.count( "ipv6" ) ) {
enableIPv6();
}
- if ( params.count( "jsonp" ) ){
+ if ( params.count( "jsonp" ) ) {
cmdLine.jsonp = true;
}
- if ( params.count( "test" ) ){
+ if ( params.count( "test" ) ) {
logLevel = 5;
UnitTest::runTests();
cout << "tests passed" << endl;
return 0;
}
-
- if ( ! params.count( "configdb" ) ){
+
+ if ( ! params.count( "configdb" ) ) {
out() << "error: no args for --configdb" << endl;
return 4;
}
vector<string> configdbs;
splitStringDelim( params["configdb"].as<string>() , &configdbs , ',' );
- if ( configdbs.size() != 1 && configdbs.size() != 3 ){
+ if ( configdbs.size() != 1 && configdbs.size() != 3 ) {
out() << "need either 1 or 3 configdbs" << endl;
return 5;
}
// we either have a setting where all processes are on localhost or none is
- for ( vector<string>::const_iterator it = configdbs.begin() ; it != configdbs.end() ; ++it ){
+ for ( vector<string>::const_iterator it = configdbs.begin() ; it != configdbs.end() ; ++it ) {
try {
HostAndPort configAddr( *it ); // will throw if address format is invalid
- if ( it == configdbs.begin() ){
+ if ( it == configdbs.begin() ) {
grid.setAllowLocalHost( configAddr.isLocalHost() );
}
- if ( configAddr.isLocalHost() != grid.allowLocalHost() ){
+ if ( configAddr.isLocalHost() != grid.allowLocalHost() ) {
out() << "cannot mix localhost and ip addresses in configdbs" << endl;
return 10;
}
- }
+ }
catch ( DBException& e) {
out() << "configdb: " << e.what() << endl;
return 9;
}
}
-
+
pool.addHook( &shardingConnectionHook );
pool.setName( "mongos connectionpool" );
@@ -289,22 +289,22 @@ int main(int argc, char* argv[]) {
usage( argv );
return 1;
}
-
+
printShardingVersionInfo();
-
- if ( ! configServer.init( configdbs ) ){
+
+ if ( ! configServer.init( configdbs ) ) {
cout << "couldn't resolve config db address" << endl;
return 7;
}
-
- if ( ! configServer.ok( true ) ){
+
+ if ( ! configServer.ok( true ) ) {
cout << "configServer startup check failed" << endl;
return 8;
}
-
+
int configError = configServer.checkConfigVersion( params.count( "upgrade" ) );
- if ( configError ){
- if ( configError > 0 ){
+ if ( configError ) {
+ if ( configError > 0 ) {
cout << "upgrade success!" << endl;
}
else {
@@ -317,7 +317,7 @@ int main(int argc, char* argv[]) {
init();
boost::thread web( boost::bind(&webServerThread, new NoAdminAccess() /* takes ownership */) );
-
+
MessageServer::Options opts;
opts.port = cmdLine.port;
opts.ipList = cmdLine.bind_ip;
@@ -330,8 +330,8 @@ int main(int argc, char* argv[]) {
#undef exit
void mongo::dbexit( ExitCode rc, const char *why, bool tryToGetLock ) {
dbexitCalled = true;
- log() << "dbexit: " << why
- << " rc:" << rc
+ log() << "dbexit: " << why
+ << " rc:" << rc
<< " " << ( why ? why : "" )
<< endl;
::exit(rc);
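
main() validates --configdb with two rules visible above: exactly 1 or 3 servers, and either every address is localhost or none is. A sketch of just that validation; isLocalHost here is a simplified stand-in for HostAndPort::isLocalHost (which also catches bad address formats, return code 9), and the return values mirror the source's exit codes:

#include <string>
#include <vector>

bool isLocalHost(const std::string& hostport) {
    return hostport.compare(0, 9, "localhost") == 0 ||
           hostport.compare(0, 9, "127.0.0.1") == 0;
}

// returns 0 on success, or the exit code the server would use
int checkConfigdbs(const std::vector<std::string>& configdbs) {
    if (configdbs.size() != 1 && configdbs.size() != 3)
        return 5;                       // "need either 1 or 3 configdbs"
    bool allowLocal = isLocalHost(configdbs[0]);  // first entry sets the policy
    for (size_t i = 0; i < configdbs.size(); i++) {
        if (isLocalHost(configdbs[i]) != allowLocal)
            return 10;                  // "cannot mix localhost and ip addresses"
    }
    return 0;
}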
diff --git a/s/server.h b/s/server.h
index c45d77d4f35..1a5c9ea54b0 100644
--- a/s/server.h
+++ b/s/server.h
@@ -21,9 +21,9 @@
#include "../db/jsobj.h"
namespace mongo {
-
+
extern OID serverID;
-
+
// from request.cpp
void processRequest(Message& m, MessagingPort& p);
}
diff --git a/s/shard.cpp b/s/shard.cpp
index 32246dd90de..fc67eeee5c6 100644
--- a/s/shard.cpp
+++ b/s/shard.cpp
@@ -23,53 +23,54 @@
#include <set>
namespace mongo {
-
+
class StaticShardInfo {
public:
StaticShardInfo() : _mutex("StaticShardInfo") { }
- void reload(){
+ void reload() {
list<BSONObj> all;
{
ScopedDbConnection conn( configServer.getPrimary() );
auto_ptr<DBClientCursor> c = conn->query( ShardNS::shard , Query() );
assert( c.get() );
- while ( c->more() ){
+ while ( c->more() ) {
all.push_back( c->next().getOwned() );
}
conn.done();
}
-
+
scoped_lock lk( _mutex );
-
+
// We use the _lookup table for all shards and for the primary config DB. The config DB info,
// however, does not come from the ShardNS::shard. So when cleaning the _lookup table we leave
// the config state intact. The rationale is that this way we could drop shards that
// were removed without reinitializing the config DB information.
map<string,Shard>::iterator i = _lookup.find( "config" );
- if ( i != _lookup.end() ){
+ if ( i != _lookup.end() ) {
Shard config = i->second;
_lookup.clear();
_lookup[ "config" ] = config;
- } else {
+ }
+ else {
_lookup.clear();
}
- for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); ++i ){
+ for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); ++i ) {
BSONObj o = *i;
string name = o["_id"].String();
string host = o["host"].String();
long long maxSize = 0;
BSONElement maxSizeElem = o[ ShardFields::maxSize.name() ];
- if ( ! maxSizeElem.eoo() ){
+ if ( ! maxSizeElem.eoo() ) {
maxSize = maxSizeElem.numberLong();
}
bool isDraining = false;
BSONElement isDrainingElem = o[ ShardFields::draining.name() ];
- if ( ! isDrainingElem.eoo() ){
+ if ( ! isDrainingElem.eoo() ) {
isDraining = isDrainingElem.Bool();
}
@@ -85,14 +86,14 @@ namespace mongo {
}
}
-
- bool isMember( const string& addr ){
+
+ bool isMember( const string& addr ) {
scoped_lock lk( _mutex );
map<string,Shard>::iterator i = _lookup.find( addr );
return i != _lookup.end();
}
- const Shard& find( const string& ident ){
+ const Shard& find( const string& ident ) {
string mykey = ident;
{
@@ -109,17 +110,17 @@ namespace mongo {
if ( i != _lookup.end() )
return i->second;
}
-
+
// not in our maps, re-load all
reload();
scoped_lock lk( _mutex );
map<string,Shard>::iterator i = _lookup.find( mykey );
uassert( 13129 , (string)"can't find shard for: " + mykey , i != _lookup.end() );
- return i->second;
+ return i->second;
}
-
- void set( const string& name , const string& addr , bool setName = true , bool setAddr = true ){
+
+ void set( const string& name , const string& addr , bool setName = true , bool setAddr = true ) {
Shard s(name,addr);
scoped_lock lk( _mutex );
if ( setName )
@@ -127,23 +128,24 @@ namespace mongo {
if ( setAddr )
_lookup[addr] = s;
}
-
- void remove( const string& name ){
+
+ void remove( const string& name ) {
scoped_lock lk( _mutex );
- for ( map<string,Shard>::iterator i = _lookup.begin(); i!=_lookup.end(); ){
+ for ( map<string,Shard>::iterator i = _lookup.begin(); i!=_lookup.end(); ) {
Shard s = i->second;
- if ( s.getName() == name ){
+ if ( s.getName() == name ) {
_lookup.erase(i++);
- } else {
+ }
+ else {
++i;
}
}
}
- void getAllShards( vector<Shard>& all ){
+ void getAllShards( vector<Shard>& all ) {
scoped_lock lk( _mutex );
std::set<string> seen;
- for ( map<string,Shard>::iterator i = _lookup.begin(); i!=_lookup.end(); ++i ){
+ for ( map<string,Shard>::iterator i = _lookup.begin(); i!=_lookup.end(); ++i ) {
Shard s = i->second;
if ( s.getName() == "config" )
continue;
@@ -156,17 +158,17 @@ namespace mongo {
private:
map<string,Shard> _lookup;
- mongo::mutex _mutex;
+ mongo::mutex _mutex;
} staticShardInfo;
-
- void Shard::setAddress( const string& addr , bool authoritative ){
+
+ void Shard::setAddress( const string& addr , bool authoritative ) {
assert( _name.size() );
_addr = addr;
if ( authoritative )
staticShardInfo.set( _name , _addr , true , false );
}
-
- void Shard::reset( const string& ident ){
+
+ void Shard::reset( const string& ident ) {
const Shard& s = staticShardInfo.find( ident );
uassert( 13128 , (string)"can't find shard for: " + ident , s.ok() );
_name = s._name;
@@ -174,28 +176,28 @@ namespace mongo {
_maxSize = s._maxSize;
_isDraining = s._isDraining;
}
-
- void Shard::getAllShards( vector<Shard>& all ){
+
+ void Shard::getAllShards( vector<Shard>& all ) {
staticShardInfo.getAllShards( all );
}
- bool Shard::isAShard( const string& ident ){
+ bool Shard::isAShard( const string& ident ) {
return staticShardInfo.isMember( ident );
}
- void Shard::printShardInfo( ostream& out ){
+ void Shard::printShardInfo( ostream& out ) {
vector<Shard> all;
getAllShards( all );
for ( unsigned i=0; i<all.size(); i++ )
out << all[i].toString() << "\n";
out.flush();
}
-
+
BSONObj Shard::runCommand( const string& db , const BSONObj& cmd ) const {
ScopedDbConnection conn( this );
BSONObj res;
bool ok = conn->runCommand( db , cmd , res );
- if ( ! ok ){
+ if ( ! ok ) {
stringstream ss;
ss << "runCommand (" << cmd << ") on shard (" << _name << ") failed : " << res;
throw UserException( 13136 , ss.str() );
@@ -204,41 +206,41 @@ namespace mongo {
conn.done();
return res;
}
-
+
ShardStatus Shard::getStatus() const {
return ShardStatus( *this , runCommand( "admin" , BSON( "serverStatus" << 1 ) ) );
}
-
- void Shard::reloadShardInfo(){
+
+ void Shard::reloadShardInfo() {
staticShardInfo.reload();
}
- bool Shard::isMember( const string& addr ){
+ bool Shard::isMember( const string& addr ) {
return staticShardInfo.isMember( addr );
}
-
- void Shard::removeShard( const string& name ){
+
+ void Shard::removeShard( const string& name ) {
staticShardInfo.remove( name );
}
- Shard Shard::pick( const Shard& current ){
+ Shard Shard::pick( const Shard& current ) {
vector<Shard> all;
staticShardInfo.getAllShards( all );
- if ( all.size() == 0 ){
+ if ( all.size() == 0 ) {
staticShardInfo.reload();
staticShardInfo.getAllShards( all );
if ( all.size() == 0 )
return EMPTY;
}
-
+
// if current shard was provided, pick a different shard only if it is a better choice
ShardStatus best = all[0].getStatus();
- if ( current != EMPTY ){
+ if ( current != EMPTY ) {
best = current.getStatus();
}
-
- for ( size_t i=0; i<all.size(); i++ ){
+
+ for ( size_t i=0; i<all.size(); i++ ) {
ShardStatus t = all[i].getStatus();
if ( t < best )
best = t;
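
Shard::pick above chooses the emptiest shard by comparing ShardStatus values, whose operator< (see shard.h below) orders by mapped memory. A reduced sketch of the same selection, with StatusSketch standing in for ShardStatus:

#include <cstddef>
#include <vector>

struct StatusSketch {
    long long mapped;      // stands in for ShardStatus::_mapped
    int shardIndex;
    bool operator<(const StatusSketch& o) const { return mapped < o.mapped; }
};

// seed "best" with the current shard when given, so a different shard is
// picked only when strictly better -- the same rule as Shard::pick
int pickEmptiest(const std::vector<StatusSketch>& all, const StatusSketch* current) {
    if (all.empty()) return -1;        // caller reloads shard info and retries
    StatusSketch best = current ? *current : all[0];
    for (size_t i = 0; i < all.size(); i++)
        if (all[i] < best) best = all[i];
    return best.shardIndex;
}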
diff --git a/s/shard.h b/s/shard.h
index a47552b0952..97f0339bb25 100644
--- a/s/shard.h
+++ b/s/shard.h
@@ -34,45 +34,45 @@ namespace mongo {
class Shard {
public:
Shard()
- : _name("") , _addr("") , _maxSize(0) , _isDraining( false ){
+ : _name("") , _addr("") , _maxSize(0) , _isDraining( false ) {
}
Shard( const string& name , const string& addr, long long maxSize = 0 , bool isDraining = false )
- : _name(name) , _addr( addr ) , _maxSize( maxSize ) , _isDraining( isDraining ){
+ : _name(name) , _addr( addr ) , _maxSize( maxSize ) , _isDraining( isDraining ) {
}
- Shard( const string& ident ){
+ Shard( const string& ident ) {
reset( ident );
}
Shard( const Shard& other )
- : _name( other._name ) , _addr( other._addr ) , _maxSize( other._maxSize ) , _isDraining( other._isDraining ){
+ : _name( other._name ) , _addr( other._addr ) , _maxSize( other._maxSize ) , _isDraining( other._isDraining ) {
}
Shard( const Shard* other )
- : _name( other->_name ) , _addr( other->_addr ), _maxSize( other->_maxSize ) , _isDraining( other->_isDraining ){
+ : _name( other->_name ) , _addr( other->_addr ), _maxSize( other->_maxSize ) , _isDraining( other->_isDraining ) {
}
-
- static Shard make( const string& ident ){
+
+ static Shard make( const string& ident ) {
Shard s;
s.reset( ident );
return s;
}
static bool isAShard( const string& ident );
-
+
/**
* @param ident either name or address
*/
void reset( const string& ident );
-
+
void setAddress( const string& addr , bool authoritative = false );
string getName() const {
assert( _name.size() );
return _name;
}
-
+
string getConnString() const {
assert( _addr.size() );
return _addr;
@@ -97,7 +97,7 @@ namespace mongo {
bool operator==( const Shard& s ) const {
bool n = _name == s._name;
bool a = _addr == s._addr;
-
+
assert( n == a ); // names and addresses are 1 to 1
return n;
}
@@ -112,7 +112,7 @@ namespace mongo {
bool operator==( const string& s ) const {
return _name == s || _addr == s;
}
-
+
bool operator!=( const string& s ) const {
return _name != s && _addr != s;
}
@@ -120,18 +120,18 @@ namespace mongo {
bool operator<(const Shard& o) const {
return _name < o._name;
}
-
+
bool ok() const {
return _name.size() > 0 && _addr.size() > 0;
}
-
+
BSONObj runCommand( const string& db , const string& simple ) const {
return runCommand( db , BSON( simple << 1 ) );
}
BSONObj runCommand( const string& db , const BSONObj& cmd ) const ;
-
+
ShardStatus getStatus() const ;
-
+
static void getAllShards( vector<Shard>& all );
static void printShardInfo( ostream& out );
@@ -140,7 +140,7 @@ namespace mongo {
* @return the currently emptiest shard, if best then current, or EMPTY
*/
static Shard pick( const Shard& current = EMPTY );
-
+
static void reloadShardInfo();
static void removeShard( const string& name );
@@ -152,13 +152,13 @@ namespace mongo {
private:
string _name;
string _addr;
- long long _maxSize; // in MBytes, 0 is unlimited
+ long long _maxSize; // in MBytes, 0 is unlimited
bool _isDraining; // shard is currently being removed
};
class ShardStatus {
public:
-
+
ShardStatus( const Shard& shard , const BSONObj& obj );
friend ostream& operator << (ostream& out, const ShardStatus& s) {
@@ -168,14 +168,14 @@ namespace mongo {
string toString() const {
stringstream ss;
- ss << "shard: " << _shard << " mapped: " << _mapped << " writeLock: " << _writeLock;
+ ss << "shard: " << _shard << " mapped: " << _mapped << " writeLock: " << _writeLock;
return ss.str();
}
- bool operator<( const ShardStatus& other ) const{
+ bool operator<( const ShardStatus& other ) const {
return _mapped < other._mapped;
}
-
+
Shard shard() const {
return _shard;
}
@@ -206,19 +206,19 @@ namespace mongo {
void done();
void kill();
- DBClientBase& conn(){
+ DBClientBase& conn() {
_finishInit();
assert( _conn );
return *_conn;
}
-
- DBClientBase* operator->(){
+
+ DBClientBase* operator->() {
_finishInit();
assert( _conn );
return _conn;
}
- DBClientBase* get(){
+ DBClientBase* get() {
_finishInit();
assert( _conn );
return _conn;
@@ -235,7 +235,7 @@ namespace mongo {
static void sync();
- void donotCheckVersion(){
+ void donotCheckVersion() {
_setVersion = false;
_finishedInit = true;
}
@@ -247,11 +247,11 @@ namespace mongo {
/** checks all of my thread local connections for the version of this ns */
static void checkMyConnectionVersions( const string & ns );
-
+
private:
void _init();
void _finishInit();
-
+
bool _finishedInit;
string _addr;
diff --git a/s/shard_version.cpp b/s/shard_version.cpp
index a11f7faaeb1..0f3e80ff9cf 100644
--- a/s/shard_version.cpp
+++ b/s/shard_version.cpp
@@ -35,9 +35,9 @@ namespace mongo {
static void resetShardVersion( DBClientBase * conn );
void installChunkShardVersioning() {
- //
+ //
// Overriding no-op behavior in shardconnection.cpp
- //
+ //
// TODO: Better encapsulate this mechanism.
//
checkShardVersionCB = checkShardVersion;
@@ -45,24 +45,24 @@ namespace mongo {
}
struct ConnectionShardStatus {
-
+
typedef unsigned long long S;
- ConnectionShardStatus()
- : _mutex( "ConnectionShardStatus" ){
+ ConnectionShardStatus()
+ : _mutex( "ConnectionShardStatus" ) {
}
- S getSequence( DBClientBase * conn , const string& ns ){
+ S getSequence( DBClientBase * conn , const string& ns ) {
scoped_lock lk( _mutex );
return _map[conn][ns];
}
- void setSequence( DBClientBase * conn , const string& ns , const S& s ){
+ void setSequence( DBClientBase * conn , const string& ns , const S& s ) {
scoped_lock lk( _mutex );
_map[conn][ns] = s;
}
- void reset( DBClientBase * conn ){
+ void reset( DBClientBase * conn ) {
scoped_lock lk( _mutex );
_map.erase( conn );
}
@@ -75,27 +75,27 @@ namespace mongo {
} connectionShardStatus;
- void resetShardVersion( DBClientBase * conn ){
+ void resetShardVersion( DBClientBase * conn ) {
connectionShardStatus.reset( conn );
}
-
+
/**
* @return true if had to do something
*/
- bool checkShardVersion( DBClientBase& conn , const string& ns , bool authoritative , int tryNumber ){
+ bool checkShardVersion( DBClientBase& conn , const string& ns , bool authoritative , int tryNumber ) {
// TODO: cache, optimize, etc...
-
+
WriteBackListener::init( conn );
DBConfigPtr conf = grid.getDBConfig( ns );
if ( ! conf )
return false;
-
+
unsigned long long officialSequenceNumber = 0;
-
+
ChunkManagerPtr manager;
const bool isSharded = conf->isSharded( ns );
- if ( isSharded ){
+ if ( isSharded ) {
manager = conf->getChunkManager( ns , authoritative );
officialSequenceNumber = manager->getSequenceNumber();
}
@@ -103,40 +103,40 @@ namespace mongo {
// has the ChunkManager been reloaded since the last time we updated the connection-level version?
// (ie, last time we issued the setShardVersions below)
unsigned long long sequenceNumber = connectionShardStatus.getSequence(&conn,ns);
- if ( sequenceNumber == officialSequenceNumber ){
+ if ( sequenceNumber == officialSequenceNumber ) {
return false;
}
ShardChunkVersion version = 0;
- if ( isSharded ){
+ if ( isSharded ) {
version = manager->getVersion( Shard::make( conn.getServerAddress() ) );
}
-
- log(2) << " have to set shard version for conn: " << &conn << " ns:" << ns
- << " my last seq: " << sequenceNumber << " current: " << officialSequenceNumber
+
+ log(2) << " have to set shard version for conn: " << &conn << " ns:" << ns
+ << " my last seq: " << sequenceNumber << " current: " << officialSequenceNumber
<< " version: " << version << " manager: " << manager.get()
<< endl;
-
+
BSONObj result;
- if ( setShardVersion( conn , ns , version , authoritative , result ) ){
+ if ( setShardVersion( conn , ns , version , authoritative , result ) ) {
// success!
log(1) << " setShardVersion success!" << endl;
connectionShardStatus.setSequence( &conn , ns , officialSequenceNumber );
return true;
}
-
+
log(1) << " setShardVersion failed!\n" << result << endl;
if ( result.getBoolField( "need_authoritative" ) )
massert( 10428 , "need_authoritative set but in authoritative mode already" , ! authoritative );
-
- if ( ! authoritative ){
+
+ if ( ! authoritative ) {
checkShardVersion( conn , ns , 1 , tryNumber + 1 );
return true;
}
-
- if ( tryNumber < 4 ){
+
+ if ( tryNumber < 4 ) {
log(1) << "going to retry checkShardVersion" << endl;
sleepmillis( 10 );
checkShardVersion( conn , ns , 1 , tryNumber + 1 );
@@ -147,5 +147,5 @@ namespace mongo {
massert( 10429 , (string)"setShardVersion failed! " + result.jsonString() , 0 );
return true;
}
-
+
} // namespace mongo
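
The core of checkShardVersion is a per-(connection, namespace) cache of the ChunkManager sequence number last sent via setShardVersion, so the expensive call only goes out after a reload. A minimal sketch of that cache; ConnId, SeqCache, needsUpdate, and record are illustrative names, not mongo APIs:

#include <map>
#include <mutex>
#include <string>
#include <utility>

typedef unsigned long long Seq;
typedef const void* ConnId;   // the source keys on the DBClientBase pointer

class SeqCache {
public:
    // true if the official sequence differs from what this connection last saw,
    // i.e. the caller must (re)send setShardVersion
    bool needsUpdate(ConnId c, const std::string& ns, Seq official) {
        std::lock_guard<std::mutex> lk(_m);
        return _map[std::make_pair(c, ns)] != official;
    }
    // called after a successful setShardVersion, like setSequence above
    void record(ConnId c, const std::string& ns, Seq official) {
        std::lock_guard<std::mutex> lk(_m);
        _map[std::make_pair(c, ns)] = official;
    }
private:
    std::mutex _m;
    std::map<std::pair<ConnId, std::string>, Seq> _map;
};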
diff --git a/s/shard_version.h b/s/shard_version.h
index 54df76d6773..023b7fc47a6 100644
--- a/s/shard_version.h
+++ b/s/shard_version.h
@@ -19,7 +19,7 @@
#pragma once
namespace mongo {
-
+
/*
* Install chunk shard version callbacks in shardconnection code. This activates
* the chunk shard version control that mongos needs.
diff --git a/s/shardconnection.cpp b/s/shardconnection.cpp
index 164c6923532..6b5bf552550 100644
--- a/s/shardconnection.cpp
+++ b/s/shardconnection.cpp
@@ -23,18 +23,18 @@
#include <set>
namespace mongo {
-
+
// The code in shardconnection may run not only in mongos context. When elsewhere, chunk shard versioning
// is disabled. To enable chunk shard versioning, provide the check/resetShardVersionCB's below
//
// TODO: better encapsulate this mechanism.
- bool defaultCheckShardVersion( DBClientBase & conn , const string& ns , bool authoritative , int tryNumber ){
+ bool defaultCheckShardVersion( DBClientBase & conn , const string& ns , bool authoritative , int tryNumber ) {
// no-op in mongod
return false;
}
-
- void defaultResetShardVersion( DBClientBase * conn ){
+
+ void defaultResetShardVersion( DBClientBase * conn ) {
// no-op in mongod
}
@@ -48,22 +48,22 @@ namespace mongo {
class ClientConnections : boost::noncopyable {
public:
struct Status : boost::noncopyable {
- Status() : created(0), avail(0){}
+ Status() : created(0), avail(0) {}
- long long created;
+ long long created;
DBClientBase* avail;
};
- ClientConnections(){}
-
- ~ClientConnections(){
- for ( map<string,Status*>::iterator i=_hosts.begin(); i!=_hosts.end(); ++i ){
+ ClientConnections() {}
+
+ ~ClientConnections() {
+ for ( map<string,Status*>::iterator i=_hosts.begin(); i!=_hosts.end(); ++i ) {
string addr = i->first;
Status* ss = i->second;
assert( ss );
- if ( ss->avail ){
- /* if we're shutting down, don't want to initiate release mechanism as it is slow,
+ if ( ss->avail ) {
+ /* if we're shutting down, don't want to initiate release mechanism as it is slow,
and isn't needed since all connections will be closed anyway */
if ( inShutdown() )
delete ss->avail;
@@ -75,15 +75,15 @@ namespace mongo {
}
_hosts.clear();
}
-
- DBClientBase * get( const string& addr , const string& ns ){
+
+ DBClientBase * get( const string& addr , const string& ns ) {
_check( ns );
Status* &s = _hosts[addr];
if ( ! s )
s = new Status();
-
- if ( s->avail ){
+
+ if ( s->avail ) {
DBClientBase* c = s->avail;
s->avail = 0;
pool.onHandedOut( c );
@@ -93,23 +93,23 @@ namespace mongo {
s->created++;
return pool.get( addr );
}
-
- void done( const string& addr , DBClientBase* conn ){
+
+ void done( const string& addr , DBClientBase* conn ) {
Status* s = _hosts[addr];
assert( s );
- if ( s->avail ){
+ if ( s->avail ) {
release( addr , conn );
return;
}
s->avail = conn;
}
-
- void sync(){
- for ( map<string,Status*>::iterator i=_hosts.begin(); i!=_hosts.end(); ++i ){
+
+ void sync() {
+ for ( map<string,Status*>::iterator i=_hosts.begin(); i!=_hosts.end(); ++i ) {
string addr = i->first;
Status* ss = i->second;
- if ( ss->avail ){
+ if ( ss->avail ) {
ss->avail->getLastError();
release( addr , ss->avail );
ss->avail = 0;
@@ -119,16 +119,16 @@ namespace mongo {
_hosts.clear();
}
- void checkVersions( const string& ns ){
+ void checkVersions( const string& ns ) {
vector<Shard> all;
Shard::getAllShards( all );
- for ( unsigned i=0; i<all.size(); i++ ){
+ for ( unsigned i=0; i<all.size(); i++ ) {
Status* &s = _hosts[all[i].getConnString()];
if ( ! s )
s = new Status();
}
- for ( map<string,Status*>::iterator i=_hosts.begin(); i!=_hosts.end(); ++i ){
+ for ( map<string,Status*>::iterator i=_hosts.begin(); i!=_hosts.end(); ++i ) {
if ( ! Shard::isAShard( i->first ) )
continue;
Status* ss = i->second;
@@ -139,12 +139,12 @@ namespace mongo {
}
}
- void release( const string& addr , DBClientBase * conn ){
+ void release( const string& addr , DBClientBase * conn ) {
resetShardVersionCB( conn );
BSONObj res;
-
+
try {
- if ( conn->simpleCommand( "admin" , &res , "unsetSharding" ) ){
+ if ( conn->simpleCommand( "admin" , &res , "unsetSharding" ) ) {
pool.release( addr , conn );
}
else {
@@ -152,13 +152,13 @@ namespace mongo {
delete conn;
}
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
log(LL_ERROR) << "couldn't unset sharding : " << e.what() << endl;
delete conn;
}
}
-
- void _check( const string& ns ){
+
+ void _check( const string& ns ) {
if ( ns.size() == 0 || _seenNS.count( ns ) )
return;
_seenNS.insert( ns );
@@ -168,12 +168,12 @@ namespace mongo {
map<string,Status*> _hosts;
set<string> _seenNS;
// -----
-
+
static thread_specific_ptr<ClientConnections> _perThread;
- static ClientConnections* threadInstance(){
+ static ClientConnections* threadInstance() {
ClientConnections* cc = _perThread.get();
- if ( ! cc ){
+ if ( ! cc ) {
cc = new ClientConnections();
_perThread.reset( cc );
}
@@ -192,57 +192,57 @@ namespace mongo {
: _addr( s.getConnString() ) , _ns( ns ) {
_init();
}
-
+
ShardConnection::ShardConnection( const string& addr , const string& ns )
: _addr( addr ) , _ns( ns ) {
_init();
}
-
- void ShardConnection::_init(){
+
+ void ShardConnection::_init() {
assert( _addr.size() );
_conn = ClientConnections::threadInstance()->get( _addr , _ns );
_finishedInit = false;
}
- void ShardConnection::_finishInit(){
+ void ShardConnection::_finishInit() {
if ( _finishedInit )
return;
_finishedInit = true;
-
- if ( _ns.size() ){
+
+ if ( _ns.size() ) {
_setVersion = checkShardVersionCB( *_conn , _ns , false , 1 );
}
else {
_setVersion = false;
}
-
+
}
- void ShardConnection::done(){
- if ( _conn ){
+ void ShardConnection::done() {
+ if ( _conn ) {
ClientConnections::threadInstance()->done( _addr , _conn );
_conn = 0;
_finishedInit = true;
}
}
- void ShardConnection::kill(){
- if ( _conn ){
+ void ShardConnection::kill() {
+ if ( _conn ) {
delete _conn;
_conn = 0;
_finishedInit = true;
}
}
- void ShardConnection::sync(){
+ void ShardConnection::sync() {
ClientConnections::threadInstance()->sync();
}
- bool ShardConnection::runCommand( const string& db , const BSONObj& cmd , BSONObj& res ){
+ bool ShardConnection::runCommand( const string& db , const BSONObj& cmd , BSONObj& res ) {
assert( _conn );
bool ok = _conn->runCommand( db , cmd , res );
- if ( ! ok ){
- if ( res["code"].numberInt() == StaleConfigInContextCode ){
+ if ( ! ok ) {
+ if ( res["code"].numberInt() == StaleConfigInContextCode ) {
string big = res["errmsg"].String();
string ns,raw;
massert( 13409 , (string)"can't parse ns from: " + big , StaleConfigException::parse( big , ns , raw ) );
@@ -253,12 +253,12 @@ namespace mongo {
return ok;
}
- void ShardConnection::checkMyConnectionVersions( const string & ns ){
+ void ShardConnection::checkMyConnectionVersions( const string & ns ) {
ClientConnections::threadInstance()->checkVersions( ns );
}
ShardConnection::~ShardConnection() {
- if ( _conn ){
+ if ( _conn ) {
if ( ! _conn->isFailed() ) {
/* see done() comments above for why we log this line */
log() << "~ScopedDBConnection: _conn != null" << endl;
diff --git a/s/shardkey.cpp b/s/shardkey.cpp
index d2c71b1714c..84cdb4bb520 100644
--- a/s/shardkey.cpp
+++ b/s/shardkey.cpp
@@ -31,12 +31,12 @@ namespace mongo {
BSONObjBuilder max;
BSONObjIterator it(p);
- while (it.more()){
+ while (it.more()) {
BSONElement e (it.next());
min.appendMinKey(e.fieldName());
max.appendMaxKey(e.fieldName());
}
-
+
gMin = min.obj();
gMax = max.obj();
}
@@ -50,11 +50,11 @@ namespace mongo {
}
bool ShardKeyPattern::hasShardKey( const BSONObj& obj ) const {
- /* this is written s.t. if obj has lots of fields, if the shard key fields are early,
+ /* this is written s.t. if obj has lots of fields and the shard key fields come early,
it is fast. hence a bit more work here to try to be semi-fast.
*/
- for(set<string>::const_iterator it = patternfields.begin(); it != patternfields.end(); ++it){
+ for(set<string>::const_iterator it = patternfields.begin(); it != patternfields.end(); ++it) {
if(obj.getFieldDotted(it->c_str()).eoo())
return false;
}
@@ -64,17 +64,17 @@ namespace mongo {
bool ShardKeyPattern::isPrefixOf( const BSONObj& otherPattern ) const {
BSONObjIterator a( pattern );
BSONObjIterator b( otherPattern );
-
- while ( a.more() && b.more() ){
+
+ while ( a.more() && b.more() ) {
BSONElement x = a.next();
BSONElement y = b.next();
if ( strcmp( x.fieldName() , y.fieldName() ) )
return false;
}
-
+
return ! a.more();
}
-
+
string ShardKeyPattern::toString() const {
return pattern.toString();
}
@@ -82,51 +82,54 @@ namespace mongo {
BSONObj ShardKeyPattern::moveToFront(const BSONObj& obj) const {
vector<const char*> keysToMove;
keysToMove.push_back("_id");
- BSONForEach(e, pattern){
+ BSONForEach(e, pattern) {
if (strchr(e.fieldName(), '.') == NULL)
keysToMove.push_back(e.fieldName());
}
- if (keysToMove.size() == 1){
+ if (keysToMove.size() == 1) {
return obj;
-
- } else {
+
+ }
+ else {
BufBuilder buf (obj.objsize());
buf.appendNum(obj.objsize());
vector<pair<const char*, size_t> > copies;
pair<const char*, size_t> toCopy ((const char*)NULL, 0); // C++ NULL isn't a pointer type yet
- BSONForEach(e, obj){
+ BSONForEach(e, obj) {
bool moveToFront = false;
- for (vector<const char*>::const_iterator it(keysToMove.begin()), end(keysToMove.end()); it!=end; ++it){
- if (strcmp(e.fieldName(), *it) == 0){
+ for (vector<const char*>::const_iterator it(keysToMove.begin()), end(keysToMove.end()); it!=end; ++it) {
+ if (strcmp(e.fieldName(), *it) == 0) {
moveToFront = true;
break;
}
}
- if (moveToFront){
+ if (moveToFront) {
buf.appendBuf(e.fieldName()-1, e.size());
- if (toCopy.first){
+ if (toCopy.first) {
copies.push_back(toCopy);
toCopy.first = NULL;
}
- } else {
- if (!toCopy.first){
+ }
+ else {
+ if (!toCopy.first) {
toCopy.first = e.fieldName()-1;
toCopy.second = e.size();
- } else {
+ }
+ else {
toCopy.second += e.size();
}
}
}
- for (vector<pair<const char*, size_t> >::const_iterator it(copies.begin()), end(copies.end()); it!=end; ++it){
+ for (vector<pair<const char*, size_t> >::const_iterator it(copies.begin()), end(copies.end()); it!=end; ++it) {
buf.appendBuf(it->first, it->second);
}
- if (toCopy.first){
+ if (toCopy.first) {
buf.appendBuf(toCopy.first, toCopy.second);
}
@@ -135,16 +138,16 @@ namespace mongo {
BSONObj out (buf.buf(), true);
buf.decouple();
return out;
- }
+ }
}
-
- /* things to test for compound :
+
+ /* things to test for compound :
\ middle (deprecating?)
*/
class ShardKeyUnitTest : public UnitTest {
public:
-
- void testIsPrefixOf(){
+
+ void testIsPrefixOf() {
{
ShardKeyPattern k( BSON( "x" << 1 ) );
assert( ! k.isPrefixOf( BSON( "a" << 1 ) ) );
@@ -152,7 +155,7 @@ namespace mongo {
assert( k.isPrefixOf( BSON( "x" << 1 << "a" << 1 ) ) );
assert( ! k.isPrefixOf( BSON( "a" << 1 << "x" << 1 ) ) );
}
- {
+ {
ShardKeyPattern k( BSON( "x" << 1 << "y" << 1 ) );
assert( ! k.isPrefixOf( BSON( "x" << 1 ) ) );
assert( ! k.isPrefixOf( BSON( "x" << 1 << "z" << 1 ) ) );
@@ -160,8 +163,8 @@ namespace mongo {
assert( k.isPrefixOf( BSON( "x" << 1 << "y" << 1 << "z" << 1 ) ) );
}
}
-
- void hasshardkeytest() {
+
+ void hasshardkeytest() {
BSONObj x = fromjson("{ zid : \"abcdefg\", num: 1.0, name: \"eliot\" }");
ShardKeyPattern k( BSON( "num" << 1 ) );
assert( k.hasShardKey(x) );
@@ -177,14 +180,14 @@ namespace mongo {
}
- void extractkeytest() {
+ void extractkeytest() {
ShardKeyPattern k( fromjson("{a:1,'sub.b':-1,'sub.c':1}") );
BSONObj x = fromjson("{a:1,'sub.b':2,'sub.c':3}");
assert( k.extractKey( fromjson("{a:1,sub:{b:2,c:3}}") ).woEqual(x) );
assert( k.extractKey( fromjson("{sub:{b:2,c:3},a:1}") ).woEqual(x) );
}
- void moveToFrontTest(){
+ void moveToFrontTest() {
ShardKeyPattern sk (BSON("a" << 1 << "b" << 1));
BSONObj ret;
@@ -199,46 +202,46 @@ namespace mongo {
assert(ret.woEqual(BSON("a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "Z" << 1 << "Y" << 1)));
}
-
- void moveToFrontBenchmark(int numFields){
- BSONObjBuilder bb;
- bb.append("_id", 1);
- for (int i=0; i < numFields; i++)
- bb.append(BSONObjBuilder::numStr(i), 1);
- bb.append("key", 1);
- BSONObj o = bb.obj();
-
- ShardKeyPattern sk (BSON("key" << 1));
-
- Timer t;
- const int iterations = 100*1000;
- for (int i=0; i< iterations; i++){
- sk.moveToFront(o);
- }
- const double secs = t.micros() / 1000000.0;
- const double ops_per_sec = iterations / secs;
+ void moveToFrontBenchmark(int numFields) {
+ BSONObjBuilder bb;
+ bb.append("_id", 1);
+ for (int i=0; i < numFields; i++)
+ bb.append(BSONObjBuilder::numStr(i), 1);
+ bb.append("key", 1);
+ BSONObj o = bb.obj();
+
+ ShardKeyPattern sk (BSON("key" << 1));
+
+ Timer t;
+ const int iterations = 100*1000;
+ for (int i=0; i< iterations; i++) {
+ sk.moveToFront(o);
+ }
+
+ const double secs = t.micros() / 1000000.0;
+ const double ops_per_sec = iterations / secs;
- cout << "moveToFront (" << numFields << " fields) secs: " << secs << " ops_per_sec: " << ops_per_sec << endl;
+ cout << "moveToFront (" << numFields << " fields) secs: " << secs << " ops_per_sec: " << ops_per_sec << endl;
}
- void run(){
+ void run() {
extractkeytest();
ShardKeyPattern k( BSON( "key" << 1 ) );
-
+
BSONObj min = k.globalMin();
// cout << min.jsonString(TenGen) << endl;
BSONObj max = k.globalMax();
-
+
BSONObj k1 = BSON( "key" << 5 );
assert( k.compare( min , max ) < 0 );
assert( k.compare( min , k1 ) < 0 );
assert( k.compare( max , min ) > 0 );
assert( k.compare( min , min ) == 0 );
-
+
hasshardkeytest();
assert( k.hasShardKey( k1 ) );
assert( ! k.hasShardKey( BSON( "key2" << 1 ) ) );
@@ -247,13 +250,13 @@ namespace mongo {
BSONObj b = BSON( "key" << 999 );
assert( k.compare(a,b) < 0 );
-
+
testIsPrefixOf();
// add middle multitype tests
-
+
moveToFrontTest();
- if (0){ // toggle to run benchmark
+ if (0) { // toggle to run benchmark
moveToFrontBenchmark(0);
moveToFrontBenchmark(10);
moveToFrontBenchmark(100);
@@ -262,5 +265,5 @@ namespace mongo {
log(1) << "shardKeyTest passed" << endl;
}
} shardKeyTest;
-
+
} // namespace mongo
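A minimal sketch (not part of the patch) tying together the ShardKeyPattern calls that ShardKeyUnitTest exercises above; it assumes this tree's s/shardkey.h plus the BSON() and assert helpers it pulls in:

    #include "s/shardkey.h"
    using namespace mongo;

    void shardKeyPatternDemo() {
        ShardKeyPattern k( BSON( "num" << 1 ) );

        // globalMin()/globalMax() bound the keyspace: { num : MinKey } / { num : MaxKey }
        assert( k.compare( k.globalMin() , k.globalMax() ) < 0 );

        // hasShardKey() checks field presence only, not position in the object
        assert( k.hasShardKey( BSON( "name" << "joe" << "num" << 3 ) ) );
        assert( ! k.hasShardKey( BSON( "name" << "joe" ) ) );

        // moveToFront() rewrites the object so _id and the key lead the BSON,
        // which is what the sharded insert path relies on for fast routing
        BSONObj moved = k.moveToFront( BSON( "z" << 1 << "_id" << 5 << "num" << 3 ) );
        assert( strcmp( moved.firstElement().fieldName() , "_id" ) == 0 );
    }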
diff --git a/s/shardkey.h b/s/shardkey.h
index f42b5411dfd..96301ffe093 100644
--- a/s/shardkey.h
+++ b/s/shardkey.h
@@ -21,7 +21,7 @@
#include "../client/dbclient.h"
namespace mongo {
-
+
class Chunk;
    /* A ShardKeyPattern is a pattern indicating what data to extract from the object to make the shard key.
@@ -30,10 +30,10 @@ namespace mongo {
class ShardKeyPattern {
public:
ShardKeyPattern( BSONObj p = BSONObj() );
-
+
/**
global min is the lowest possible value for this key
- e.g. { num : MinKey }
+ e.g. { num : MinKey }
*/
BSONObj globalMin() const { return gMin; }
@@ -42,15 +42,15 @@ namespace mongo {
*/
BSONObj globalMax() const { return gMax; }
- bool isGlobalMin( const BSONObj& k ) const{
+ bool isGlobalMin( const BSONObj& k ) const {
return k.woCompare( globalMin() ) == 0;
}
- bool isGlobalMax( const BSONObj& k ) const{
+ bool isGlobalMax( const BSONObj& k ) const {
return k.woCompare( globalMax() ) == 0;
}
-
- bool isGlobal( const BSONObj& k ) const{
+
+ bool isGlobal( const BSONObj& k ) const {
return isGlobalMin( k ) || isGlobalMax( k );
}
@@ -60,20 +60,20 @@ namespace mongo {
l > r positive
*/
int compare( const BSONObj& l , const BSONObj& r ) const;
-
+
/**
@return whether or not obj has all fields in this shard key pattern
- e.g.
- ShardKey({num:1}).hasShardKey({ name:"joe", num:3 }) is true
+ e.g.
+ ShardKey({num:1}).hasShardKey({ name:"joe", num:3 }) is true
*/
bool hasShardKey( const BSONObj& obj ) const;
-
+
BSONObj key() const { return pattern; }
string toString() const;
BSONObj extractKey(const BSONObj& from) const;
-
+
bool partOfShardKey(const char* key ) const {
return pattern.hasField(key);
}
@@ -91,7 +91,7 @@ namespace mongo {
* @return BSONObj with _id and shardkey at front. May return original object.
*/
BSONObj moveToFront(const BSONObj& obj) const;
-
+
private:
BSONObj pattern;
BSONObj gMin;
@@ -101,10 +101,10 @@ namespace mongo {
set<string> patternfields;
};
- inline BSONObj ShardKeyPattern::extractKey(const BSONObj& from) const {
+ inline BSONObj ShardKeyPattern::extractKey(const BSONObj& from) const {
BSONObj k = from.extractFields(pattern);
uassert(13334, "Shard Key must be less than 512 bytes", k.objsize() < 512);
return k;
}
-}
+}
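extractKey(), inlined above, resolves dotted key parts into subobjects and ignores field order in the source document. A minimal sketch mirroring extractkeytest, assuming fromjson() from this tree:

    ShardKeyPattern k( fromjson( "{a:1,'sub.b':-1,'sub.c':1}" ) );
    BSONObj want = fromjson( "{a:1,'sub.b':2,'sub.c':3}" );
    assert( k.extractKey( fromjson( "{sub:{b:2,c:3},a:1}" ) ).woEqual( want ) );  // order-insensitive
    // keys of 512 bytes or more fail the uassert( 13334 , ... ) above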
diff --git a/s/stats.cpp b/s/stats.cpp
index bb7a9757135..460ada3ccd6 100644
--- a/s/stats.cpp
+++ b/s/stats.cpp
@@ -20,7 +20,7 @@
#include "stats.h"
namespace mongo {
-
+
OpCounters opsNonSharded;
OpCounters opsSharded;
diff --git a/s/stats.h b/s/stats.h
index cbabf25202b..a7cc784e981 100644
--- a/s/stats.h
+++ b/s/stats.h
@@ -22,7 +22,7 @@
#include "../db/stats/counters.h"
namespace mongo {
-
+
extern OpCounters opsNonSharded;
extern OpCounters opsSharded;
diff --git a/s/strategy.cpp b/s/strategy.cpp
index 8017a7f51f1..d562507bc65 100644
--- a/s/strategy.cpp
+++ b/s/strategy.cpp
@@ -32,42 +32,42 @@ namespace mongo {
// ----- Strategy ------
- void Strategy::doWrite( int op , Request& r , const Shard& shard , bool checkVersion ){
+ void Strategy::doWrite( int op , Request& r , const Shard& shard , bool checkVersion ) {
ShardConnection conn( shard , r.getns() );
if ( ! checkVersion )
conn.donotCheckVersion();
- else if ( conn.setVersion() ){
+ else if ( conn.setVersion() ) {
conn.done();
throw StaleConfigException( r.getns() , "doWRite" , true );
}
conn->say( r.m() );
conn.done();
}
-
- void Strategy::doQuery( Request& r , const Shard& shard ){
-
+
+ void Strategy::doQuery( Request& r , const Shard& shard ) {
+
ShardConnection dbcon( shard , r.getns() );
DBClientBase &c = dbcon.conn();
-
+
Message response;
bool ok = c.call( r.m(), response);
uassert( 10200 , "mongos: error calling db", ok );
-
+
{
QueryResult *qr = (QueryResult *) response.singleData();
- if ( qr->resultFlags() & ResultFlag_ShardConfigStale ){
- dbcon.done();
- throw StaleConfigException( r.getns() , "Strategy::doQuery" );
- }
+ if ( qr->resultFlags() & ResultFlag_ShardConfigStale ) {
+ dbcon.done();
+ throw StaleConfigException( r.getns() , "Strategy::doQuery" );
+ }
}
-
+
r.reply( response , c.getServerAddress() );
dbcon.done();
}
-
- void Strategy::insert( const Shard& shard , const char * ns , const BSONObj& obj ){
+
+ void Strategy::insert( const Shard& shard , const char * ns , const BSONObj& obj ) {
ShardConnection dbcon( shard , ns );
- if ( dbcon.setVersion() ){
+ if ( dbcon.setVersion() ) {
dbcon.done();
throw StaleConfigException( ns , "for insert" );
}
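Both doWrite() and insert() above encode the same contract: hand the pooled connection back with done() before throwing StaleConfigException. A caller-shaped sketch of that discipline (function name hypothetical):

    void versionedWrite( const Shard& shard , Request& r ) {
        ShardConnection conn( shard , r.getns() );
        if ( conn.setVersion() ) {   // true: the shard was behind and new config was pushed
            conn.done();             // always return the connection to the pool first
            throw StaleConfigException( r.getns() , "versionedWrite" , true );
        }
        conn->say( r.m() );          // fire-and-forget, as in doWrite()
        conn.done();
    }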
diff --git a/s/strategy.h b/s/strategy.h
index e8ec0fe6cdb..10a5a3f8427 100644
--- a/s/strategy.h
+++ b/s/strategy.h
@@ -23,21 +23,21 @@
#include "request.h"
namespace mongo {
-
+
class Strategy {
public:
- Strategy(){}
+ Strategy() {}
virtual ~Strategy() {}
virtual void queryOp( Request& r ) = 0;
virtual void getMore( Request& r ) = 0;
virtual void writeOp( int op , Request& r ) = 0;
-
+
protected:
void doWrite( int op , Request& r , const Shard& shard , bool checkVersion = true );
void doQuery( Request& r , const Shard& shard );
-
+
void insert( const Shard& shard , const char * ns , const BSONObj& obj );
-
+
};
extern Strategy * SINGLE;
diff --git a/s/strategy_shard.cpp b/s/strategy_shard.cpp
index 0257b320bae..7d2c09fd623 100644
--- a/s/strategy_shard.cpp
+++ b/s/strategy_shard.cpp
@@ -31,31 +31,31 @@ namespace mongo {
class ShardStrategy : public Strategy {
- virtual void queryOp( Request& r ){
+ virtual void queryOp( Request& r ) {
QueryMessage q( r.d() );
log(3) << "shard query: " << q.ns << " " << q.query << endl;
-
+
if ( q.ntoreturn == 1 && strstr(q.ns, ".$cmd") )
throw UserException( 8010 , "something is wrong, shouldn't see a command here" );
ChunkManagerPtr info = r.getChunkManager();
assert( info );
-
+
Query query( q.query );
set<Shard> shards;
info->getShardsForQuery( shards , query.getFilter() );
-
+
set<ServerAndQuery> servers;
- for ( set<Shard>::iterator i = shards.begin(); i != shards.end(); i++ ){
- servers.insert( ServerAndQuery( i->getConnString() , BSONObj() ) );
+ for ( set<Shard>::iterator i = shards.begin(); i != shards.end(); i++ ) {
+ servers.insert( ServerAndQuery( i->getConnString() , BSONObj() ) );
}
-
- if ( logLevel > 4 ){
+
+ if ( logLevel > 4 ) {
StringBuilder ss;
ss << " shard query servers: " << servers.size() << '\n';
- for ( set<ServerAndQuery>::iterator i = servers.begin(); i!=servers.end(); i++ ){
+ for ( set<ServerAndQuery>::iterator i = servers.begin(); i!=servers.end(); i++ ) {
const ServerAndQuery& s = *i;
ss << " " << s.toString() << '\n';
}
@@ -63,10 +63,10 @@ namespace mongo {
}
ClusteredCursor * cursor = 0;
-
+
BSONObj sort = query.getSort();
-
- if ( sort.isEmpty() ){
+
+ if ( sort.isEmpty() ) {
cursor = new SerialServerClusteredCursor( servers , q );
}
else {
@@ -80,88 +80,89 @@ namespace mongo {
log(5) << " cursor type: " << cursor->type() << endl;
shardedCursorTypes.hit( cursor->type() );
-
- if ( query.isExplain() ){
+
+ if ( query.isExplain() ) {
BSONObj explain = cursor->explain();
replyToQuery( 0 , r.p() , r.m() , explain );
delete( cursor );
return;
}
- } catch(...) {
+ }
+ catch(...) {
delete cursor;
throw;
}
ShardedClientCursorPtr cc (new ShardedClientCursor( q , cursor ));
- if ( ! cc->sendNextBatch( r ) ){
+ if ( ! cc->sendNextBatch( r ) ) {
return;
}
log(6) << "storing cursor : " << cc->getId() << endl;
cursorCache.store( cc );
}
-
- virtual void getMore( Request& r ){
+
+ virtual void getMore( Request& r ) {
int ntoreturn = r.d().pullInt();
long long id = r.d().pullInt64();
log(6) << "want cursor : " << id << endl;
ShardedClientCursorPtr cursor = cursorCache.get( id );
- if ( ! cursor ){
+ if ( ! cursor ) {
log(6) << "\t invalid cursor :(" << endl;
replyToQuery( ResultFlag_CursorNotFound , r.p() , r.m() , 0 , 0 , 0 );
return;
}
-
- if ( cursor->sendNextBatch( r , ntoreturn ) ){
+
+ if ( cursor->sendNextBatch( r , ntoreturn ) ) {
// still more data
cursor->accessed();
return;
}
-
+
// we've exhausted the cursor
cursorCache.remove( id );
}
-
- void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ){
-
- while ( d.moreJSObjs() ){
+
+ void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
+
+ while ( d.moreJSObjs() ) {
BSONObj o = d.nextJsObj();
- if ( ! manager->hasShardKey( o ) ){
+ if ( ! manager->hasShardKey( o ) ) {
bool bad = true;
- if ( manager->getShardKey().partOfShardKey( "_id" ) ){
+ if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
BSONObjBuilder b;
b.appendOID( "_id" , 0 , true );
b.appendElements( o );
o = b.obj();
bad = ! manager->hasShardKey( o );
}
-
- if ( bad ){
+
+ if ( bad ) {
log() << "tried to insert object without shard key: " << r.getns() << " " << o << endl;
throw UserException( 8011 , "tried to insert object without shard key" );
}
-
+
}
// Many operations benefit from having the shard key early in the object
o = manager->getShardKey().moveToFront(o);
bool gotThrough = false;
- for ( int i=0; i<10; i++ ){
+ for ( int i=0; i<10; i++ ) {
try {
ChunkPtr c = manager->findChunk( o );
log(4) << " server:" << c->getShard().toString() << " " << o << endl;
insert( c->getShard() , r.getns() , o );
-
+
r.gotInsert();
c->splitIfShould( o.objsize() );
gotThrough = true;
break;
}
- catch ( StaleConfigException& ){
+ catch ( StaleConfigException& ) {
log(1) << "retrying insert because of StaleConfigException: " << o << endl;
r.reset();
manager = r.getChunkManager();
@@ -171,40 +172,40 @@ namespace mongo {
assert( gotThrough );
- }
+ }
}
- void _update( Request& r , DbMessage& d, ChunkManagerPtr manager ){
+ void _update( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
int flags = d.pullInt();
-
+
BSONObj query = d.nextJsObj();
uassert( 13506 , "$atomic not supported sharded" , query["$atomic"].eoo() );
uassert( 10201 , "invalid update" , d.moreJSObjs() );
BSONObj toupdate = d.nextJsObj();
BSONObj chunkFinder = query;
-
+
bool upsert = flags & UpdateOption_Upsert;
bool multi = flags & UpdateOption_Multi;
uassert( 10202 , "can't mix multi and upsert and sharding" , ! ( upsert && multi ) );
- if (upsert){
+ if (upsert) {
uassert(8012, "can't upsert something without shard key",
- (manager->hasShardKey(toupdate) ||
- (toupdate.firstElement().fieldName()[0] == '$' && manager->hasShardKey(query))));
+ (manager->hasShardKey(toupdate) ||
+ (toupdate.firstElement().fieldName()[0] == '$' && manager->hasShardKey(query))));
BSONObj key = manager->getShardKey().extractKey(query);
- BSONForEach(e, key){
+ BSONForEach(e, key) {
uassert(13465, "shard key in upsert query must be an exact match", getGtLtOp(e) == BSONObj::Equality);
}
}
bool save = false;
- if ( ! manager->hasShardKey( query ) ){
- if ( multi ){
+ if ( ! manager->hasShardKey( query ) ) {
+ if ( multi ) {
}
- else if ( strcmp( query.firstElement().fieldName() , "_id" ) || query.nFields() != 1 ){
+ else if ( strcmp( query.firstElement().fieldName() , "_id" ) || query.nFields() != 1 ) {
throw UserException( 8013 , "can't do non-multi update with query that doesn't have the shard key" );
}
else {
@@ -213,56 +214,58 @@ namespace mongo {
}
}
-
- if ( ! save ){
- if ( toupdate.firstElement().fieldName()[0] == '$' ){
+
+ if ( ! save ) {
+ if ( toupdate.firstElement().fieldName()[0] == '$' ) {
BSONObjIterator ops(toupdate);
- while(ops.more()){
+ while(ops.more()) {
BSONElement op(ops.next());
if (op.type() != Object)
continue;
BSONObjIterator fields(op.embeddedObject());
- while(fields.more()){
+ while(fields.more()) {
const string field = fields.next().fieldName();
- uassert(13123,
- str::stream() << "Can't modify shard key's value field" << field
- << " for collection: " << manager->getns(),
+ uassert(13123,
+                            str::stream() << "Can't modify shard key's value field " << field
+ << " for collection: " << manager->getns(),
! manager->getShardKey().partOfShardKey(field));
}
}
- } else if ( manager->hasShardKey( toupdate ) ){
- uassert( 8014,
+ }
+ else if ( manager->hasShardKey( toupdate ) ) {
+ uassert( 8014,
str::stream() << "cannot modify shard key for collection: " << manager->getns(),
manager->getShardKey().compare( query , toupdate ) == 0 );
- } else {
- uasserted(12376,
+ }
+ else {
+ uasserted(12376,
str::stream() << "shard key must be in update object for collection: " << manager->getns() );
}
}
-
- if ( multi ){
+
+ if ( multi ) {
set<Shard> shards;
manager->getShardsForQuery( shards , chunkFinder );
int * x = (int*)(r.d().afterNS());
x[0] |= UpdateOption_Broadcast;
- for ( set<Shard>::iterator i=shards.begin(); i!=shards.end(); i++){
+ for ( set<Shard>::iterator i=shards.begin(); i!=shards.end(); i++) {
doWrite( dbUpdate , r , *i , false );
}
}
else {
int left = 5;
- while ( true ){
+ while ( true ) {
try {
ChunkPtr c = manager->findChunk( chunkFinder );
doWrite( dbUpdate , r , c->getShard() );
c->splitIfShould( d.msg().header()->dataLen() );
break;
}
- catch ( StaleConfigException& e ){
+ catch ( StaleConfigException& e ) {
if ( left <= 0 )
throw e;
left--;
- log() << "update failed b/c of StaleConfigException, retrying "
+ log() << "update failed b/c of StaleConfigException, retrying "
<< " left:" << left << " ns: " << r.getns() << " query: " << query << endl;
r.reset( false );
manager = r.getChunkManager();
@@ -271,75 +274,75 @@ namespace mongo {
}
}
-
- void _delete( Request& r , DbMessage& d, ChunkManagerPtr manager ){
+
+ void _delete( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
int flags = d.pullInt();
bool justOne = flags & 1;
-
+
uassert( 10203 , "bad delete message" , d.moreJSObjs() );
BSONObj pattern = d.nextJsObj();
uassert( 13505 , "$atomic not supported sharded" , pattern["$atomic"].eoo() );
set<Shard> shards;
int left = 5;
-
- while ( true ){
+
+ while ( true ) {
try {
manager->getShardsForQuery( shards , pattern );
log(2) << "delete : " << pattern << " \t " << shards.size() << " justOne: " << justOne << endl;
- if ( shards.size() == 1 ){
+ if ( shards.size() == 1 ) {
doWrite( dbDelete , r , *shards.begin() );
return;
}
break;
}
- catch ( StaleConfigException& e ){
+ catch ( StaleConfigException& e ) {
if ( left <= 0 )
throw e;
left--;
- log() << "delete failed b/c of StaleConfigException, retrying "
+ log() << "delete failed b/c of StaleConfigException, retrying "
<< " left:" << left << " ns: " << r.getns() << " patt: " << pattern << endl;
r.reset( false );
shards.clear();
manager = r.getChunkManager();
}
}
-
+
if ( justOne && ! pattern.hasField( "_id" ) )
throw UserException( 8015 , "can only delete with a non-shard key pattern if can delete as many as we find" );
-
- for ( set<Shard>::iterator i=shards.begin(); i!=shards.end(); i++){
+
+ for ( set<Shard>::iterator i=shards.begin(); i!=shards.end(); i++) {
int * x = (int*)(r.d().afterNS());
x[0] |= RemoveOption_Broadcast;
doWrite( dbDelete , r , *i , false );
}
}
-
- virtual void writeOp( int op , Request& r ){
+
+ virtual void writeOp( int op , Request& r ) {
const char *ns = r.getns();
log(3) << "write: " << ns << endl;
-
+
DbMessage& d = r.d();
ChunkManagerPtr info = r.getChunkManager();
assert( info );
-
- if ( op == dbInsert ){
+
+ if ( op == dbInsert ) {
_insert( r , d , info );
}
- else if ( op == dbUpdate ){
- _update( r , d , info );
+ else if ( op == dbUpdate ) {
+ _update( r , d , info );
}
- else if ( op == dbDelete ){
+ else if ( op == dbDelete ) {
_delete( r , d , info );
}
else {
log() << "sharding can't do write op: " << op << endl;
throw UserException( 8016 , "can't do this write op on sharded collection" );
}
-
+
}
};
-
+
Strategy * SHARDED = new ShardStrategy();
}
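_insert(), _update() and _delete() above share one idiom: attempt the routed write, and on StaleConfigException reset the request, reload the chunk manager, and retry a bounded number of times. A distilled sketch (the Op functor and function name are hypothetical):

    template <typename Op>
    void retryOnStaleConfig( Request& r , ChunkManagerPtr& manager , Op op ) {
        bool gotThrough = false;
        for ( int i = 0; i < 10; i++ ) {          // same bound as _insert()
            try {
                op( manager );
                gotThrough = true;
                break;
            }
            catch ( StaleConfigException& ) {
                r.reset();                        // drop cached shard versions
                manager = r.getChunkManager();    // re-fetch chunk ranges
            }
        }
        assert( gotThrough );                     // mirrors _insert()'s post-condition
    }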
diff --git a/s/strategy_single.cpp b/s/strategy_single.cpp
index 93de5b79ebf..6b6ffd2e4ff 100644
--- a/s/strategy_single.cpp
+++ b/s/strategy_single.cpp
@@ -24,26 +24,26 @@
namespace mongo {
class SingleStrategy : public Strategy {
-
+
public:
- SingleStrategy(){
+ SingleStrategy() {
_commandsSafeToPass.insert( "$eval" );
_commandsSafeToPass.insert( "create" );
}
private:
- virtual void queryOp( Request& r ){
+ virtual void queryOp( Request& r ) {
QueryMessage q( r.d() );
-
+
log(3) << "single query: " << q.ns << " " << q.query << " ntoreturn: " << q.ntoreturn << endl;
-
- if ( r.isCommand() ){
-
+
+ if ( r.isCommand() ) {
+
if ( handleSpecialNamespaces( r , q ) )
return;
-
+
int loops = 5;
- while ( true ){
+ while ( true ) {
BSONObjBuilder builder;
try {
bool ok = Command::runAgainstRegistered(q.ns, q.query, builder);
@@ -54,15 +54,15 @@ namespace mongo {
}
break;
}
- catch ( StaleConfigException& e ){
+ catch ( StaleConfigException& e ) {
if ( loops <= 0 )
throw e;
-
+
loops--;
log() << "retrying command: " << q.query << endl;
ShardConnection::checkMyConnectionVersions( e.getns() );
}
- catch ( AssertionException& e ){
+ catch ( AssertionException& e ) {
e.getInfo().append( builder , "assertion" , "assertionCode" );
builder.append( "errmsg" , "db assertion failure" );
builder.append( "ok" , 0 );
@@ -71,18 +71,18 @@ namespace mongo {
return;
}
}
-
+
string commandName = q.query.firstElement().fieldName();
-
+
uassert(13390, "unrecognized command: " + commandName, _commandsSafeToPass.count(commandName) != 0);
}
-
+
doQuery( r , r.primaryShard() );
}
-
- virtual void getMore( Request& r ){
+
+ virtual void getMore( Request& r ) {
const char *ns = r.getns();
-
+
log(3) << "single getmore: " << ns << endl;
ShardConnection conn( r.primaryShard() , ns );
@@ -91,26 +91,26 @@ namespace mongo {
bool ok = conn->callRead( r.m() , response);
uassert( 10204 , "dbgrid: getmore: error calling db", ok);
r.reply( response , conn->getServerAddress() );
-
+
conn.done();
}
-
- void handleIndexWrite( int op , Request& r ){
-
+
+ void handleIndexWrite( int op , Request& r ) {
+
DbMessage& d = r.d();
- if ( op == dbInsert ){
- while( d.moreJSObjs() ){
+ if ( op == dbInsert ) {
+ while( d.moreJSObjs() ) {
BSONObj o = d.nextJsObj();
const char * ns = o["ns"].valuestr();
- if ( r.getConfig()->isSharded( ns ) ){
+ if ( r.getConfig()->isSharded( ns ) ) {
BSONObj newIndexKey = o["key"].embeddedObjectUserCheck();
-
- uassert( 10205 , (string)"can't use unique indexes with sharding ns:" + ns +
- " key: " + o["key"].embeddedObjectUserCheck().toString() ,
+
+ uassert( 10205 , (string)"can't use unique indexes with sharding ns:" + ns +
+ " key: " + o["key"].embeddedObjectUserCheck().toString() ,
IndexDetails::isIdIndexPattern( newIndexKey ) ||
- ! o["unique"].trueValue() ||
+ ! o["unique"].trueValue() ||
r.getConfig()->getChunkManager( ns )->getShardKey().isPrefixOf( newIndexKey ) );
ChunkManagerPtr cm = r.getConfig()->getChunkManager( ns );
@@ -127,10 +127,10 @@ namespace mongo {
r.gotInsert();
}
}
- else if ( op == dbUpdate ){
+ else if ( op == dbUpdate ) {
throw UserException( 8050 , "can't update system.indexes" );
}
- else if ( op == dbDelete ){
+ else if ( op == dbDelete ) {
// TODO
throw UserException( 8051 , "can't delete indexes on sharded collection yet" );
}
@@ -138,26 +138,26 @@ namespace mongo {
log() << "handleIndexWrite invalid write op: " << op << endl;
throw UserException( 8052 , "handleIndexWrite invalid write op" );
}
-
+
}
- virtual void writeOp( int op , Request& r ){
+ virtual void writeOp( int op , Request& r ) {
const char *ns = r.getns();
-
- if ( r.isShardingEnabled() &&
- strstr( ns , ".system.indexes" ) == strchr( ns , '.' ) &&
- strchr( ns , '.' ) ) {
+
+ if ( r.isShardingEnabled() &&
+ strstr( ns , ".system.indexes" ) == strchr( ns , '.' ) &&
+ strchr( ns , '.' ) ) {
log(1) << " .system.indexes write for: " << ns << endl;
handleIndexWrite( op , r );
return;
}
-
+
log(3) << "single write: " << ns << endl;
doWrite( op , r , r.primaryShard() );
            r.gotInsert(); // Won't handle multi-insert correctly. Not worth parsing the request.
}
- bool handleSpecialNamespaces( Request& r , QueryMessage& q ){
+ bool handleSpecialNamespaces( Request& r , QueryMessage& q ) {
const char * ns = r.getns();
ns = strstr( r.getns() , ".$cmd.sys." );
if ( ! ns )
@@ -166,25 +166,25 @@ namespace mongo {
BSONObjBuilder b;
vector<Shard> shards;
-
- if ( strcmp( ns , "inprog" ) == 0 ){
+
+ if ( strcmp( ns , "inprog" ) == 0 ) {
Shard::getAllShards( shards );
-
+
BSONArrayBuilder arr( b.subarrayStart( "inprog" ) );
- for ( unsigned i=0; i<shards.size(); i++ ){
+ for ( unsigned i=0; i<shards.size(); i++ ) {
Shard shard = shards[i];
ScopedDbConnection conn( shard );
BSONObj temp = conn->findOne( r.getns() , BSONObj() );
- if ( temp["inprog"].isABSONObj() ){
+ if ( temp["inprog"].isABSONObj() ) {
BSONObjIterator i( temp["inprog"].Obj() );
- while ( i.more() ){
+ while ( i.more() ) {
BSONObjBuilder x;
-
+
BSONObjIterator j( i.next().Obj() );
- while( j.more() ){
+ while( j.more() ) {
BSONElement e = j.next();
- if ( strcmp( e.fieldName() , "opid" ) == 0 ){
+ if ( strcmp( e.fieldName() , "opid" ) == 0 ) {
stringstream ss;
ss << shard.getName() << ':' << e.numberInt();
x.append( "opid" , ss.str() );
@@ -198,15 +198,15 @@ namespace mongo {
}
conn.done();
}
-
+
arr.done();
}
- else if ( strcmp( ns , "killop" ) == 0 ){
+ else if ( strcmp( ns , "killop" ) == 0 ) {
BSONElement e = q.query["op"];
- if ( strstr( r.getns() , "admin." ) != 0 ){
+ if ( strstr( r.getns() , "admin." ) != 0 ) {
b.append( "err" , "unauthorized" );
}
- else if ( e.type() != String ){
+ else if ( e.type() != String ) {
b.append( "err" , "bad op" );
b.append( e );
}
@@ -214,7 +214,7 @@ namespace mongo {
b.append( e );
string s = e.String();
string::size_type i = s.find( ':' );
- if ( i == string::npos ){
+ if ( i == string::npos ) {
b.append( "err" , "bad opid" );
}
else {
@@ -225,14 +225,14 @@ namespace mongo {
log() << "want to kill op: " << e << endl;
Shard s(shard);
-
+
ScopedDbConnection conn( s );
conn->findOne( r.getns() , BSON( "op" << opid ) );
conn.done();
}
}
}
- else if ( strcmp( ns , "unlock" ) == 0 ){
+ else if ( strcmp( ns , "unlock" ) == 0 ) {
b.append( "err" , "can't do unlock through mongos" );
}
else {
@@ -247,6 +247,6 @@ namespace mongo {
set<string> _commandsSafeToPass;
};
-
+
Strategy * SINGLE = new SingleStrategy();
}
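handleSpecialNamespaces() above mangles each shard's numeric opid into "<shard>:<opid>" for inprog and reverses the split for killop. That round trip as standalone C++ (names hypothetical):

    #include <string>
    #include <sstream>
    #include <cstdlib>

    std::string mangleOpid( const std::string& shard , int opid ) {
        std::stringstream ss;
        ss << shard << ':' << opid;                // e.g. "shard0000:4142"
        return ss.str();
    }

    bool unmangleOpid( const std::string& s , std::string& shard , int& opid ) {
        std::string::size_type i = s.find( ':' );
        if ( i == std::string::npos )
            return false;                          // "bad opid", as killop replies
        shard = s.substr( 0 , i );
        opid = atoi( s.substr( i + 1 ).c_str() );
        return true;
    }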
diff --git a/s/util.h b/s/util.h
index 067db5c2373..b3f63d85cb4 100644
--- a/s/util.h
+++ b/s/util.h
@@ -36,20 +36,20 @@ namespace mongo {
};
unsigned long long _combined;
};
-
+
ShardChunkVersion( int major=0, int minor=0 )
- : _minor(minor),_major(major){
+ : _minor(minor),_major(major) {
}
-
+
ShardChunkVersion( unsigned long long ll )
- : _combined( ll ){
+ : _combined( ll ) {
}
-
- ShardChunkVersion( const BSONElement& e ){
- if ( e.type() == Date || e.type() == Timestamp ){
+
+ ShardChunkVersion( const BSONElement& e ) {
+ if ( e.type() == Date || e.type() == Timestamp ) {
_combined = e._numberLong();
}
- else if ( e.eoo() ){
+ else if ( e.eoo() ) {
_combined = 0;
}
else {
@@ -59,7 +59,7 @@ namespace mongo {
}
}
- void inc( bool major ){
+ void inc( bool major ) {
if ( major )
incMajor();
else
@@ -70,7 +70,7 @@ namespace mongo {
_major++;
_minor = 0;
}
-
+
void incMinor() {
_minor++;
}
@@ -83,19 +83,19 @@ namespace mongo {
return _combined > 0;
}
- string toString() const {
- stringstream ss;
- ss << _major << "|" << _minor;
- return ss.str();
+ string toString() const {
+ stringstream ss;
+ ss << _major << "|" << _minor;
+ return ss.str();
}
int majorVersion() const { return _major; }
int minorVersion() const { return _minor; }
-
+
operator unsigned long long() const { return _combined; }
-
- ShardChunkVersion& operator=( const BSONElement& elem ){
- switch ( elem.type() ){
+
+ ShardChunkVersion& operator=( const BSONElement& elem ) {
+ switch ( elem.type() ) {
case Timestamp:
case NumberLong:
case Date:
@@ -110,39 +110,39 @@ namespace mongo {
return *this;
}
};
-
- inline ostream& operator<<( ostream &s , const ShardChunkVersion& v){
+
+ inline ostream& operator<<( ostream &s , const ShardChunkVersion& v) {
s << v._major << "|" << v._minor;
return s;
}
- /**
- * your config info for a given shard/chunk is out of date
+ /**
+ * your config info for a given shard/chunk is out of date
*/
class StaleConfigException : public AssertionException {
public:
StaleConfigException( const string& ns , const string& raw , bool justConnection = false )
- : AssertionException( (string)"ns: " + ns + " " + raw , 9996 ) ,
+ : AssertionException( (string)"ns: " + ns + " " + raw , 9996 ) ,
_justConnection(justConnection) ,
- _ns(ns){
+ _ns(ns) {
}
-
- virtual ~StaleConfigException() throw(){}
-
+
+ virtual ~StaleConfigException() throw() {}
+
virtual void appendPrefix( stringstream& ss ) const { ss << "StaleConfigException: "; }
-
+
bool justConnection() const { return _justConnection; }
-
+
string getns() const { return _ns; }
- static bool parse( const string& big , string& ns , string& raw ){
+ static bool parse( const string& big , string& ns , string& raw ) {
string::size_type start = big.find( '[' );
if ( start == string::npos )
return false;
string::size_type end = big.find( ']' ,start );
if ( end == string::npos )
return false;
-
+
ns = big.substr( start + 1 , ( end - start ) - 1 );
raw = big.substr( end + 1 );
return true;
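ShardChunkVersion keeps _minor in the low 32 bits and _major in the high 32 bits of _combined (given the little-endian layout the union assumes), so plain integer comparison orders versions the way toString() prints them. A standalone sketch of the packing:

    #include <cassert>

    unsigned long long combine( unsigned major , unsigned minor ) {
        return ( (unsigned long long)major << 32 ) | minor;
    }

    int main() {
        assert( combine( 1 , 9 ) < combine( 2 , 0 ) );   // every 1|x sorts below 2|0
        assert( combine( 2 , 0 ) < combine( 2 , 1 ) );   // 2|0 < 2|1
        return 0;
    }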
diff --git a/s/writeback_listener.cpp b/s/writeback_listener.cpp
index 2e1bc10af25..528aa15ce59 100644
--- a/s/writeback_listener.cpp
+++ b/s/writeback_listener.cpp
@@ -37,12 +37,12 @@ namespace mongo {
map<ConnectionId,WriteBackListener::WBStatus> WriteBackListener::_seenWritebacks;
mongo::mutex WriteBackListener::_seenWritebacksLock("WriteBackListener::seen");
- WriteBackListener::WriteBackListener( const string& addr ) : _addr( addr ){
+ WriteBackListener::WriteBackListener( const string& addr ) : _addr( addr ) {
log() << "creating WriteBackListener for: " << addr << endl;
}
-
+
/* static */
- void WriteBackListener::init( DBClientBase& conn ){
+ void WriteBackListener::init( DBClientBase& conn ) {
scoped_lock lk( _cacheLock );
WriteBackListener*& l = _cache[conn.getServerAddress()];
if ( l )
@@ -52,13 +52,13 @@ namespace mongo {
}
/* static */
- void WriteBackListener::waitFor( ConnectionId connectionId, const OID& oid ){
+ void WriteBackListener::waitFor( ConnectionId connectionId, const OID& oid ) {
Timer t;
- for ( int i=0; i<5000; i++ ){
+ for ( int i=0; i<5000; i++ ) {
{
scoped_lock lk( _seenWritebacksLock );
WBStatus s = _seenWritebacks[connectionId];
- if ( oid <= s.id ){
+ if ( oid <= s.id ) {
// TODO return gle
return;
}
@@ -70,36 +70,36 @@ namespace mongo {
uasserted( 13403 , ss.str() );
}
- void WriteBackListener::run(){
+ void WriteBackListener::run() {
int secsToSleep = 0;
- while ( ! inShutdown() && Shard::isMember( _addr ) ){
-
+ while ( ! inShutdown() && Shard::isMember( _addr ) ) {
+
try {
ScopedDbConnection conn( _addr );
-
+
BSONObj result;
-
+
{
BSONObjBuilder cmd;
cmd.appendOID( "writebacklisten" , &serverID ); // Command will block for data
- if ( ! conn->runCommand( "admin" , cmd.obj() , result ) ){
+ if ( ! conn->runCommand( "admin" , cmd.obj() , result ) ) {
log() << "writebacklisten command failed! " << result << endl;
conn.done();
continue;
}
}
-
+
log(1) << "writebacklisten result: " << result << endl;
-
+
BSONObj data = result.getObjectField( "data" );
- if ( data.getBoolField( "writeBack" ) ){
+ if ( data.getBoolField( "writeBack" ) ) {
string ns = data["ns"].valuestrsafe();
-
+
ConnectionId cid = 0;
OID wid;
- if ( data["connectionId"].isNumber() && data["id"].type() == jstOID ){
+ if ( data["connectionId"].isNumber() && data["id"].type() == jstOID ) {
cid = data["connectionId"].numberLong();
wid = data["id"].OID();
}
@@ -109,28 +109,28 @@ namespace mongo {
int len; // not used, but needed for next call
Message m( (void*)data["msg"].binData( len ) , false );
- massert( 10427 , "invalid writeback message" , m.header()->valid() );
+ massert( 10427 , "invalid writeback message" , m.header()->valid() );
DBConfigPtr db = grid.getDBConfig( ns );
ShardChunkVersion needVersion( data["version"] );
-
- log(1) << "connectionId: " << cid << " writebackId: " << wid << " needVersion : " << needVersion.toString()
+
+ log(1) << "connectionId: " << cid << " writebackId: " << wid << " needVersion : " << needVersion.toString()
<< " mine : " << db->getChunkManager( ns )->getVersion().toString() << endl;// TODO change to log(3)
-
+
if ( logLevel ) log(1) << debugString( m ) << endl;
- if ( needVersion.isSet() && needVersion <= db->getChunkManager( ns )->getVersion() ){
+ if ( needVersion.isSet() && needVersion <= db->getChunkManager( ns )->getVersion() ) {
// this means when the write went originally, the version was old
                        // if we're here, it means we've already updated the config, so we don't need to do it again
//db->getChunkManager( ns , true ); // SERVER-1349
}
else {
// we received a writeback object that was sent to a previous version of a shard
- // the actual shard may not have the object the writeback operation is for
+ // the actual shard may not have the object the writeback operation is for
// we need to reload the chunk manager and get the new shard versions
db->getChunkManager( ns , true );
}
-
+
            // do the request and then call getLastError
// we have to call getLastError so we can return the right fields to the user if they decide to call getLastError
@@ -142,14 +142,14 @@ namespace mongo {
gle = BSONObj(); // TODO
}
- catch ( DBException& e ){
+ catch ( DBException& e ) {
error() << "error processing writeback: " << e << endl;
BSONObjBuilder b;
b.append( "err" , e.toString() );
e.getInfo().append( b );
gle = b.obj();
}
-
+
{
scoped_lock lk( _seenWritebacksLock );
WBStatus& s = _seenWritebacks[cid];
@@ -157,30 +157,30 @@ namespace mongo {
s.gle = gle;
}
}
- else if ( result["noop"].trueValue() ){
+ else if ( result["noop"].trueValue() ) {
// no-op
}
else {
log() << "unknown writeBack result: " << result << endl;
}
-
+
conn.done();
secsToSleep = 0;
continue;
}
- catch ( std::exception e ){
+            catch ( std::exception& e ) {
- if ( inShutdown() ){
+ if ( inShutdown() ) {
// we're shutting down, so just clean up
return;
}
log() << "WriteBackListener exception : " << e.what() << endl;
-
+
// It's possible this shard was removed
- Shard::reloadShardInfo();
+ Shard::reloadShardInfo();
}
- catch ( ... ){
+ catch ( ... ) {
log() << "WriteBackListener uncaught exception!" << endl;
}
secsToSleep++;
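run() above is a linear-backoff loop: each failing pass bumps secsToSleep and each clean pass resets it to zero. Distilled (the sleep at the bottom of the real loop is cut off above, so it is assumed here):

    void listenLoopSketch( bool (*onePass)() ) {
        int secsToSleep = 0;
        while ( true ) {
            if ( onePass() ) {        // handled a writeback, or a clean no-op
                secsToSleep = 0;      // healthy again: poll immediately
                continue;
            }
            secsToSleep++;            // each consecutive failure waits longer
            // sleepsecs( secsToSleep );   // assumed tail of the real loop
        }
    }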
diff --git a/s/writeback_listener.h b/s/writeback_listener.h
index bd2664d9332..bcb039a404b 100644
--- a/s/writeback_listener.h
+++ b/s/writeback_listener.h
@@ -42,7 +42,7 @@ namespace mongo {
protected:
WriteBackListener( const string& addr );
- string name() const { return "WriteBackListener"; }
+ string name() const { return "WriteBackListener"; }
void run();
private:
@@ -50,7 +50,7 @@ namespace mongo {
static mongo::mutex _cacheLock; // protects _cache
static map<string,WriteBackListener*> _cache; // server to listener
-
+
struct WBStatus {
OID id;
BSONObj gle;
@@ -59,7 +59,7 @@ namespace mongo {
static mongo::mutex _seenWritebacksLock; // protects _seenWritbacks
static map<ConnectionId,WBStatus> _seenWritebacks; // connectionId -> last write back GLE
};
-
+
void waitForWriteback( const OID& oid );
} // namespace mongo
diff --git a/scripting/bench.cpp b/scripting/bench.cpp
index b3bf96c6146..27239853fbd 100644
--- a/scripting/bench.cpp
+++ b/scripting/bench.cpp
@@ -33,15 +33,15 @@ namespace mongo {
/**
* benchQuery( "foo" , { _id : 1 } )
*/
- BSONObj benchQuery( const BSONObj& args ){
+ BSONObj benchQuery( const BSONObj& args ) {
return BSONObj();
}
struct BenchRunConfig {
- BenchRunConfig(){
+ BenchRunConfig() {
host = "localhost";
db = "test";
-
+
parallel = 1;
seconds = 1;
@@ -49,10 +49,10 @@ namespace mongo {
threadsReady = 0;
error = false;
}
-
+
string host;
string db;
-
+
unsigned parallel;
int seconds;
@@ -63,18 +63,18 @@ namespace mongo {
bool error;
};
-
- static void benchThread( BenchRunConfig * config ){
+
+ static void benchThread( BenchRunConfig * config ) {
ScopedDbConnection conn( config->host );
- config->threadsReady++;
-
- while ( config->active ){
+ config->threadsReady++;
+
+ while ( config->active ) {
BSONObjIterator i( config->ops );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
string ns = e["ns"].String();
string op = e["op"].String();
-
+
if ( op == "findOne" ) {
conn->findOne( ns , e["query"].Obj() );
}
@@ -89,18 +89,18 @@ namespace mongo {
conn.done();
}
-
+
/**
     * benchRun( { ops : [] , host : XXX , db : XXXX , parallel : 5 , seconds : 5 } )
*/
- BSONObj benchRun( const BSONObj& argsFake ){
+ BSONObj benchRun( const BSONObj& argsFake ) {
assert( argsFake.firstElement().isABSONObj() );
BSONObj args = argsFake.firstElement().Obj();
// setup
BenchRunConfig config;
-
+
if ( args["host"].type() == String )
config.host = args["host"].String();
if ( args["db"].type() == String )
@@ -122,40 +122,40 @@ namespace mongo {
vector<boost::thread*> all;
for ( unsigned i=0; i<config.parallel; i++ )
all.push_back( new boost::thread( boost::bind( benchThread , &config ) ) );
-
+
// give them time to init
while ( config.threadsReady < config.parallel )
sleepmillis( 1 );
BSONObj before;
conn->simpleCommand( "admin" , &before , "serverStatus" );
-
+
sleepsecs( config.seconds );
BSONObj after;
conn->simpleCommand( "admin" , &after , "serverStatus" );
-
+
conn.done();
config.active = false;
-
+
for ( unsigned i=0; i<all.size(); i++ )
all[i]->join();
-
+
if ( config.error )
return BSON( "err" << 1 );
-
+
// compute actual ops/sec
-
+
before = before["opcounters"].Obj();
after = after["opcounters"].Obj();
-
+
BSONObjBuilder buf;
buf.append( "note" , "values per second" );
{
BSONObjIterator i( after );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
double x = e.number();
x = x - before[e.fieldName()].number();
@@ -166,7 +166,7 @@ namespace mongo {
return BSON( "" << zoo );
}
- void installBenchmarkSystem( Scope& scope ){
+ void installBenchmarkSystem( Scope& scope ) {
scope.injectNative( "benchRun" , benchRun );
}
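For reference, a sketch of the argument benchRun() unpacks above, built with this tree's BSON()/BSON_ARRAY() macros; only fields the code reads (host, db, parallel, seconds, and per-op ns/op/query) are shown:

    BSONObj args = BSON( "host" << "localhost"
                         << "db" << "test"
                         << "parallel" << 4
                         << "seconds" << 5
                         << "ops" << BSON_ARRAY(
                                BSON( "ns" << "test.foo"
                                      << "op" << "findOne"
                                      << "query" << BSON( "_id" << 1 ) ) ) );
    // benchRun() receives this as the single element of its argsFake object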
diff --git a/scripting/engine.cpp b/scripting/engine.cpp
index e696f70f2dd..60e56ae3599 100644
--- a/scripting/engine.cpp
+++ b/scripting/engine.cpp
@@ -23,27 +23,27 @@
namespace mongo {
long long Scope::_lastVersion = 1;
-
+
int Scope::_numScopes = 0;
- Scope::Scope() : _localDBName("") , _loadedVersion(0){
+ Scope::Scope() : _localDBName("") , _loadedVersion(0) {
_numScopes++;
}
- Scope::~Scope(){
+ Scope::~Scope() {
_numScopes--;
}
ScriptEngine::ScriptEngine() : _scopeInitCallback() {
}
- ScriptEngine::~ScriptEngine(){
+ ScriptEngine::~ScriptEngine() {
}
- void Scope::append( BSONObjBuilder & builder , const char * fieldName , const char * scopeName ){
+ void Scope::append( BSONObjBuilder & builder , const char * fieldName , const char * scopeName ) {
int t = type( scopeName );
-
- switch ( t ){
+
+ switch ( t ) {
case Object:
builder.append( fieldName , getObject( scopeName ) );
break;
@@ -82,20 +82,20 @@ namespace mongo {
temp << t;
uassert( 10206 , temp.str() , 0 );
}
-
+
}
- int Scope::invoke( const char* code , const BSONObj& args, int timeoutMs ){
+ int Scope::invoke( const char* code , const BSONObj& args, int timeoutMs ) {
ScriptingFunction func = createFunction( code );
uassert( 10207 , "compile failed" , func );
return invoke( func , args, timeoutMs );
}
-
- bool Scope::execFile( const string& filename , bool printResult , bool reportError , bool assertOnError, int timeoutMs ){
-
+
+ bool Scope::execFile( const string& filename , bool printResult , bool reportError , bool assertOnError, int timeoutMs ) {
+
path p( filename );
- if ( ! exists( p ) ){
+ if ( ! exists( p ) ) {
log() << "file [" << filename << "] doesn't exist" << endl;
if ( assertOnError )
assert( 0 );
@@ -103,10 +103,10 @@ namespace mongo {
}
// iterate directories and recurse using all *.js files in the directory
- if ( is_directory( p ) ){
+ if ( is_directory( p ) ) {
directory_iterator end;
bool empty = true;
- for (directory_iterator it (p); it != end; it++){
+ for (directory_iterator it (p); it != end; it++) {
empty = false;
path sub (*it);
if (!endsWith(sub.string().c_str(), ".js"))
@@ -115,7 +115,7 @@ namespace mongo {
return false;
}
- if (empty){
+ if (empty) {
log() << "directory [" << filename << "] doesn't have any *.js files" << endl;
if ( assertOnError )
assert( 0 );
@@ -124,7 +124,7 @@ namespace mongo {
return true;
}
-
+
File f;
f.open( filename.c_str() , true );
@@ -139,7 +139,7 @@ namespace mongo {
f.read( 0 , data.get() , L );
int offset = 0;
- if (data[0] == '#' && data[1] == '!'){
+ if (data[0] == '#' && data[1] == '!') {
const char* newline = strchr(data.get(), '\n');
if (! newline)
return true; // file of just shebang treated same as empty file
@@ -147,74 +147,74 @@ namespace mongo {
}
StringData code (data.get() + offset, L - offset);
-
+
return exec( code , filename , printResult , reportError , assertOnError, timeoutMs );
}
- void Scope::storedFuncMod(){
+ void Scope::storedFuncMod() {
_lastVersion++;
}
-
+
void Scope::validateObjectIdString( const string &str ) {
massert( 10448 , "invalid object id: length", str.size() == 24 );
- for ( string::size_type i=0; i<str.size(); i++ ){
+ for ( string::size_type i=0; i<str.size(); i++ ) {
char c = str[i];
if ( ( c >= '0' && c <= '9' ) ||
- ( c >= 'a' && c <= 'f' ) ||
- ( c >= 'A' && c <= 'F' ) ){
+ ( c >= 'a' && c <= 'f' ) ||
+ ( c >= 'A' && c <= 'F' ) ) {
continue;
}
massert( 10430 , "invalid object id: not hex", false );
- }
+ }
}
- void Scope::loadStored( bool ignoreNotConnected ){
- if ( _localDBName.size() == 0 ){
+ void Scope::loadStored( bool ignoreNotConnected ) {
+ if ( _localDBName.size() == 0 ) {
if ( ignoreNotConnected )
return;
uassert( 10208 , "need to have locallyConnected already" , _localDBName.size() );
}
if ( _loadedVersion == _lastVersion )
return;
-
+
_loadedVersion = _lastVersion;
string coll = _localDBName + ".system.js";
-
+
static DBClientBase * db = createDirectClient();
auto_ptr<DBClientCursor> c = db->query( coll , Query(), 0, 0, NULL, QueryOption_SlaveOk, 0 );
assert( c.get() );
-
+
set<string> thisTime;
-
- while ( c->more() ){
+
+ while ( c->more() ) {
BSONObj o = c->next();
BSONElement n = o["_id"];
BSONElement v = o["value"];
-
+
uassert( 10209 , "name has to be a string" , n.type() == String );
uassert( 10210 , "value has to be set" , v.type() != EOO );
-
+
setElement( n.valuestr() , v );
thisTime.insert( n.valuestr() );
_storedNames.insert( n.valuestr() );
-
+
}
// --- remove things from scope that were removed
list<string> toremove;
- for ( set<string>::iterator i=_storedNames.begin(); i!=_storedNames.end(); i++ ){
+ for ( set<string>::iterator i=_storedNames.begin(); i!=_storedNames.end(); i++ ) {
string n = *i;
if ( thisTime.count( n ) == 0 )
toremove.push_back( n );
}
-
- for ( list<string>::iterator i=toremove.begin(); i!=toremove.end(); i++ ){
+
+ for ( list<string>::iterator i=toremove.begin(); i!=toremove.end(); i++ ) {
string n = *i;
_storedNames.erase( n );
execSetup( (string)"delete " + n , "clean up scope" );
@@ -222,11 +222,11 @@ namespace mongo {
}
- ScriptingFunction Scope::createFunction( const char * code ){
- if ( code[0] == '/' && code [1] == '*' ){
+ ScriptingFunction Scope::createFunction( const char * code ) {
+ if ( code[0] == '/' && code [1] == '*' ) {
code += 2;
- while ( code[0] && code[1] ){
- if ( code[0] == '*' && code[1] == '/' ){
+ while ( code[0] && code[1] ) {
+ if ( code[0] == '*' && code[1] == '/' ) {
code += 2;
break;
}
@@ -240,7 +240,7 @@ namespace mongo {
_cachedFunctions[code] = f;
return f;
}
-
+
typedef map< string , list<Scope*> > PoolToScopes;
class ScopeCache {
@@ -249,21 +249,21 @@ namespace mongo {
ScopeCache() : _mutex("ScopeCache") {
_magic = 17;
}
-
- ~ScopeCache(){
+
+ ~ScopeCache() {
assert( _magic == 17 );
_magic = 1;
if ( inShutdown() )
return;
-
+
clear();
}
- void done( const string& pool , Scope * s ){
+ void done( const string& pool , Scope * s ) {
scoped_lock lk( _mutex );
list<Scope*> & l = _pools[pool];
- if ( l.size() > 10 ){
+ if ( l.size() > 10 ) {
delete s;
}
else {
@@ -271,31 +271,31 @@ namespace mongo {
s->reset();
}
}
-
- Scope * get( const string& pool ){
+
+ Scope * get( const string& pool ) {
scoped_lock lk( _mutex );
list<Scope*> & l = _pools[pool];
if ( l.size() == 0 )
return 0;
-
+
Scope * s = l.back();
l.pop_back();
s->reset();
return s;
}
-
- void clear(){
+
+ void clear() {
set<Scope*> seen;
-
- for ( PoolToScopes::iterator i=_pools.begin() ; i != _pools.end(); i++ ){
- for ( list<Scope*>::iterator j=i->second.begin(); j != i->second.end(); j++ ){
+
+ for ( PoolToScopes::iterator i=_pools.begin() ; i != _pools.end(); i++ ) {
+ for ( list<Scope*>::iterator j=i->second.begin(); j != i->second.end(); j++ ) {
Scope * s = *j;
assert( ! seen.count( s ) );
delete s;
seen.insert( s );
}
}
-
+
_pools.clear();
}
@@ -309,12 +309,12 @@ namespace mongo {
class PooledScope : public Scope {
public:
- PooledScope( const string pool , Scope * real ) : _pool( pool ) , _real( real ){
+ PooledScope( const string pool , Scope * real ) : _pool( pool ) , _real( real ) {
_real->loadStored( true );
};
- virtual ~PooledScope(){
+ virtual ~PooledScope() {
ScopeCache * sc = scopeCache.get();
- if ( sc ){
+ if ( sc ) {
sc->done( _pool , _real );
_real = 0;
}
@@ -326,92 +326,92 @@ namespace mongo {
_real = 0;
}
}
-
- void reset(){
+
+ void reset() {
_real->reset();
}
- void init( const BSONObj * data ){
+ void init( const BSONObj * data ) {
_real->init( data );
}
-
- void localConnect( const char * dbName ){
+
+ void localConnect( const char * dbName ) {
_real->localConnect( dbName );
}
- void externalSetup(){
+ void externalSetup() {
_real->externalSetup();
}
-
- double getNumber( const char *field ){
+
+ double getNumber( const char *field ) {
return _real->getNumber( field );
}
- string getString( const char *field ){
+ string getString( const char *field ) {
return _real->getString( field );
}
- bool getBoolean( const char *field ){
+ bool getBoolean( const char *field ) {
return _real->getBoolean( field );
}
- BSONObj getObject( const char *field ){
+ BSONObj getObject( const char *field ) {
return _real->getObject( field );
}
- int type( const char *field ){
+ int type( const char *field ) {
return _real->type( field );
}
- void setElement( const char *field , const BSONElement& val ){
+ void setElement( const char *field , const BSONElement& val ) {
_real->setElement( field , val );
}
- void setNumber( const char *field , double val ){
+ void setNumber( const char *field , double val ) {
_real->setNumber( field , val );
}
- void setString( const char *field , const char * val ){
+ void setString( const char *field , const char * val ) {
_real->setString( field , val );
}
- void setObject( const char *field , const BSONObj& obj , bool readOnly=true ){
+ void setObject( const char *field , const BSONObj& obj , bool readOnly=true ) {
_real->setObject( field , obj , readOnly );
}
- void setBoolean( const char *field , bool val ){
+ void setBoolean( const char *field , bool val ) {
_real->setBoolean( field , val );
}
- void setThis( const BSONObj * obj ){
+ void setThis( const BSONObj * obj ) {
_real->setThis( obj );
}
-
- ScriptingFunction createFunction( const char * code ){
+
+ ScriptingFunction createFunction( const char * code ) {
return _real->createFunction( code );
}
- ScriptingFunction _createFunction( const char * code ){
+ ScriptingFunction _createFunction( const char * code ) {
return _real->createFunction( code );
}
- void rename( const char * from , const char * to ){
+ void rename( const char * from , const char * to ) {
_real->rename( from , to );
}
/**
* @return 0 on success
*/
- int invoke( ScriptingFunction func , const BSONObj& args, int timeoutMs , bool ignoreReturn ){
+ int invoke( ScriptingFunction func , const BSONObj& args, int timeoutMs , bool ignoreReturn ) {
return _real->invoke( func , args , timeoutMs , ignoreReturn );
}
- string getError(){
+ string getError() {
return _real->getError();
}
-
- bool exec( const StringData& code , const string& name , bool printResult , bool reportError , bool assertOnError, int timeoutMs = 0 ){
+
+ bool exec( const StringData& code , const string& name , bool printResult , bool reportError , bool assertOnError, int timeoutMs = 0 ) {
return _real->exec( code , name , printResult , reportError , assertOnError , timeoutMs );
}
- bool execFile( const string& filename , bool printResult , bool reportError , bool assertOnError, int timeoutMs = 0 ){
+ bool execFile( const string& filename , bool printResult , bool reportError , bool assertOnError, int timeoutMs = 0 ) {
return _real->execFile( filename , printResult , reportError , assertOnError , timeoutMs );
}
-
- void injectNative( const char *field, NativeFunction func ){
+
+ void injectNative( const char *field, NativeFunction func ) {
_real->injectNative( field , func );
}
-
- void gc(){
+
+ void gc() {
_real->gc();
}
@@ -420,57 +420,57 @@ namespace mongo {
Scope * _real;
};
- auto_ptr<Scope> ScriptEngine::getPooledScope( const string& pool ){
- if ( ! scopeCache.get() ){
+ auto_ptr<Scope> ScriptEngine::getPooledScope( const string& pool ) {
+ if ( ! scopeCache.get() ) {
scopeCache.reset( new ScopeCache() );
}
Scope * s = scopeCache->get( pool );
- if ( ! s ){
+ if ( ! s ) {
s = newScope();
}
-
+
auto_ptr<Scope> p;
p.reset( new PooledScope( pool , s ) );
return p;
}
-
- void ScriptEngine::threadDone(){
+
+ void ScriptEngine::threadDone() {
ScopeCache * sc = scopeCache.get();
- if ( sc ){
+ if ( sc ) {
sc->clear();
}
}
-
+
void ( *ScriptEngine::_connectCallback )( DBClientWithCommands & ) = 0;
const char * ( *ScriptEngine::_checkInterruptCallback )() = 0;
unsigned ( *ScriptEngine::_getInterruptSpecCallback )() = 0;
-
+
ScriptEngine * globalScriptEngine = 0;
- bool hasJSReturn( const string& code ){
+ bool hasJSReturn( const string& code ) {
size_t x = code.find( "return" );
if ( x == string::npos )
return false;
- return
+ return
( x == 0 || ! isalpha( code[x-1] ) ) &&
! isalpha( code[x+6] );
}
- const char * jsSkipWhiteSpace( const char * raw ){
- while ( raw[0] ){
+ const char * jsSkipWhiteSpace( const char * raw ) {
+ while ( raw[0] ) {
while (isspace(*raw)) {
raw++;
}
-
+
if ( raw[0] != '/' || raw[1] != '/' )
break;
-
+
while ( raw[0] && raw[0] != '\n' )
raw++;
}
return raw;
}
}
-
+
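The pooling contract above in one sketch: getPooledScope() wraps a real Scope in a PooledScope, and the wrapper's destructor hands the scope back through ScopeCache::done(), which reset()s it and parks it on a per-pool free list capped at ten entries. Assuming globalScriptEngine is already set up:

    void pooledScopeDemo() {
        auto_ptr<Scope> s = globalScriptEngine->getPooledScope( "demo" );
        s->invokeSafe( "x = 1 + 1;" , BSONObj() );   // runs inside the pooled scope
    }   // ~PooledScope: the real scope is reset and parked for the next caller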
diff --git a/scripting/engine.h b/scripting/engine.h
index 6a6780e09fa..62afd77cccd 100644
--- a/scripting/engine.h
+++ b/scripting/engine.h
@@ -44,35 +44,35 @@ namespace mongo {
public:
Scope();
virtual ~Scope();
-
+
virtual void reset() = 0;
virtual void init( const BSONObj * data ) = 0;
- void init( const char * data ){
+ void init( const char * data ) {
BSONObj o( data , 0 );
init( &o );
}
-
+
virtual void localConnect( const char * dbName ) = 0;
virtual void externalSetup() = 0;
class NoDBAccess {
Scope * _s;
public:
- NoDBAccess( Scope * s ){
+ NoDBAccess( Scope * s ) {
_s = s;
}
- ~NoDBAccess(){
+ ~NoDBAccess() {
_s->rename( "____db____" , "db" );
}
};
- NoDBAccess disableDBAccess( const char * why ){
+ NoDBAccess disableDBAccess( const char * why ) {
rename( "db" , "____db____" );
return NoDBAccess( this );
}
virtual double getNumber( const char *field ) = 0;
- virtual int getNumberInt( const char *field ){ return (int)getNumber( field ); }
- virtual long long getNumberLongLong( const char *field ){ return (long long)getNumber( field ); }
+ virtual int getNumberInt( const char *field ) { return (int)getNumber( field ); }
+ virtual long long getNumberLongLong( const char *field ) { return (long long)getNumber( field ); }
virtual string getString( const char *field ) = 0;
virtual bool getBoolean( const char *field ) = 0;
virtual BSONObj getObject( const char *field ) = 0;
@@ -87,39 +87,39 @@ namespace mongo {
virtual void setObject( const char *field , const BSONObj& obj , bool readOnly=true ) = 0;
virtual void setBoolean( const char *field , bool val ) = 0;
virtual void setThis( const BSONObj * obj ) = 0;
-
+
virtual ScriptingFunction createFunction( const char * code );
-
+
virtual void rename( const char * from , const char * to ) = 0;
/**
* @return 0 on success
*/
virtual int invoke( ScriptingFunction func , const BSONObj& args, int timeoutMs = 0 , bool ignoreReturn = false ) = 0;
- void invokeSafe( ScriptingFunction func , const BSONObj& args, int timeoutMs = 0 ){
+ void invokeSafe( ScriptingFunction func , const BSONObj& args, int timeoutMs = 0 ) {
int res = invoke( func , args , timeoutMs );
if ( res == 0 )
return;
throw UserException( 9004 , (string)"invoke failed: " + getError() );
}
virtual string getError() = 0;
-
+
int invoke( const char* code , const BSONObj& args, int timeoutMs = 0 );
- void invokeSafe( const char* code , const BSONObj& args, int timeoutMs = 0 ){
+ void invokeSafe( const char* code , const BSONObj& args, int timeoutMs = 0 ) {
if ( invoke( code , args , timeoutMs ) == 0 )
return;
throw UserException( 9005 , (string)"invoke failed: " + getError() );
}
virtual bool exec( const StringData& code , const string& name , bool printResult , bool reportError , bool assertOnError, int timeoutMs = 0 ) = 0;
- virtual void execSetup( const StringData& code , const string& name = "setup" ){
+ virtual void execSetup( const StringData& code , const string& name = "setup" ) {
exec( code , name , false , true , true , 0 );
}
- void execSetup( const JSFile& file){
+ void execSetup( const JSFile& file) {
execSetup(file.source, file.name);
}
- void execCoreFiles(){
+ void execCoreFiles() {
// keeping same order as in SConstruct
execSetup(JSFiles::utils);
execSetup(JSFiles::db);
@@ -130,25 +130,25 @@ namespace mongo {
}
virtual bool execFile( const string& filename , bool printResult , bool reportError , bool assertOnError, int timeoutMs = 0 );
-
+
virtual void injectNative( const char *field, NativeFunction func ) = 0;
virtual void gc() = 0;
void loadStored( bool ignoreNotConnected = false );
-
+
/**
if any changes are made to .system.js, call this
right now its just global - slightly inefficient, but a lot simpler
*/
static void storedFuncMod();
-
- static int getNumScopes(){
+
+ static int getNumScopes() {
return _numScopes;
}
-
+
static void validateObjectIdString( const string &str );
-
+
protected:
virtual ScriptingFunction _createFunction( const char * code ) = 0;
@@ -161,16 +161,16 @@ namespace mongo {
static int _numScopes;
};
-
+
void installGlobalUtils( Scope& scope );
class DBClientWithCommands;
-
+
class ScriptEngine : boost::noncopyable {
public:
ScriptEngine();
virtual ~ScriptEngine();
-
+
virtual Scope * newScope() {
Scope *s = createScope();
if ( s && _scopeInitCallback )
@@ -178,19 +178,19 @@ namespace mongo {
installGlobalUtils( *s );
return s;
}
-
+
virtual void runTest() = 0;
-
+
virtual bool utf8Ok() const = 0;
static void setup();
auto_ptr<Scope> getPooledScope( const string& pool );
void threadDone();
-
+
struct Unlocker { virtual ~Unlocker() {} };
virtual auto_ptr<Unlocker> newThreadUnlocker() { return auto_ptr< Unlocker >( new Unlocker ); }
-
+
void setScopeInitCallback( void ( *func )( Scope & ) ) { _scopeInitCallback = func; }
static void setConnectCallback( void ( *func )( DBClientWithCommands& ) ) { _connectCallback = func; }
static void runConnectCallback( DBClientWithCommands &c ) {
@@ -200,18 +200,18 @@ namespace mongo {
// engine implementation may either respond to interrupt events or
// poll for interrupts
-
+
// the interrupt functions must not wait indefinitely on a lock
virtual void interrupt( unsigned opSpec ) {}
virtual void interruptAll() {}
-
+
static void setGetInterruptSpecCallback( unsigned ( *func )() ) { _getInterruptSpecCallback = func; }
static bool haveGetInterruptSpecCallback() { return _getInterruptSpecCallback; }
static unsigned getInterruptSpec() {
massert( 13474, "no _getInterruptSpecCallback", _getInterruptSpecCallback );
return _getInterruptSpecCallback();
}
-
+
static void setCheckInterruptCallback( const char * ( *func )() ) { _checkInterruptCallback = func; }
static bool haveCheckInterruptCallback() { return _checkInterruptCallback; }
static const char * checkInterrupt() {
@@ -221,10 +221,10 @@ namespace mongo {
const char *r = checkInterrupt();
return r && r[ 0 ];
}
-
+
protected:
virtual Scope * createScope() = 0;
-
+
private:
void ( *_scopeInitCallback )( Scope & );
static void ( *_connectCallback )( DBClientWithCommands & );
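disableDBAccess() above is a rename-based RAII guard: "db" is hidden as "____db____" for the guard's lifetime and renamed back in ~NoDBAccess(). A usage sketch (function and script names hypothetical):

    void runUntrusted( Scope& s ) {
        Scope::NoDBAccess guard = s.disableDBAccess( "running untrusted code" );
        s.invokeSafe( "userFunc();" , BSONObj() );   // the script sees no "db"
    }   // ~NoDBAccess fires here and restores "db"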
diff --git a/scripting/engine_java.cpp b/scripting/engine_java.cpp
index dacf5325322..fc8945fc5a1 100644
--- a/scripting/engine_java.cpp
+++ b/scripting/engine_java.cpp
@@ -55,19 +55,19 @@ namespace mongo {
no tss cleanup on windows for boost lib?
we don't care for now esp on windows only
- the boost source says:
-
- This function's sole purpose is to cause a link error in cases where
- automatic tss cleanup is not implemented by Boost.Threads as a
- reminder that user code is responsible for calling the necessary
- functions at the appropriate times (and for implementing an a
- tss_cleanup_implemented() function to eliminate the linker's
- missing symbol error).
-
- If Boost.Threads later implements automatic tss cleanup in cases
- where it currently doesn't (which is the plan), the duplicate
- symbol error will warn the user that their custom solution is no
- longer needed and can be removed.
+ the boost source says:
+
+ This function's sole purpose is to cause a link error in cases where
+ automatic tss cleanup is not implemented by Boost.Threads as a
+ reminder that user code is responsible for calling the necessary
+ functions at the appropriate times (and for implementing an a
+ tss_cleanup_implemented() function to eliminate the linker's
+ missing symbol error).
+
+ If Boost.Threads later implements automatic tss cleanup in cases
+ where it currently doesn't (which is the plan), the duplicate
+ symbol error will warn the user that their custom solution is no
+ longer needed and can be removed.
*/
extern "C" void tss_cleanup_implemented(void) {
//out() << "tss_cleanup_implemented called" << endl;
@@ -185,10 +185,10 @@ namespace mongo {
if ( res ) {
log() << "using classpath: " << q << endl;
log()
- << " res : " << (unsigned) res << " "
- << "_jvm : " << _jvm << " "
- << "_env : " << _mainEnv << " "
- << endl;
+ << " res : " << (unsigned) res << " "
+ << "_jvm : " << _jvm << " "
+ << "_env : " << _mainEnv << " "
+ << endl;
problem() << "Couldn't create JVM res:" << (int) res << " terminating" << endl;
log() << "(try --nojni if you do not require that functionality)" << endl;
exit(22);
@@ -397,12 +397,11 @@ namespace mongo {
return retStr;
}
- BSONObj JavaJSImpl::scopeGetObject( jlong id , const char * field )
- {
+ BSONObj JavaJSImpl::scopeGetObject( jlong id , const char * field ) {
jstring s1 = _getEnv()->NewStringUTF( field );
int guess = _getEnv()->CallStaticIntMethod( _dbhook , _scopeGuessObjectSize , id , _getEnv()->NewStringUTF( field ) );
_getEnv()->DeleteLocalRef( s1 );
-
+
if ( guess == 0 )
return BSONObj();
@@ -471,12 +470,12 @@ namespace mongo {
return env;
}
- Scope * JavaJSImpl::createScope(){
+ Scope * JavaJSImpl::createScope() {
return new JavaScope();
}
- void ScriptEngine::setup(){
- if ( ! JavaJS ){
+ void ScriptEngine::setup() {
+ if ( ! JavaJS ) {
JavaJS = new JavaJSImpl();
globalScriptEngine = JavaJS;
}
@@ -564,40 +563,40 @@ namespace mongo {
if ( ! possible.size() ) {
possible.push_back( "./" );
possible.push_back( "../" );
-
+
log(2) << "dbExecCommand: " << dbExecCommand << endl;
-
+
string dbDir = dbExecCommand;
#ifdef WIN32
- if ( dbDir.find( "\\" ) != string::npos ){
+ if ( dbDir.find( "\\" ) != string::npos ) {
dbDir = dbDir.substr( 0 , dbDir.find_last_of( "\\" ) );
}
else {
dbDir = ".";
}
#else
- if ( dbDir.find( "/" ) != string::npos ){
+ if ( dbDir.find( "/" ) != string::npos ) {
dbDir = dbDir.substr( 0 , dbDir.find_last_of( "/" ) );
}
else {
bool found = false;
-
- if ( getenv( "PATH" ) ){
+
+ if ( getenv( "PATH" ) ) {
string s = getenv( "PATH" );
s += ":";
pcrecpp::StringPiece input( s );
string dir;
pcrecpp::RE re("(.*?):");
- while ( re.Consume( &input, &dir ) ){
+ while ( re.Consume( &input, &dir ) ) {
string test = dir + "/" + dbExecCommand;
- if ( boost::filesystem::exists( test ) ){
- while ( boost::filesystem::symbolic_link_exists( test ) ){
+ if ( boost::filesystem::exists( test ) ) {
+ while ( boost::filesystem::symbolic_link_exists( test ) ) {
char tmp[2048];
int len = readlink( test.c_str() , tmp , 2048 );
tmp[len] = 0;
log(5) << " symlink " << test << " -->> " << tmp << endl;
test = tmp;
-
+
dir = test.substr( 0 , test.rfind( "/" ) );
}
dbDir = dir;
@@ -606,12 +605,12 @@ namespace mongo {
}
}
}
-
+
if ( ! found )
dbDir = ".";
}
#endif
-
+
log(2) << "dbDir [" << dbDir << "]" << endl;
possible.push_back( ( dbDir + "/../lib/mongo/" ));
possible.push_back( ( dbDir + "/../lib64/mongo/" ));
@@ -624,7 +623,7 @@ namespace mongo {
for ( list<string>::iterator i = possible.begin() ; i != possible.end(); i++ ) {
const string temp = *i;
const string jarDir = ((string)temp) + "jars/";
-
+
log(5) << "possible jarDir [" << jarDir << "]" << endl;
path p(jarDir );
@@ -641,7 +640,7 @@ namespace mongo {
};
-
+
// ---
JNIEXPORT void JNICALL java_native_say(JNIEnv * env , jclass, jobject outBuffer ) {
@@ -692,7 +691,7 @@ namespace mongo {
jlong func1 = JavaJS.functionCreate( "foo = 5.6; bar = \"eliot\"; abc = { foo : 517 }; " );
- jassert( ! JavaJS.invoke( scope , func1 ) );
+ jassert( ! JavaJS.invoke( scope , func1 ) );
if ( debug ) out() << "func3 start" << endl;
@@ -757,7 +756,7 @@ namespace mongo {
assert( 12 == JavaJS.scopeGetNumber( scope , "return" ) );
}
-
+
#endif
} // namespace mongo
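
// A minimal sketch of the symlink-chasing idiom the jar-directory search
// above relies on (POSIX readlink(2)). Unlike the loop in the patch it
// bounds and NUL-terminates defensively; the function name is illustrative.

#include <unistd.h>
#include <string>

static std::string chaseSymlinks( std::string path ) {
    char buf[2048];
    ssize_t len;
    // readlink() does not NUL-terminate; it returns -1 for non-links,
    // which also terminates the loop.
    while ( ( len = readlink( path.c_str() , buf , sizeof( buf ) - 1 ) ) > 0 ) {
        buf[len] = '\0';
        path = buf;   // relative targets are kept as-is, as in the original
    }
    return path;
}
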
diff --git a/scripting/engine_java.h b/scripting/engine_java.h
index 5c6bc3bc495..b8245ba6f22 100644
--- a/scripting/engine_java.h
+++ b/scripting/engine_java.h
@@ -163,10 +163,10 @@ namespace mongo {
JavaJS->scopeInit( s , o );
}
- void localConnect( const char * dbName ){
+ void localConnect( const char * dbName ) {
setString("$client", dbName );
}
-
+
double getNumber(const char *field) {
return JavaJS->scopeGetNumber(s,field);
}
@@ -183,7 +183,7 @@ namespace mongo {
return JavaJS->scopeGetType(s,field);
}
- void setThis( const BSONObj * obj ){
+ void setThis( const BSONObj * obj ) {
JavaJS->scopeSetThis( s , obj );
}
@@ -200,17 +200,17 @@ namespace mongo {
void setBoolean(const char *field, bool val ) {
JavaJS->scopeSetBoolean(s,field,val);
}
-
- ScriptingFunction createFunction( const char * code ){
+
+ ScriptingFunction createFunction( const char * code ) {
return JavaJS->functionCreate( code );
}
- int invoke( ScriptingFunction function , const BSONObj& args ){
+ int invoke( ScriptingFunction function , const BSONObj& args ) {
setObject( "args" , args , true );
return JavaJS->invoke(s,function);
}
-
- string getError(){
+
+ string getError() {
return getString( "error" );
}
diff --git a/scripting/engine_none.cpp b/scripting/engine_none.cpp
index 2320d0e909a..d13dbecc06e 100644
--- a/scripting/engine_none.cpp
+++ b/scripting/engine_none.cpp
@@ -18,7 +18,7 @@
#include "engine.h"
namespace mongo {
- void ScriptEngine::setup(){
+ void ScriptEngine::setup() {
// noop
}
}
diff --git a/scripting/engine_spidermonkey.cpp b/scripting/engine_spidermonkey.cpp
index bb2be367775..15cc4d4caa3 100644
--- a/scripting/engine_spidermonkey.cpp
+++ b/scripting/engine_spidermonkey.cpp
@@ -26,10 +26,10 @@
#endif
#define smuassert( cx , msg , val ) \
- if ( ! ( val ) ){ \
- JS_ReportError( cx , msg ); \
- return JS_FALSE; \
- }
+ if ( ! ( val ) ){ \
+ JS_ReportError( cx , msg ); \
+ return JS_FALSE; \
+ }
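
// Typical use of the macro above inside a JSNative callback (a sketch; the
// function and message are invented, and the SpiderMonkey typedefs are
// assumed to be in scope as they are in this file):

JSBool example_native( JSContext *cx , JSObject *obj , uintN argc , jsval *argv , jsval *rval ) {
    smuassert( cx , "example_native needs exactly 1 argument" , argc == 1 );
    *rval = JSVAL_VOID;   // only reached when the check passed
    return JS_TRUE;
}
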
#define CHECKNEWOBJECT(xx,ctx,w) \
if ( ! xx ){ \
@@ -40,20 +40,20 @@
massert( 13615 , "JS allocation failed, either memory leak or using too much memory" , newthing )
namespace mongo {
-
+
class InvalidUTF8Exception : public UserException {
public:
- InvalidUTF8Exception() : UserException( 9006 , "invalid utf8" ){
+ InvalidUTF8Exception() : UserException( 9006 , "invalid utf8" ) {
}
};
- string trim( string s ){
+ string trim( string s ) {
while ( s.size() && isspace( s[0] ) )
s = s.substr( 1 );
-
+
while ( s.size() && isspace( s[s.size()-1] ) )
s = s.substr( 0 , s.size() - 1 );
-
+
return s;
}
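
// trim() above copies the string once per stripped character; a single-pass
// equivalent (a sketch, assuming the same whitespace set as isspace() in
// the "C" locale):

#include <string>

static std::string trimOnce( const std::string& s ) {
    static const char ws[] = " \t\n\v\f\r";
    std::string::size_type b = s.find_first_not_of( ws );
    if ( b == std::string::npos )
        return std::string();   // empty or all whitespace
    std::string::size_type e = s.find_last_not_of( ws );
    return s.substr( b , e - b + 1 );
}
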
@@ -68,18 +68,18 @@ namespace mongo {
class BSONHolder {
public:
- BSONHolder( BSONObj obj ){
+ BSONHolder( BSONObj obj ) {
_obj = obj.getOwned();
_inResolve = false;
_modified = false;
_magic = 17;
}
-
- ~BSONHolder(){
+
+ ~BSONHolder() {
_magic = 18;
}
- void check(){
+ void check() {
uassert( 10212 , "holder magic value is wrong" , _magic == 17 && _obj.isValid() );
}
@@ -92,24 +92,24 @@ namespace mongo {
set<string> _removed;
bool _modified;
};
-
+
class BSONFieldIterator {
public:
- BSONFieldIterator( BSONHolder * holder ){
+ BSONFieldIterator( BSONHolder * holder ) {
set<string> added;
BSONObjIterator it( holder->_obj );
- while ( it.more() ){
+ while ( it.more() ) {
BSONElement e = it.next();
if ( holder->_removed.count( e.fieldName() ) )
continue;
_names.push_back( e.fieldName() );
added.insert( e.fieldName() );
}
-
- for ( list<string>::iterator i = holder->_extra.begin(); i != holder->_extra.end(); i++ ){
+
+ for ( list<string>::iterator i = holder->_extra.begin(); i != holder->_extra.end(); i++ ) {
if ( ! added.count( *i ) )
_names.push_back( *i );
}
@@ -117,11 +117,11 @@ namespace mongo {
_it = _names.begin();
}
- bool more(){
+ bool more() {
return _it != _names.end();
}
- string next(){
+ string next() {
string s = *_it;
_it++;
return s;
@@ -132,24 +132,24 @@ namespace mongo {
list<string>::iterator _it;
};
- BSONFieldIterator * BSONHolder::it(){
+ BSONFieldIterator * BSONHolder::it() {
return new BSONFieldIterator( this );
}
class TraverseStack {
public:
- TraverseStack(){
+ TraverseStack() {
_o = 0;
_parent = 0;
}
- TraverseStack( JSObject * o , const TraverseStack * parent ){
+ TraverseStack( JSObject * o , const TraverseStack * parent ) {
_o = o;
_parent = parent;
}
TraverseStack dive( JSObject * o ) const {
- if ( o ){
+ if ( o ) {
uassert( 13076 , (string)"recursive toObject" , ! has( o ) );
}
return TraverseStack( o , this );
@@ -158,7 +158,7 @@ namespace mongo {
int depth() const {
int d = 0;
const TraverseStack * s = _parent;
- while ( s ){
+ while ( s ) {
s = s->_parent;
d++;
}
@@ -168,12 +168,12 @@ namespace mongo {
bool isTop() const {
return _parent == 0;
}
-
+
bool has( JSObject * o ) const {
if ( ! o )
return false;
const TraverseStack * s = this;
- while ( s ){
+ while ( s ) {
if ( s->_o == o )
return true;
s = s->_parent;
@@ -187,11 +187,11 @@ namespace mongo {
class Convertor : boost::noncopyable {
public:
- Convertor( JSContext * cx ){
+ Convertor( JSContext * cx ) {
_context = cx;
}
- string toString( JSString * so ){
+ string toString( JSString * so ) {
jschar * s = JS_GetStringChars( so );
size_t srclen = JS_GetStringLength( so );
if( srclen == 0 )
@@ -205,10 +205,10 @@ namespace mongo {
// units, but experiments suggest 8bit units expected. We allocate
// enough memory that either will work.
- if ( !JS_EncodeCharacters( _context , s , srclen , dst , &len) ){
+ if ( !JS_EncodeCharacters( _context , s , srclen , dst , &len) ) {
StringBuilder temp;
temp << "Not proper UTF-16: ";
- for ( size_t i=0; i<srclen; i++ ){
+ for ( size_t i=0; i<srclen; i++ ) {
if ( i > 0 )
temp << ",";
temp << s[i];
@@ -224,7 +224,7 @@ namespace mongo {
return ss;
}
- string toString( jsval v ){
+ string toString( jsval v ) {
return toString( JS_ValueToString( _context , v ) );
}
@@ -233,27 +233,28 @@ namespace mongo {
boost::uint64_t val;
if ( hasProperty( o, "top" ) ) {
val =
- ( (boost::uint64_t)(boost::uint32_t)getNumber( o , "top" ) << 32 ) +
- ( boost::uint32_t)( getNumber( o , "bottom" ) );
- } else {
+ ( (boost::uint64_t)(boost::uint32_t)getNumber( o , "top" ) << 32 ) +
+ ( boost::uint32_t)( getNumber( o , "bottom" ) );
+ }
+ else {
val = (boost::uint64_t)(boost::int64_t) getNumber( o, "floatApprox" );
}
return val;
}
-
- double toNumber( jsval v ){
+
+ double toNumber( jsval v ) {
double d;
uassert( 10214 , "not a number" , JS_ValueToNumber( _context , v , &d ) );
return d;
}
- bool toBoolean( jsval v ){
+ bool toBoolean( jsval v ) {
JSBool b;
assert( JS_ValueToBoolean( _context, v , &b ) );
return b;
}
- OID toOID( jsval v ){
+ OID toOID( jsval v ) {
JSContext * cx = _context;
assert( JSVAL_IS_OID( v ) );
@@ -263,21 +264,21 @@ namespace mongo {
return oid;
}
- BSONObj toObject( JSObject * o , const TraverseStack& stack=TraverseStack() ){
+ BSONObj toObject( JSObject * o , const TraverseStack& stack=TraverseStack() ) {
if ( ! o )
return BSONObj();
- if ( JS_InstanceOf( _context , o , &bson_ro_class , 0 ) ){
+ if ( JS_InstanceOf( _context , o , &bson_ro_class , 0 ) ) {
BSONHolder * holder = GETHOLDER( _context , o );
assert( holder );
return holder->_obj.getOwned();
}
BSONObj orig;
- if ( JS_InstanceOf( _context , o , &bson_class , 0 ) ){
+ if ( JS_InstanceOf( _context , o , &bson_class , 0 ) ) {
BSONHolder * holder = GETHOLDER(_context,o);
assert( holder );
- if ( ! holder->_modified ){
+ if ( ! holder->_modified ) {
return holder->_obj;
}
orig = holder->_obj;
@@ -285,26 +286,26 @@ namespace mongo {
BSONObjBuilder b;
- if ( ! appendSpecialDBObject( this , b , "value" , OBJECT_TO_JSVAL( o ) , o ) ){
+ if ( ! appendSpecialDBObject( this , b , "value" , OBJECT_TO_JSVAL( o ) , o ) ) {
- if ( stack.isTop() ){
+ if ( stack.isTop() ) {
jsval theid = getProperty( o , "_id" );
- if ( ! JSVAL_IS_VOID( theid ) ){
+ if ( ! JSVAL_IS_VOID( theid ) ) {
append( b , "_id" , theid , EOO , stack.dive( o ) );
}
}
-
+
JSIdArray * properties = JS_Enumerate( _context , o );
assert( properties );
-
- for ( jsint i=0; i<properties->length; i++ ){
+
+ for ( jsint i=0; i<properties->length; i++ ) {
jsid id = properties->vector[i];
jsval nameval;
assert( JS_IdToValue( _context ,id , &nameval ) );
string name = toString( nameval );
if ( stack.isTop() && name == "_id" )
continue;
-
+
append( b , name , getProperty( o , name.c_str() ) , orig[name].type() , stack.dive( o ) );
}
@@ -314,34 +315,34 @@ namespace mongo {
return b.obj();
}
- BSONObj toObject( jsval v ){
+ BSONObj toObject( jsval v ) {
if ( JSVAL_IS_NULL( v ) ||
- JSVAL_IS_VOID( v ) )
+ JSVAL_IS_VOID( v ) )
return BSONObj();
uassert( 10215 , "not an object" , JSVAL_IS_OBJECT( v ) );
return toObject( JSVAL_TO_OBJECT( v ) );
}
- string getFunctionCode( JSFunction * func ){
+ string getFunctionCode( JSFunction * func ) {
return toString( JS_DecompileFunction( _context , func , 0 ) );
}
- string getFunctionCode( jsval v ){
+ string getFunctionCode( jsval v ) {
uassert( 10216 , "not a function" , JS_TypeOfValue( _context , v ) == JSTYPE_FUNCTION );
return getFunctionCode( JS_ValueToFunction( _context , v ) );
}
-
- void appendRegex( BSONObjBuilder& b , const string& name , string s ){
+
+ void appendRegex( BSONObjBuilder& b , const string& name , string s ) {
assert( s[0] == '/' );
s = s.substr(1);
string::size_type end = s.rfind( '/' );
b.appendRegex( name , s.substr( 0 , end ) , s.substr( end + 1 ) );
}
- void append( BSONObjBuilder& b , string name , jsval val , BSONType oldType = EOO , const TraverseStack& stack=TraverseStack() ){
+ void append( BSONObjBuilder& b , string name , jsval val , BSONType oldType = EOO , const TraverseStack& stack=TraverseStack() ) {
//cout << "name: " << name << "\t" << typeString( val ) << " oldType: " << oldType << endl;
- switch ( JS_TypeOfValue( _context , val ) ){
+ switch ( JS_TypeOfValue( _context , val ) ) {
case JSTYPE_VOID: b.appendUndefined( name ); break;
case JSTYPE_NULL: b.appendNull( name ); break;
@@ -359,12 +360,12 @@ namespace mongo {
case JSTYPE_OBJECT: {
JSObject * o = JSVAL_TO_OBJECT( val );
- if ( ! o || o == JSVAL_NULL ){
+ if ( ! o || o == JSVAL_NULL ) {
b.appendNull( name );
}
- else if ( ! appendSpecialDBObject( this , b , name , val , o ) ){
+ else if ( ! appendSpecialDBObject( this , b , name , val , o ) ) {
BSONObj sub = toObject( o , stack );
- if ( JS_IsArrayObject( _context , o ) ){
+ if ( JS_IsArrayObject( _context , o ) ) {
b.appendArray( name , sub );
}
else {
@@ -376,7 +377,7 @@ namespace mongo {
case JSTYPE_FUNCTION: {
string s = toString(val);
- if ( s[0] == '/' ){
+ if ( s[0] == '/' ) {
appendRegex( b , name , s );
}
else {
@@ -391,28 +392,28 @@ namespace mongo {
// ---------- to spider monkey ---------
- bool hasFunctionIdentifier( const string& code ){
+ bool hasFunctionIdentifier( const string& code ) {
if ( code.size() < 9 || code.find( "function" ) != 0 )
return false;
return code[8] == ' ' || code[8] == '(';
}
- bool isSimpleStatement( const string& code ){
+ bool isSimpleStatement( const string& code ) {
if ( hasJSReturn( code ) )
return false;
if ( code.find( ';' ) != string::npos &&
- code.find( ';' ) != code.rfind( ';' ) )
+ code.find( ';' ) != code.rfind( ';' ) )
return false;
-
+
if ( code.find( '\n') != string::npos )
return false;
if ( code.find( "for(" ) != string::npos ||
- code.find( "for (" ) != string::npos ||
- code.find( "while (" ) != string::npos ||
- code.find( "while(" ) != string::npos )
+ code.find( "for (" ) != string::npos ||
+ code.find( "while (" ) != string::npos ||
+ code.find( "while(" ) != string::npos )
return false;
return true;
@@ -420,14 +421,14 @@ namespace mongo {
void addRoot( JSFunction * f , const char * name );
- JSFunction * compileFunction( const char * code, JSObject * assoc = 0 ){
+ JSFunction * compileFunction( const char * code, JSObject * assoc = 0 ) {
const char * gcName = "unknown";
JSFunction * f = _compileFunction( code , assoc , gcName );
//addRoot( f , gcName );
return f;
}
- JSFunction * _compileFunction( const char * raw , JSObject * assoc , const char *& gcName ){
+ JSFunction * _compileFunction( const char * raw , JSObject * assoc , const char *& gcName ) {
if ( ! assoc )
assoc = JS_GetGlobalObject( _context );
@@ -441,9 +442,9 @@ namespace mongo {
fname << "_" << fnum++ << "_";
- if ( ! hasFunctionIdentifier( raw ) ){
+ if ( ! hasFunctionIdentifier( raw ) ) {
string s = raw;
- if ( isSimpleStatement( s ) ){
+ if ( isSimpleStatement( s ) ) {
s = "return " + s;
}
gcName = "cf anon";
@@ -452,23 +453,23 @@ namespace mongo {
}
string code = raw;
-
+
size_t start = code.find( '(' );
assert( start != string::npos );
-
+
fname << "_f_" << trim( code.substr( 9 , start - 9 ) );
code = code.substr( start + 1 );
size_t end = code.find( ')' );
assert( end != string::npos );
-
+
string paramString = trim( code.substr( 0 , end ) );
code = code.substr( end + 1 );
-
+
vector<string> params;
- while ( paramString.size() ){
+ while ( paramString.size() ) {
size_t c = paramString.find( ',' );
- if ( c == string::npos ){
+ if ( c == string::npos ) {
params.push_back( paramString );
break;
}
@@ -476,14 +477,14 @@ namespace mongo {
paramString = trim( paramString.substr( c + 1 ) );
paramString = trim( paramString );
}
-
+
boost::scoped_array<const char *> paramArray (new const char*[params.size()]);
for ( size_t i=0; i<params.size(); i++ )
paramArray[i] = params[i].c_str();
-
+
JSFunction * func = JS_CompileFunction( _context , assoc , fname.str().c_str() , params.size() , paramArray.get() , code.c_str() , code.size() , "nofile_b" , 0 );
- if ( ! func ){
+ if ( ! func ) {
log() << "compile failed for: " << raw << endl;
return 0;
}
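
// A worked trace of the name/parameter extraction above (counter value and
// spacing illustrative):
//
//   raw         = "function add( a , b ){ return a + b; }"
//   fname       = "_1__f_add"           // "_" + fnum + "_" then "_f_" + trimmed name
//   paramString = "a , b"               // text between '(' and ')'
//   params      = [ "a" , "b" ]         // split on ',' and trimmed
//   code        = "{ return a + b; }"   // remainder passed to JS_CompileFunction
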
@@ -492,31 +493,31 @@ namespace mongo {
}
- jsval toval( double d ){
+ jsval toval( double d ) {
jsval val;
assert( JS_NewNumberValue( _context, d , &val ) );
return val;
}
- jsval toval( const char * c ){
+ jsval toval( const char * c ) {
JSString * s = JS_NewStringCopyZ( _context , c );
if ( s )
return STRING_TO_JSVAL( s );
-
+
// possibly unicode, try manual
-
+
size_t len = strlen( c );
size_t dstlen = len * 4;
jschar * dst = (jschar*)malloc( dstlen );
-
+
JSBool res = JS_DecodeBytes( _context , c , len , dst, &dstlen );
- if ( res ){
+ if ( res ) {
s = JS_NewUCStringCopyN( _context , dst , dstlen );
}
free( dst );
- if ( ! res ){
+ if ( ! res ) {
tlog() << "decode failed. probably invalid utf-8 string [" << c << "]" << endl;
jsval v;
if ( JS_GetPendingException( _context , &v ) )
@@ -528,9 +529,9 @@ namespace mongo {
return STRING_TO_JSVAL( s );
}
- JSObject * toJSObject( const BSONObj * obj , bool readOnly=false ){
+ JSObject * toJSObject( const BSONObj * obj , bool readOnly=false ) {
static string ref = "$ref";
- if ( ref == obj->firstElement().fieldName() ){
+ if ( ref == obj->firstElement().fieldName() ) {
JSObject * o = JS_NewObject( _context , &dbref_class , NULL, NULL);
CHECKNEWOBJECT(o,_context,"toJSObject1");
assert( JS_SetPrivate( _context , o , (void*)(new BSONHolder( obj->getOwned() ) ) ) );
@@ -542,7 +543,7 @@ namespace mongo {
return o;
}
- jsval toval( const BSONObj* obj , bool readOnly=false ){
+ jsval toval( const BSONObj* obj , bool readOnly=false ) {
JSObject * o = toJSObject( obj , readOnly );
return OBJECT_TO_JSVAL( o );
}
@@ -550,7 +551,7 @@ namespace mongo {
void makeLongObj( long long n, JSObject * o ) {
boost::uint64_t val = (boost::uint64_t)n;
CHECKNEWOBJECT(o,_context,"NumberLong1");
- setProperty( o , "floatApprox" , toval( (double)(boost::int64_t)( val ) ) );
+ setProperty( o , "floatApprox" , toval( (double)(boost::int64_t)( val ) ) );
if ( (boost::int64_t)val != (boost::int64_t)(double)(boost::int64_t)( val ) ) {
// using 2 doubles here instead of a single double because certain double
// bit patterns represent undefined values and sm might trash them
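
// A round-trip sketch of the two-double encoding described above: each
// 32-bit half is exactly representable as a double, so no stored bit
// pattern can collide with the values the engine reserves.

#include <boost/cstdint.hpp>
#include <cassert>

static void splitAndRejoin( boost::uint64_t val ) {
    double top    = (double)(boost::uint32_t)( val >> 32 );
    double bottom = (double)(boost::uint32_t)( val & 0x00000000ffffffffULL );
    boost::uint64_t back =
        ( (boost::uint64_t)(boost::uint32_t)top << 32 ) +
        (boost::uint32_t)bottom;   // same math as the top/bottom reconstruction earlier in this file
    assert( back == val );         // holds for every 64-bit value
}
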
@@ -558,16 +559,16 @@ namespace mongo {
setProperty( o , "bottom" , toval( (double)(boost::uint32_t)( val & 0x00000000ffffffff ) ) );
}
}
-
+
jsval toval( long long n ) {
JSObject * o = JS_NewObject( _context , &numberlong_class , 0 , 0 );
makeLongObj( n, o );
return OBJECT_TO_JSVAL( o );
}
-
- jsval toval( const BSONElement& e ){
- switch( e.type() ){
+ jsval toval( const BSONElement& e ) {
+
+ switch( e.type() ) {
case EOO:
case jstNULL:
case Undefined:
@@ -580,15 +581,15 @@ namespace mongo {
return toval( e.valuestr() );
case Bool:
return e.boolean() ? JSVAL_TRUE : JSVAL_FALSE;
- case Object:{
+ case Object: {
BSONObj embed = e.embeddedObject().getOwned();
return toval( &embed );
}
- case Array:{
+ case Array: {
BSONObj embed = e.embeddedObject().getOwned();
- if ( embed.isEmpty() ){
+ if ( embed.isEmpty() ) {
return OBJECT_TO_JSVAL( JS_NewArrayObject( _context , 0 , 0 ) );
}
@@ -599,31 +600,31 @@ namespace mongo {
jsval myarray = OBJECT_TO_JSVAL( array );
- for ( int i=0; i<n; i++ ){
+ for ( int i=0; i<n; i++ ) {
jsval v = toval( embed[i] );
assert( JS_SetElement( _context , array , i , &v ) );
}
return myarray;
}
- case jstOID:{
+ case jstOID: {
OID oid = e.__oid();
JSObject * o = JS_NewObject( _context , &object_id_class , 0 , 0 );
CHECKNEWOBJECT(o,_context,"jstOID");
setProperty( o , "str" , toval( oid.str().c_str() ) );
return OBJECT_TO_JSVAL( o );
}
- case RegEx:{
+ case RegEx: {
const char * flags = e.regexFlags();
uintN flagNumber = 0;
- while ( *flags ){
- switch ( *flags ){
+ while ( *flags ) {
+ switch ( *flags ) {
case 'g': flagNumber |= JSREG_GLOB; break;
case 'i': flagNumber |= JSREG_FOLD; break;
case 'm': flagNumber |= JSREG_MULTILINE; break;
//case 'y': flagNumber |= JSREG_STICKY; break;
-
- default:
+
+ default:
log() << "warning: unknown regex flag:" << *flags << endl;
}
flags++;
@@ -633,17 +634,17 @@ namespace mongo {
assert( r );
return OBJECT_TO_JSVAL( r );
}
- case Code:{
+ case Code: {
JSFunction * func = compileFunction( e.valuestr() );
if ( func )
return OBJECT_TO_JSVAL( JS_GetFunctionObject( func ) );
return JSVAL_NULL;
}
- case CodeWScope:{
+ case CodeWScope: {
JSFunction * func = compileFunction( e.codeWScopeCode() );
BSONObj extraScope = e.codeWScopeObject();
- if ( ! extraScope.isEmpty() ){
+ if ( ! extraScope.isEmpty() ) {
log() << "warning: CodeWScope doesn't transfer to db.eval" << endl;
}
@@ -680,7 +681,7 @@ namespace mongo {
setProperty( o , "id" , OBJECT_TO_JSVAL( oid ) );
return OBJECT_TO_JSVAL( o );
}
- case BinData:{
+ case BinData: {
JSObject * o = JS_NewObject( _context , &bindata_class , 0 , 0 );
CHECKNEWOBJECT(o,_context,"Bindata_BinData1");
int len;
@@ -701,55 +702,55 @@ namespace mongo {
// ------- object helpers ------
- JSObject * getJSObject( JSObject * o , const char * name ){
+ JSObject * getJSObject( JSObject * o , const char * name ) {
jsval v;
assert( JS_GetProperty( _context , o , name , &v ) );
return JSVAL_TO_OBJECT( v );
}
- JSObject * getGlobalObject( const char * name ){
+ JSObject * getGlobalObject( const char * name ) {
return getJSObject( JS_GetGlobalObject( _context ) , name );
}
- JSObject * getGlobalPrototype( const char * name ){
+ JSObject * getGlobalPrototype( const char * name ) {
return getJSObject( getGlobalObject( name ) , "prototype" );
}
- bool hasProperty( JSObject * o , const char * name ){
+ bool hasProperty( JSObject * o , const char * name ) {
JSBool res;
assert( JS_HasProperty( _context , o , name , & res ) );
return res;
}
- jsval getProperty( JSObject * o , const char * field ){
+ jsval getProperty( JSObject * o , const char * field ) {
 uassert( 10219 , "object passed to getProperty is null" , o );
jsval v;
assert( JS_GetProperty( _context , o , field , &v ) );
return v;
}
- void setProperty( JSObject * o , const char * field , jsval v ){
+ void setProperty( JSObject * o , const char * field , jsval v ) {
assert( JS_SetProperty( _context , o , field , &v ) );
}
- string typeString( jsval v ){
+ string typeString( jsval v ) {
JSType t = JS_TypeOfValue( _context , v );
return JS_GetTypeName( _context , t );
}
- bool getBoolean( JSObject * o , const char * field ){
+ bool getBoolean( JSObject * o , const char * field ) {
return toBoolean( getProperty( o , field ) );
}
- double getNumber( JSObject * o , const char * field ){
+ double getNumber( JSObject * o , const char * field ) {
return toNumber( getProperty( o , field ) );
}
- string getString( JSObject * o , const char * field ){
+ string getString( JSObject * o , const char * field ) {
return toString( getProperty( o , field ) );
}
- JSClass * getClass( JSObject * o , const char * field ){
+ JSClass * getClass( JSObject * o , const char * field ) {
jsval v;
assert( JS_GetProperty( _context , o , field , &v ) );
if ( ! JSVAL_IS_OBJECT( v ) )
@@ -763,25 +764,25 @@ namespace mongo {
};
- void bson_finalize( JSContext * cx , JSObject * obj ){
+ void bson_finalize( JSContext * cx , JSObject * obj ) {
BSONHolder * o = GETHOLDER( cx , obj );
- if ( o ){
+ if ( o ) {
delete o;
assert( JS_SetPrivate( cx , obj , 0 ) );
}
}
- JSBool bson_enumerate( JSContext *cx, JSObject *obj, JSIterateOp enum_op, jsval *statep, jsid *idp ){
+ JSBool bson_enumerate( JSContext *cx, JSObject *obj, JSIterateOp enum_op, jsval *statep, jsid *idp ) {
BSONHolder * o = GETHOLDER( cx , obj );
-
- if ( enum_op == JSENUMERATE_INIT ){
- if ( o ){
+
+ if ( enum_op == JSENUMERATE_INIT ) {
+ if ( o ) {
BSONFieldIterator * it = o->it();
*statep = PRIVATE_TO_JSVAL( it );
}
else {
- *statep = 0;
+ *statep = 0;
}
if ( idp )
*idp = JSVAL_ZERO;
@@ -789,13 +790,13 @@ namespace mongo {
}
BSONFieldIterator * it = (BSONFieldIterator*)JSVAL_TO_PRIVATE( *statep );
- if ( ! it ){
+ if ( ! it ) {
*statep = 0;
return JS_TRUE;
}
- if ( enum_op == JSENUMERATE_NEXT ){
- if ( it->more() ){
+ if ( enum_op == JSENUMERATE_NEXT ) {
+ if ( it->more() ) {
string name = it->next();
Convertor c(cx);
assert( JS_ValueToId( cx , c.toval( name.c_str() ) , idp ) );
@@ -807,7 +808,7 @@ namespace mongo {
return JS_TRUE;
}
- if ( enum_op == JSENUMERATE_DESTROY ){
+ if ( enum_op == JSENUMERATE_DESTROY ) {
if ( it )
delete it;
return JS_TRUE;
@@ -817,9 +818,9 @@ namespace mongo {
return JS_FALSE;
}
- JSBool noaccess( JSContext *cx, JSObject *obj, jsval idval, jsval *vp){
+ JSBool noaccess( JSContext *cx, JSObject *obj, jsval idval, jsval *vp) {
BSONHolder * holder = GETHOLDER( cx , obj );
- if ( ! holder ){
+ if ( ! holder ) {
// in init code still
return JS_TRUE;
}
@@ -836,7 +837,7 @@ namespace mongo {
JSCLASS_NO_OPTIONAL_MEMBERS
};
- JSBool bson_cons( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
+ JSBool bson_cons( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
cerr << "bson_cons : shouldn't be here!" << endl;
JS_ReportError( cx , "can't construct bson object" );
return JS_FALSE;
@@ -845,26 +846,26 @@ namespace mongo {
JSFunctionSpec bson_functions[] = {
{ 0 }
};
-
- JSBool bson_add_prop( JSContext *cx, JSObject *obj, jsval idval, jsval *vp){
+
+ JSBool bson_add_prop( JSContext *cx, JSObject *obj, jsval idval, jsval *vp) {
BSONHolder * holder = GETHOLDER( cx , obj );
- if ( ! holder ){
+ if ( ! holder ) {
// static init
return JS_TRUE;
}
- if ( ! holder->_inResolve ){
+ if ( ! holder->_inResolve ) {
Convertor c(cx);
string name = c.toString( idval );
- if ( holder->_obj[name].eoo() ){
+ if ( holder->_obj[name].eoo() ) {
holder->_extra.push_back( name );
}
holder->_modified = true;
}
return JS_TRUE;
}
-
- JSBool mark_modified( JSContext *cx, JSObject *obj, jsval idval, jsval *vp){
+
+ JSBool mark_modified( JSContext *cx, JSObject *obj, jsval idval, jsval *vp) {
Convertor c(cx);
BSONHolder * holder = GETHOLDER( cx , obj );
if ( !holder ) // needed when we're messing with DBRef.prototype
@@ -875,8 +876,8 @@ namespace mongo {
holder->_removed.erase( c.toString( idval ) );
return JS_TRUE;
}
-
- JSBool mark_modified_remove( JSContext *cx, JSObject *obj, jsval idval, jsval *vp){
+
+ JSBool mark_modified_remove( JSContext *cx, JSObject *obj, jsval idval, jsval *vp) {
Convertor c(cx);
BSONHolder * holder = GETHOLDER( cx , obj );
if ( holder->_inResolve )
@@ -902,10 +903,10 @@ namespace mongo {
// --- global helpers ---
- JSBool native_print( JSContext * cx , JSObject * obj , uintN argc, jsval *argv, jsval *rval ){
+ JSBool native_print( JSContext * cx , JSObject * obj , uintN argc, jsval *argv, jsval *rval ) {
stringstream ss;
Convertor c( cx );
- for ( uintN i=0; i<argc; i++ ){
+ for ( uintN i=0; i<argc; i++ ) {
if ( i > 0 )
ss << " ";
ss << c.toString( argv[i] );
@@ -915,32 +916,32 @@ namespace mongo {
return JS_TRUE;
}
- JSBool native_helper( JSContext *cx , JSObject *obj , uintN argc, jsval *argv , jsval *rval ){
+ JSBool native_helper( JSContext *cx , JSObject *obj , uintN argc, jsval *argv , jsval *rval ) {
Convertor c(cx);
-
+
NativeFunction func = (NativeFunction)((long long)c.getNumber( obj , "x" ) );
assert( func );
-
+
BSONObj a;
- if ( argc > 0 ){
+ if ( argc > 0 ) {
BSONObjBuilder args;
- for ( uintN i=0; i<argc; i++ ){
+ for ( uintN i=0; i<argc; i++ ) {
c.append( args , args.numStr( i ) , argv[i] );
}
-
+
a = args.obj();
}
-
+
BSONObj out;
try {
out = func( a );
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
JS_ReportError( cx , e.what() );
return JS_FALSE;
}
-
- if ( out.isEmpty() ){
+
+ if ( out.isEmpty() ) {
*rval = JSVAL_VOID;
}
else {
@@ -952,7 +953,7 @@ namespace mongo {
JSBool native_load( JSContext *cx , JSObject *obj , uintN argc, jsval *argv , jsval *rval );
- JSBool native_gc( JSContext *cx , JSObject *obj , uintN argc, jsval *argv , jsval *rval ){
+ JSBool native_gc( JSContext *cx , JSObject *obj , uintN argc, jsval *argv , jsval *rval ) {
JS_GC( cx );
return JS_TRUE;
}
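
// native_helper() above recovers a C function pointer that injectNative()
// (later in this file) stored as a JS number via (double)(long long)func.
// A sketch of why that round-trip is exact while addresses fit in 53 bits:

#include <cassert>

typedef int (*ExampleFn)( int );   // illustrative signature

static void throughDouble( ExampleFn f ) {
    double d = (double)(long long)f;            // what injectNative() stores
    ExampleFn back = (ExampleFn)(long long)d;   // what native_helper() reads back
    assert( back == f );                        // exact for addresses < 2^53
}
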
@@ -968,16 +969,16 @@ namespace mongo {
// ----END global helpers ----
// Object helpers
-
- JSBool bson_get_size(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+
+ JSBool bson_get_size(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
if ( argc != 1 || !JSVAL_IS_OBJECT( argv[ 0 ] ) ) {
JS_ReportError( cx , "bsonsize requires one valid object" );
return JS_FALSE;
}
Convertor c(cx);
-
- if ( argv[0] == JSVAL_VOID || argv[0] == JSVAL_NULL ){
+
+ if ( argv[0] == JSVAL_VOID || argv[0] == JSVAL_NULL ) {
*rval = c.toval( 0.0 );
return JS_TRUE;
}
@@ -987,9 +988,9 @@ namespace mongo {
double size = 0;
if ( JS_InstanceOf( cx , o , &bson_ro_class , 0 ) ||
- JS_InstanceOf( cx , o , &bson_class , 0 ) ){
+ JS_InstanceOf( cx , o , &bson_class , 0 ) ) {
BSONHolder * h = GETHOLDER( cx , o );
- if ( h ){
+ if ( h ) {
size = h->_obj.objsize();
}
}
@@ -997,36 +998,36 @@ namespace mongo {
BSONObj temp = c.toObject( o );
size = temp.objsize();
}
-
+
*rval = c.toval( size );
- return JS_TRUE;
+ return JS_TRUE;
}
-
+
JSFunctionSpec objectHelpers[] = {
- { "bsonsize" , &bson_get_size , 1 , 0 , 0 } ,
- { 0 , 0 , 0 , 0 , 0 }
+ { "bsonsize" , &bson_get_size , 1 , 0 , 0 } ,
+ { 0 , 0 , 0 , 0 , 0 }
};
-
+
// end Object helpers
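
// Usage of the helper registered above, from the shell (an illustrative
// session; 16 = 4 length + 1 type + "x"+NUL (2) + 8 double + 1 EOO):
//
//   > bsonsize( { x : 1 } )
//   16
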
- JSBool resolveBSONField( JSContext *cx, JSObject *obj, jsval id, uintN flags, JSObject **objp ){
+ JSBool resolveBSONField( JSContext *cx, JSObject *obj, jsval id, uintN flags, JSObject **objp ) {
assert( JS_EnterLocalRootScope( cx ) );
Convertor c( cx );
BSONHolder * holder = GETHOLDER( cx , obj );
- if ( ! holder ){
+ if ( ! holder ) {
// static init
*objp = 0;
JS_LeaveLocalRootScope( cx );
return JS_TRUE;
}
holder->check();
-
+
string s = c.toString( id );
BSONElement e = holder->_obj[ s.c_str() ];
-
- if ( e.type() == EOO || holder->_removed.count( s ) ){
+
+ if ( e.type() == EOO || holder->_removed.count( s ) ) {
*objp = 0;
JS_LeaveLocalRootScope( cx );
return JS_TRUE;
@@ -1046,12 +1047,12 @@ namespace mongo {
holder->_inResolve = true;
assert( JS_SetProperty( cx , obj , s.c_str() , &val ) );
holder->_inResolve = false;
-
- if ( val != JSVAL_NULL && val != JSVAL_VOID && JSVAL_IS_OBJECT( val ) ){
+
+ if ( val != JSVAL_NULL && val != JSVAL_VOID && JSVAL_IS_OBJECT( val ) ) {
// TODO: this is a hack to get around sub objects being modified
JSObject * oo = JSVAL_TO_OBJECT( val );
- if ( JS_InstanceOf( cx , oo , &bson_class , 0 ) ||
- JS_IsArrayObject( cx , oo ) ){
+ if ( JS_InstanceOf( cx , oo , &bson_class , 0 ) ||
+ JS_IsArrayObject( cx , oo ) ) {
holder->_modified = true;
}
}
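
// The manual _inResolve flips above keep bson_add_prop/mark_modified from
// treating the lazily materialized property as a user write. An RAII
// variant of the same guard (a sketch, not what the patch uses):

struct ResolveGuard {
    bool& _flag;
    ResolveGuard( bool& flag ) : _flag( flag ) { _flag = true; }
    ~ResolveGuard() { _flag = false; }   // restored on every exit path
};
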
@@ -1067,15 +1068,15 @@ namespace mongo {
class SMEngine : public ScriptEngine {
public:
- SMEngine(){
+ SMEngine() {
#ifdef SM18
JS_SetCStringsAreUTF8();
#endif
_runtime = JS_NewRuntime(8L * 1024L * 1024L);
uassert( 10221 , "JS_NewRuntime failed" , _runtime );
-
- if ( ! utf8Ok() ){
+
+ if ( ! utf8Ok() ) {
log() << "*** warning: spider monkey build without utf8 support. consider rebuilding with utf8 support" << endl;
}
@@ -1084,7 +1085,7 @@ namespace mongo {
uassert( 10222 , "assert not being executed" , x == 1 );
}
- ~SMEngine(){
+ ~SMEngine() {
JS_DestroyRuntime( _runtime );
JS_ShutDown();
}
@@ -1109,7 +1110,7 @@ namespace mongo {
SMEngine * globalSMEngine;
- void ScriptEngine::setup(){
+ void ScriptEngine::setup() {
globalSMEngine = new SMEngine();
globalScriptEngine = globalSMEngine;
}
@@ -1118,11 +1119,11 @@ namespace mongo {
// ------ scope ------
- JSBool no_gc(JSContext *cx, JSGCStatus status){
+ JSBool no_gc(JSContext *cx, JSGCStatus status) {
return JS_FALSE;
}
- JSBool yes_gc(JSContext *cx, JSGCStatus status){
+ JSBool yes_gc(JSContext *cx, JSGCStatus status) {
return JS_TRUE;
}
@@ -1146,64 +1147,64 @@ namespace mongo {
JS_SetOptions( _context , JS_GetOptions( _context ) | JSOPTION_VAROBJFIX );
JS_DefineFunctions( _context , _global , globalHelpers );
-
+
JS_DefineFunctions( _context , _convertor->getGlobalObject( "Object" ), objectHelpers );
//JS_SetGCCallback( _context , no_gc ); // this is useful for seeing if something is a gc problem
_postCreateHacks();
}
-
- ~SMScope(){
+
+ ~SMScope() {
smlock;
uassert( 10223 , "deleted SMScope twice?" , _convertor );
- for ( list<void*>::iterator i=_roots.begin(); i != _roots.end(); i++ ){
+ for ( list<void*>::iterator i=_roots.begin(); i != _roots.end(); i++ ) {
JS_RemoveRoot( _context , *i );
}
_roots.clear();
-
- if ( _this ){
+
+ if ( _this ) {
JS_RemoveRoot( _context , &_this );
_this = 0;
}
- if ( _convertor ){
+ if ( _convertor ) {
delete _convertor;
_convertor = 0;
}
-
- if ( _context ){
+
+ if ( _context ) {
JS_DestroyContext( _context );
_context = 0;
}
}
-
- void reset(){
+
+ void reset() {
smlock;
assert( _convertor );
return;
- if ( _this ){
+ if ( _this ) {
JS_RemoveRoot( _context , &_this );
_this = 0;
}
currentScope.reset( this );
_error = "";
}
-
- void addRoot( void * root , const char * name ){
+
+ void addRoot( void * root , const char * name ) {
JS_AddNamedRoot( _context , root , name );
_roots.push_back( root );
}
- void init( const BSONObj * data ){
+ void init( const BSONObj * data ) {
smlock;
if ( ! data )
return;
BSONObjIterator i( *data );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
_convertor->setProperty( _global , e.fieldName() , _convertor->toval( e ) );
_initFieldNames.insert( e.fieldName() );
@@ -1211,7 +1212,7 @@ namespace mongo {
}
- void externalSetup(){
+ void externalSetup() {
smlock;
uassert( 10224 , "already local connected" , ! _localConnect );
if ( _externalSetup )
@@ -1220,20 +1221,20 @@ namespace mongo {
_externalSetup = true;
}
- void localConnect( const char * dbName ){
+ void localConnect( const char * dbName ) {
{
smlock;
uassert( 10225 , "already setup for external db" , ! _externalSetup );
- if ( _localConnect ){
+ if ( _localConnect ) {
uassert( 10226 , "connected to different db" , _localDBName == dbName );
return;
}
-
+
initMongoJS( this , _context , _global , true );
-
+
exec( "_mongo = new Mongo();" );
exec( ((string)"db = _mongo.getDB( \"" + dbName + "\" ); ").c_str() );
-
+
_localConnect = true;
_localDBName = dbName;
}
@@ -1241,14 +1242,14 @@ namespace mongo {
}
// ----- getters ------
- double getNumber( const char *field ){
+ double getNumber( const char *field ) {
smlock;
jsval val;
assert( JS_GetProperty( _context , _global , field , &val ) );
return _convertor->toNumber( val );
}
- string getString( const char *field ){
+ string getString( const char *field ) {
smlock;
jsval val;
assert( JS_GetProperty( _context , _global , field , &val ) );
@@ -1256,27 +1257,27 @@ namespace mongo {
return _convertor->toString( s );
}
- bool getBoolean( const char *field ){
+ bool getBoolean( const char *field ) {
smlock;
return _convertor->getBoolean( _global , field );
}
- BSONObj getObject( const char *field ){
+ BSONObj getObject( const char *field ) {
smlock;
return _convertor->toObject( _convertor->getProperty( _global , field ) );
}
- JSObject * getJSObject( const char * field ){
+ JSObject * getJSObject( const char * field ) {
smlock;
return _convertor->getJSObject( _global , field );
}
- int type( const char *field ){
+ int type( const char *field ) {
smlock;
jsval val;
assert( JS_GetProperty( _context , _global , field , &val ) );
- switch ( JS_TypeOfValue( _context , val ) ){
+ switch ( JS_TypeOfValue( _context , val ) ) {
case JSTYPE_VOID: return Undefined;
case JSTYPE_NULL: return jstNULL;
case JSTYPE_OBJECT: {
@@ -1301,50 +1302,50 @@ namespace mongo {
// ----- setters ------
- void setElement( const char *field , const BSONElement& val ){
+ void setElement( const char *field , const BSONElement& val ) {
smlock;
jsval v = _convertor->toval( val );
assert( JS_SetProperty( _context , _global , field , &v ) );
}
- void setNumber( const char *field , double val ){
+ void setNumber( const char *field , double val ) {
smlock;
jsval v = _convertor->toval( val );
assert( JS_SetProperty( _context , _global , field , &v ) );
}
- void setString( const char *field , const char * val ){
+ void setString( const char *field , const char * val ) {
smlock;
jsval v = _convertor->toval( val );
assert( JS_SetProperty( _context , _global , field , &v ) );
}
- void setObject( const char *field , const BSONObj& obj , bool readOnly ){
+ void setObject( const char *field , const BSONObj& obj , bool readOnly ) {
smlock;
jsval v = _convertor->toval( &obj , readOnly );
JS_SetProperty( _context , _global , field , &v );
}
- void setBoolean( const char *field , bool val ){
+ void setBoolean( const char *field , bool val ) {
smlock;
jsval v = BOOLEAN_TO_JSVAL( val );
assert( JS_SetProperty( _context , _global , field , &v ) );
}
- void setThis( const BSONObj * obj ){
+ void setThis( const BSONObj * obj ) {
smlock;
- if ( _this ){
+ if ( _this ) {
JS_RemoveRoot( _context , &_this );
_this = 0;
}
-
- if ( obj ){
+
+ if ( obj ) {
_this = _convertor->toJSObject( obj );
JS_AddNamedRoot( _context , &_this , "scope this" );
}
}
- void rename( const char * from , const char * to ){
+ void rename( const char * from , const char * to ) {
smlock;
jsval v;
assert( JS_GetProperty( _context , _global , from , &v ) );
@@ -1355,7 +1356,7 @@ namespace mongo {
// ---- functions -----
- ScriptingFunction _createFunction( const char * code ){
+ ScriptingFunction _createFunction( const char * code ) {
smlock;
precall();
return (ScriptingFunction)_convertor->compileFunction( code );
@@ -1367,10 +1368,10 @@ namespace mongo {
int count;
};
- // should not generate exceptions, as those can be caught in
+ // should not generate exceptions, as those can be caught in
// javascript code; returning false without an exception exits
// immediately
- static JSBool _interrupt( JSContext *cx ){
+ static JSBool _interrupt( JSContext *cx ) {
TimeoutSpec &spec = *(TimeoutSpec *)( JS_GetContextPrivate( cx ) );
if ( ++spec.count % 1000 != 0 )
return JS_TRUE;
@@ -1388,8 +1389,8 @@ namespace mongo {
return JS_FALSE;
}
-
- static JSBool interrupt( JSContext *cx, JSScript *script ){
+
+ static JSBool interrupt( JSContext *cx, JSScript *script ) {
return _interrupt( cx );
}
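
// A sketch of the cadence-plus-deadline pattern _interrupt() above uses:
// the cheap modulo filter runs on every branch callback and the clock is
// consulted only once per 1000 calls. Everything but the 1000-call cadence
// and the count field is assumed here.

#include <ctime>

struct DeadlineSketch {
    time_t deadline;                  // absolute cutoff derived from timeoutMs
    int count;
    bool shouldTerminate() {
        if ( ++count % 1000 != 0 )    // same 1-in-1000 filter as TimeoutSpec
            return false;
        return time( 0 ) > deadline;  // true => callback returns JS_FALSE
    }
};
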
@@ -1420,12 +1421,12 @@ namespace mongo {
}
}
- void precall(){
+ void precall() {
_error = "";
currentScope.reset( this );
}
- bool exec( const StringData& code , const string& name = "(anon)" , bool printResult = false , bool reportError = true , bool assertOnError = true, int timeoutMs = 0 ){
+ bool exec( const StringData& code , const string& name = "(anon)" , bool printResult = false , bool reportError = true , bool assertOnError = true, int timeoutMs = 0 ) {
smlock;
precall();
@@ -1435,9 +1436,9 @@ namespace mongo {
JSBool worked = JS_EvaluateScript( _context , _global , code.data() , code.size() , name.c_str() , 1 , &ret );
uninstallInterrupt( timeoutMs );
- if ( ! worked && _error.size() == 0 ){
+ if ( ! worked && _error.size() == 0 ) {
jsval v;
- if ( JS_GetPendingException( _context , &v ) ){
+ if ( JS_GetPendingException( _context , &v ) ) {
_error = _convertor->toString( v );
if ( reportError )
cout << _error << endl;
@@ -1447,7 +1448,7 @@ namespace mongo {
if ( assertOnError )
uassert( 10228 , name + " exec failed" , worked );
- if ( reportError && ! _error.empty() ){
+ if ( reportError && ! _error.empty() ) {
// cout << "exec error: " << _error << endl;
// already printed in reportError, so... TODO
}
@@ -1460,23 +1461,23 @@ namespace mongo {
return worked;
}
-
- int invoke( JSFunction * func , const BSONObj& args, int timeoutMs , bool ignoreReturn ){
+
+ int invoke( JSFunction * func , const BSONObj& args, int timeoutMs , bool ignoreReturn ) {
smlock;
precall();
assert( JS_EnterLocalRootScope( _context ) );
-
+
int nargs = args.nFields();
scoped_array<jsval> smargsPtr( new jsval[nargs] );
- if ( nargs ){
+ if ( nargs ) {
BSONObjIterator it( args );
- for ( int i=0; i<nargs; i++ ){
+ for ( int i=0; i<nargs; i++ ) {
smargsPtr[i] = _convertor->toval( it.next() );
}
}
- if ( args.isEmpty() ){
+ if ( args.isEmpty() ) {
_convertor->setProperty( _global , "args" , JSVAL_NULL );
}
else {
@@ -1493,27 +1494,27 @@ namespace mongo {
if ( !ret ) {
return -3;
}
-
- if ( ! ignoreReturn ){
+
+ if ( ! ignoreReturn ) {
assert( JS_SetProperty( _context , _global , "return" , &rval ) );
}
return 0;
}
- int invoke( ScriptingFunction funcAddr , const BSONObj& args, int timeoutMs = 0 , bool ignoreReturn = 0 ){
+ int invoke( ScriptingFunction funcAddr , const BSONObj& args, int timeoutMs = 0 , bool ignoreReturn = 0 ) {
return invoke( (JSFunction*)funcAddr , args , timeoutMs , ignoreReturn );
}
- void gotError( string s ){
+ void gotError( string s ) {
_error = s;
}
- string getError(){
+ string getError() {
return _error;
}
- void injectNative( const char *field, NativeFunction func ){
+ void injectNative( const char *field, NativeFunction func ) {
smlock;
string name = field;
_convertor->setProperty( _global , (name + "_").c_str() , _convertor->toval( (double)(long long)func ) );
@@ -1524,16 +1525,16 @@ namespace mongo {
exec( code.str() );
}
- virtual void gc(){
+ virtual void gc() {
smlock;
JS_GC( _context );
}
JSContext *SavedContext() const { return _context; }
-
+
private:
- void _postCreateHacks(){
+ void _postCreateHacks() {
#ifdef XULRUNNER
exec( "__x__ = new Date(1);" );
globalSMEngine->_dateClass = _convertor->getClass( _global , "__x__" );
@@ -1541,7 +1542,7 @@ namespace mongo {
globalSMEngine->_regexClass = _convertor->getClass( _global , "__x__" );
#endif
}
-
+
JSContext * _context;
Convertor * _convertor;
@@ -1553,41 +1554,41 @@ namespace mongo {
bool _externalSetup;
bool _localConnect;
-
+
set<string> _initFieldNames;
-
+
};
/* used to make the logging not overly chatty in the mongo shell. */
extern bool isShell;
- void errorReporter( JSContext *cx, const char *message, JSErrorReport *report ){
+ void errorReporter( JSContext *cx, const char *message, JSErrorReport *report ) {
stringstream ss;
- if( !isShell )
+ if( !isShell )
ss << "JS Error: ";
ss << message;
- if ( report && report->filename ){
+ if ( report && report->filename ) {
ss << " " << report->filename << ":" << report->lineno;
}
tlog() << ss.str() << endl;
- if ( currentScope.get() ){
+ if ( currentScope.get() ) {
currentScope->gotError( ss.str() );
}
}
- JSBool native_load( JSContext *cx , JSObject *obj , uintN argc, jsval *argv , jsval *rval ){
+ JSBool native_load( JSContext *cx , JSObject *obj , uintN argc, jsval *argv , jsval *rval ) {
Convertor c(cx);
Scope * s = currentScope.get();
- for ( uintN i=0; i<argc; i++ ){
+ for ( uintN i=0; i<argc; i++ ) {
string filename = c.toString( argv[i] );
//cout << "load [" << filename << "]" << endl;
- if ( ! s->execFile( filename , false , true , false ) ){
+ if ( ! s->execFile( filename , false , true , false ) ) {
JS_ReportError( cx , ((string)"error loading js file: " + filename ).c_str() );
return JS_FALSE;
}
@@ -1598,7 +1599,7 @@ namespace mongo {
- void SMEngine::runTest(){
+ void SMEngine::runTest() {
SMScope s;
s.localConnect( "foo" );
@@ -1628,17 +1629,17 @@ namespace mongo {
}
- Scope * SMEngine::createScope(){
+ Scope * SMEngine::createScope() {
return new SMScope();
}
- void Convertor::addRoot( JSFunction * f , const char * name ){
+ void Convertor::addRoot( JSFunction * f , const char * name ) {
if ( ! f )
return;
SMScope * scope = currentScope.get();
uassert( 10229 , "need a scope" , scope );
-
+
JSObject * o = JS_GetFunctionObject( f );
assert( o );
scope->addRoot( &o , name );
diff --git a/scripting/engine_spidermonkey.h b/scripting/engine_spidermonkey.h
index 4617b5d878f..3ee74953444 100644
--- a/scripting/engine_spidermonkey.h
+++ b/scripting/engine_spidermonkey.h
@@ -37,7 +37,7 @@
#include "jstypes.h"
#undef JS_PUBLIC_API
#undef JS_PUBLIC_DATA
-#define JS_PUBLIC_API(t) t __cdecl
+#define JS_PUBLIC_API(t) t __cdecl
#define JS_PUBLIC_DATA(t) t
#endif
@@ -64,7 +64,7 @@
#define JSCLASS_GLOBAL_FLAGS 0
-JSBool JS_CStringsAreUTF8(){
+JSBool JS_CStringsAreUTF8() {
return false;
}
@@ -85,7 +85,7 @@ namespace mongo {
class SMScope;
class Convertor;
-
+
extern JSClass bson_class;
extern JSClass bson_ro_class;
@@ -99,10 +99,10 @@ namespace mongo {
extern JSClass maxkey_class;
// internal things
- void dontDeleteScope( SMScope * s ){}
+ void dontDeleteScope( SMScope * s ) {}
void errorReporter( JSContext *cx, const char *message, JSErrorReport *report );
extern boost::thread_specific_ptr<SMScope> currentScope;
-
+
// bson
JSBool resolveBSONField( JSContext *cx, JSObject *obj, jsval id, uintN flags, JSObject **objp );
@@ -112,14 +112,14 @@ namespace mongo {
bool appendSpecialDBObject( Convertor * c , BSONObjBuilder& b , const string& name , jsval val , JSObject * o );
#define JSVAL_IS_OID(v) ( JSVAL_IS_OBJECT( v ) && JS_InstanceOf( cx , JSVAL_TO_OBJECT( v ) , &object_id_class , 0 ) )
-
+
bool isDate( JSContext * cx , JSObject * o );
// JS private data must be 2byte aligned, so we use a holder to refer to an unaligned pointer.
struct BinDataHolder {
BinDataHolder( const char *c, int copyLen = -1 ) :
- c_( const_cast< char * >( c ) ),
- iFree_( copyLen != -1 ) {
+ c_( const_cast< char * >( c ) ),
+ iFree_( copyLen != -1 ) {
if ( copyLen != -1 ) {
c_ = (char*)malloc( copyLen );
memcpy( c_, c, copyLen );
diff --git a/scripting/engine_v8.cpp b/scripting/engine_v8.cpp
index df9dcc9ffe3..82213b78c6c 100644
--- a/scripting/engine_v8.cpp
+++ b/scripting/engine_v8.cpp
@@ -1,4 +1,4 @@
-//engine_v8.cpp
+//engine_v8.cpp
/* Copyright 2009 10gen Inc.
*
@@ -27,20 +27,20 @@ namespace mongo {
// guarded by v8 mutex
map< unsigned, int > __interruptSpecToThreadId;
-
+
// --- engine ---
V8ScriptEngine::V8ScriptEngine() {}
-
- V8ScriptEngine::~V8ScriptEngine(){
+
+ V8ScriptEngine::~V8ScriptEngine() {
}
- void ScriptEngine::setup(){
- if ( !globalScriptEngine ){
+ void ScriptEngine::setup() {
+ if ( !globalScriptEngine ) {
globalScriptEngine = new V8ScriptEngine();
}
}
-
+
void V8ScriptEngine::interrupt( unsigned opSpec ) {
v8::Locker l;
if ( __interruptSpecToThreadId.count( opSpec ) ) {
@@ -54,18 +54,18 @@ namespace mongo {
toKill.push_back( i->second );
}
for( vector< int >::const_iterator i = toKill.begin(); i != toKill.end(); ++i ) {
- V8::TerminateExecution( *i );
+ V8::TerminateExecution( *i );
}
}
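
// interrupt()/interruptAll() above can only terminate threads registered in
// __interruptSpecToThreadId. A sketch of the bookkeeping a caller must do
// around a script run (exact call sites and the v8 thread-id API assumed):
//
//   { v8::Locker l; __interruptSpecToThreadId[ spec ] = V8::GetCurrentThreadId(); }
//   // ... run the script ...
//   { v8::Locker l; __interruptSpecToThreadId.erase( spec ); }
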
// --- scope ---
-
- V8Scope::V8Scope( V8ScriptEngine * engine )
- : _engine( engine ) ,
- _connectState( NOT ){
+
+ V8Scope::V8Scope( V8ScriptEngine * engine )
+ : _engine( engine ) ,
+ _connectState( NOT ) {
V8Lock l;
- HandleScope handleScope;
+ HandleScope handleScope;
_context = Context::New();
Context::Scope context_scope( _context );
_global = Persistent< v8::Object >::New( _context->Global() );
@@ -77,18 +77,18 @@ namespace mongo {
_global->Set(v8::String::New("load"),
v8::FunctionTemplate::New( v8Callback< loadCallback >, v8::External::New(this))->GetFunction() );
-
+
_wrapper = Persistent< v8::Function >::New( getObjectWrapperTemplate()->GetFunction() );
-
+
_global->Set(v8::String::New("gc"), newV8Function< GCV8 >()->GetFunction() );
installDBTypes( _global );
}
- V8Scope::~V8Scope(){
+ V8Scope::~V8Scope() {
V8Lock l;
- Context::Scope context_scope( _context );
+ Context::Scope context_scope( _context );
_wrapper.Dispose();
_this.Dispose();
for( unsigned i = 0; i < _funcs.size(); ++i )
@@ -113,10 +113,12 @@ namespace mongo {
BSONObj ret;
try {
ret = function( nativeArgs );
- } catch( const std::exception &e ) {
+ }
+ catch( const std::exception &e ) {
return v8::ThrowException(v8::String::New(e.what()));
- } catch( ... ) {
- return v8::ThrowException(v8::String::New("unknown exception"));
+ }
+ catch( ... ) {
+ return v8::ThrowException(v8::String::New("unknown exception"));
}
return handle_scope.Close( mongoToV8Element( ret.firstElement() ) );
}
@@ -140,46 +142,46 @@ namespace mongo {
// ---- global stuff ----
- void V8Scope::init( const BSONObj * data ){
+ void V8Scope::init( const BSONObj * data ) {
V8Lock l;
if ( ! data )
return;
-
+
BSONObjIterator i( *data );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
setElement( e.fieldName() , e );
}
}
-
- void V8Scope::setNumber( const char * field , double val ){
+
+ void V8Scope::setNumber( const char * field , double val ) {
V8_SIMPLE_HEADER
_global->Set( v8::String::New( field ) , v8::Number::New( val ) );
}
- void V8Scope::setString( const char * field , const char * val ){
+ void V8Scope::setString( const char * field , const char * val ) {
V8_SIMPLE_HEADER
_global->Set( v8::String::New( field ) , v8::String::New( val ) );
}
- void V8Scope::setBoolean( const char * field , bool val ){
+ void V8Scope::setBoolean( const char * field , bool val ) {
V8_SIMPLE_HEADER
_global->Set( v8::String::New( field ) , v8::Boolean::New( val ) );
}
- void V8Scope::setElement( const char *field , const BSONElement& e ){
+ void V8Scope::setElement( const char *field , const BSONElement& e ) {
V8_SIMPLE_HEADER
_global->Set( v8::String::New( field ) , mongoToV8Element( e ) );
}
- void V8Scope::setObject( const char *field , const BSONObj& obj , bool readOnly){
+ void V8Scope::setObject( const char *field , const BSONObj& obj , bool readOnly) {
V8_SIMPLE_HEADER
// Set() accepts a ReadOnly parameter, but this just prevents the field itself
// from being overwritten and doesn't protect the object stored in 'field'.
_global->Set( v8::String::New( field ) , mongoToV8( obj, false, readOnly) );
}
- int V8Scope::type( const char *field ){
+ int V8Scope::type( const char *field ) {
V8_SIMPLE_HEADER
Handle<Value> v = get( field );
if ( v->IsNull() )
@@ -198,7 +200,7 @@ namespace mongo {
return NumberInt;
if ( v->IsNumber() )
return NumberDouble;
- if ( v->IsExternal() ){
+ if ( v->IsExternal() ) {
uassert( 10230 , "can't handle external yet" , 0 );
return -1;
}
@@ -210,36 +212,36 @@ namespace mongo {
throw UserException( 12509, (string)"don't know what this is: " + field );
}
- v8::Handle<v8::Value> V8Scope::get( const char * field ){
+ v8::Handle<v8::Value> V8Scope::get( const char * field ) {
return _global->Get( v8::String::New( field ) );
}
- double V8Scope::getNumber( const char *field ){
+ double V8Scope::getNumber( const char *field ) {
V8_SIMPLE_HEADER
return get( field )->ToNumber()->Value();
}
- int V8Scope::getNumberInt( const char *field ){
+ int V8Scope::getNumberInt( const char *field ) {
V8_SIMPLE_HEADER
return get( field )->ToInt32()->Value();
}
- long long V8Scope::getNumberLongLong( const char *field ){
+ long long V8Scope::getNumberLongLong( const char *field ) {
V8_SIMPLE_HEADER
return get( field )->ToInteger()->Value();
}
- string V8Scope::getString( const char *field ){
+ string V8Scope::getString( const char *field ) {
V8_SIMPLE_HEADER
return toSTLString( get( field ) );
}
- bool V8Scope::getBoolean( const char *field ){
+ bool V8Scope::getBoolean( const char *field ) {
V8_SIMPLE_HEADER
return get( field )->ToBoolean()->Value();
}
-
- BSONObj V8Scope::getObject( const char * field ){
+
+ BSONObj V8Scope::getObject( const char * field ) {
V8_SIMPLE_HEADER
Handle<Value> v = get( field );
if ( v->IsNull() || v->IsUndefined() )
@@ -247,28 +249,28 @@ namespace mongo {
uassert( 10231 , "not an object" , v->IsObject() );
return v8ToMongo( v->ToObject() );
}
-
+
// --- functions -----
- bool hasFunctionIdentifier( const string& code ){
+ bool hasFunctionIdentifier( const string& code ) {
if ( code.size() < 9 || code.find( "function" ) != 0 )
return false;
-
+
return code[8] == ' ' || code[8] == '(';
}
-
- Local< v8::Function > V8Scope::__createFunction( const char * raw ){
+
+ Local< v8::Function > V8Scope::__createFunction( const char * raw ) {
raw = jsSkipWhiteSpace( raw );
string code = raw;
if ( !hasFunctionIdentifier( code ) ) {
- if ( code.find( "\n" ) == string::npos &&
- ! hasJSReturn( code ) &&
- ( code.find( ";" ) == string::npos || code.find( ";" ) == code.size() - 1 ) ){
+ if ( code.find( "\n" ) == string::npos &&
+ ! hasJSReturn( code ) &&
+ ( code.find( ";" ) == string::npos || code.find( ";" ) == code.size() - 1 ) ) {
code = "return " + code;
}
code = "function(){ " + code + "}";
}
-
+
int num = _funcs.size() + 1;
string fn;
@@ -277,30 +279,30 @@ namespace mongo {
ss << "_funcs" << num;
fn = ss.str();
}
-
+
code = fn + " = " + code;
TryCatch try_catch;
// this might be time consuming, consider allowing an interrupt
- Handle<Script> script = v8::Script::Compile( v8::String::New( code.c_str() ) ,
- v8::String::New( fn.c_str() ) );
- if ( script.IsEmpty() ){
+ Handle<Script> script = v8::Script::Compile( v8::String::New( code.c_str() ) ,
+ v8::String::New( fn.c_str() ) );
+ if ( script.IsEmpty() ) {
_error = (string)"compile error: " + toSTLString( &try_catch );
log() << _error << endl;
return Local< v8::Function >();
}
-
+
Local<Value> result = script->Run();
- if ( result.IsEmpty() ){
+ if ( result.IsEmpty() ) {
_error = (string)"compile error: " + toSTLString( &try_catch );
log() << _error << endl;
return Local< v8::Function >();
- }
-
+ }
+
return v8::Function::Cast( *_global->Get( v8::String::New( fn.c_str() ) ) );
}
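
// A worked trace of the wrapping __createFunction() above performs
// (function counter illustrative):
//
//   raw    = "this.a + 1"
//   step 1 = "return this.a + 1"                        // simple statement
//   step 2 = "function(){ return this.a + 1}"           // no identifier, wrap
//   step 3 = "_funcs1 = function(){ return this.a + 1}" // bind to a global
//
// Running that script leaves the compiled function reachable as the global
// property _funcs1, which the scope then caches for invoke().
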
-
- ScriptingFunction V8Scope::_createFunction( const char * raw ){
+
+ ScriptingFunction V8Scope::_createFunction( const char * raw ) {
V8_SIMPLE_HEADER
Local< Value > ret = __createFunction( raw );
if ( ret.IsEmpty() )
@@ -312,9 +314,9 @@ namespace mongo {
return num;
}
- void V8Scope::setThis( const BSONObj * obj ){
+ void V8Scope::setThis( const BSONObj * obj ) {
V8_SIMPLE_HEADER
- if ( ! obj ){
+ if ( ! obj ) {
_this = Persistent< v8::Object >::New( v8::Object::New() );
return;
}
@@ -325,30 +327,31 @@ namespace mongo {
_this = Persistent< v8::Object >::New( _wrapper->NewInstance( 1, argv ) );
}
- void V8Scope::rename( const char * from , const char * to ){
+ void V8Scope::rename( const char * from , const char * to ) {
V8_SIMPLE_HEADER;
v8::Local<v8::String> f = v8::String::New( from );
v8::Local<v8::String> t = v8::String::New( to );
_global->Set( t , _global->Get( f ) );
_global->Set( f , v8::Undefined() );
}
-
- int V8Scope::invoke( ScriptingFunction func , const BSONObj& argsObject, int timeoutMs , bool ignoreReturn ){
+
+ int V8Scope::invoke( ScriptingFunction func , const BSONObj& argsObject, int timeoutMs , bool ignoreReturn ) {
V8_SIMPLE_HEADER
Handle<Value> funcValue = _funcs[func-1];
-
- TryCatch try_catch;
+
+ TryCatch try_catch;
int nargs = argsObject.nFields();
scoped_array< Handle<Value> > args;
- if ( nargs ){
+ if ( nargs ) {
args.reset( new Handle<Value>[nargs] );
BSONObjIterator it( argsObject );
- for ( int i=0; i<nargs; i++ ){
+ for ( int i=0; i<nargs; i++ ) {
BSONElement next = it.next();
args[i] = mongoToV8Element( next );
}
setObject( "args", argsObject, true ); // for backwards compatibility
- } else {
+ }
+ else {
_global->Set( v8::String::New( "args" ), v8::Undefined() );
}
if ( globalScriptEngine->interrupted() ) {
@@ -361,12 +364,13 @@ namespace mongo {
enableV8Interrupt(); // because of v8 locker we can check interrupted, then enable
Local<Value> result = ((v8::Function*)(*funcValue))->Call( _this , nargs , args.get() );
disableV8Interrupt();
-
- if ( result.IsEmpty() ){
+
+ if ( result.IsEmpty() ) {
stringstream ss;
if ( try_catch.HasCaught() && !try_catch.CanContinue() ) {
ss << "error in invoke: " << globalScriptEngine->checkInterrupt();
- } else {
+ }
+ else {
ss << "error in invoke: " << toSTLString( &try_catch );
}
_error = ss.str();
@@ -374,28 +378,28 @@ namespace mongo {
return 1;
}
- if ( ! ignoreReturn ){
+ if ( ! ignoreReturn ) {
_global->Set( v8::String::New( "return" ) , result );
}
return 0;
}
- bool V8Scope::exec( const StringData& code , const string& name , bool printResult , bool reportError , bool assertOnError, int timeoutMs ){
- if ( timeoutMs ){
+ bool V8Scope::exec( const StringData& code , const string& name , bool printResult , bool reportError , bool assertOnError, int timeoutMs ) {
+ if ( timeoutMs ) {
static bool t = 1;
- if ( t ){
+ if ( t ) {
log() << "timeoutMs not support for v8 yet code: " << code << endl;
t = 0;
}
}
-
+
V8_SIMPLE_HEADER
-
+
TryCatch try_catch;
-
- Handle<Script> script = v8::Script::Compile( v8::String::New( code.data() ) ,
- v8::String::New( name.c_str() ) );
+
+ Handle<Script> script = v8::Script::Compile( v8::String::New( code.data() ) ,
+ v8::String::New( name.c_str() ) );
if (script.IsEmpty()) {
stringstream ss;
ss << "compile error: " << toSTLString( &try_catch );
@@ -405,8 +409,8 @@ namespace mongo {
if ( assertOnError )
uassert( 10233 , _error , 0 );
return false;
- }
-
+ }
+
if ( globalScriptEngine->interrupted() ) {
_error = (string)"exec error: " + globalScriptEngine->checkInterrupt();
if ( reportError ) {
@@ -420,10 +424,11 @@ namespace mongo {
enableV8Interrupt(); // because of v8 locker we can check interrupted, then enable
Handle<v8::Value> result = script->Run();
disableV8Interrupt();
- if ( result.IsEmpty() ){
+ if ( result.IsEmpty() ) {
if ( try_catch.HasCaught() && !try_catch.CanContinue() ) {
_error = (string)"exec error: " + globalScriptEngine->checkInterrupt();
- } else {
+ }
+ else {
_error = (string)"exec error: " + toSTLString( &try_catch );
}
if ( reportError )
@@ -431,25 +436,25 @@ namespace mongo {
if ( assertOnError )
uassert( 10234 , _error , 0 );
return false;
- }
-
+ }
+
_global->Set( v8::String::New( "__lastres__" ) , result );
- if ( printResult && ! result->IsUndefined() ){
+ if ( printResult && ! result->IsUndefined() ) {
cout << toSTLString( result ) << endl;
}
-
+
return true;
}
-
- void V8Scope::injectNative( const char *field, NativeFunction func ){
+
+ void V8Scope::injectNative( const char *field, NativeFunction func ) {
V8_SIMPLE_HEADER
-
+
Handle< FunctionTemplate > f( newV8Function< nativeCallback >() );
f->Set( v8::String::New( "_native_function" ), External::New( (void*)func ) );
_global->Set( v8::String::New( field ), f->GetFunction() );
- }
-
+ }
+
void V8Scope::gc() {
cout << "in gc" << endl;
V8Lock l;
@@ -458,13 +463,13 @@ namespace mongo {
// ----- db access -----
- void V8Scope::localConnect( const char * dbName ){
+ void V8Scope::localConnect( const char * dbName ) {
{
V8_SIMPLE_HEADER
if ( _connectState == EXTERNAL )
throw UserException( 12510, "externalSetup already called, can't call externalSetup" );
- if ( _connectState == LOCAL ){
+ if ( _connectState == LOCAL ) {
if ( _localDBName == dbName )
return;
throw UserException( 12511, "localConnect called with a different name previously" );
@@ -483,8 +488,8 @@ namespace mongo {
}
loadStored();
}
-
- void V8Scope::externalSetup(){
+
+ void V8Scope::externalSetup() {
V8_SIMPLE_HEADER
if ( _connectState == EXTERNAL )
return;
@@ -499,12 +504,12 @@ namespace mongo {
// ----- internal -----
- void V8Scope::reset(){
+ void V8Scope::reset() {
_startCall();
}
- void V8Scope::_startCall(){
+ void V8Scope::_startCall() {
_error = "";
}
-
+
} // namespace mongo
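
As an aside, the compile-then-call flow that __createFunction() and invoke() implement above reduces to a short self-contained sketch against the same pre-isolate V8 API this file targets; only calls already visible in the hunks are used, and MongoDB's locking, interrupt checks, and BSON marshalling are deliberately left out:

    #include <v8.h>
    #include <iostream>

    int main() {
        v8::HandleScope handle_scope;
        v8::Persistent<v8::Context> context = v8::Context::New();
        v8::Context::Scope context_scope( context );

        // compile once, as __createFunction() does above
        v8::Handle<v8::Script> script = v8::Script::Compile(
            v8::String::New( "function add( a , b ) { return a + b; }" ) );
        if ( script.IsEmpty() || script->Run().IsEmpty() )
            return 1;                                    // compile error path

        // look the function up on the global object and call it,
        // mirroring the _funcs[] / Call() pattern in V8Scope::invoke()
        v8::Handle<v8::Function> f = v8::Handle<v8::Function>::Cast(
            context->Global()->Get( v8::String::New( "add" ) ) );
        v8::Handle<v8::Value> args[2] = { v8::Number::New( 2 ) , v8::Number::New( 3 ) };

        v8::TryCatch try_catch;
        v8::Handle<v8::Value> result = f->Call( context->Global() , 2 , args );
        if ( result.IsEmpty() ) {                        // same check invoke() makes
            v8::String::Utf8Value e( try_catch.Exception() );
            std::cout << "error in invoke: " << *e << std::endl;
            return 1;
        }
        std::cout << result->NumberValue() << std::endl; // prints 5
        context.Dispose();
        return 0;
    }
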
diff --git a/scripting/engine_v8.h b/scripting/engine_v8.h
index 1034e7f3da1..c770955d74f 100644
--- a/scripting/engine_v8.h
+++ b/scripting/engine_v8.h
@@ -27,19 +27,19 @@ using namespace v8;
namespace mongo {
class V8ScriptEngine;
-
+
class V8Scope : public Scope {
public:
-
+
V8Scope( V8ScriptEngine * engine );
~V8Scope();
-
+
virtual void reset();
virtual void init( const BSONObj * data );
virtual void localConnect( const char * dbName );
virtual void externalSetup();
-
+
v8::Handle<v8::Value> get( const char * field ); // caller must create context and handle scopes
virtual double getNumber( const char *field );
virtual int getNumberInt( const char *field );
@@ -47,7 +47,7 @@ namespace mongo {
virtual string getString( const char *field );
virtual bool getBoolean( const char *field );
virtual BSONObj getObject( const char *field );
-
+
virtual int type( const char *field );
virtual void setNumber( const char *field , double val );
@@ -58,22 +58,22 @@ namespace mongo {
virtual void setThis( const BSONObj * obj );
virtual void rename( const char * from , const char * to );
-
+
virtual ScriptingFunction _createFunction( const char * code );
Local< v8::Function > __createFunction( const char * code );
virtual int invoke( ScriptingFunction func , const BSONObj& args, int timeoutMs = 0 , bool ignoreReturn = false );
virtual bool exec( const StringData& code , const string& name , bool printResult , bool reportError , bool assertOnError, int timeoutMs );
- virtual string getError(){ return _error; }
-
+ virtual string getError() { return _error; }
+
virtual void injectNative( const char *field, NativeFunction func );
void gc();
Handle< Context > context() const { return _context; }
-
+
private:
void _startCall();
-
+
static Handle< Value > nativeCallback( const Arguments &args );
static Handle< Value > loadCallback( const Arguments &args );
@@ -92,32 +92,32 @@ namespace mongo {
enum ConnectState { NOT , LOCAL , EXTERNAL };
ConnectState _connectState;
};
-
+
class V8ScriptEngine : public ScriptEngine {
public:
V8ScriptEngine();
virtual ~V8ScriptEngine();
-
- virtual Scope * createScope(){ return new V8Scope( this ); }
-
- virtual void runTest(){}
+
+ virtual Scope * createScope() { return new V8Scope( this ); }
+
+ virtual void runTest() {}
bool utf8Ok() const { return true; }
class V8UnlockForClient : public Unlocker {
V8Unlock u_;
};
-
+
virtual auto_ptr<Unlocker> newThreadUnlocker() { return auto_ptr< Unlocker >( new V8UnlockForClient ); }
-
+
virtual void interrupt( unsigned opSpec );
virtual void interruptAll();
private:
friend class V8Scope;
};
-
-
+
+
extern ScriptEngine * globalScriptEngine;
extern map< unsigned, int > __interruptSpecToThreadId;
}
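
Taken together, this header is the whole embedding surface the .cpp above implements. A hypothetical caller-side sketch follows -- the include path and helper name are invented, but each call matches a declaration in this hunk (exec, _createFunction, invoke, getError) or in V8ScriptEngine (createScope):

    #include "scripting/engine.h"   // assumed location of the Scope/ScriptEngine bases

    namespace mongo {
        void runSnippet() {
            Scope * s = globalScriptEngine->createScope();

            // evaluate a script: report errors, don't assert, no timeout
            s->exec( "x = 6 * 7;" , "snippet" , false , true , false , 0 );

            // compile once, then call with no BSON arguments;
            // timeoutMs and ignoreReturn take their declared defaults
            ScriptingFunction f = s->_createFunction( "function(){ return x; }" );
            if ( s->invoke( f , BSONObj() ) != 0 )
                log() << "invoke failed: " << s->getError() << endl;

            delete s;
        }
    }
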
diff --git a/scripting/sm_db.cpp b/scripting/sm_db.cpp
index f9243377db2..0b144283f49 100644
--- a/scripting/sm_db.cpp
+++ b/scripting/sm_db.cpp
@@ -34,15 +34,15 @@ namespace mongo {
bool haveLocalShardingInfo( const string& ns );
// ------------ some defs needed ---------------
-
+
JSObject * doCreateCollection( JSContext * cx , JSObject * db , const string& shortName );
-
+
// ------------ utils ------------------
-
- bool isSpecialName( const string& name ){
+
+ bool isSpecialName( const string& name ) {
static set<string> names;
- if ( names.size() == 0 ){
+ if ( names.size() == 0 ) {
names.insert( "tojson" );
names.insert( "toJson" );
names.insert( "toString" );
@@ -50,10 +50,10 @@ namespace mongo {
if ( name.length() == 0 )
return false;
-
+
if ( name[0] == '_' )
return true;
-
+
return names.count( name ) > 0;
}
@@ -63,8 +63,8 @@ namespace mongo {
class CursorHolder {
public:
CursorHolder( auto_ptr< DBClientCursor > &cursor, const shared_ptr< DBClientWithCommands > &connection ) :
- connection_( connection ),
- cursor_( cursor ) {
+ connection_( connection ),
+ cursor_( cursor ) {
assert( cursor_.get() );
}
DBClientCursor *get() const { return cursor_.get(); }
@@ -72,60 +72,60 @@ namespace mongo {
shared_ptr< DBClientWithCommands > connection_;
auto_ptr< DBClientCursor > cursor_;
};
-
+
DBClientCursor *getCursor( JSContext *cx, JSObject *obj ) {
CursorHolder * holder = (CursorHolder*)JS_GetPrivate( cx , obj );
uassert( 10235 , "no cursor!" , holder );
return holder->get();
}
-
- JSBool internal_cursor_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
+
+ JSBool internal_cursor_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
uassert( 10236 , "no args to internal_cursor_constructor" , argc == 0 );
assert( JS_SetPrivate( cx , obj , 0 ) ); // just for safety
return JS_TRUE;
}
- void internal_cursor_finalize( JSContext * cx , JSObject * obj ){
+ void internal_cursor_finalize( JSContext * cx , JSObject * obj ) {
CursorHolder * holder = (CursorHolder*)JS_GetPrivate( cx , obj );
- if ( holder ){
+ if ( holder ) {
delete holder;
assert( JS_SetPrivate( cx , obj , 0 ) );
}
}
- JSBool internal_cursor_hasNext(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+ JSBool internal_cursor_hasNext(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
DBClientCursor *cursor = getCursor( cx, obj );
try {
*rval = cursor->more() ? JSVAL_TRUE : JSVAL_FALSE;
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
JS_ReportError( cx , e.what() );
return JS_FALSE;
}
return JS_TRUE;
}
- JSBool internal_cursor_objsLeftInBatch(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+ JSBool internal_cursor_objsLeftInBatch(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
DBClientCursor *cursor = getCursor( cx, obj );
Convertor c(cx);
*rval = c.toval((double) cursor->objsLeftInBatch() );
return JS_TRUE;
}
- JSBool internal_cursor_next(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+ JSBool internal_cursor_next(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
DBClientCursor *cursor = getCursor( cx, obj );
- BSONObj n;
-
+ BSONObj n;
+
try {
- if ( ! cursor->more() ){
+ if ( ! cursor->more() ) {
JS_ReportError( cx , "cursor at the end" );
return JS_FALSE;
}
n = cursor->next();
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
JS_ReportError( cx , e.what() );
return JS_FALSE;
}
@@ -149,15 +149,15 @@ namespace mongo {
JSCLASS_NO_OPTIONAL_MEMBERS
};
-
+
// ------ mongo stuff ------
- JSBool mongo_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
+ JSBool mongo_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
uassert( 10237 , "mongo_constructor not implemented yet" , 0 );
throw -1;
}
-
- JSBool mongo_local_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
+
+ JSBool mongo_local_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
Convertor c( cx );
shared_ptr< DBClientWithCommands > client( createDirectClient() );
@@ -169,29 +169,29 @@ namespace mongo {
return JS_TRUE;
}
- JSBool mongo_external_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
+ JSBool mongo_external_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
Convertor c( cx );
-
+
smuassert( cx , "0 or 1 args to Mongo" , argc <= 1 );
-
+
string host = "127.0.0.1";
if ( argc > 0 )
host = c.toString( argv[0] );
-
+
string errmsg;
ConnectionString cs = ConnectionString::parse( host , errmsg );
- if ( ! cs.isValid() ){
+ if ( ! cs.isValid() ) {
JS_ReportError( cx , errmsg.c_str() );
return JS_FALSE;
}
shared_ptr< DBClientWithCommands > conn( cs.connect( errmsg ) );
- if ( ! conn ){
+ if ( ! conn ) {
JS_ReportError( cx , errmsg.c_str() );
return JS_FALSE;
}
-
+
ScriptEngine::runConnectCallback( *conn );
assert( JS_SetPrivate( cx , obj , (void*)( new shared_ptr< DBClientWithCommands >( conn ) ) ) );
@@ -206,10 +206,10 @@ namespace mongo {
uassert( 10239 , "no connection!" , connHolder && connHolder->get() );
return connHolder->get();
}
-
- void mongo_finalize( JSContext * cx , JSObject * obj ){
+
+ void mongo_finalize( JSContext * cx , JSObject * obj ) {
shared_ptr< DBClientWithCommands > * connHolder = (shared_ptr< DBClientWithCommands >*)JS_GetPrivate( cx , obj );
- if ( connHolder ){
+ if ( connHolder ) {
delete connHolder;
assert( JS_SetPrivate( cx , obj , 0 ) );
}
@@ -220,21 +220,21 @@ namespace mongo {
JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, mongo_finalize,
JSCLASS_NO_OPTIONAL_MEMBERS
- };
+ };
- JSBool mongo_find(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+ JSBool mongo_find(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
smuassert( cx , "mongo_find needs 6 args" , argc == 6 );
shared_ptr< DBClientWithCommands > * connHolder = (shared_ptr< DBClientWithCommands >*)JS_GetPrivate( cx , obj );
smuassert( cx , "no connection!" , connHolder && connHolder->get() );
DBClientWithCommands *conn = connHolder->get();
-
+
Convertor c( cx );
string ns = c.toString( argv[0] );
-
+
BSONObj q = c.toObject( argv[1] );
BSONObj f = c.toObject( argv[2] );
-
+
int nToReturn = (int) c.toNumber( argv[3] );
int nToSkip = (int) c.toNumber( argv[4] );
bool slaveOk = c.getBoolean( obj , "slaveOk" );
@@ -243,7 +243,7 @@ namespace mongo {
try {
auto_ptr<DBClientCursor> cursor = conn->query( ns , q , nToReturn , nToSkip , f.nFields() ? &f : 0 , slaveOk ? QueryOption_SlaveOk : 0 , batchSize );
- if ( ! cursor.get() ){
+ if ( ! cursor.get() ) {
log() << "query failed : " << ns << " " << q << " to: " << conn->toString() << endl;
JS_ReportError( cx , "error doing query: failed" );
return JS_FALSE;
@@ -254,19 +254,19 @@ namespace mongo {
*rval = OBJECT_TO_JSVAL( mycursor );
return JS_TRUE;
}
- catch ( ... ){
+ catch ( ... ) {
JS_ReportError( cx , "error doing query: unknown" );
return JS_FALSE;
}
}
- JSBool mongo_update(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+ JSBool mongo_update(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
smuassert( cx , "mongo_find needs at elast 3 args" , argc >= 3 );
smuassert( cx , "2nd param to update has to be an object" , JSVAL_IS_OBJECT( argv[1] ) );
smuassert( cx , "3rd param to update has to be an object" , JSVAL_IS_OBJECT( argv[2] ) );
Convertor c( cx );
- if ( c.getBoolean( obj , "readOnly" ) ){
+ if ( c.getBoolean( obj , "readOnly" ) ) {
JS_ReportError( cx , "js db in read only mode - mongo_update" );
return JS_FALSE;
}
@@ -283,75 +283,75 @@ namespace mongo {
conn->update( ns , c.toObject( argv[1] ) , c.toObject( argv[2] ) , upsert , multi );
return JS_TRUE;
}
- catch ( ... ){
+ catch ( ... ) {
JS_ReportError( cx , "error doing update" );
return JS_FALSE;
}
}
- JSBool mongo_insert(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+ JSBool mongo_insert(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
smuassert( cx , "mongo_insert needs 2 args" , argc == 2 );
smuassert( cx , "2nd param to insert has to be an object" , JSVAL_IS_OBJECT( argv[1] ) );
Convertor c( cx );
- if ( c.getBoolean( obj , "readOnly" ) ){
+ if ( c.getBoolean( obj , "readOnly" ) ) {
JS_ReportError( cx , "js db in read only mode - mongo_insert" );
return JS_FALSE;
}
-
+
DBClientWithCommands * conn = getConnection( cx, obj );
uassert( 10248 , "no connection!" , conn );
-
+
string ns = c.toString( argv[0] );
BSONObj o = c.toObject( argv[1] );
// TODO: add _id
-
+
try {
conn->insert( ns , o );
return JS_TRUE;
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
stringstream ss;
ss << "error doing insert:" << e.what();
string s = ss.str();
JS_ReportError( cx , s.c_str() );
return JS_FALSE;
}
- catch ( ... ){
+ catch ( ... ) {
JS_ReportError( cx , "error doing insert" );
return JS_FALSE;
}
}
- JSBool mongo_remove(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+ JSBool mongo_remove(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
smuassert( cx , "mongo_remove needs 2 or 3 arguments" , argc == 2 || argc == 3 );
smuassert( cx , "2nd param to insert has to be an object" , JSVAL_IS_OBJECT( argv[1] ) );
Convertor c( cx );
- if ( c.getBoolean( obj , "readOnly" ) ){
+ if ( c.getBoolean( obj , "readOnly" ) ) {
JS_ReportError( cx , "js db in read only mode - mongo_remove" );
return JS_FALSE;
}
DBClientWithCommands * conn = getConnection( cx, obj );
uassert( 10251 , "no connection!" , conn );
-
+
string ns = c.toString( argv[0] );
BSONObj o = c.toObject( argv[1] );
bool justOne = false;
if ( argc > 2 )
justOne = c.toBoolean( argv[2] );
-
+
try {
conn->remove( ns , o , justOne );
return JS_TRUE;
}
- catch ( ... ){
+ catch ( ... ) {
JS_ReportError( cx , "error doing remove" );
return JS_FALSE;
}
-
+
}
JSFunctionSpec mongo_functions[] = {
@@ -362,93 +362,93 @@ namespace mongo {
{ 0 }
};
- // ------------- db_collection -------------
-
- JSBool db_collection_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
- smuassert( cx , "db_collection_constructor wrong args" , argc == 4 );
- assert( JS_SetProperty( cx , obj , "_mongo" , &(argv[0]) ) );
- assert( JS_SetProperty( cx , obj , "_db" , &(argv[1]) ) );
- assert( JS_SetProperty( cx , obj , "_shortName" , &(argv[2]) ) );
- assert( JS_SetProperty( cx , obj , "_fullName" , &(argv[3]) ) );
-
- Convertor c(cx);
- if ( haveLocalShardingInfo( c.toString( argv[3] ) ) ){
- JS_ReportError( cx , "can't use sharded collection from db.eval" );
- return JS_FALSE;
- }
-
- return JS_TRUE;
- }
-
- JSBool db_collection_resolve( JSContext *cx, JSObject *obj, jsval id, uintN flags, JSObject **objp ){
- if ( flags & JSRESOLVE_ASSIGNING )
- return JS_TRUE;
-
- Convertor c( cx );
- string collname = c.toString( id );
-
- if ( isSpecialName( collname ) )
- return JS_TRUE;
-
- if ( obj == c.getGlobalPrototype( "DBCollection" ) )
- return JS_TRUE;
-
- JSObject * proto = JS_GetPrototype( cx , obj );
- if ( c.hasProperty( obj , collname.c_str() ) || ( proto && c.hasProperty( proto , collname.c_str() ) ) )
- return JS_TRUE;
-
- string name = c.toString( c.getProperty( obj , "_shortName" ) );
- name += ".";
- name += collname;
-
- jsval db = c.getProperty( obj , "_db" );
- if ( ! JSVAL_IS_OBJECT( db ) )
- return JS_TRUE;
-
- JSObject * coll = doCreateCollection( cx , JSVAL_TO_OBJECT( db ) , name );
- if ( ! coll )
- return JS_FALSE;
- c.setProperty( obj , collname.c_str() , OBJECT_TO_JSVAL( coll ) );
- *objp = obj;
- return JS_TRUE;
- }
+ // ------------- db_collection -------------
+
+ JSBool db_collection_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ smuassert( cx , "db_collection_constructor wrong args" , argc == 4 );
+ assert( JS_SetProperty( cx , obj , "_mongo" , &(argv[0]) ) );
+ assert( JS_SetProperty( cx , obj , "_db" , &(argv[1]) ) );
+ assert( JS_SetProperty( cx , obj , "_shortName" , &(argv[2]) ) );
+ assert( JS_SetProperty( cx , obj , "_fullName" , &(argv[3]) ) );
+
+ Convertor c(cx);
+ if ( haveLocalShardingInfo( c.toString( argv[3] ) ) ) {
+ JS_ReportError( cx , "can't use sharded collection from db.eval" );
+ return JS_FALSE;
+ }
+
+ return JS_TRUE;
+ }
+
+ JSBool db_collection_resolve( JSContext *cx, JSObject *obj, jsval id, uintN flags, JSObject **objp ) {
+ if ( flags & JSRESOLVE_ASSIGNING )
+ return JS_TRUE;
+
+ Convertor c( cx );
+ string collname = c.toString( id );
+
+ if ( isSpecialName( collname ) )
+ return JS_TRUE;
+
+ if ( obj == c.getGlobalPrototype( "DBCollection" ) )
+ return JS_TRUE;
+
+ JSObject * proto = JS_GetPrototype( cx , obj );
+ if ( c.hasProperty( obj , collname.c_str() ) || ( proto && c.hasProperty( proto , collname.c_str() ) ) )
+ return JS_TRUE;
+
+ string name = c.toString( c.getProperty( obj , "_shortName" ) );
+ name += ".";
+ name += collname;
+
+ jsval db = c.getProperty( obj , "_db" );
+ if ( ! JSVAL_IS_OBJECT( db ) )
+ return JS_TRUE;
+
+ JSObject * coll = doCreateCollection( cx , JSVAL_TO_OBJECT( db ) , name );
+ if ( ! coll )
+ return JS_FALSE;
+ c.setProperty( obj , collname.c_str() , OBJECT_TO_JSVAL( coll ) );
+ *objp = obj;
+ return JS_TRUE;
+ }
JSClass db_collection_class = {
- "DBCollection" , JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE ,
- JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
- JS_EnumerateStub, (JSResolveOp)(&db_collection_resolve) , JS_ConvertStub, JS_FinalizeStub,
- JSCLASS_NO_OPTIONAL_MEMBERS
- };
-
-
- JSObject * doCreateCollection( JSContext * cx , JSObject * db , const string& shortName ){
- Convertor c(cx);
-
- assert( c.hasProperty( db , "_mongo" ) );
- assert( c.hasProperty( db , "_name" ) );
-
- JSObject * coll = JS_NewObject( cx , &db_collection_class , 0 , 0 );
- CHECKNEWOBJECT( coll, cx, "doCreateCollection" );
- c.setProperty( coll , "_mongo" , c.getProperty( db , "_mongo" ) );
- c.setProperty( coll , "_db" , OBJECT_TO_JSVAL( db ) );
- c.setProperty( coll , "_shortName" , c.toval( shortName.c_str() ) );
-
- string name = c.toString( c.getProperty( db , "_name" ) );
- name += "." + shortName;
- c.setProperty( coll , "_fullName" , c.toval( name.c_str() ) );
-
- if ( haveLocalShardingInfo( name ) ){
- JS_ReportError( cx , "can't use sharded collection from db.eval" );
- return 0;
- }
-
- return coll;
+ "DBCollection" , JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE ,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, (JSResolveOp)(&db_collection_resolve) , JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+
+ JSObject * doCreateCollection( JSContext * cx , JSObject * db , const string& shortName ) {
+ Convertor c(cx);
+
+ assert( c.hasProperty( db , "_mongo" ) );
+ assert( c.hasProperty( db , "_name" ) );
+
+ JSObject * coll = JS_NewObject( cx , &db_collection_class , 0 , 0 );
+ CHECKNEWOBJECT( coll, cx, "doCreateCollection" );
+ c.setProperty( coll , "_mongo" , c.getProperty( db , "_mongo" ) );
+ c.setProperty( coll , "_db" , OBJECT_TO_JSVAL( db ) );
+ c.setProperty( coll , "_shortName" , c.toval( shortName.c_str() ) );
+
+ string name = c.toString( c.getProperty( db , "_name" ) );
+ name += "." + shortName;
+ c.setProperty( coll , "_fullName" , c.toval( name.c_str() ) );
+
+ if ( haveLocalShardingInfo( name ) ) {
+ JS_ReportError( cx , "can't use sharded collection from db.eval" );
+ return 0;
+ }
+
+ return coll;
}
-
+
// -------------- DB ---------------
-
-
- JSBool db_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
+
+
+ JSBool db_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
smuassert( cx, "wrong number of arguments to DB" , argc == 2 );
assert( JS_SetProperty( cx , obj , "_mongo" , &(argv[0]) ) );
assert( JS_SetProperty( cx , obj , "_name" , &(argv[1]) ) );
@@ -456,7 +456,7 @@ namespace mongo {
return JS_TRUE;
}
- JSBool db_resolve( JSContext *cx, JSObject *obj, jsval id, uintN flags, JSObject **objp ){
+ JSBool db_resolve( JSContext *cx, JSObject *obj, jsval id, uintN flags, JSObject **objp ) {
if ( flags & JSRESOLVE_ASSIGNING )
return JS_TRUE;
@@ -466,9 +466,9 @@ namespace mongo {
return JS_TRUE;
string collname = c.toString( id );
-
+
if ( isSpecialName( collname ) )
- return JS_TRUE;
+ return JS_TRUE;
JSObject * proto = JS_GetPrototype( cx , obj );
if ( proto && c.hasProperty( proto , collname.c_str() ) )
@@ -478,26 +478,26 @@ namespace mongo {
if ( ! coll )
return JS_FALSE;
c.setProperty( obj , collname.c_str() , OBJECT_TO_JSVAL( coll ) );
-
+
*objp = obj;
return JS_TRUE;
}
JSClass db_class = {
- "DB" , JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE ,
+ "DB" , JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE ,
JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
JS_EnumerateStub, (JSResolveOp)(&db_resolve) , JS_ConvertStub, JS_FinalizeStub,
JSCLASS_NO_OPTIONAL_MEMBERS
};
-
+
// -------------- object id -------------
- JSBool object_id_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
+ JSBool object_id_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
Convertor c( cx );
OID oid;
- if ( argc == 0 ){
+ if ( argc == 0 ) {
oid.init();
}
else {
@@ -506,26 +506,27 @@ namespace mongo {
try {
Scope::validateObjectIdString( s );
- } catch ( const MsgAssertionException &m ) {
+ }
+ catch ( const MsgAssertionException &m ) {
static string error = m.toString();
JS_ReportError( cx, error.c_str() );
return JS_FALSE;
}
oid.init( s );
}
-
- if ( ! JS_InstanceOf( cx , obj , &object_id_class , 0 ) ){
+
+ if ( ! JS_InstanceOf( cx , obj , &object_id_class , 0 ) ) {
obj = JS_NewObject( cx , &object_id_class , 0 , 0 );
CHECKNEWOBJECT( obj, cx, "object_id_constructor" );
*rval = OBJECT_TO_JSVAL( obj );
}
-
+
jsval v = c.toval( oid.str().c_str() );
assert( JS_SetProperty( cx , obj , "str" , &v ) );
return JS_TRUE;
}
-
+
JSClass object_id_class = {
"ObjectId" , JSCLASS_HAS_PRIVATE ,
JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
@@ -533,7 +534,7 @@ namespace mongo {
JSCLASS_NO_OPTIONAL_MEMBERS
};
- JSBool object_id_tostring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+ JSBool object_id_tostring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
Convertor c(cx);
return (JSBool) (*rval = c.getProperty( obj , "str" ));
}
@@ -545,26 +546,26 @@ namespace mongo {
// dbpointer
- JSBool dbpointer_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
+ JSBool dbpointer_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
Convertor c( cx );
-
- if ( argc == 2 ){
- if ( ! JSVAL_IS_OID( argv[1] ) ){
+ if ( argc == 2 ) {
+
+ if ( ! JSVAL_IS_OID( argv[1] ) ) {
JS_ReportError( cx , "2nd arg to DBPointer needs to be oid" );
- return JS_FALSE;
+ return JS_FALSE;
}
-
+
assert( JS_SetProperty( cx , obj , "ns" , &(argv[0]) ) );
assert( JS_SetProperty( cx , obj , "id" , &(argv[1]) ) );
return JS_TRUE;
}
else {
JS_ReportError( cx , "DBPointer needs 2 arguments" );
- return JS_FALSE;
+ return JS_FALSE;
}
}
-
+
JSClass dbpointer_class = {
"DBPointer" , JSCLASS_HAS_PRIVATE ,
JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
@@ -577,10 +578,10 @@ namespace mongo {
};
- JSBool dbref_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
+ JSBool dbref_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
Convertor c( cx );
- if ( argc == 2 ){
+ if ( argc == 2 ) {
JSObject * o = JS_NewObject( cx , NULL , NULL, NULL );
CHECKNEWOBJECT( o, cx, "dbref_constructor" );
assert( JS_SetProperty( cx, o , "$ref" , &argv[ 0 ] ) );
@@ -592,37 +593,37 @@ namespace mongo {
else {
JS_ReportError( cx , "DBRef needs 2 arguments" );
assert( JS_SetPrivate( cx , obj , (void*)(new BSONHolder( BSONObj().getOwned() ) ) ) );
- return JS_FALSE;
+ return JS_FALSE;
}
}
-
+
JSClass dbref_class = bson_class; // name will be fixed later
// UUID **************************
- JSBool uuid_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
+ JSBool uuid_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
Convertor c( cx );
-
- if( argc == 0 ) {
+
+ if( argc == 0 ) {
#if defined(HAVE_UUID)
//uuids::uuid
#else
#endif
JS_ReportError( cx , "UUID needs 1 argument -- UUID(hexstr)" );
- return JS_FALSE;
+ return JS_FALSE;
}
else if ( argc == 1 ) {
string encoded = c.toString( argv[ 0 ] );
- if( encoded.size() != 32 ) {
- JS_ReportError( cx, "expect 32 char hex string to UUID()" );
- return JS_FALSE;
- }
+ if( encoded.size() != 32 ) {
+ JS_ReportError( cx, "expect 32 char hex string to UUID()" );
+ return JS_FALSE;
+ }
- char buf[16];
- for( int i = 0; i < 16; i++ ) {
- buf[i] = fromHex(encoded.c_str() + i * 2);
- }
+ char buf[16];
+ for( int i = 0; i < 16; i++ ) {
+ buf[i] = fromHex(encoded.c_str() + i * 2);
+ }
assert( JS_SetPrivate( cx, obj, new BinDataHolder( buf, 16 ) ) );
c.setProperty( obj, "len", c.toval( (double)16 ) );
@@ -632,11 +633,11 @@ namespace mongo {
}
else {
JS_ReportError( cx , "UUID needs 1 argument -- UUID(hexstr)" );
- return JS_FALSE;
+ return JS_FALSE;
}
}
-
- JSBool uuid_tostring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+
+ JSBool uuid_tostring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
Convertor c(cx);
void *holder = JS_GetPrivate( cx, obj );
assert( holder );
@@ -648,15 +649,15 @@ namespace mongo {
return *rval = c.toval( ret.c_str() );
}
- void uuid_finalize( JSContext * cx , JSObject * obj ){
+ void uuid_finalize( JSContext * cx , JSObject * obj ) {
Convertor c(cx);
void *holder = JS_GetPrivate( cx, obj );
- if ( holder ){
+ if ( holder ) {
delete ( BinDataHolder* )holder;
assert( JS_SetPrivate( cx , obj , 0 ) );
}
- }
-
+ }
+
JSClass uuid_class = {
"UUID" , JSCLASS_HAS_PRIVATE ,
JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
@@ -668,25 +669,25 @@ namespace mongo {
{ "toString" , uuid_tostring , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
{ 0 }
};
-
+
// BinData **************************
- JSBool bindata_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
+ JSBool bindata_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
Convertor c( cx );
-
- if ( argc == 2 ){
+
+ if ( argc == 2 ) {
int type = (int)c.toNumber( argv[ 0 ] );
- if( type < 0 || type > 255 ) {
+ if( type < 0 || type > 255 ) {
JS_ReportError( cx , "invalid BinData subtype -- range is 0..255 see bsonspec.org" );
- return JS_FALSE;
+ return JS_FALSE;
}
string encoded = c.toString( argv[ 1 ] );
string decoded;
try {
decoded = base64::decode( encoded );
}
- catch(...) {
+ catch(...) {
JS_ReportError(cx, "BinData could not decode base64 parameter");
return JS_FALSE;
}
@@ -699,11 +700,11 @@ namespace mongo {
}
else {
JS_ReportError( cx , "BinData needs 2 arguments -- BinData(subtype,data)" );
- return JS_FALSE;
+ return JS_FALSE;
}
}
-
- JSBool bindata_tostring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+
+ JSBool bindata_tostring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
Convertor c(cx);
int type = (int)c.getNumber( obj , "type" );
int len = (int)c.getNumber( obj, "len" );
@@ -718,7 +719,7 @@ namespace mongo {
return *rval = c.toval( ret.c_str() );
}
- JSBool bindataBase64(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+ JSBool bindataBase64(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
Convertor c(cx);
int len = (int)c.getNumber( obj, "len" );
void *holder = JS_GetPrivate( cx, obj );
@@ -730,7 +731,7 @@ namespace mongo {
return *rval = c.toval( ret.c_str() );
}
- JSBool bindataAsHex(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+ JSBool bindataAsHex(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
Convertor c(cx);
int len = (int)c.getNumber( obj, "len" );
void *holder = JS_GetPrivate( cx, obj );
@@ -746,27 +747,27 @@ namespace mongo {
return *rval = c.toval( ret.c_str() );
}
- JSBool bindataLength(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+ JSBool bindataLength(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
Convertor c(cx);
int len = (int)c.getNumber( obj, "len" );
return *rval = c.toval((double) len);
}
- JSBool bindataSubtype(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+ JSBool bindataSubtype(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
Convertor c(cx);
int t = (int)c.getNumber( obj, "type" );
return *rval = c.toval((double) t);
}
- void bindata_finalize( JSContext * cx , JSObject * obj ){
+ void bindata_finalize( JSContext * cx , JSObject * obj ) {
Convertor c(cx);
void *holder = JS_GetPrivate( cx, obj );
- if ( holder ){
+ if ( holder ) {
delete ( BinDataHolder* )holder;
assert( JS_SetPrivate( cx , obj , 0 ) );
}
- }
-
+ }
+
JSClass bindata_class = {
"BinData" , JSCLASS_HAS_PRIVATE ,
JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
@@ -782,15 +783,15 @@ namespace mongo {
{ "subtype", bindataSubtype, 0, JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
{ 0 }
};
-
+
// Map
- bool specialMapString( const string& s ){
+ bool specialMapString( const string& s ) {
return s == "put" || s == "get" || s == "_get" || s == "values" || s == "_data" || s == "constructor" ;
}
- JSBool map_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
- if ( argc > 0 ){
+ JSBool map_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ if ( argc > 0 ) {
JS_ReportError( cx , "Map takes no arguments" );
return JS_FALSE;
}
@@ -803,28 +804,28 @@ namespace mongo {
return JS_TRUE;
}
-
- JSBool map_prop( JSContext *cx, JSObject *obj, jsval idval, jsval *vp ){
+
+ JSBool map_prop( JSContext *cx, JSObject *obj, jsval idval, jsval *vp ) {
Convertor c(cx);
if ( specialMapString( c.toString( idval ) ) )
return JS_TRUE;
-
+
log() << "illegal prop access: " << c.toString( idval ) << endl;
JS_ReportError( cx , "can't use array access with Map" );
return JS_FALSE;
}
-
+
JSClass map_class = {
"Map" , JSCLASS_HAS_PRIVATE ,
map_prop, JS_PropertyStub, map_prop, map_prop,
JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, JS_FinalizeStub,
JSCLASS_NO_OPTIONAL_MEMBERS
};
-
+
JSFunctionSpec map_functions[] = {
{ 0 }
};
-
+
// -----
@@ -835,10 +836,10 @@ namespace mongo {
JSCLASS_NO_OPTIONAL_MEMBERS
};
- JSBool timestamp_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
+ JSBool timestamp_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
smuassert( cx , "Timestamp needs 0 or 2 args" , argc == 0 || argc == 2 );
-
- if ( ! JS_InstanceOf( cx , obj , &timestamp_class , 0 ) ){
+
+ if ( ! JS_InstanceOf( cx , obj , &timestamp_class , 0 ) ) {
obj = JS_NewObject( cx , &timestamp_class , 0 , 0 );
CHECKNEWOBJECT( obj, cx, "timestamp_constructor" );
*rval = OBJECT_TO_JSVAL( obj );
@@ -848,27 +849,27 @@ namespace mongo {
if ( argc == 0 ) {
c.setProperty( obj, "t", c.toval( 0.0 ) );
c.setProperty( obj, "i", c.toval( 0.0 ) );
- }
+ }
else {
c.setProperty( obj, "t", argv[ 0 ] );
c.setProperty( obj, "i", argv[ 1 ] );
- }
-
+ }
+
return JS_TRUE;
}
-
+
JSClass numberlong_class = {
"NumberLong" , JSCLASS_HAS_PRIVATE ,
JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, JS_FinalizeStub,
JSCLASS_NO_OPTIONAL_MEMBERS
};
-
- JSBool numberlong_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
+
+ JSBool numberlong_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
smuassert( cx , "NumberLong needs 0 or 1 args" , argc == 0 || argc == 1 );
-
- if ( ! JS_InstanceOf( cx , obj , &numberlong_class , 0 ) ){
+
+ if ( ! JS_InstanceOf( cx , obj , &numberlong_class , 0 ) ) {
obj = JS_NewObject( cx , &numberlong_class , 0 , 0 );
CHECKNEWOBJECT( obj, cx, "numberlong_constructor" );
*rval = OBJECT_TO_JSVAL( obj );
@@ -877,9 +878,11 @@ namespace mongo {
Convertor c( cx );
if ( argc == 0 ) {
c.setProperty( obj, "floatApprox", c.toval( 0.0 ) );
- } else if ( JSVAL_IS_NUMBER( argv[ 0 ] ) ) {
+ }
+ else if ( JSVAL_IS_NUMBER( argv[ 0 ] ) ) {
c.setProperty( obj, "floatApprox", argv[ 0 ] );
- } else {
+ }
+ else {
string num = c.toString( argv[ 0 ] );
//PRINT(num);
const char *numStr = num.c_str();
@@ -887,25 +890,26 @@ namespace mongo {
try {
n = parseLL( numStr );
//PRINT(n);
- } catch ( const AssertionException & ) {
- smuassert( cx , "could not convert string to long long" , false );
+ }
+ catch ( const AssertionException & ) {
+ smuassert( cx , "could not convert string to long long" , false );
}
c.makeLongObj( n, obj );
}
-
+
return JS_TRUE;
}
-
- JSBool numberlong_valueof(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+
+ JSBool numberlong_valueof(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
Convertor c(cx);
- return *rval = c.toval( double( c.toNumberLongUnsafe( obj ) ) );
+ return *rval = c.toval( double( c.toNumberLongUnsafe( obj ) ) );
}
-
- JSBool numberlong_tonumber(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+
+ JSBool numberlong_tonumber(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
return numberlong_valueof( cx, obj, argc, argv, rval );
}
- JSBool numberlong_tostring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval){
+ JSBool numberlong_tostring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
Convertor c(cx);
stringstream ss;
long long val = c.toNumberLongUnsafe( obj );
@@ -921,12 +925,12 @@ namespace mongo {
}
JSFunctionSpec numberlong_functions[] = {
- { "valueOf" , numberlong_valueof , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
- { "toNumber" , numberlong_tonumber , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
- { "toString" , numberlong_tostring , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
- { 0 }
- };
-
+ { "valueOf" , numberlong_valueof , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "toNumber" , numberlong_tonumber , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "toString" , numberlong_tostring , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { 0 }
+ };
+
JSClass minkey_class = {
"MinKey" , JSCLASS_HAS_PRIVATE ,
JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
@@ -940,12 +944,12 @@ namespace mongo {
JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, JS_FinalizeStub,
JSCLASS_NO_OPTIONAL_MEMBERS
};
-
+
// dbquery
- JSBool dbquery_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ){
+ JSBool dbquery_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
smuassert( cx , "DDQuery needs at least 4 args" , argc >= 4 );
-
+
Convertor c(cx);
c.setProperty( obj , "_mongo" , argv[0] );
c.setProperty( obj , "_db" , argv[1] );
@@ -959,28 +963,28 @@ namespace mongo {
CHECKNEWOBJECT( temp, cx, "dbquery_constructor" );
c.setProperty( obj , "_query" , OBJECT_TO_JSVAL( temp ) );
}
-
+
if ( argc > 5 && JSVAL_IS_OBJECT( argv[5] ) )
c.setProperty( obj , "_fields" , argv[5] );
else
c.setProperty( obj , "_fields" , JSVAL_NULL );
-
-
+
+
if ( argc > 6 && JSVAL_IS_NUMBER( argv[6] ) )
c.setProperty( obj , "_limit" , argv[6] );
- else
+ else
c.setProperty( obj , "_limit" , JSVAL_ZERO );
-
+
if ( argc > 7 && JSVAL_IS_NUMBER( argv[7] ) )
c.setProperty( obj , "_skip" , argv[7] );
- else
+ else
c.setProperty( obj , "_skip" , JSVAL_ZERO );
if ( argc > 8 && JSVAL_IS_NUMBER( argv[8] ) )
c.setProperty( obj , "_batchSize" , argv[8] );
- else
+ else
c.setProperty( obj , "_batchSize" , JSVAL_ZERO );
-
+
c.setProperty( obj , "_cursor" , JSVAL_NULL );
c.setProperty( obj , "_numReturned" , JSVAL_ZERO );
c.setProperty( obj , "_special" , JSVAL_FALSE );
@@ -988,7 +992,7 @@ namespace mongo {
return JS_TRUE;
}
- JSBool dbquery_resolve( JSContext *cx, JSObject *obj, jsval id, uintN flags, JSObject **objp ){
+ JSBool dbquery_resolve( JSContext *cx, JSObject *obj, jsval id, uintN flags, JSObject **objp ) {
if ( flags & JSRESOLVE_ASSIGNING )
return JS_TRUE;
@@ -1009,13 +1013,13 @@ namespace mongo {
JS_EnumerateStub, (JSResolveOp)(&dbquery_resolve) , JS_ConvertStub, JS_FinalizeStub,
JSCLASS_NO_OPTIONAL_MEMBERS
};
-
+
// ---- other stuff ----
-
- void initMongoJS( SMScope * scope , JSContext * cx , JSObject * global , bool local ){
+
+ void initMongoJS( SMScope * scope , JSContext * cx , JSObject * global , bool local ) {
assert( JS_InitClass( cx , global , 0 , &mongo_class , local ? mongo_local_constructor : mongo_external_constructor , 0 , 0 , mongo_functions , 0 , 0 ) );
-
+
assert( JS_InitClass( cx , global , 0 , &object_id_class , object_id_constructor , 0 , 0 , object_id_functions , 0 , 0 ) );
assert( JS_InitClass( cx , global , 0 , &db_class , db_constructor , 2 , 0 , 0 , 0 , 0 ) );
assert( JS_InitClass( cx , global , 0 , &db_collection_class , db_collection_constructor , 4 , 0 , 0 , 0 , 0 ) );
@@ -1031,78 +1035,78 @@ namespace mongo {
assert( JS_InitClass( cx , global , 0 , &maxkey_class , 0 , 0 , 0 , 0 , 0 , 0 ) );
assert( JS_InitClass( cx , global , 0 , &map_class , map_constructor , 0 , 0 , map_functions , 0 , 0 ) );
-
+
assert( JS_InitClass( cx , global , 0 , &bson_ro_class , bson_cons , 0 , 0 , bson_functions , 0 , 0 ) );
assert( JS_InitClass( cx , global , 0 , &bson_class , bson_cons , 0 , 0 , bson_functions , 0 , 0 ) );
-
+
static const char *dbrefName = "DBRef";
dbref_class.name = dbrefName;
assert( JS_InitClass( cx , global , 0 , &dbref_class , dbref_constructor , 2 , 0 , bson_functions , 0 , 0 ) );
-
+
scope->execCoreFiles();
}
- bool appendSpecialDBObject( Convertor * c , BSONObjBuilder& b , const string& name , jsval val , JSObject * o ){
-
- if ( JS_InstanceOf( c->_context , o , &object_id_class , 0 ) ){
+ bool appendSpecialDBObject( Convertor * c , BSONObjBuilder& b , const string& name , jsval val , JSObject * o ) {
+
+ if ( JS_InstanceOf( c->_context , o , &object_id_class , 0 ) ) {
OID oid;
oid.init( c->getString( o , "str" ) );
b.append( name , oid );
return true;
}
- if ( JS_InstanceOf( c->_context , o , &minkey_class , 0 ) ){
+ if ( JS_InstanceOf( c->_context , o , &minkey_class , 0 ) ) {
b.appendMinKey( name );
return true;
}
- if ( JS_InstanceOf( c->_context , o , &maxkey_class , 0 ) ){
+ if ( JS_InstanceOf( c->_context , o , &maxkey_class , 0 ) ) {
b.appendMaxKey( name );
return true;
}
-
- if ( JS_InstanceOf( c->_context , o , &timestamp_class , 0 ) ){
+
+ if ( JS_InstanceOf( c->_context , o , &timestamp_class , 0 ) ) {
b.appendTimestamp( name , (unsigned long long)c->getNumber( o , "t" ) , (unsigned int )c->getNumber( o , "i" ) );
return true;
}
- if ( JS_InstanceOf( c->_context , o , &numberlong_class , 0 ) ){
+ if ( JS_InstanceOf( c->_context , o , &numberlong_class , 0 ) ) {
b.append( name , c->toNumberLongUnsafe( o ) );
return true;
}
-
- if ( JS_InstanceOf( c->_context , o , &dbpointer_class , 0 ) ){
+
+ if ( JS_InstanceOf( c->_context , o , &dbpointer_class , 0 ) ) {
b.appendDBRef( name , c->getString( o , "ns" ) , c->toOID( c->getProperty( o , "id" ) ) );
return true;
}
-
- if ( JS_InstanceOf( c->_context , o , &bindata_class , 0 ) ){
+
+ if ( JS_InstanceOf( c->_context , o , &bindata_class , 0 ) ) {
void *holder = JS_GetPrivate( c->_context , o );
const char *data = ( ( BinDataHolder * )( holder ) )->c_;
- b.appendBinData( name ,
- (int)(c->getNumber( o , "len" )) , (BinDataType)((char)(c->getNumber( o , "type" ) ) ) ,
+ b.appendBinData( name ,
+ (int)(c->getNumber( o , "len" )) , (BinDataType)((char)(c->getNumber( o , "type" ) ) ) ,
data
- );
+ );
return true;
}
-
+
#if defined( SM16 ) || defined( MOZJS )
#warning dates do not work in your version of spider monkey
{
jsdouble d = js_DateGetMsecSinceEpoch( c->_context , o );
- if ( d ){
+ if ( d ) {
b.appendDate( name , Date_t(d) );
return true;
}
}
#elif defined( XULRUNNER )
- if ( JS_InstanceOf( c->_context , o, globalSMEngine->_dateClass , 0 ) ){
+ if ( JS_InstanceOf( c->_context , o, globalSMEngine->_dateClass , 0 ) ) {
jsdouble d = js_DateGetMsecSinceEpoch( c->_context , o );
b.appendDate( name , Date_t(d) );
return true;
}
#else
- if ( JS_InstanceOf( c->_context , o, &js_DateClass , 0 ) ){
+ if ( JS_InstanceOf( c->_context , o, &js_DateClass , 0 ) ) {
jsdouble d = js_DateGetMsecSinceEpoch( c->_context , o );
//TODO: make signed
b.appendDate( name , Date_t((unsigned long long)d) );
@@ -1110,35 +1114,35 @@ namespace mongo {
}
#endif
-
+
if ( JS_InstanceOf( c->_context , o , &dbquery_class , 0 ) ||
- JS_InstanceOf( c->_context , o , &mongo_class , 0 ) ||
- JS_InstanceOf( c->_context , o , &db_collection_class , 0 ) ){
+ JS_InstanceOf( c->_context , o , &mongo_class , 0 ) ||
+ JS_InstanceOf( c->_context , o , &db_collection_class , 0 ) ) {
b.append( name , c->toString( val ) );
return true;
}
-#if defined( XULRUNNER )
- if ( JS_InstanceOf( c->_context , o , globalSMEngine->_regexClass , 0 ) ){
+#if defined( XULRUNNER )
+ if ( JS_InstanceOf( c->_context , o , globalSMEngine->_regexClass , 0 ) ) {
c->appendRegex( b , name , c->toString( val ) );
return true;
}
-#elif defined( SM18 )
- if ( JS_InstanceOf( c->_context , o , &js_RegExpClass , 0 ) ){
+#elif defined( SM18 )
+ if ( JS_InstanceOf( c->_context , o , &js_RegExpClass , 0 ) ) {
c->appendRegex( b , name , c->toString( val ) );
return true;
}
#endif
-
+
return false;
}
- bool isDate( JSContext * cx , JSObject * o ){
+ bool isDate( JSContext * cx , JSObject * o ) {
#if defined( SM16 ) || defined( MOZJS ) || defined( XULRUNNER )
return js_DateGetMsecSinceEpoch( cx , o ) != 0;
#else
return JS_InstanceOf( cx , o, &js_DateClass, 0 );
#endif
}
-
+
}
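
One idiom recurs through the sm_db.cpp hunks above for cursors, connections, UUIDs, and BinData alike: a heap-allocated holder parked in the JSObject's private slot and reclaimed by the class finalizer. Reduced to a generic sketch -- the Holder type is invented, while every JSAPI name is taken from the code above:

    #include "jsapi.h"

    struct Holder { int data; };        // stand-in for CursorHolder / BinDataHolder

    static void holder_finalize( JSContext * cx , JSObject * obj ) {
        Holder * h = (Holder*)JS_GetPrivate( cx , obj );
        if ( h ) {                      // finalizer may run before a holder is set
            delete h;
            JS_SetPrivate( cx , obj , 0 );
        }
    }

    static JSClass holder_class = {
        "Holder" , JSCLASS_HAS_PRIVATE ,
        JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
        JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, holder_finalize,
        JSCLASS_NO_OPTIONAL_MEMBERS
    };
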
diff --git a/scripting/utils.cpp b/scripting/utils.cpp
index de41d50c6ef..97eea103bc2 100644
--- a/scripting/utils.cpp
+++ b/scripting/utils.cpp
@@ -25,20 +25,20 @@ namespace mongo {
void installBenchmarkSystem( Scope& scope );
- BSONObj jsmd5( const BSONObj &a ){
+ BSONObj jsmd5( const BSONObj &a ) {
uassert( 10261 , "js md5 needs a string" , a.firstElement().type() == String );
const char * s = a.firstElement().valuestrsafe();
-
+
md5digest d;
md5_state_t st;
md5_init(&st);
md5_append( &st , (const md5_byte_t*)s , strlen( s ) );
md5_finish(&st, d);
-
+
return BSON( "" << digestToString( d ) );
}
-
- BSONObj JSVersion( const BSONObj& args ){
+
+ BSONObj JSVersion( const BSONObj& args ) {
cout << "version: " << versionString << endl;
if ( strstr( versionString , "+" ) )
printGitVersion();
@@ -50,7 +50,7 @@ namespace mongo {
// ---- installer --------
// ---------------------------------
- void installGlobalUtils( Scope& scope ){
+ void installGlobalUtils( Scope& scope ) {
scope.injectNative( "hex_md5" , jsmd5 );
scope.injectNative( "version" , JSVersion );
@@ -58,5 +58,5 @@ namespace mongo {
}
}
-
+
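
installGlobalUtils() above is the whole native-function hook: any function from BSONObj to BSONObj can be exposed to scripts by name. A sketch of registering one more helper the same way -- jsEcho and installMoreUtils are hypothetical, while injectNative, BSON, and valuestrsafe all appear in this file:

    namespace mongo {
        // hypothetical native helper: echo the first argument back as a string
        BSONObj jsEcho( const BSONObj &a ) {
            return BSON( "" << a.firstElement().valuestrsafe() );
        }

        void installMoreUtils( Scope& scope ) {
            scope.injectNative( "echo" , jsEcho );  // same hook as hex_md5/version
        }
    }
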
diff --git a/scripting/v8_db.cpp b/scripting/v8_db.cpp
index 2e9c70e0cc8..4b802c19526 100644
--- a/scripting/v8_db.cpp
+++ b/scripting/v8_db.cpp
@@ -31,15 +31,16 @@ namespace mongo {
#define DDD(x)
- v8::Handle<v8::FunctionTemplate> getMongoFunctionTemplate( bool local ){
+ v8::Handle<v8::FunctionTemplate> getMongoFunctionTemplate( bool local ) {
v8::Local<v8::FunctionTemplate> mongo;
if ( local ) {
mongo = newV8Function< mongoConsLocal >();
- } else {
+ }
+ else {
mongo = newV8Function< mongoConsExternal >();
}
mongo->InstanceTemplate()->SetInternalFieldCount( 1 );
-
+
v8::Local<v8::Template> proto = mongo->PrototypeTemplate();
proto->Set( v8::String::New( "find" ) , newV8Function< mongoFind >() );
@@ -53,7 +54,7 @@ namespace mongo {
ic->PrototypeTemplate()->Set( v8::String::New("hasNext") , newV8Function< internalCursorHasNext >() );
ic->PrototypeTemplate()->Set( v8::String::New("objsLeftInBatch") , newV8Function< internalCursorObjsLeftInBatch >() );
proto->Set( v8::String::New( "internalCursor" ) , ic );
-
+
return mongo;
@@ -62,38 +63,38 @@ namespace mongo {
v8::Handle<v8::FunctionTemplate> getNumberLongFunctionTemplate() {
v8::Local<v8::FunctionTemplate> numberLong = newV8Function< numberLongInit >();
v8::Local<v8::Template> proto = numberLong->PrototypeTemplate();
-
- proto->Set( v8::String::New( "valueOf" ) , newV8Function< numberLongValueOf >() );
- proto->Set( v8::String::New( "toNumber" ) , newV8Function< numberLongToNumber >() );
+
+ proto->Set( v8::String::New( "valueOf" ) , newV8Function< numberLongValueOf >() );
+ proto->Set( v8::String::New( "toNumber" ) , newV8Function< numberLongToNumber >() );
proto->Set( v8::String::New( "toString" ) , newV8Function< numberLongToString >() );
-
+
return numberLong;
}
v8::Handle<v8::FunctionTemplate> getBinDataFunctionTemplate() {
v8::Local<v8::FunctionTemplate> binData = newV8Function< binDataInit >();
v8::Local<v8::Template> proto = binData->PrototypeTemplate();
-
- proto->Set( v8::String::New( "toString" ) , newV8Function< binDataToString >() );
-
+
+ proto->Set( v8::String::New( "toString" ) , newV8Function< binDataToString >() );
+
return binData;
- }
+ }
v8::Handle<v8::FunctionTemplate> getTimestampFunctionTemplate() {
v8::Local<v8::FunctionTemplate> ts = newV8Function< dbTimestampInit >();
v8::Local<v8::Template> proto = ts->PrototypeTemplate();
-
+
ts->InstanceTemplate()->SetInternalFieldCount( 1 );
-
+
return ts;
-    }
-
-    void installDBTypes( Handle<ObjectTemplate>& global ){
+    }
+
+    void installDBTypes( Handle<ObjectTemplate>& global ) {
v8::Local<v8::FunctionTemplate> db = newV8Function< dbInit >();
db->InstanceTemplate()->SetNamedPropertyHandler( collectionFallback );
global->Set(v8::String::New("DB") , db );
-
+
v8::Local<v8::FunctionTemplate> dbCollection = newV8Function< collectionInit >();
dbCollection->InstanceTemplate()->SetNamedPropertyHandler( collectionFallback );
global->Set(v8::String::New("DBCollection") , dbCollection );
@@ -116,11 +117,11 @@ namespace mongo {
global->Set( v8::String::New("Timestamp") , getTimestampFunctionTemplate() );
}
- void installDBTypes( Handle<v8::Object>& global ){
+ void installDBTypes( Handle<v8::Object>& global ) {
v8::Local<v8::FunctionTemplate> db = newV8Function< dbInit >();
db->InstanceTemplate()->SetNamedPropertyHandler( collectionFallback );
global->Set(v8::String::New("DB") , db->GetFunction() );
-
+
v8::Local<v8::FunctionTemplate> dbCollection = newV8Function< collectionInit >();
dbCollection->InstanceTemplate()->SetNamedPropertyHandler( collectionFallback );
global->Set(v8::String::New("DBCollection") , dbCollection->GetFunction() );
@@ -133,7 +134,7 @@ namespace mongo {
global->Set( v8::String::New("ObjectId") , newV8Function< objectIdInit >()->GetFunction() );
global->Set( v8::String::New("DBRef") , newV8Function< dbRefInit >()->GetFunction() );
-
+
global->Set( v8::String::New("DBPointer") , newV8Function< dbPointerInit >()->GetFunction() );
global->Set( v8::String::New("BinData") , getBinDataFunctionTemplate()->GetFunction() );
@@ -149,21 +150,21 @@ namespace mongo {
BSONObjIterator i( o );
global->Set( v8::String::New("MaxKey"), mongoToV8Element( i.next() ) );
global->Set( v8::String::New("MinKey"), mongoToV8Element( i.next() ) );
-
+
global->Get( v8::String::New( "Object" ) )->ToObject()->Set( v8::String::New("bsonsize") , newV8Function< bsonsize >()->GetFunction() );
}
- void destroyConnection( Persistent<Value> self, void* parameter){
+ void destroyConnection( Persistent<Value> self, void* parameter) {
delete static_cast<DBClientBase*>(parameter);
self.Dispose();
self.Clear();
}
- Handle<Value> mongoConsExternal(const Arguments& args){
+ Handle<Value> mongoConsExternal(const Arguments& args) {
char host[255];
-
- if ( args.Length() > 0 && args[0]->IsString() ){
+
+ if ( args.Length() > 0 && args[0]->IsString() ) {
assert( args[0]->ToString()->Utf8Length() < 250 );
args[0]->ToString()->WriteAscii( host );
}
@@ -175,8 +176,8 @@ namespace mongo {
ConnectionString cs = ConnectionString::parse( host , errmsg );
if ( ! cs.isValid() )
return v8::ThrowException( v8::String::New( errmsg.c_str() ) );
-
-
+
+
DBClientWithCommands * conn;
{
V8Unlock ul;
@@ -184,7 +185,7 @@ namespace mongo {
}
if ( ! conn )
return v8::ThrowException( v8::String::New( errmsg.c_str() ) );
-
+
Persistent<v8::Object> self = Persistent<v8::Object>::New( args.Holder() );
self.MakeWeak( conn , destroyConnection );
@@ -196,12 +197,12 @@ namespace mongo {
args.This()->SetInternalField( 0 , External::New( conn ) );
args.This()->Set( v8::String::New( "slaveOk" ) , Boolean::New( false ) );
args.This()->Set( v8::String::New( "host" ) , v8::String::New( host ) );
-
+
return v8::Undefined();
}
- Handle<Value> mongoConsLocal(const Arguments& args){
-
+ Handle<Value> mongoConsLocal(const Arguments& args) {
+
if ( args.Length() > 0 )
return v8::ThrowException( v8::String::New( "local Mongo constructor takes no args" ) );
@@ -218,7 +219,7 @@ namespace mongo {
args.This()->SetInternalField( 0 , External::New( conn ) );
args.This()->Set( v8::String::New( "slaveOk" ) , Boolean::New( false ) );
args.This()->Set( v8::String::New( "host" ) , v8::String::New( "EMBEDDED" ) );
-
+
return v8::Undefined();
}
@@ -226,12 +227,12 @@ namespace mongo {
// ---
#ifdef _WIN32
-#define GETNS char * ns = new char[args[0]->ToString()->Utf8Length()]; args[0]->ToString()->WriteUtf8( ns );
+#define GETNS char * ns = new char[args[0]->ToString()->Utf8Length()]; args[0]->ToString()->WriteUtf8( ns );
#else
-#define GETNS char ns[args[0]->ToString()->Utf8Length()]; args[0]->ToString()->WriteUtf8( ns );
+#define GETNS char ns[args[0]->ToString()->Utf8Length()]; args[0]->ToString()->WriteUtf8( ns );
#endif
- DBClientBase * getConnection( const Arguments& args ){
+ DBClientBase * getConnection( const Arguments& args ) {
Local<External> c = External::Cast( *(args.This()->GetInternalField( 0 )) );
DBClientBase * conn = (DBClientBase*)(c->Value());
assert( conn );
@@ -240,7 +241,7 @@ namespace mongo {
// ---- real methods
- void destroyCursor( Persistent<Value> self, void* parameter){
+ void destroyCursor( Persistent<Value> self, void* parameter) {
delete static_cast<mongo::DBClientCursor*>(parameter);
self.Dispose();
self.Clear();
@@ -253,7 +254,7 @@ namespace mongo {
3 - limit
4 - skip
*/
- Handle<Value> mongoFind(const Arguments& args){
+ Handle<Value> mongoFind(const Arguments& args) {
HandleScope handle_scope;
jsassert( args.Length() == 6 , "find needs 6 args" );
@@ -263,17 +264,17 @@ namespace mongo {
BSONObj q = v8ToMongo( args[1]->ToObject() );
DDD( "query:" << q );
-
+
BSONObj fields;
bool haveFields = args[2]->IsObject() && args[2]->ToObject()->GetPropertyNames()->Length() > 0;
if ( haveFields )
fields = v8ToMongo( args[2]->ToObject() );
-
+
Local<v8::Object> mongo = args.This();
Local<v8::Value> slaveOkVal = mongo->Get( v8::String::New( "slaveOk" ) );
jsassert( slaveOkVal->IsBoolean(), "slaveOk member invalid" );
bool slaveOk = slaveOkVal->BooleanValue();
-
+
try {
auto_ptr<mongo::DBClientCursor> cursor;
int nToReturn = (int)(args[3]->ToNumber()->Value());
@@ -285,28 +286,28 @@ namespace mongo {
}
v8::Function * cons = (v8::Function*)( *( mongo->Get( v8::String::New( "internalCursor" ) ) ) );
assert( cons );
-
+
Persistent<v8::Object> c = Persistent<v8::Object>::New( cons->NewInstance() );
c.MakeWeak( cursor.get() , destroyCursor );
-
+
c->SetInternalField( 0 , External::New( cursor.release() ) );
return handle_scope.Close(c);
}
- catch ( ... ){
- return v8::ThrowException( v8::String::New( "socket error on query" ) );
+ catch ( ... ) {
+ return v8::ThrowException( v8::String::New( "socket error on query" ) );
}
}
- v8::Handle<v8::Value> mongoInsert(const v8::Arguments& args){
+ v8::Handle<v8::Value> mongoInsert(const v8::Arguments& args) {
jsassert( args.Length() == 2 , "insert needs 2 args" );
jsassert( args[1]->IsObject() , "have to insert an object" );
-
+
DBClientBase * conn = getConnection( args );
GETNS;
-
+
v8::Handle<v8::Object> in = args[1]->ToObject();
-
- if ( ! in->Has( v8::String::New( "_id" ) ) ){
+
+ if ( ! in->Has( v8::String::New( "_id" ) ) ) {
v8::Handle<v8::Value> argv[1];
in->Set( v8::String::New( "_id" ) , getObjectIdCons()->NewInstance( 0 , argv ) );
}
@@ -318,25 +319,25 @@ namespace mongo {
V8Unlock u;
conn->insert( ns , o );
}
- catch ( ... ){
+ catch ( ... ) {
return v8::ThrowException( v8::String::New( "socket error on insert" ) );
}
-
+
return v8::Undefined();
}
- v8::Handle<v8::Value> mongoRemove(const v8::Arguments& args){
+ v8::Handle<v8::Value> mongoRemove(const v8::Arguments& args) {
        jsassert( args.Length() == 2 || args.Length() == 3 , "remove needs 2 or 3 args" );
jsassert( args[1]->IsObject() , "have to remove an object template" );
DBClientBase * conn = getConnection( args );
GETNS;
-
+
v8::Handle<v8::Object> in = args[1]->ToObject();
BSONObj o = v8ToMongo( in );
-
+
bool justOne = false;
- if ( args.Length() > 2 ){
+ if ( args.Length() > 2 ) {
justOne = args[2]->BooleanValue();
}
@@ -345,34 +346,34 @@ namespace mongo {
V8Unlock u;
conn->remove( ns , o , justOne );
}
- catch ( ... ){
+ catch ( ... ) {
return v8::ThrowException( v8::String::New( "socket error on remove" ) );
}
return v8::Undefined();
}
- v8::Handle<v8::Value> mongoUpdate(const v8::Arguments& args){
+ v8::Handle<v8::Value> mongoUpdate(const v8::Arguments& args) {
jsassert( args.Length() >= 3 , "update needs at least 3 args" );
jsassert( args[1]->IsObject() , "1st param to update has to be an object" );
jsassert( args[2]->IsObject() , "2nd param to update has to be an object" );
DBClientBase * conn = getConnection( args );
GETNS;
-
+
v8::Handle<v8::Object> q = args[1]->ToObject();
v8::Handle<v8::Object> o = args[2]->ToObject();
-
+
bool upsert = args.Length() > 3 && args[3]->IsBoolean() && args[3]->ToBoolean()->Value();
- bool multi = args.Length() > 4 && args[4]->IsBoolean() && args[4]->ToBoolean()->Value();
-
+ bool multi = args.Length() > 4 && args[4]->IsBoolean() && args[4]->ToBoolean()->Value();
+
try {
BSONObj q1 = v8ToMongo( q );
BSONObj o1 = v8ToMongo( o );
V8Unlock u;
conn->update( ns , q1 , o1 , upsert, multi );
}
- catch ( ... ){
+ catch ( ... ) {
return v8::ThrowException( v8::String::New( "socket error on remove" ) );
}
@@ -384,18 +385,18 @@ namespace mongo {
// --- cursor ---
- mongo::DBClientCursor * getCursor( const Arguments& args ){
+ mongo::DBClientCursor * getCursor( const Arguments& args ) {
Local<External> c = External::Cast( *(args.This()->GetInternalField( 0 ) ) );
mongo::DBClientCursor * cursor = (mongo::DBClientCursor*)(c->Value());
return cursor;
}
- v8::Handle<v8::Value> internalCursorCons(const v8::Arguments& args){
+ v8::Handle<v8::Value> internalCursorCons(const v8::Arguments& args) {
return v8::Undefined();
}
- v8::Handle<v8::Value> internalCursorNext(const v8::Arguments& args){
+ v8::Handle<v8::Value> internalCursorNext(const v8::Arguments& args) {
mongo::DBClientCursor * cursor = getCursor( args );
if ( ! cursor )
return v8::Undefined();
@@ -407,7 +408,7 @@ namespace mongo {
return mongoToV8( o );
}
- v8::Handle<v8::Value> internalCursorHasNext(const v8::Arguments& args){
+ v8::Handle<v8::Value> internalCursorHasNext(const v8::Arguments& args) {
mongo::DBClientCursor * cursor = getCursor( args );
if ( ! cursor )
return Boolean::New( false );
@@ -419,7 +420,7 @@ namespace mongo {
return Boolean::New( ret );
}
- v8::Handle<v8::Value> internalCursorObjsLeftInBatch(const v8::Arguments& args){
+ v8::Handle<v8::Value> internalCursorObjsLeftInBatch(const v8::Arguments& args) {
mongo::DBClientCursor * cursor = getCursor( args );
if ( ! cursor )
return v8::Number::New( (double) 0 );
@@ -434,7 +435,7 @@ namespace mongo {
// --- DB ----
- v8::Handle<v8::Value> dbInit(const v8::Arguments& args){
+ v8::Handle<v8::Value> dbInit(const v8::Arguments& args) {
assert( args.Length() == 2 );
args.This()->Set( v8::String::New( "_mongo" ) , args[0] );
@@ -446,26 +447,26 @@ namespace mongo {
return v8::Undefined();
}
- v8::Handle<v8::Value> collectionInit( const v8::Arguments& args ){
+ v8::Handle<v8::Value> collectionInit( const v8::Arguments& args ) {
assert( args.Length() == 4 );
args.This()->Set( v8::String::New( "_mongo" ) , args[0] );
args.This()->Set( v8::String::New( "_db" ) , args[1] );
args.This()->Set( v8::String::New( "_shortName" ) , args[2] );
args.This()->Set( v8::String::New( "_fullName" ) , args[3] );
-
+
for ( int i=0; i<args.Length(); i++ )
assert( ! args[i]->IsUndefined() );
return v8::Undefined();
}
- v8::Handle<v8::Value> dbQueryInit( const v8::Arguments& args ){
-
+ v8::Handle<v8::Value> dbQueryInit( const v8::Arguments& args ) {
+
v8::Handle<v8::Object> t = args.This();
assert( args.Length() >= 4 );
-
+
t->Set( v8::String::New( "_mongo" ) , args[0] );
t->Set( v8::String::New( "_db" ) , args[1] );
t->Set( v8::String::New( "_collection" ) , args[2] );
@@ -473,46 +474,46 @@ namespace mongo {
if ( args.Length() > 4 && args[4]->IsObject() )
t->Set( v8::String::New( "_query" ) , args[4] );
- else
+ else
t->Set( v8::String::New( "_query" ) , v8::Object::New() );
-
+
if ( args.Length() > 5 && args[5]->IsObject() )
t->Set( v8::String::New( "_fields" ) , args[5] );
else
t->Set( v8::String::New( "_fields" ) , v8::Null() );
-
+
if ( args.Length() > 6 && args[6]->IsNumber() )
t->Set( v8::String::New( "_limit" ) , args[6] );
- else
+ else
t->Set( v8::String::New( "_limit" ) , Number::New( 0 ) );
if ( args.Length() > 7 && args[7]->IsNumber() )
t->Set( v8::String::New( "_skip" ) , args[7] );
- else
+ else
t->Set( v8::String::New( "_skip" ) , Number::New( 0 ) );
if ( args.Length() > 8 && args[8]->IsNumber() )
            t->Set( v8::String::New( "_batchSize" ) , args[8] );
- else
+ else
t->Set( v8::String::New( "_batchSize" ) , Number::New( 0 ) );
-
+
t->Set( v8::String::New( "_cursor" ) , v8::Null() );
t->Set( v8::String::New( "_numReturned" ) , v8::Number::New(0) );
t->Set( v8::String::New( "_special" ) , Boolean::New(false) );
-
+
return v8::Undefined();
}
v8::Handle<v8::Value> collectionFallback( v8::Local<v8::String> name, const v8::AccessorInfo &info) {
DDD( "collectionFallback [" << name << "]" );
-
+
v8::Handle<v8::Value> real = info.This()->GetPrototype()->ToObject()->Get( name );
if ( ! real->IsUndefined() )
return real;
-
+
string sname = toSTLString( name );
- if ( sname[0] == '_' ){
+ if ( sname[0] == '_' ) {
if ( ! ( info.This()->HasRealNamedProperty( name ) ) )
return v8::Undefined();
return info.This()->GetRealNamedPropertyInPrototypeChain( name );
@@ -528,7 +529,7 @@ namespace mongo {
return f->Call( info.This() , 1 , argv );
}
- v8::Handle<v8::Value> dbQueryIndexAccess( unsigned int index , const v8::AccessorInfo& info ){
+ v8::Handle<v8::Value> dbQueryIndexAccess( unsigned int index , const v8::AccessorInfo& info ) {
v8::Handle<v8::Value> arrayAccess = info.This()->GetPrototype()->ToObject()->Get( v8::String::New( "arrayAccess" ) );
assert( arrayAccess->IsFunction() );
@@ -536,35 +537,36 @@ namespace mongo {
v8::Handle<v8::Value> argv[1];
argv[0] = v8::Number::New( index );
- return f->Call( info.This() , 1 , argv );
+ return f->Call( info.This() , 1 , argv );
}
- v8::Handle<v8::Value> objectIdInit( const v8::Arguments& args ){
+ v8::Handle<v8::Value> objectIdInit( const v8::Arguments& args ) {
v8::Handle<v8::Object> it = args.This();
-
- if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ){
+
+ if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
v8::Function * f = getObjectIdCons();
it = f->NewInstance();
}
-
+
OID oid;
-
- if ( args.Length() == 0 ){
+
+ if ( args.Length() == 0 ) {
oid.init();
}
else {
string s = toSTLString( args[0] );
try {
Scope::validateObjectIdString( s );
- } catch ( const MsgAssertionException &m ) {
+ }
+ catch ( const MsgAssertionException &m ) {
string error = m.toString();
return v8::ThrowException( v8::String::New( error.c_str() ) );
- }
+ }
oid.init( s );
- }
+ }
it->Set( v8::String::New( "str" ) , v8::String::New( oid.str().c_str() ) );
-
+
return it;
}
@@ -576,7 +578,7 @@ namespace mongo {
v8::Handle<v8::Object> it = args.This();
- if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ){
+ if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
v8::Function* f = getNamedCons( "DBRef" );
it = f->NewInstance();
}
@@ -590,41 +592,41 @@ namespace mongo {
}
v8::Handle<v8::Value> dbPointerInit( const v8::Arguments& args ) {
-
+
if (args.Length() != 2) {
return v8::ThrowException( v8::String::New( "DBPointer needs 2 arguments" ) );
}
-
+
v8::Handle<v8::Object> it = args.This();
-
- if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ){
+
+ if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
v8::Function* f = getNamedCons( "DBPointer" );
it = f->NewInstance();
}
-
+
it->Set( v8::String::New( "ns" ) , args[0] );
it->Set( v8::String::New( "id" ) , args[1] );
it->SetHiddenValue( v8::String::New( "__DBPointer" ), v8::Number::New( 1 ) );
-
+
return it;
}
v8::Handle<v8::Value> dbTimestampInit( const v8::Arguments& args ) {
v8::Handle<v8::Object> it = args.This();
-
- if ( args.Length() == 0 ){
+
+ if ( args.Length() == 0 ) {
it->Set( v8::String::New( "t" ) , v8::Number::New( 0 ) );
it->Set( v8::String::New( "i" ) , v8::Number::New( 0 ) );
}
- else if ( args.Length() == 2 ){
+ else if ( args.Length() == 2 ) {
it->Set( v8::String::New( "t" ) , args[0] );
it->Set( v8::String::New( "i" ) , args[1] );
}
else {
return v8::ThrowException( v8::String::New( "Timestamp needs 0 or 2 arguments" ) );
}
-
+
it->SetInternalField( 0, v8::Uint32::New( Timestamp ) );
return it;
@@ -633,53 +635,55 @@ namespace mongo {
v8::Handle<v8::Value> binDataInit( const v8::Arguments& args ) {
v8::Handle<v8::Object> it = args.This();
-
+
// 3 args: len, type, data
if (args.Length() == 3) {
-
- if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ){
+
+ if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
v8::Function* f = getNamedCons( "BinData" );
it = f->NewInstance();
}
-
+
it->Set( v8::String::New( "len" ) , args[0] );
it->Set( v8::String::New( "type" ) , args[1] );
it->Set( v8::String::New( "data" ), args[2] );
it->SetHiddenValue( v8::String::New( "__BinData" ), v8::Number::New( 1 ) );
- // 2 args: type, base64 string
- } else if ( args.Length() == 2 ) {
-
- if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ){
+ // 2 args: type, base64 string
+ }
+ else if ( args.Length() == 2 ) {
+
+ if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
v8::Function* f = getNamedCons( "BinData" );
it = f->NewInstance();
}
-
+
v8::String::Utf8Value data( args[ 1 ] );
string decoded = base64::decode( *data );
it->Set( v8::String::New( "len" ) , v8::Number::New( decoded.length() ) );
it->Set( v8::String::New( "type" ) , args[ 0 ] );
it->Set( v8::String::New( "data" ), v8::String::New( decoded.data(), decoded.length() ) );
- it->SetHiddenValue( v8::String::New( "__BinData" ), v8::Number::New( 1 ) );
-
- } else {
+ it->SetHiddenValue( v8::String::New( "__BinData" ), v8::Number::New( 1 ) );
+
+ }
+ else {
            return v8::ThrowException( v8::String::New( "BinData needs 2 or 3 arguments" ) );
}
return it;
}
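        // [editor's note, not part of this changeset] binDataInit accepts two
        // shapes: ( len , type , rawData ) or ( type , base64String ), in which
        // case the payload is base64-decoded first. A minimal sketch of that
        // decode step, using the same helper as above:
        //
        //     string decoded = base64::decode( "SGVsbG8=" ); // "Hello", len == 5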
-
+
v8::Handle<v8::Value> binDataToString( const v8::Arguments& args ) {
-
+
if (args.Length() != 0) {
return v8::ThrowException( v8::String::New( "toString needs 0 arguments" ) );
}
-
+
v8::Handle<v8::Object> it = args.This();
int len = it->Get( v8::String::New( "len" ) )->ToInt32()->Value();
int type = it->Get( v8::String::New( "type" ) )->ToInt32()->Value();
v8::String::Utf8Value data( it->Get( v8::String::New( "data" ) ) );
-
+
stringstream ss;
ss << "BinData(" << type << ",\"";
base64::encode( ss, *data, len );
@@ -689,49 +693,54 @@ namespace mongo {
}
v8::Handle<v8::Value> numberLongInit( const v8::Arguments& args ) {
-
+
if (args.Length() != 0 && args.Length() != 1 && args.Length() != 3) {
return v8::ThrowException( v8::String::New( "NumberLong needs 0, 1 or 3 arguments" ) );
}
-
+
v8::Handle<v8::Object> it = args.This();
-
- if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ){
+
+ if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
v8::Function* f = getNamedCons( "NumberLong" );
it = f->NewInstance();
}
if ( args.Length() == 0 ) {
it->Set( v8::String::New( "floatApprox" ), v8::Number::New( 0 ) );
- } else if ( args.Length() == 1 ) {
+ }
+ else if ( args.Length() == 1 ) {
if ( args[ 0 ]->IsNumber() ) {
- it->Set( v8::String::New( "floatApprox" ), args[ 0 ] );
- } else {
+ it->Set( v8::String::New( "floatApprox" ), args[ 0 ] );
+ }
+ else {
v8::String::Utf8Value data( args[ 0 ] );
string num = *data;
const char *numStr = num.c_str();
long long n;
try {
n = parseLL( numStr );
- } catch ( const AssertionException & ) {
+ }
+ catch ( const AssertionException & ) {
return v8::ThrowException( v8::String::New( "could not convert string to long long" ) );
}
unsigned long long val = n;
if ( (long long)val == (long long)(double)(long long)(val) ) {
it->Set( v8::String::New( "floatApprox" ), v8::Number::New( (double)(long long)( val ) ) );
- } else {
+ }
+ else {
it->Set( v8::String::New( "floatApprox" ), v8::Number::New( (double)(long long)( val ) ) );
it->Set( v8::String::New( "top" ), v8::Integer::New( val >> 32 ) );
it->Set( v8::String::New( "bottom" ), v8::Integer::New( (unsigned long)(val & 0x00000000ffffffff) ) );
- }
+ }
}
- } else {
+ }
+ else {
it->Set( v8::String::New( "floatApprox" ) , args[0] );
it->Set( v8::String::New( "top" ) , args[1] );
it->Set( v8::String::New( "bottom" ) , args[2] );
}
it->SetHiddenValue( v8::String::New( "__NumberLong" ), v8::Number::New( 1 ) );
-
+
return it;
}
@@ -739,21 +748,21 @@ namespace mongo {
if ( !it->Has( v8::String::New( "top" ) ) )
return (long long)( it->Get( v8::String::New( "floatApprox" ) )->NumberValue() );
return
- (long long)
- ( (unsigned long long)( it->Get( v8::String::New( "top" ) )->ToInt32()->Value() ) << 32 ) +
- (unsigned)( it->Get( v8::String::New( "bottom" ) )->ToInt32()->Value() );
+ (long long)
+ ( (unsigned long long)( it->Get( v8::String::New( "top" ) )->ToInt32()->Value() ) << 32 ) +
+ (unsigned)( it->Get( v8::String::New( "bottom" ) )->ToInt32()->Value() );
}
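        // [editor's sketch, not part of this changeset] numberLongVal above
        // reassembles a 64-bit value from the 32-bit "top"/"bottom" halves:
        //
        //     unsigned top = 1, bottom = 2;
        //     long long val = (long long)( ( (unsigned long long)top << 32 ) + bottom );
        //     // val == 4294967298LL, i.e. 1 * 2^32 + 2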
-
+
v8::Handle<v8::Value> numberLongValueOf( const v8::Arguments& args ) {
-
+
if (args.Length() != 0) {
return v8::ThrowException( v8::String::New( "toNumber needs 0 arguments" ) );
}
-
+
v8::Handle<v8::Object> it = args.This();
-
+
long long val = numberLongVal( it );
-
+
return v8::Number::New( double( val ) );
}
@@ -762,13 +771,13 @@ namespace mongo {
}
v8::Handle<v8::Value> numberLongToString( const v8::Arguments& args ) {
-
+
if (args.Length() != 0) {
return v8::ThrowException( v8::String::New( "toString needs 0 arguments" ) );
}
-
+
v8::Handle<v8::Object> it = args.This();
-
+
stringstream ss;
long long val = numberLongVal( it );
const long long limit = 2LL << 30;
@@ -781,16 +790,16 @@ namespace mongo {
string ret = ss.str();
return v8::String::New( ret.c_str() );
}
-
+
v8::Handle<v8::Value> bsonsize( const v8::Arguments& args ) {
-
+
if ( args.Length() != 1 )
            return v8::ThrowException( v8::String::New( "bsonsize needs 1 argument" ) );
if ( args[0]->IsNull() )
return v8::Number::New(0);
- if ( ! args[ 0 ]->IsObject() )
+ if ( ! args[ 0 ]->IsObject() )
            return v8::ThrowException( v8::String::New( "argument to bsonsize has to be an object" ) );
return v8::Number::New( v8ToMongo( args[ 0 ]->ToObject() ).objsize() );
@@ -802,12 +811,12 @@ namespace mongo {
__interruptSpecToThreadId[ globalScriptEngine->getInterruptSpec() ] = v8::V8::GetCurrentThreadId();
}
}
-
+
// to be called with v8 mutex
void disableV8Interrupt() {
if ( globalScriptEngine->haveGetInterruptSpecCallback() ) {
__interruptSpecToThreadId.erase( globalScriptEngine->getInterruptSpec() );
- }
+ }
}
namespace v8Locks {
@@ -827,7 +836,7 @@ namespace mongo {
__locked.set( false );
}
}
-
+
RecursiveUnlock::RecursiveUnlock() : _lock() {
if ( __locked.get() ) {
__v8Mutex.unlock();
diff --git a/scripting/v8_db.h b/scripting/v8_db.h
index b6986ec4952..7dbca92366f 100644
--- a/scripting/v8_db.h
+++ b/scripting/v8_db.h
@@ -28,32 +28,32 @@
namespace mongo {
// These functions may depend on the caller creating a handle scope and context scope.
-
+
v8::Handle<v8::FunctionTemplate> getMongoFunctionTemplate( bool local );
void installDBTypes( v8::Handle<v8::ObjectTemplate>& global );
void installDBTypes( v8::Handle<v8::Object>& global );
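    // [editor's sketch, assuming the v8 2.x Arguments-era API used in these
    // files] the scope requirement above typically means a caller does:
    //
    //     v8::HandleScope handle_scope;
    //     v8::Persistent<v8::Context> context = v8::Context::New();
    //     v8::Context::Scope context_scope( context );
    //     v8::Handle<v8::Object> global = context->Global();
    //     installDBTypes( global ); // safe: both scopes are active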
-
+
// the actual globals
-
+
mongo::DBClientBase * getConnection( const v8::Arguments& args );
// Mongo members
v8::Handle<v8::Value> mongoConsLocal(const v8::Arguments& args);
v8::Handle<v8::Value> mongoConsExternal(const v8::Arguments& args);
-
+
v8::Handle<v8::Value> mongoFind(const v8::Arguments& args);
v8::Handle<v8::Value> mongoInsert(const v8::Arguments& args);
v8::Handle<v8::Value> mongoRemove(const v8::Arguments& args);
v8::Handle<v8::Value> mongoUpdate(const v8::Arguments& args);
-
-
+
+
v8::Handle<v8::Value> internalCursorCons(const v8::Arguments& args);
v8::Handle<v8::Value> internalCursorNext(const v8::Arguments& args);
v8::Handle<v8::Value> internalCursorHasNext(const v8::Arguments& args);
v8::Handle<v8::Value> internalCursorObjsLeftInBatch(const v8::Arguments& args);
-
+
// DB members
-
+
v8::Handle<v8::Value> dbInit(const v8::Arguments& args);
v8::Handle<v8::Value> collectionInit( const v8::Arguments& args );
v8::Handle<v8::Value> objectIdInit( const v8::Arguments& args );
@@ -69,10 +69,10 @@ namespace mongo {
v8::Handle<v8::Value> numberLongToNumber(const v8::Arguments& args);
v8::Handle<v8::Value> numberLongValueOf(const v8::Arguments& args);
v8::Handle<v8::Value> numberLongToString(const v8::Arguments& args);
-
+
v8::Handle<v8::Value> dbQueryInit( const v8::Arguments& args );
v8::Handle<v8::Value> dbQueryIndexAccess( uint32_t index , const v8::AccessorInfo& info );
-
+
v8::Handle<v8::Value> collectionFallback( v8::Local<v8::String> name, const v8::AccessorInfo &info);
v8::Handle<v8::Value> bsonsize( const v8::Arguments& args );
@@ -80,7 +80,7 @@ namespace mongo {
// call with v8 mutex:
void enableV8Interrupt();
void disableV8Interrupt();
-
+
// The implementation below assumes that SERVER-1816 has been fixed - in
// particular, interrupted() must return true if an interrupt was ever
// sent; currently that is not the case if a new killop overwrites the data
@@ -96,9 +96,11 @@ namespace mongo {
string exception;
try {
ret = f( args );
- } catch( const std::exception &e ) {
+ }
+ catch( const std::exception &e ) {
exception = e.what();
- } catch( ... ) {
+ }
+ catch( ... ) {
exception = "unknown exception";
}
enableV8Interrupt();
@@ -108,16 +110,16 @@ namespace mongo {
}
if ( !exception.empty() ) {
// technically, ThrowException is supposed to be the last v8 call before returning
- ret = v8::ThrowException( v8::String::New( exception.c_str() ) );
+ ret = v8::ThrowException( v8::String::New( exception.c_str() ) );
}
return ret;
}
-
+
template < v8::Handle< v8::Value > ( *f ) ( const v8::Arguments& ) >
v8::Local< v8::FunctionTemplate > newV8Function() {
return v8::FunctionTemplate::New( v8Callback< f > );
- }
-
+ }
+
// Preemption is going to be allowed for the v8 mutex, and some of our v8
// usage is not preemption safe. So we are using an additional mutex that
// will not be preempted. The V8Lock should be used in place of v8::Locker
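    // [editor's sketch, not part of this changeset] newV8Function<>() wraps
    // a raw callback in v8Callback<>, which toggles the V8 interrupt around
    // the call and converts C++ exceptions into JS exceptions. With an
    // active handle scope, registration might look like:
    //
    //     v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
    //     global->Set( v8::String::New( "bsonsize" ), newV8Function< bsonsize >() );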
diff --git a/scripting/v8_utils.cpp b/scripting/v8_utils.cpp
index a7c82753b0c..b4b06d78aae 100644
--- a/scripting/v8_utils.cpp
+++ b/scripting/v8_utils.cpp
@@ -39,7 +39,8 @@ namespace mongo {
HandleScope handle_scope;
if (first) {
first = false;
- } else {
+ }
+ else {
printf(" ");
}
v8::String::Utf8Value str(args[i]);
@@ -49,66 +50,66 @@ namespace mongo {
return v8::Undefined();
}
- std::string toSTLString( const Handle<v8::Value> & o ){
- v8::String::Utf8Value str(o);
+ std::string toSTLString( const Handle<v8::Value> & o ) {
+ v8::String::Utf8Value str(o);
const char * foo = *str;
std::string s(foo);
return s;
}
- std::string toSTLString( const v8::TryCatch * try_catch ){
-
+ std::string toSTLString( const v8::TryCatch * try_catch ) {
+
stringstream ss;
-
+
//while ( try_catch ){ // disabled for v8 bleeding edge
-
- v8::String::Utf8Value exception(try_catch->Exception());
- Handle<v8::Message> message = try_catch->Message();
-
- if (message.IsEmpty()) {
- ss << *exception << endl;
- }
- else {
-
- v8::String::Utf8Value filename(message->GetScriptResourceName());
- int linenum = message->GetLineNumber();
- ss << *filename << ":" << linenum << " " << *exception << endl;
-
- v8::String::Utf8Value sourceline(message->GetSourceLine());
- ss << *sourceline << endl;
-
- int start = message->GetStartColumn();
- for (int i = 0; i < start; i++)
- ss << " ";
-
- int end = message->GetEndColumn();
- for (int i = start; i < end; i++)
- ss << "^";
-
- ss << endl;
- }
-
- //try_catch = try_catch->next_;
+
+ v8::String::Utf8Value exception(try_catch->Exception());
+ Handle<v8::Message> message = try_catch->Message();
+
+ if (message.IsEmpty()) {
+ ss << *exception << endl;
+ }
+ else {
+
+ v8::String::Utf8Value filename(message->GetScriptResourceName());
+ int linenum = message->GetLineNumber();
+ ss << *filename << ":" << linenum << " " << *exception << endl;
+
+ v8::String::Utf8Value sourceline(message->GetSourceLine());
+ ss << *sourceline << endl;
+
+ int start = message->GetStartColumn();
+ for (int i = 0; i < start; i++)
+ ss << " ";
+
+ int end = message->GetEndColumn();
+ for (int i = start; i < end; i++)
+ ss << "^";
+
+ ss << endl;
+ }
+
+ //try_catch = try_catch->next_;
//}
-
+
return ss.str();
}
- std::ostream& operator<<( std::ostream &s, const Handle<v8::Value> & o ){
- v8::String::Utf8Value str(o);
+ std::ostream& operator<<( std::ostream &s, const Handle<v8::Value> & o ) {
+ v8::String::Utf8Value str(o);
s << *str;
return s;
}
- std::ostream& operator<<( std::ostream &s, const v8::TryCatch * try_catch ){
+ std::ostream& operator<<( std::ostream &s, const v8::TryCatch * try_catch ) {
HandleScope handle_scope;
v8::String::Utf8Value exception(try_catch->Exception());
Handle<v8::Message> message = try_catch->Message();
-
+
if (message.IsEmpty()) {
s << *exception << endl;
- }
+ }
else {
v8::String::Utf8Value filename(message->GetScriptResourceName());
@@ -127,7 +128,7 @@ namespace mongo {
cout << "^";
cout << endl;
- }
+ }
//if ( try_catch->next_ ) // disabled for v8 bleeding edge
// s << try_catch->next_;
@@ -144,9 +145,9 @@ namespace mongo {
void ReportException(v8::TryCatch* try_catch) {
cout << try_catch << endl;
}
-
+
Handle< Context > baseContext_;
-
+
class JSThreadConfig {
public:
JSThreadConfig( const Arguments &args, bool newScope = false ) : started_(), done_(), newScope_( newScope ) {
@@ -199,7 +200,8 @@ namespace mongo {
string fCode = toSTLString( config_.f_->ToString() );
Context::Scope context_scope( context );
fun = scope->__createFunction( fCode.c_str() );
- } else {
+ }
+ else {
context = baseContext_;
Context::Scope context_scope( context );
fun = config_.f_;
@@ -221,7 +223,7 @@ namespace mongo {
private:
JSThreadConfig &config_;
};
-
+
bool started_;
bool done_;
bool newScope_;
@@ -230,7 +232,7 @@ namespace mongo {
auto_ptr< boost::thread > thread_;
Persistent< Value > returnData_;
};
-
+
Handle< Value > ThreadInit( const Arguments &args ) {
Handle<v8::Object> it = args.This();
// NOTE I believe the passed JSThreadConfig will never be freed. If this
@@ -239,7 +241,7 @@ namespace mongo {
it->SetHiddenValue( v8::String::New( "_JSThreadConfig" ), External::New( new JSThreadConfig( args ) ) );
return v8::Undefined();
}
-
+
Handle< Value > ScopedThreadInit( const Arguments &args ) {
Handle<v8::Object> it = args.This();
// NOTE I believe the passed JSThreadConfig will never be freed. If this
@@ -254,17 +256,17 @@ namespace mongo {
JSThreadConfig *config = (JSThreadConfig *)( c->Value() );
return config;
}
-
+
Handle< Value > ThreadStart( const Arguments &args ) {
thisConfig( args )->start();
return v8::Undefined();
}
-
+
Handle< Value > ThreadJoin( const Arguments &args ) {
thisConfig( args )->join();
return v8::Undefined();
}
-
+
Handle< Value > ThreadReturnData( const Arguments &args ) {
HandleScope handle_scope;
return handle_scope.Close( thisConfig( args )->returnData() );
@@ -273,29 +275,29 @@ namespace mongo {
Handle< Value > ThreadInject( const Arguments &args ) {
jsassert( args.Length() == 1 , "threadInject takes exactly 1 argument" );
jsassert( args[0]->IsObject() , "threadInject needs to be passed a prototype" );
-
+
Local<v8::Object> o = args[0]->ToObject();
-
+
o->Set( v8::String::New( "init" ) , newV8Function< ThreadInit >()->GetFunction() );
o->Set( v8::String::New( "start" ) , newV8Function< ThreadStart >()->GetFunction() );
o->Set( v8::String::New( "join" ) , newV8Function< ThreadJoin >()->GetFunction() );
o->Set( v8::String::New( "returnData" ) , newV8Function< ThreadReturnData >()->GetFunction() );
-
- return v8::Undefined();
+
+ return v8::Undefined();
}
Handle< Value > ScopedThreadInject( const Arguments &args ) {
jsassert( args.Length() == 1 , "threadInject takes exactly 1 argument" );
jsassert( args[0]->IsObject() , "threadInject needs to be passed a prototype" );
-
+
Local<v8::Object> o = args[0]->ToObject();
-
+
o->Set( v8::String::New( "init" ) , newV8Function< ScopedThreadInit >()->GetFunction() );
// inheritance takes care of other member functions
-
+
return v8::Undefined();
}
-
+
void installFork( v8::Handle< v8::Object > &global, v8::Handle< v8::Context > &context ) {
if ( baseContext_.IsEmpty() ) // if this is the shell, first call will be with shell context, otherwise don't expect to use fork() anyway
baseContext_ = context;
diff --git a/scripting/v8_utils.h b/scripting/v8_utils.h
index bc4b5244184..40662d25506 100644
--- a/scripting/v8_utils.h
+++ b/scripting/v8_utils.h
@@ -32,9 +32,9 @@ namespace mongo {
v8::Handle<v8::Value> GCV8(const v8::Arguments& args);
void ReportException(v8::TryCatch* handler);
-
+
#define jsassert(x,msg) assert(x)
-
+
std::ostream& operator<<( std::ostream &s, const v8::Handle<v8::Value> & o );
std::ostream& operator<<( std::ostream &s, const v8::Handle<v8::TryCatch> * try_catch );
diff --git a/scripting/v8_wrapper.cpp b/scripting/v8_wrapper.cpp
index 149da4f6c97..ff67e8cf953 100644
--- a/scripting/v8_wrapper.cpp
+++ b/scripting/v8_wrapper.cpp
@@ -39,17 +39,17 @@ namespace mongo {
cout << "cannot delete from read-only object" << endl;
return Boolean::New( false );
}
-
+
Handle<Value> IndexedReadOnlySet( uint32_t index, Local<Value> value, const AccessorInfo& info ) {
cout << "cannot write to read-only array" << endl;
return value;
}
-
+
Handle<Boolean> IndexedReadOnlyDelete( uint32_t index, const AccessorInfo& info ) {
cout << "cannot delete from read-only array" << endl;
return Boolean::New( false );
}
-
+
Local< v8::Value > newFunction( const char *code ) {
stringstream codeSS;
codeSS << "____MontoToV8_newFunction_temp = " << code;
@@ -58,15 +58,15 @@ namespace mongo {
Local< Value > ret = compiled->Run();
return ret;
}
-
+
Local< v8::Value > newId( const OID &id ) {
v8::Function * idCons = getObjectIdCons();
v8::Handle<v8::Value> argv[1];
argv[0] = v8::String::New( id.str().c_str() );
- return idCons->NewInstance( 1 , argv );
+ return idCons->NewInstance( 1 , argv );
}
-
- Local<v8::Object> mongoToV8( const BSONObj& m , bool array, bool readOnly ){
+
+ Local<v8::Object> mongoToV8( const BSONObj& m , bool array, bool readOnly ) {
Local<v8::Object> o;
@@ -87,13 +87,16 @@ namespace mongo {
if ( !o.IsEmpty() ) {
readOnly = false;
- } else if ( array ) {
+ }
+ else if ( array ) {
// NOTE Looks like it's impossible to add interceptors to v8 arrays.
readOnly = false;
o = v8::Array::New();
- } else if ( !readOnly ) {
+ }
+ else if ( !readOnly ) {
o = v8::Object::New();
- } else {
+ }
+ else {
            // NOTE Our readOnly implementation relies on undocumented ObjectTemplate
// functionality that may be fragile, but it still seems like the best option
// for now -- fwiw, the v8 docs are pretty sparse. I've determined experimentally
@@ -116,15 +119,15 @@ namespace mongo {
readOnlyObjects->SetIndexedPropertyHandler( 0 );
o = readOnlyObjects->NewInstance();
}
-
+
mongo::BSONObj sub;
for ( BSONObjIterator i(m); i.more(); ) {
const BSONElement& f = i.next();
-
+
Local<Value> v;
-
- switch ( f.type() ){
+
+ switch ( f.type() ) {
case mongo::Code:
o->Set( v8::String::New( f.fieldName() ), newFunction( f.valuestr() ) );
@@ -135,31 +138,31 @@ namespace mongo {
log() << "warning: CodeWScope doesn't transfer to db.eval" << endl;
o->Set( v8::String::New( f.fieldName() ), newFunction( f.codeWScopeCode() ) );
break;
-
- case mongo::String:
+
+ case mongo::String:
o->Set( v8::String::New( f.fieldName() ) , v8::String::New( f.valuestr() ) );
break;
-
+
case mongo::jstOID: {
v8::Function * idCons = getObjectIdCons();
v8::Handle<v8::Value> argv[1];
argv[0] = v8::String::New( f.__oid().str().c_str() );
- o->Set( v8::String::New( f.fieldName() ) ,
- idCons->NewInstance( 1 , argv ) );
+ o->Set( v8::String::New( f.fieldName() ) ,
+ idCons->NewInstance( 1 , argv ) );
break;
}
-
+
case mongo::NumberDouble:
case mongo::NumberInt:
o->Set( v8::String::New( f.fieldName() ) , v8::Number::New( f.number() ) );
break;
-
+
case mongo::Array:
case mongo::Object:
sub = f.embeddedObject();
o->Set( v8::String::New( f.fieldName() ) , mongoToV8( sub , f.type() == mongo::Array, readOnly ) );
break;
-
+
case mongo::Date:
o->Set( v8::String::New( f.fieldName() ) , v8::Date::New( f.date() ) );
break;
@@ -167,29 +170,29 @@ namespace mongo {
case mongo::Bool:
o->Set( v8::String::New( f.fieldName() ) , v8::Boolean::New( f.boolean() ) );
break;
-
+
case mongo::jstNULL:
case mongo::Undefined: // duplicate sm behavior
o->Set( v8::String::New( f.fieldName() ) , v8::Null() );
break;
-
+
case mongo::RegEx: {
v8::Function * regex = getNamedCons( "RegExp" );
-
+
v8::Handle<v8::Value> argv[2];
argv[0] = v8::String::New( f.regex() );
argv[1] = v8::String::New( f.regexFlags() );
-
+
o->Set( v8::String::New( f.fieldName() ) , regex->NewInstance( 2 , argv ) );
break;
}
-
+
case mongo::BinData: {
Local<v8::Object> b = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
int len;
const char *data = f.binData( len );
-
+
v8::Function* binData = getNamedCons( "BinData" );
v8::Handle<v8::Value> argv[3];
argv[0] = v8::Number::New( len );
@@ -198,36 +201,37 @@ namespace mongo {
o->Set( v8::String::New( f.fieldName() ), binData->NewInstance(3, argv) );
break;
}
-
+
case mongo::Timestamp: {
Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
-
+
sub->Set( v8::String::New( "t" ) , v8::Number::New( f.timestampTime() ) );
sub->Set( v8::String::New( "i" ) , v8::Number::New( f.timestampInc() ) );
sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
-
+
o->Set( v8::String::New( f.fieldName() ) , sub );
break;
}
-
+
case mongo::NumberLong: {
Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
unsigned long long val = f.numberLong();
v8::Function* numberLong = getNamedCons( "NumberLong" );
if ( (long long)val == (long long)(double)(long long)(val) ) {
- v8::Handle<v8::Value> argv[1];
+ v8::Handle<v8::Value> argv[1];
argv[0] = v8::Number::New( (double)(long long)( val ) );
o->Set( v8::String::New( f.fieldName() ), numberLong->NewInstance( 1, argv ) );
- } else {
+ }
+ else {
v8::Handle<v8::Value> argv[3];
argv[0] = v8::Number::New( (double)(long long)(val) );
argv[1] = v8::Integer::New( val >> 32 );
argv[2] = v8::Integer::New( (unsigned long)(val & 0x00000000ffffffff) );
o->Set( v8::String::New( f.fieldName() ), numberLong->NewInstance(3, argv) );
}
- break;
+ break;
}
-
+
case mongo::MinKey: {
Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
sub->Set( v8::String::New( "$MinKey" ), v8::Boolean::New( true ) );
@@ -235,7 +239,7 @@ namespace mongo {
o->Set( v8::String::New( f.fieldName() ) , sub );
break;
}
-
+
case mongo::MaxKey: {
Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
sub->Set( v8::String::New( "$MaxKey" ), v8::Boolean::New( true ) );
@@ -252,7 +256,7 @@ namespace mongo {
o->Set( v8::String::New( f.fieldName() ), dbPointer->NewInstance(2, argv) );
break;
}
-
+
default:
cout << "can't handle type: ";
cout << f.type() << " ";
@@ -260,14 +264,14 @@ namespace mongo {
cout << endl;
break;
}
-
+
}
if ( readOnly ) {
readOnlyObjects->SetNamedPropertyHandler( 0, NamedReadOnlySet, 0, NamedReadOnlyDelete );
- readOnlyObjects->SetIndexedPropertyHandler( 0, IndexedReadOnlySet, 0, IndexedReadOnlyDelete );
+ readOnlyObjects->SetIndexedPropertyHandler( 0, IndexedReadOnlySet, 0, IndexedReadOnlyDelete );
}
-
+
return o;
}
@@ -275,56 +279,56 @@ namespace mongo {
Local< v8::ObjectTemplate > internalFieldObjects = v8::ObjectTemplate::New();
internalFieldObjects->SetInternalFieldCount( 1 );
- switch ( f.type() ){
+ switch ( f.type() ) {
case mongo::Code:
return newFunction( f.valuestr() );
-
+
case CodeWScope:
if ( f.codeWScopeObject().isEmpty() )
log() << "warning: CodeWScope doesn't transfer to db.eval" << endl;
return newFunction( f.codeWScopeCode() );
-
- case mongo::String:
+
+ case mongo::String:
return v8::String::New( f.valuestr() );
-
+
case mongo::jstOID:
return newId( f.__oid() );
-
+
case mongo::NumberDouble:
case mongo::NumberInt:
return v8::Number::New( f.number() );
-
+
case mongo::Array:
case mongo::Object:
return mongoToV8( f.embeddedObject() , f.type() == mongo::Array );
-
+
case mongo::Date:
return v8::Date::New( f.date() );
-
+
case mongo::Bool:
return v8::Boolean::New( f.boolean() );
- case mongo::EOO:
+ case mongo::EOO:
case mongo::jstNULL:
case mongo::Undefined: // duplicate sm behavior
return v8::Null();
-
+
case mongo::RegEx: {
v8::Function * regex = getNamedCons( "RegExp" );
-
+
v8::Handle<v8::Value> argv[2];
argv[0] = v8::String::New( f.regex() );
argv[1] = v8::String::New( f.regexFlags() );
-
+
return regex->NewInstance( 2 , argv );
break;
}
-
+
case mongo::BinData: {
int len;
const char *data = f.binData( len );
-
+
v8::Function* binData = getNamedCons( "BinData" );
v8::Handle<v8::Value> argv[3];
argv[0] = v8::Number::New( len );
@@ -332,26 +336,27 @@ namespace mongo {
argv[2] = v8::String::New( data, len );
return binData->NewInstance( 3, argv );
};
-
+
case mongo::Timestamp: {
Local<v8::Object> sub = internalFieldObjects->NewInstance();
-
+
sub->Set( v8::String::New( "t" ) , v8::Number::New( f.timestampTime() ) );
sub->Set( v8::String::New( "i" ) , v8::Number::New( f.timestampInc() ) );
sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
return sub;
}
-
+
case mongo::NumberLong: {
Local<v8::Object> sub = internalFieldObjects->NewInstance();
unsigned long long val = f.numberLong();
v8::Function* numberLong = getNamedCons( "NumberLong" );
if ( (long long)val == (long long)(double)(long long)(val) ) {
- v8::Handle<v8::Value> argv[1];
+ v8::Handle<v8::Value> argv[1];
argv[0] = v8::Number::New( (double)(long long)( val ) );
return numberLong->NewInstance( 1, argv );
- } else {
+ }
+ else {
v8::Handle<v8::Value> argv[3];
argv[0] = v8::Number::New( (double)(long long)( val ) );
argv[1] = v8::Integer::New( val >> 32 );
@@ -359,21 +364,21 @@ namespace mongo {
return numberLong->NewInstance( 3, argv );
}
}
-
+
case mongo::MinKey: {
Local<v8::Object> sub = internalFieldObjects->NewInstance();
sub->Set( v8::String::New( "$MinKey" ), v8::Boolean::New( true ) );
sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
return sub;
}
-
+
case mongo::MaxKey: {
Local<v8::Object> sub = internalFieldObjects->NewInstance();
sub->Set( v8::String::New( "$MaxKey" ), v8::Boolean::New( true ) );
sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
return sub;
}
-
+
case mongo::DBRef: {
v8::Function* dbPointer = getNamedCons( "DBPointer" );
v8::Handle<v8::Value> argv[2];
@@ -381,83 +386,83 @@ namespace mongo {
argv[1] = newId( f.dbrefOID() );
return dbPointer->NewInstance(2, argv);
}
-
+
default:
cout << "can't handle type: ";
- cout << f.type() << " ";
- cout << f.toString();
- cout << endl;
+ cout << f.type() << " ";
+ cout << f.toString();
+ cout << endl;
break;
- }
-
+ }
+
return v8::Undefined();
}
- void v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name , const string sname , v8::Handle<v8::Value> value , int depth ){
-
- if ( value->IsString() ){
+ void v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name , const string sname , v8::Handle<v8::Value> value , int depth ) {
+
+ if ( value->IsString() ) {
b.append( sname , toSTLString( value ).c_str() );
return;
}
-
- if ( value->IsFunction() ){
+
+ if ( value->IsFunction() ) {
b.appendCode( sname , toSTLString( value ) );
return;
}
-
- if ( value->IsNumber() ){
+
+ if ( value->IsNumber() ) {
if ( value->IsInt32() )
b.append( sname, int( value->ToInt32()->Value() ) );
else
b.append( sname , value->ToNumber()->Value() );
return;
}
-
- if ( value->IsArray() ){
+
+ if ( value->IsArray() ) {
BSONObj sub = v8ToMongo( value->ToObject() , depth );
b.appendArray( sname , sub );
return;
}
-
- if ( value->IsDate() ){
+
+ if ( value->IsDate() ) {
b.appendDate( sname , Date_t( (unsigned long long)(v8::Date::Cast( *value )->NumberValue())) );
return;
}
if ( value->IsExternal() )
return;
-
- if ( value->IsObject() ){
+
+ if ( value->IsObject() ) {
// The user could potentially modify the fields of these special objects,
// wreaking havoc when we attempt to reinterpret them. Not doing any validation
// for now...
Local< v8::Object > obj = value->ToObject();
if ( obj->InternalFieldCount() && obj->GetInternalField( 0 )->IsNumber() ) {
switch( obj->GetInternalField( 0 )->ToInt32()->Value() ) { // NOTE Uint32's Value() gave me a linking error, so going with this instead
- case Timestamp:
- b.appendTimestamp( sname,
- Date_t( (unsigned long long)(obj->Get( v8::String::New( "t" ) )->ToNumber()->Value() )),
- obj->Get( v8::String::New( "i" ) )->ToInt32()->Value() );
- return;
- case MinKey:
- b.appendMinKey( sname );
- return;
- case MaxKey:
- b.appendMaxKey( sname );
- return;
- default:
- assert( "invalid internal field" == 0 );
+ case Timestamp:
+ b.appendTimestamp( sname,
+ Date_t( (unsigned long long)(obj->Get( v8::String::New( "t" ) )->ToNumber()->Value() )),
+ obj->Get( v8::String::New( "i" ) )->ToInt32()->Value() );
+ return;
+ case MinKey:
+ b.appendMinKey( sname );
+ return;
+ case MaxKey:
+ b.appendMaxKey( sname );
+ return;
+ default:
+ assert( "invalid internal field" == 0 );
}
}
string s = toSTLString( value );
- if ( s.size() && s[0] == '/' ){
+ if ( s.size() && s[0] == '/' ) {
s = s.substr( 1 );
string r = s.substr( 0 , s.rfind( "/" ) );
string o = s.substr( s.rfind( "/" ) + 1 );
b.appendRegex( sname , r , o );
}
else if ( value->ToObject()->GetPrototype()->IsObject() &&
- value->ToObject()->GetPrototype()->ToObject()->HasRealNamedProperty( v8::String::New( "isObjectId" ) ) ){
+ value->ToObject()->GetPrototype()->ToObject()->HasRealNamedProperty( v8::String::New( "isObjectId" ) ) ) {
OID oid;
oid.init( toSTLString( value ) );
b.appendOID( sname , &oid );
@@ -470,19 +475,20 @@ namespace mongo {
long long val;
if ( !it->Has( v8::String::New( "top" ) ) ) {
val = (long long)( it->Get( v8::String::New( "floatApprox" ) )->NumberValue() );
- } else {
+ }
+ else {
val = (long long)
- ( (unsigned long long)( it->Get( v8::String::New( "top" ) )->ToInt32()->Value() ) << 32 ) +
- (unsigned)( it->Get( v8::String::New( "bottom" ) )->ToInt32()->Value() );
+ ( (unsigned long long)( it->Get( v8::String::New( "top" ) )->ToInt32()->Value() ) << 32 ) +
+ (unsigned)( it->Get( v8::String::New( "bottom" ) )->ToInt32()->Value() );
}
-
+
b.append( sname, val );
}
else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__DBPointer" ) ).IsEmpty() ) {
OID oid;
oid.init( toSTLString( value->ToObject()->Get( v8::String::New( "id" ) ) ) );
string ns = toSTLString( value->ToObject()->Get( v8::String::New( "ns" ) ) );
- b.appendDBRef( sname, ns, oid );
+ b.appendDBRef( sname, ns, oid );
}
else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__BinData" ) ).IsEmpty() ) {
int len = obj->Get( v8::String::New( "len" ) )->ToInt32()->Value();
@@ -490,27 +496,28 @@ namespace mongo {
const char *dataArray = *data;
assert( data.length() == len );
b.appendBinData( sname,
- len,
- mongo::BinDataType( obj->Get( v8::String::New( "type" ) )->ToInt32()->Value() ),
- dataArray );
- } else {
+ len,
+ mongo::BinDataType( obj->Get( v8::String::New( "type" ) )->ToInt32()->Value() ),
+ dataArray );
+ }
+ else {
BSONObj sub = v8ToMongo( value->ToObject() , depth );
b.append( sname , sub );
}
return;
}
-
- if ( value->IsBoolean() ){
+
+ if ( value->IsBoolean() ) {
b.appendBool( sname , value->ToBoolean()->Value() );
return;
}
-
- else if ( value->IsUndefined() ){
+
+ else if ( value->IsUndefined() ) {
b.appendUndefined( sname );
return;
}
-
- else if ( value->IsNull() ){
+
+ else if ( value->IsNull() ) {
b.appendNull( sname );
return;
}
@@ -518,26 +525,26 @@ namespace mongo {
cout << "don't know how to convert to mongo field [" << name << "]\t" << value << endl;
}
- BSONObj v8ToMongo( v8::Handle<v8::Object> o , int depth ){
+ BSONObj v8ToMongo( v8::Handle<v8::Object> o , int depth ) {
BSONObjBuilder b;
-
- if ( depth == 0 ){
+
+ if ( depth == 0 ) {
v8::Handle<v8::String> idName = v8::String::New( "_id" );
- if ( o->HasRealNamedProperty( idName ) ){
+ if ( o->HasRealNamedProperty( idName ) ) {
v8ToMongoElement( b , idName , "_id" , o->Get( idName ) );
}
}
-
+
Local<v8::Array> names = o->GetPropertyNames();
- for ( unsigned int i=0; i<names->Length(); i++ ){
+ for ( unsigned int i=0; i<names->Length(); i++ ) {
v8::Local<v8::String> name = names->Get(v8::Integer::New(i) )->ToString();
if ( o->GetPrototype()->IsObject() &&
- o->GetPrototype()->ToObject()->HasRealNamedProperty( name ) )
+ o->GetPrototype()->ToObject()->HasRealNamedProperty( name ) )
continue;
-
+
v8::Local<v8::Value> value = o->Get( name );
-
+
const string sname = toSTLString( name );
if ( depth == 0 && sname == "_id" )
continue;
@@ -554,15 +561,15 @@ namespace mongo {
WrapperHolder( const BSONObj * o , bool readOnly , bool iDelete )
: _o(o), _readOnly( readOnly ), _iDelete( iDelete ) {
}
-
- ~WrapperHolder(){
- if ( _o && _iDelete ){
+
+ ~WrapperHolder() {
+ if ( _o && _iDelete ) {
delete _o;
}
_o = 0;
}
- v8::Handle<v8::Value> get( v8::Local<v8::String> name ){
+ v8::Handle<v8::Value> get( v8::Local<v8::String> name ) {
const string& s = toSTLString( name );
const BSONElement& e = _o->getField( s );
return mongoToV8Element(e);
@@ -573,13 +580,13 @@ namespace mongo {
bool _iDelete;
};
- WrapperHolder * createWrapperHolder( const BSONObj * o , bool readOnly , bool iDelete ){
+ WrapperHolder * createWrapperHolder( const BSONObj * o , bool readOnly , bool iDelete ) {
return new WrapperHolder( o , readOnly , iDelete );
}
#define WRAPPER_STRING (v8::String::New( "_wrapper" ) )
- WrapperHolder * getWrapper( v8::Handle<v8::Object> o ){
+ WrapperHolder * getWrapper( v8::Handle<v8::Object> o ) {
Handle<v8::Value> t = o->GetRealNamedProperty( WRAPPER_STRING );
assert( t->IsExternal() );
Local<External> c = External::Cast( *t );
@@ -589,20 +596,20 @@ namespace mongo {
}
- Handle<Value> wrapperCons(const Arguments& args){
+ Handle<Value> wrapperCons(const Arguments& args) {
if ( ! ( args.Length() == 1 && args[0]->IsExternal() ) )
return v8::ThrowException( v8::String::New( "wrapperCons needs 1 External arg" ) );
args.This()->Set( WRAPPER_STRING , args[0] );
-
+
return v8::Undefined();
}
- v8::Handle<v8::Value> wrapperGetHandler( v8::Local<v8::String> name, const v8::AccessorInfo &info){
+ v8::Handle<v8::Value> wrapperGetHandler( v8::Local<v8::String> name, const v8::AccessorInfo &info) {
return getWrapper( info.This() )->get( name );
}
- v8::Handle<v8::FunctionTemplate> getObjectWrapperTemplate(){
+ v8::Handle<v8::FunctionTemplate> getObjectWrapperTemplate() {
v8::Local<v8::FunctionTemplate> t = newV8Function< wrapperCons >();
t->InstanceTemplate()->SetNamedPropertyHandler( wrapperGetHandler );
return t;
@@ -610,11 +617,11 @@ namespace mongo {
// --- random utils ----
- v8::Function * getNamedCons( const char * name ){
+ v8::Function * getNamedCons( const char * name ) {
return v8::Function::Cast( *(v8::Context::GetCurrent()->Global()->Get( v8::String::New( name ) ) ) );
}
- v8::Function * getObjectIdCons(){
+ v8::Function * getObjectIdCons() {
return getNamedCons( "ObjectId" );
}
diff --git a/scripting/v8_wrapper.h b/scripting/v8_wrapper.h
index 838aaf4269c..e0b79e304d2 100644
--- a/scripting/v8_wrapper.h
+++ b/scripting/v8_wrapper.h
@@ -28,10 +28,10 @@ namespace mongo {
v8::Local<v8::Object> mongoToV8( const mongo::BSONObj & m , bool array = 0 , bool readOnly = false );
mongo::BSONObj v8ToMongo( v8::Handle<v8::Object> o , int depth = 0 );
- void v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name ,
+ void v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name ,
const string sname , v8::Handle<v8::Value> value , int depth = 0 );
v8::Handle<v8::Value> mongoToV8Element( const BSONElement &f );
-
+
v8::Function * getNamedCons( const char * name );
v8::Function * getObjectIdCons();
diff --git a/shell/dbshell.cpp b/shell/dbshell.cpp
index 59851854540..e2db06a9e4f 100644
--- a/shell/dbshell.cpp
+++ b/shell/dbshell.cpp
@@ -63,7 +63,7 @@ namespace mongo {
extern bool dbexitCalled;
}
-void generateCompletions( const string& prefix , vector<string>& all ){
+void generateCompletions( const string& prefix , vector<string>& all ) {
if ( prefix.find( '"' ) != string::npos )
return;
@@ -74,7 +74,7 @@ void generateCompletions( const string& prefix , vector<string>& all ){
BSONObj arr = res.firstElement().Obj();
BSONObjIterator i(arr);
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
all.push_back( e.String() );
}
@@ -82,43 +82,43 @@ void generateCompletions( const string& prefix , vector<string>& all ){
}
#ifdef USE_READLINE
-static char** completionHook(const char* text , int start ,int end ){
+static char** completionHook(const char* text , int start ,int end ) {
static map<string,string> m;
-
+
vector<string> all;
-
+
generateCompletions( string(text,end) , all );
-
- if ( all.size() == 0 ){
+
+ if ( all.size() == 0 ) {
return 0;
}
-
+
string longest = all[0];
- for ( vector<string>::iterator i=all.begin(); i!=all.end(); ++i ){
+ for ( vector<string>::iterator i=all.begin(); i!=all.end(); ++i ) {
string s = *i;
- for ( unsigned j=0; j<s.size(); j++ ){
+ for ( unsigned j=0; j<s.size(); j++ ) {
if ( longest[j] == s[j] )
continue;
longest = longest.substr(0,j);
break;
}
}
-
+
char ** matches = (char**)malloc( sizeof(char*) * (all.size()+2) );
unsigned x=0;
matches[x++] = strdup( longest.c_str() );
- for ( unsigned i=0; i<all.size(); i++ ){
+ for ( unsigned i=0; i<all.size(); i++ ) {
matches[x++] = strdup( all[i].c_str() );
}
matches[x++] = 0;
- rl_completion_append_character = '\0'; // don't add a space after completions
+ rl_completion_append_character = '\0'; // don't add a space after completions
return matches;
}
#endif
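// [editor's note, not part of this changeset] the loop above computes the
// longest common prefix of all candidate completions for readline's first
// matches slot, e.g. all = { "db.foo", "db.fun" } => matches[0] == "db.f".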
-void shellHistoryInit(){
+void shellHistoryInit() {
#ifdef USE_READLINE
stringstream ss;
char * h = getenv( "HOME" );
@@ -129,19 +129,19 @@ void shellHistoryInit(){
using_history();
read_history( historyFile.c_str() );
-
+
rl_attempted_completion_function = completionHook;
#else
//cout << "type \"exit\" to exit" << endl;
#endif
}
-void shellHistoryDone(){
+void shellHistoryDone() {
#ifdef USE_READLINE
write_history( historyFile.c_str() );
#endif
}
-void shellHistoryAdd( const char * line ){
+void shellHistoryAdd( const char * line ) {
#ifdef USE_READLINE
if ( line[0] == '\0' )
return;
@@ -157,7 +157,7 @@ void shellHistoryAdd( const char * line ){
#endif
}
-void intr( int sig ){
+void intr( int sig ) {
#ifdef CTRLC_HANDLE
longjmp( jbuf , 1 );
#endif
@@ -173,7 +173,7 @@ void killOps() {
sleepmillis(10); // give current op a chance to finish
- for( map< string, set<string> >::const_iterator i = shellUtils::_allMyUris.begin(); i != shellUtils::_allMyUris.end(); ++i ){
+ for( map< string, set<string> >::const_iterator i = shellUtils::_allMyUris.begin(); i != shellUtils::_allMyUris.end(); ++i ) {
string errmsg;
ConnectionString cs = ConnectionString::parse(i->first, errmsg);
if (!cs.isValid()) continue;
@@ -183,7 +183,7 @@ void killOps() {
const set<string>& uris = i->second;
BSONObj inprog = conn->findOne("admin.$cmd.sys.inprog", Query())["inprog"].embeddedObject().getOwned();
- BSONForEach(op, inprog){
+ BSONForEach(op, inprog) {
if ( uris.count(op["client"].String()) ) {
ONCE if ( !autoKillOp ) {
cout << endl << "do you want to kill the current op(s) on the server? (y/n): ";
@@ -202,9 +202,9 @@ void killOps() {
}
}
-void quitNicely( int sig ){
+void quitNicely( int sig ) {
mongo::dbexitCalled = true;
- if ( sig == SIGINT && inMultiLine ){
+ if ( sig == SIGINT && inMultiLine ) {
gotInterrupted = 1;
return;
}
@@ -215,7 +215,7 @@ void quitNicely( int sig ){
exit(0);
}
#else
-void quitNicely( int sig ){
+void quitNicely( int sig ) {
mongo::dbexitCalled = true;
//killOps();
shellHistoryDone();
@@ -223,7 +223,7 @@ void quitNicely( int sig ){
}
#endif
-char * shellReadline( const char * prompt , int handlesigint = 0 ){
+char * shellReadline( const char * prompt , int handlesigint = 0 ) {
atPrompt = true;
#ifdef USE_READLINE
@@ -232,12 +232,12 @@ char * shellReadline( const char * prompt , int handlesigint = 0 ){
#ifdef CTRLC_HANDLE
- if ( ! handlesigint ){
+ if ( ! handlesigint ) {
char* ret = readline( prompt );
atPrompt = false;
return ret;
}
- if ( setjmp( jbuf ) ){
+ if ( setjmp( jbuf ) ) {
gotInterrupted = 1;
sigrelse(SIGINT);
signal( SIGINT , quitNicely );
@@ -300,23 +300,23 @@ void setupSignals() {
inline void setupSignals() {}
#endif
-string fixHost( string url , string host , string port ){
+string fixHost( string url , string host , string port ) {
//cout << "fixHost url: " << url << " host: " << host << " port: " << port << endl;
- if ( host.size() == 0 && port.size() == 0 ){
- if ( url.find( "/" ) == string::npos ){
+ if ( host.size() == 0 && port.size() == 0 ) {
+ if ( url.find( "/" ) == string::npos ) {
// check for ips
if ( url.find( "." ) != string::npos )
return url + "/test";
if ( url.rfind( ":" ) != string::npos &&
- isdigit( url[url.rfind(":")+1] ) )
+ isdigit( url[url.rfind(":")+1] ) )
return url + "/test";
}
return url;
}
- if ( url.find( "/" ) != string::npos ){
+ if ( url.find( "/" ) != string::npos ) {
cerr << "url can't have host or port if you specify them individually" << endl;
exit(-1);
}
@@ -327,7 +327,7 @@ string fixHost( string url , string host , string port ){
string newurl = host;
if ( port.size() > 0 )
newurl += ":" + port;
- else if (host.find(':') == string::npos){
+ else if (host.find(':') == string::npos) {
// need to add port with IPv6 addresses
newurl += ":27017";
}
@@ -339,21 +339,21 @@ string fixHost( string url , string host , string port ){
static string OpSymbols = "~!%^&*-+=|:,<>/?";
-bool isOpSymbol( char c ){
+bool isOpSymbol( char c ) {
for ( size_t i = 0; i < OpSymbols.size(); i++ )
if ( OpSymbols[i] == c ) return true;
return false;
}
-bool isBalanced( string code ){
+bool isBalanced( string code ) {
int brackets = 0;
int parens = 0;
bool danglingOp = false;
- for ( size_t i=0; i<code.size(); i++ ){
- switch( code[i] ){
+ for ( size_t i=0; i<code.size(); i++ ) {
+ switch( code[i] ) {
case '/':
- if ( i+1 < code.size() && code[i+1] == '/' ){
+ if ( i+1 < code.size() && code[i+1] == '/' ) {
while ( i<code.size() && code[i] != '\n' )
i++;
}
@@ -393,7 +393,7 @@ using mongo::asserted;
struct BalancedTest : public mongo::UnitTest {
public:
- void run(){
+ void run() {
assert( isBalanced( "x = 5" ) );
assert( isBalanced( "function(){}" ) );
assert( isBalanced( "function(){\n}" ) );
@@ -413,8 +413,8 @@ public:
}
} balanced_test;
-string finishCode( string code ){
- while ( ! isBalanced( code ) ){
+string finishCode( string code ) {
+ while ( ! isBalanced( code ) ) {
inMultiLine = 1;
code += "\n";
char * line = shellReadline("... " , 1 );
@@ -446,12 +446,12 @@ void show_help_text(const char* name, po::options_description options) {
<< "unless --shell is specified" << endl;
};
-bool fileExists( string file ){
+bool fileExists( string file ) {
try {
path p(file);
return boost::filesystem::exists( file );
}
- catch (...){
+ catch (...) {
return false;
}
}
@@ -461,7 +461,7 @@ namespace mongo {
extern DBClientWithCommands *latestConn;
}
-string stateToString(MemberState s) {
+string stateToString(MemberState s) {
if( s.s == MemberState::RS_STARTUP ) return "STARTUP";
if( s.s == MemberState::RS_PRIMARY ) return "PRIMARY";
if( s.s == MemberState::RS_SECONDARY ) return "SECONDARY";
@@ -473,9 +473,9 @@ string stateToString(MemberState s) {
if( s.s == MemberState::RS_ROLLBACK ) return "ROLLBACK";
return "";
}
-string sayReplSetMemberState() {
+string sayReplSetMemberState() {
try {
- if( latestConn ) {
+ if( latestConn ) {
BSONObj info;
if( latestConn->runCommand("admin", BSON( "replSetGetStatus" << 1 << "forShell" << 1 ) , info ) ) {
stringstream ss;
@@ -486,7 +486,8 @@ string sayReplSetMemberState() {
return ss.str();
}
}
- } catch(...) { }
+ }
+ catch(...) { }
return "";
}
@@ -506,35 +507,35 @@ int _main(int argc, char* argv[]) {
bool runShell = false;
bool nodb = false;
-
+
string script;
po::options_description shell_options("options");
po::options_description hidden_options("Hidden options");
po::options_description cmdline_options("Command line options");
po::positional_options_description positional_options;
-
+
shell_options.add_options()
- ("shell", "run the shell after executing files")
- ("nodb", "don't connect to mongod on startup - no 'db address' arg expected")
- ("quiet", "be less chatty" )
- ("port", po::value<string>(&port), "port to connect to")
- ("host", po::value<string>(&dbhost), "server to connect to")
- ("eval", po::value<string>(&script), "evaluate javascript")
- ("username,u", po::value<string>(&username), "username for authentication")
- ("password,p", new mongo::PasswordValue(&password),
- "password for authentication")
- ("help,h", "show this usage information")
- ("version", "show version information")
- ("ipv6", "enable IPv6 support (disabled by default)")
- ;
+ ("shell", "run the shell after executing files")
+ ("nodb", "don't connect to mongod on startup - no 'db address' arg expected")
+ ("quiet", "be less chatty" )
+ ("port", po::value<string>(&port), "port to connect to")
+ ("host", po::value<string>(&dbhost), "server to connect to")
+ ("eval", po::value<string>(&script), "evaluate javascript")
+ ("username,u", po::value<string>(&username), "username for authentication")
+ ("password,p", new mongo::PasswordValue(&password),
+ "password for authentication")
+ ("help,h", "show this usage information")
+ ("version", "show version information")
+ ("ipv6", "enable IPv6 support (disabled by default)")
+ ;
hidden_options.add_options()
- ("dbaddress", po::value<string>(), "dbaddress")
- ("files", po::value< vector<string> >(), "files")
- ("nokillop", "nokillop") // for testing, kill op will also be disabled automatically if the tests starts a mongo program
- ("autokillop", "autokillop") // for testing, will kill op without prompting
- ;
+ ("dbaddress", po::value<string>(), "dbaddress")
+ ("files", po::value< vector<string> >(), "files")
+    ("nokillop", "nokillop") // for testing; kill op will also be disabled automatically if the test starts a mongo program
+ ("autokillop", "autokillop") // for testing, will kill op without prompting
+ ;
positional_options.add("dbaddress", 1);
positional_options.add("files", -1);
@@ -555,17 +556,18 @@ int _main(int argc, char* argv[]) {
positional(positional_options).
style(command_line_style).run(), params);
po::notify(params);
- } catch (po::error &e) {
+ }
+ catch (po::error &e) {
cout << "ERROR: " << e.what() << endl << endl;
show_help_text(argv[0], shell_options);
return mongo::EXIT_BADOPTIONS;
}
// hide password from ps output
- for (int i=0; i < (argc-1); ++i){
- if (!strcmp(argv[i], "-p") || !strcmp(argv[i], "--password")){
+ for (int i=0; i < (argc-1); ++i) {
+ if (!strcmp(argv[i], "-p") || !strcmp(argv[i], "--password")) {
char* arg = argv[i+1];
- while (*arg){
+ while (*arg) {
*arg++ = 'x';
}
}
@@ -597,7 +599,7 @@ int _main(int argc, char* argv[]) {
if (params.count("autokillop")) {
autoKillOp = true;
}
-
+
/* This is a bit confusing, here are the rules:
*
* if nodb is set then all positional parameters are files
@@ -609,41 +611,43 @@ int _main(int argc, char* argv[]) {
string dbaddress = params["dbaddress"].as<string>();
if (nodb) {
files.insert(files.begin(), dbaddress);
- } else {
+ }
+ else {
string basename = dbaddress.substr(dbaddress.find_last_of("/\\") + 1);
if (basename.find_first_of('.') == string::npos ||
- (basename.find(".js", basename.size() - 3) == string::npos && !fileExists(dbaddress))) {
+ (basename.find(".js", basename.size() - 3) == string::npos && !fileExists(dbaddress))) {
url = dbaddress;
- } else {
+ }
+ else {
files.insert(files.begin(), dbaddress);
}
}
}
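    // [editor's sketch of the rules above, not part of this changeset]:
    //     mongo --nodb foo.js          -> foo.js is treated as a file
    //     mongo 127.0.0.1/test foo.js  -> first positional is the db address
    //     mongo foo.js                 -> a ".js" (or existing-file) positional is a file
    // a positional with no '.' in its basename falls through to url = dbaddress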
- if (params.count("ipv6")){
+ if (params.count("ipv6")) {
mongo::enableIPv6();
}
-
- if ( ! mongo::cmdLine.quiet )
+
+ if ( ! mongo::cmdLine.quiet )
cout << "MongoDB shell version: " << mongo::versionString << endl;
mongo::UnitTest::runTests();
if ( !nodb ) { // connect to db
//if ( ! mongo::cmdLine.quiet ) cout << "url: " << url << endl;
-
+
stringstream ss;
if ( mongo::cmdLine.quiet )
ss << "__quiet = true;";
ss << "db = connect( \"" << fixHost( url , dbhost , port ) << "\")";
-
+
mongo::shellUtils::_dbConnect = ss.str();
if ( params.count( "password" )
- && ( password.empty() ) ) {
+ && ( password.empty() ) ) {
password = mongo::askPassword();
}
- if ( username.size() && password.size() ){
+ if ( username.size() && password.size() ) {
stringstream ss;
ss << "if ( ! db.auth( \"" << username << "\" , \"" << password << "\" ) ){ throw 'login failed'; }";
mongo::shellUtils::_dbAuth = ss.str();
@@ -654,12 +658,12 @@ int _main(int argc, char* argv[]) {
mongo::ScriptEngine::setConnectCallback( mongo::shellUtils::onConnect );
mongo::ScriptEngine::setup();
mongo::globalScriptEngine->setScopeInitCallback( mongo::shellUtils::initScope );
- auto_ptr< mongo::Scope > scope( mongo::globalScriptEngine->newScope() );
+ auto_ptr< mongo::Scope > scope( mongo::globalScriptEngine->newScope() );
shellMainScope = scope.get();
if( runShell )
cout << "type \"help\" for help" << endl;
-
+
if ( !script.empty() ) {
mongo::shellUtils::MongoProgramScope s;
if ( ! scope->exec( script , "(shell eval)" , true , true , false ) )
@@ -672,7 +676,7 @@ int _main(int argc, char* argv[]) {
if ( files.size() > 1 )
cout << "loading file: " << files[i] << endl;
- if ( ! scope->execFile( files[i] , false , true , false ) ){
+ if ( ! scope->execFile( files[i] , false , true , false ) ) {
cout << "failed to load: " << files[i] << endl;
return -3;
}
@@ -682,7 +686,7 @@ int _main(int argc, char* argv[]) {
runShell = true;
}
- if ( runShell ){
+ if ( runShell ) {
mongo::shellUtils::MongoProgramScope s;
@@ -690,7 +694,7 @@ int _main(int argc, char* argv[]) {
//v8::Handle<v8::Object> shellHelper = baseContext_->Global()->Get( v8::String::New( "shellHelper" ) )->ToObject();
- while ( 1 ){
+ while ( 1 ) {
inMultiLine = 0;
gotInterrupted = 0;
// shellMainScope->localConnect;
@@ -700,7 +704,7 @@ int _main(int argc, char* argv[]) {
char * line = shellReadline( prompt.c_str() );
- if ( line ){
+ if ( line ) {
while (startsWith(line, "> "))
line += 2;
@@ -708,20 +712,20 @@ int _main(int argc, char* argv[]) {
line++;
}
- if ( ! line || ( strlen(line) == 4 && strstr( line , "exit" ) ) ){
+ if ( ! line || ( strlen(line) == 4 && strstr( line , "exit" ) ) ) {
cout << "bye" << endl;
break;
}
string code = line;
- if ( code == "exit" || code == "exit;" ){
+ if ( code == "exit" || code == "exit;" ) {
break;
}
if ( code.size() == 0 )
continue;
code = finishCode( code );
- if ( gotInterrupted ){
+ if ( gotInterrupted ) {
cout << endl;
continue;
}
@@ -735,28 +739,28 @@ int _main(int argc, char* argv[]) {
if ( cmd.find( " " ) > 0 )
cmd = cmd.substr( 0 , cmd.find( " " ) );
- if ( cmd.find( "\"" ) == string::npos ){
+ if ( cmd.find( "\"" ) == string::npos ) {
try {
scope->exec( (string)"__iscmd__ = shellHelper[\"" + cmd + "\"];" , "(shellhelp1)" , false , true , true );
- if ( scope->getBoolean( "__iscmd__" ) ){
+ if ( scope->getBoolean( "__iscmd__" ) ) {
scope->exec( (string)"shellHelper( \"" + cmd + "\" , \"" + code.substr( cmd.size() ) + "\");" , "(shellhelp2)" , false , true , false );
wascmd = true;
}
}
- catch ( std::exception& e ){
- cout << "error2:" << e.what() << endl;
+ catch ( std::exception& e ) {
+ cout << "error2:" << e.what() << endl;
wascmd = true;
}
}
}
- if ( ! wascmd ){
+ if ( ! wascmd ) {
try {
if ( scope->exec( code.c_str() , "(shell)" , false , true , false ) )
scope->exec( "shellPrintHelper( __lastres__ );" , "(shell2)" , true , true , false );
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
cout << "error:" << e.what() << endl;
}
}
@@ -776,7 +780,7 @@ int main(int argc, char* argv[]) {
try {
return _main( argc , argv );
}
- catch ( mongo::DBException& e ){
+ catch ( mongo::DBException& e ) {
cerr << "exception: " << e.what() << endl;
return -1;
}
diff --git a/shell/mongo_vstudio.cpp b/shell/mongo_vstudio.cpp
index ead73aa4b11..d8f8dcf943e 100644
--- a/shell/mongo_vstudio.cpp
+++ b/shell/mongo_vstudio.cpp
@@ -1,3381 +1,3381 @@
#include "bson/stringdata.h"
namespace mongo {
-struct JSFile{ const char* name; const StringData& source; };
-namespace JSFiles{
-const StringData _jscode_raw_utils =
-"__quiet = false;\n"
-"__magicNoPrint = { __magicNoPrint : 1111 }\n"
-"\n"
-"chatty = function(s){\n"
-"if ( ! __quiet )\n"
-"print( s );\n"
-"}\n"
-"\n"
-"friendlyEqual = function( a , b ){\n"
-"if ( a == b )\n"
-"return true;\n"
-"\n"
-"if ( tojson( a ) == tojson( b ) )\n"
-"return true;\n"
-"\n"
-"return false;\n"
-"}\n"
-"\n"
-"printStackTrace = function(){\n"
-"try{\n"
-"throw new Error(\"Printing Stack Trace\");\n"
-"} catch (e) {\n"
-"print(e.stack);\n"
-"}\n"
-"}\n"
-"\n"
-"doassert = function (msg) {\n"
-"if (msg.indexOf(\"assert\") == 0)\n"
-"print(msg);\n"
-"else\n"
-"print(\"assert: \" + msg);\n"
-"printStackTrace();\n"
-"throw msg;\n"
-"}\n"
-"\n"
-"assert = function( b , msg ){\n"
-"if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
-"\n"
-"if ( b )\n"
-"return;\n"
-"\n"
-"doassert( msg == undefined ? \"assert failed\" : \"assert failed : \" + msg );\n"
-"}\n"
-"\n"
-"assert.automsg = function( b ) {\n"
-"assert( eval( b ), b );\n"
-"}\n"
-"\n"
-"assert._debug = false;\n"
-"\n"
-"assert.eq = function( a , b , msg ){\n"
-"if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
-"\n"
-"if ( a == b )\n"
-"return;\n"
-"\n"
-"if ( ( a != null && b != null ) && friendlyEqual( a , b ) )\n"
-"return;\n"
-"\n"
-"doassert( \"[\" + tojson( a ) + \"] != [\" + tojson( b ) + \"] are not equal : \" + msg );\n"
-"}\n"
-"\n"
-"assert.eq.automsg = function( a, b ) {\n"
-"assert.eq( eval( a ), eval( b ), \"[\" + a + \"] != [\" + b + \"]\" );\n"
-"}\n"
-"\n"
-"assert.neq = function( a , b , msg ){\n"
-"if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
-"if ( a != b )\n"
-"return;\n"
-"\n"
-"doassert( \"[\" + a + \"] != [\" + b + \"] are equal : \" + msg );\n"
-"}\n"
-"\n"
-"assert.repeat = function( f, msg, timeout, interval ) {\n"
-"if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
-"\n"
-"var start = new Date();\n"
-"timeout = timeout || 30000;\n"
-"interval = interval || 200;\n"
-"var last;\n"
-"while( 1 ) {\n"
-"\n"
-"if ( typeof( f ) == \"string\" ){\n"
-"if ( eval( f ) )\n"
-"return;\n"
-"}\n"
-"else {\n"
-"if ( f() )\n"
-"return;\n"
-"}\n"
-"\n"
-"if ( ( new Date() ).getTime() - start.getTime() > timeout )\n"
-"break;\n"
-"sleep( interval );\n"
-"}\n"
-"}\n"
-"\n"
-"assert.soon = function( f, msg, timeout /*ms*/, interval ) {\n"
-"if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
-"\n"
-"var start = new Date();\n"
-"timeout = timeout || 30000;\n"
-"interval = interval || 200;\n"
-"var last;\n"
-"while( 1 ) {\n"
-"\n"
-"if ( typeof( f ) == \"string\" ){\n"
-"if ( eval( f ) )\n"
-"return;\n"
-"}\n"
-"else {\n"
-"if ( f() )\n"
-"return;\n"
-"}\n"
-"\n"
-"if ( ( new Date() ).getTime() - start.getTime() > timeout )\n"
-"doassert( \"assert.soon failed: \" + f + \", msg:\" + msg );\n"
-"sleep( interval );\n"
-"}\n"
-"}\n"
-"\n"
-"assert.throws = function( func , params , msg ){\n"
-"if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
-"try {\n"
-"func.apply( null , params );\n"
-"}\n"
-"catch ( e ){\n"
-"return e;\n"
-"}\n"
-"\n"
-"doassert( \"did not throw exception: \" + msg );\n"
-"}\n"
-"\n"
-"assert.throws.automsg = function( func, params ) {\n"
-"assert.throws( func, params, func.toString() );\n"
-"}\n"
-"\n"
-"assert.commandWorked = function( res , msg ){\n"
-"if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
-"\n"
-"if ( res.ok == 1 )\n"
-"return;\n"
-"\n"
-"doassert( \"command failed: \" + tojson( res ) + \" : \" + msg );\n"
-"}\n"
-"\n"
-"assert.commandFailed = function( res , msg ){\n"
-"if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
-"\n"
-"if ( res.ok == 0 )\n"
-"return;\n"
-"\n"
-"doassert( \"command worked when it should have failed: \" + tojson( res ) + \" : \" + msg );\n"
-"}\n"
-"\n"
-"assert.isnull = function( what , msg ){\n"
-"if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
-"\n"
-"if ( what == null )\n"
-"return;\n"
-"\n"
-"doassert( \"supposed to null (\" + ( msg || \"\" ) + \") was: \" + tojson( what ) );\n"
-"}\n"
-"\n"
-"assert.lt = function( a , b , msg ){\n"
-"if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
-"\n"
-"if ( a < b )\n"
-"return;\n"
-"doassert( a + \" is not less than \" + b + \" : \" + msg );\n"
-"}\n"
-"\n"
-"assert.gt = function( a , b , msg ){\n"
-"if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
-"\n"
-"if ( a > b )\n"
-"return;\n"
-"doassert( a + \" is not greater than \" + b + \" : \" + msg );\n"
-"}\n"
-"\n"
-"assert.lte = function( a , b , msg ){\n"
-"if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
-"\n"
-"if ( a <= b )\n"
-"return;\n"
-"doassert( a + \" is not less than or eq \" + b + \" : \" + msg );\n"
-"}\n"
-"\n"
-"assert.gte = function( a , b , msg ){\n"
-"if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
-"\n"
-"if ( a >= b )\n"
-"return;\n"
-"doassert( a + \" is not greater than or eq \" + b + \" : \" + msg );\n"
-"}\n"
-"\n"
-"\n"
-"assert.close = function( a , b , msg , places ){\n"
-"if (places === undefined) {\n"
-"places = 4;\n"
-"}\n"
-"if (Math.round((a - b) * Math.pow(10, places)) === 0) {\n"
-"return;\n"
-"}\n"
-"doassert( a + \" is not equal to \" + b + \" within \" + places +\n"
-"\" places, diff: \" + (a-b) + \" : \" + msg );\n"
-"};\n"
-"\n"
-"Object.extend = function( dst , src , deep ){\n"
-"for ( var k in src ){\n"
-"var v = src[k];\n"
-"if ( deep && typeof(v) == \"object\" ){\n"
-"if ( \"floatApprox\" in v ) { // convert NumberLong properly\n"
-"eval( \"v = \" + tojson( v ) );\n"
-"} else {\n"
-"v = Object.extend( typeof ( v.length ) == \"number\" ? [] : {} , v , true );\n"
-"}\n"
-"}\n"
-"dst[k] = v;\n"
-"}\n"
-"return dst;\n"
-"}\n"
-"\n"
-"argumentsToArray = function( a ){\n"
-"var arr = [];\n"
-"for ( var i=0; i<a.length; i++ )\n"
-"arr[i] = a[i];\n"
-"return arr;\n"
-"}\n"
-"\n"
-"isString = function( x ){\n"
-"return typeof( x ) == \"string\";\n"
-"}\n"
-"\n"
-"isNumber = function(x){\n"
-"return typeof( x ) == \"number\";\n"
-"}\n"
-"\n"
-"isObject = function( x ){\n"
-"return typeof( x ) == \"object\";\n"
-"}\n"
-"\n"
-"String.prototype.trim = function() {\n"
-"return this.replace(/^\\s+|\\s+$/g,\"\");\n"
-"}\n"
-"String.prototype.ltrim = function() {\n"
-"return this.replace(/^\\s+/,\"\");\n"
-"}\n"
-"String.prototype.rtrim = function() {\n"
-"return this.replace(/\\s+$/,\"\");\n"
-"}\n"
-"\n"
-"Number.prototype.zeroPad = function(width) {\n"
-"var str = this + '';\n"
-"while (str.length < width)\n"
-"str = '0' + str;\n"
-"return str;\n"
-"}\n"
-"\n"
-"Date.timeFunc = function( theFunc , numTimes ){\n"
-"\n"
-"var start = new Date();\n"
-"\n"
-"numTimes = numTimes || 1;\n"
-"for ( var i=0; i<numTimes; i++ ){\n"
-"theFunc.apply( null , argumentsToArray( arguments ).slice( 2 ) );\n"
-"}\n"
-"\n"
-"return (new Date()).getTime() - start.getTime();\n"
-"}\n"
-"\n"
-"Date.prototype.tojson = function(){\n"
-"\n"
-"var UTC = Date.printAsUTC ? 'UTC' : '';\n"
-"\n"
-"var year = this['get'+UTC+'FullYear']().zeroPad(4);\n"
-"var month = (this['get'+UTC+'Month']() + 1).zeroPad(2);\n"
-"var date = this['get'+UTC+'Date']().zeroPad(2);\n"
-"var hour = this['get'+UTC+'Hours']().zeroPad(2);\n"
-"var minute = this['get'+UTC+'Minutes']().zeroPad(2);\n"
-"var sec = this['get'+UTC+'Seconds']().zeroPad(2)\n"
-"\n"
-"if (this['get'+UTC+'Milliseconds']())\n"
-"sec += '.' + this['get'+UTC+'Milliseconds']().zeroPad(3)\n"
-"\n"
-"var ofs = 'Z';\n"
-"if (!Date.printAsUTC){\n"
-"var ofsmin = this.getTimezoneOffset();\n"
-"if (ofsmin != 0){\n"
-"ofs = ofsmin > 0 ? '-' : '+'; // This is correct\n"
-"ofs += (ofsmin/60).zeroPad(2)\n"
-"ofs += (ofsmin%60).zeroPad(2)\n"
-"}\n"
-"}\n"
-"\n"
-"return 'ISODate(\"'+year+'-'+month+'-'+date+'T'+hour+':'+minute+':'+sec+ofs+'\")';\n"
-"}\n"
-"\n"
-"Date.printAsUTC = true;\n"
-"\n"
-"\n"
-"ISODate = function(isoDateStr){\n"
-"if (!isoDateStr)\n"
-"return new Date();\n"
-"\n"
-"var isoDateRegex = /(\\d{4})-?(\\d{2})-?(\\d{2})([T ](\\d{2})(:?(\\d{2})(:?(\\d{2}(\\.\\d+)?))?)?(Z|([+-])(\\d{2}):?(\\d{2})?)?)?/;\n"
-"var res = isoDateRegex.exec(isoDateStr);\n"
-"\n"
-"if (!res)\n"
-"throw \"invalid ISO date\";\n"
-"\n"
-"var year = parseInt(res[1],10) || 1970; // this should always be present\n"
-"var month = (parseInt(res[2],10) || 1) - 1;\n"
-"var date = parseInt(res[3],10) || 0;\n"
-"var hour = parseInt(res[5],10) || 0;\n"
-"var min = parseInt(res[7],10) || 0;\n"
-"var sec = parseFloat(res[9]) || 0;\n"
-"var ms = Math.round((sec%1) * 1000)\n"
-"sec -= ms/1000\n"
-"\n"
-"var time = Date.UTC(year, month, date, hour, min, sec, ms);\n"
-"\n"
-"if (res[11] && res[11] != 'Z'){\n"
-"var ofs = 0;\n"
-"ofs += (parseInt(res[13],10) || 0) * 60*60*1000; // hours\n"
-"ofs += (parseInt(res[14],10) || 0) * 60*1000; // mins\n"
-"if (res[12] == '+') // if ahead subtract\n"
-"ofs *= -1;\n"
-"\n"
-"time += ofs\n"
-"}\n"
-"\n"
-"return new Date(time);\n"
-"}\n"
-"\n"
-"RegExp.prototype.tojson = RegExp.prototype.toString;\n"
-"\n"
-"Array.contains = function( a , x ){\n"
-"for ( var i=0; i<a.length; i++ ){\n"
-"if ( a[i] == x )\n"
-"return true;\n"
-"}\n"
-"return false;\n"
-"}\n"
-"\n"
-"Array.unique = function( a ){\n"
-"var u = [];\n"
-"for ( var i=0; i<a.length; i++){\n"
-"var o = a[i];\n"
-"if ( ! Array.contains( u , o ) ){\n"
-"u.push( o );\n"
-"}\n"
-"}\n"
-"return u;\n"
-"}\n"
-"\n"
-"Array.shuffle = function( arr ){\n"
-"for ( var i=0; i<arr.length-1; i++ ){\n"
-"var pos = i+Random.randInt(arr.length-i);\n"
-"var save = arr[i];\n"
-"arr[i] = arr[pos];\n"
-"arr[pos] = save;\n"
-"}\n"
-"return arr;\n"
-"}\n"
-"\n"
-"\n"
-"Array.tojson = function( a , indent ){\n"
-"if (!indent)\n"
-"indent = \"\";\n"
-"\n"
-"if (a.length == 0) {\n"
-"return \"[ ]\";\n"
-"}\n"
-"\n"
-"var s = \"[\\n\";\n"
-"indent += \"\\t\";\n"
-"for ( var i=0; i<a.length; i++){\n"
-"s += indent + tojson( a[i], indent );\n"
-"if ( i < a.length - 1 ){\n"
-"s += \",\\n\";\n"
-"}\n"
-"}\n"
-"if ( a.length == 0 ) {\n"
-"s += indent;\n"
-"}\n"
-"\n"
-"indent = indent.substring(1);\n"
-"s += \"\\n\"+indent+\"]\";\n"
-"return s;\n"
-"}\n"
-"\n"
-"Array.fetchRefs = function( arr , coll ){\n"
-"var n = [];\n"
-"for ( var i=0; i<arr.length; i ++){\n"
-"var z = arr[i];\n"
-"if ( coll && coll != z.getCollection() )\n"
-"continue;\n"
-"n.push( z.fetch() );\n"
-"}\n"
-"\n"
-"return n;\n"
-"}\n"
-"\n"
-"Array.sum = function( arr ){\n"
-"if ( arr.length == 0 )\n"
-"return null;\n"
-"var s = arr[0];\n"
-"for ( var i=1; i<arr.length; i++ )\n"
-"s += arr[i];\n"
-"return s;\n"
-"}\n"
-"\n"
-"Array.avg = function( arr ){\n"
-"if ( arr.length == 0 )\n"
-"return null;\n"
-"return Array.sum( arr ) / arr.length;\n"
-"}\n"
-"\n"
-"Array.stdDev = function( arr ){\n"
-"var avg = Array.avg( arr );\n"
-"var sum = 0;\n"
-"\n"
-"for ( var i=0; i<arr.length; i++ ){\n"
-"sum += Math.pow( arr[i] - avg , 2 );\n"
-"}\n"
-"\n"
-"return Math.sqrt( sum / arr.length );\n"
-"}\n"
-"\n"
-"//these two are helpers for Array.sort(func)\n"
-"compare = function(l, r){ return (l == r ? 0 : (l < r ? -1 : 1)); }\n"
-"\n"
-"// arr.sort(compareOn('name'))\n"
-"compareOn = function(field){\n"
-"return function(l, r) { return compare(l[field], r[field]); }\n"
-"}\n"
-"\n"
-"Object.keySet = function( o ) {\n"
-"var ret = new Array();\n"
-"for( i in o ) {\n"
-"if ( !( i in o.__proto__ && o[ i ] === o.__proto__[ i ] ) ) {\n"
-"ret.push( i );\n"
-"}\n"
-"}\n"
-"return ret;\n"
-"}\n"
-"\n"
-"if ( ! NumberLong.prototype ) {\n"
-"NumberLong.prototype = {}\n"
-"}\n"
-"\n"
-"NumberLong.prototype.tojson = function() {\n"
-"return this.toString();\n"
-"}\n"
-"\n"
-"if ( ! ObjectId.prototype )\n"
-"ObjectId.prototype = {}\n"
-"\n"
-"ObjectId.prototype.toString = function(){\n"
-"return this.str;\n"
-"}\n"
-"\n"
-"ObjectId.prototype.tojson = function(){\n"
-"return \"ObjectId(\\\"\" + this.str + \"\\\")\";\n"
-"}\n"
-"\n"
-"ObjectId.prototype.isObjectId = true;\n"
-"\n"
-"ObjectId.prototype.getTimestamp = function(){\n"
-"return new Date(parseInt(this.toString().slice(0,8), 16)*1000);\n"
-"}\n"
-"\n"
-"ObjectId.prototype.equals = function( other){\n"
-"return this.str == other.str;\n"
-"}\n"
-"\n"
-"if ( typeof( DBPointer ) != \"undefined\" ){\n"
-"DBPointer.prototype.fetch = function(){\n"
-"assert( this.ns , \"need a ns\" );\n"
-"assert( this.id , \"need an id\" );\n"
-"\n"
-"return db[ this.ns ].findOne( { _id : this.id } );\n"
-"}\n"
-"\n"
-"DBPointer.prototype.tojson = function(indent){\n"
-"return tojson({\"ns\" : this.ns, \"id\" : this.id}, indent);\n"
-"}\n"
-"\n"
-"DBPointer.prototype.getCollection = function(){\n"
-"return this.ns;\n"
-"}\n"
-"\n"
-"DBPointer.prototype.toString = function(){\n"
-"return \"DBPointer \" + this.ns + \":\" + this.id;\n"
-"}\n"
-"}\n"
-"else {\n"
-"print( \"warning: no DBPointer\" );\n"
-"}\n"
-"\n"
-"if ( typeof( DBRef ) != \"undefined\" ){\n"
-"DBRef.prototype.fetch = function(){\n"
-"assert( this.$ref , \"need a ns\" );\n"
-"assert( this.$id , \"need an id\" );\n"
-"\n"
-"return db[ this.$ref ].findOne( { _id : this.$id } );\n"
-"}\n"
-"\n"
-"DBRef.prototype.tojson = function(indent){\n"
-"return tojson({\"$ref\" : this.$ref, \"$id\" : this.$id}, indent);\n"
-"}\n"
-"\n"
-"DBRef.prototype.getCollection = function(){\n"
-"return this.$ref;\n"
-"}\n"
-"\n"
-"DBRef.prototype.toString = function(){\n"
-"return this.tojson();\n"
-"}\n"
-"}\n"
-"else {\n"
-"print( \"warning: no DBRef\" );\n"
-"}\n"
-"\n"
-"if ( typeof( BinData ) != \"undefined\" ){\n"
-"BinData.prototype.tojson = function () {\n"
-"//return \"BinData type: \" + this.type + \" len: \" + this.len;\n"
-"return this.toString();\n"
-"}\n"
-"}\n"
-"else {\n"
-"print( \"warning: no BinData class\" );\n"
-"}\n"
-"\n"
-"if ( typeof( UUID ) != \"undefined\" ){\n"
-"UUID.prototype.tojson = function () {\n"
-"return this.toString();\n"
-"}\n"
-"}\n"
-"\n"
-"if ( typeof _threadInject != \"undefined\" ){\n"
-"print( \"fork() available!\" );\n"
-"\n"
-"Thread = function(){\n"
-"this.init.apply( this, arguments );\n"
-"}\n"
-"_threadInject( Thread.prototype );\n"
-"\n"
-"ScopedThread = function() {\n"
-"this.init.apply( this, arguments );\n"
-"}\n"
-"ScopedThread.prototype = new Thread( function() {} );\n"
-"_scopedThreadInject( ScopedThread.prototype );\n"
-"\n"
-"fork = function() {\n"
-"var t = new Thread( function() {} );\n"
-"Thread.apply( t, arguments );\n"
-"return t;\n"
-"}\n"
-"\n"
-"// Helper class to generate a list of events which may be executed by a ParallelTester\n"
-"EventGenerator = function( me, collectionName, mean ) {\n"
-"this.mean = mean;\n"
-"this.events = new Array( me, collectionName );\n"
-"}\n"
-"\n"
-"EventGenerator.prototype._add = function( action ) {\n"
-"this.events.push( [ Random.genExp( this.mean ), action ] );\n"
-"}\n"
-"\n"
-"EventGenerator.prototype.addInsert = function( obj ) {\n"
-"this._add( \"t.insert( \" + tojson( obj ) + \" )\" );\n"
-"}\n"
-"\n"
-"EventGenerator.prototype.addRemove = function( obj ) {\n"
-"this._add( \"t.remove( \" + tojson( obj ) + \" )\" );\n"
-"}\n"
-"\n"
-"EventGenerator.prototype.addUpdate = function( objOld, objNew ) {\n"
-"this._add( \"t.update( \" + tojson( objOld ) + \", \" + tojson( objNew ) + \" )\" );\n"
-"}\n"
-"\n"
-"EventGenerator.prototype.addCheckCount = function( count, query, shouldPrint, checkQuery ) {\n"
-"query = query || {};\n"
-"shouldPrint = shouldPrint || false;\n"
-"checkQuery = checkQuery || false;\n"
-"var action = \"assert.eq( \" + count + \", t.count( \" + tojson( query ) + \" ) );\"\n"
-"if ( checkQuery ) {\n"
-"action += \" assert.eq( \" + count + \", t.find( \" + tojson( query ) + \" ).toArray().length );\"\n"
-"}\n"
-"if ( shouldPrint ) {\n"
-"action += \" print( me + ' ' + \" + count + \" );\";\n"
-"}\n"
-"this._add( action );\n"
-"}\n"
-"\n"
-"EventGenerator.prototype.getEvents = function() {\n"
-"return this.events;\n"
-"}\n"
-"\n"
-"EventGenerator.dispatch = function() {\n"
-"var args = argumentsToArray( arguments );\n"
-"var me = args.shift();\n"
-"var collectionName = args.shift();\n"
-"var m = new Mongo( db.getMongo().host );\n"
-"var t = m.getDB( \"test\" )[ collectionName ];\n"
-"for( var i in args ) {\n"
-"sleep( args[ i ][ 0 ] );\n"
-"eval( args[ i ][ 1 ] );\n"
-"}\n"
-"}\n"
-"\n"
-"// Helper class for running tests in parallel. It assembles a set of tests\n"
-"// and then calls assert.parallelests to run them.\n"
-"ParallelTester = function() {\n"
-"this.params = new Array();\n"
-"}\n"
-"\n"
-"ParallelTester.prototype.add = function( fun, args ) {\n"
-"args = args || [];\n"
-"args.unshift( fun );\n"
-"this.params.push( args );\n"
-"}\n"
-"\n"
-"ParallelTester.prototype.run = function( msg, newScopes ) {\n"
-"newScopes = newScopes || false;\n"
-"assert.parallelTests( this.params, msg, newScopes );\n"
-"}\n"
-"\n"
-"// creates lists of tests from jstests dir in a format suitable for use by\n"
-"// ParallelTester.fileTester. The lists will be in random order.\n"
-"// n: number of lists to split these tests into\n"
-"ParallelTester.createJstestsLists = function( n ) {\n"
-"var params = new Array();\n"
-"for( var i = 0; i < n; ++i ) {\n"
-"params.push( [] );\n"
-"}\n"
-"\n"
-"var makeKeys = function( a ) {\n"
-"var ret = {};\n"
-"for( var i in a ) {\n"
-"ret[ a[ i ] ] = 1;\n"
-"}\n"
-"return ret;\n"
-"}\n"
-"\n"
-"// some tests can't run in parallel with most others\n"
-"var skipTests = makeKeys( [ \"jstests/dbadmin.js\",\n"
-"\"jstests/repair.js\",\n"
-"\"jstests/cursor8.js\",\n"
-"\"jstests/recstore.js\",\n"
-"\"jstests/extent.js\",\n"
-"\"jstests/indexb.js\",\n"
-"\"jstests/profile1.js\",\n"
-"\"jstests/mr3.js\",\n"
-"\"jstests/indexh.js\",\n"
-"\"jstests/apitest_db.js\",\n"
-"\"jstests/evalb.js\",\n"
-"\"jstests/evald.js\",\n"
-"\"jstests/evalf.js\",\n"
-"\"jstests/killop.js\",\n"
-"\"jstests/run_program1.js\",\n"
-"\"jstests/notablescan.js\"] );\n"
-"\n"
-"// some tests can't be run in parallel with each other\n"
-"var serialTestsArr = [ \"jstests/fsync.js\",\n"
-"\"jstests/fsync2.js\" ];\n"
-"var serialTests = makeKeys( serialTestsArr );\n"
-"\n"
-"params[ 0 ] = serialTestsArr;\n"
-"\n"
-"var files = listFiles(\"jstests\");\n"
-"files = Array.shuffle( files );\n"
-"\n"
-"var i = 0;\n"
-"files.forEach(\n"
-"function(x) {\n"
-"\n"
-"if ( ( /[\\/\\\\]_/.test(x.name) ) ||\n"
-"( ! /\\.js$/.test(x.name ) ) ||\n"
-"( x.name in skipTests ) ||\n"
-"( x.name in serialTests ) ||\n"
-"! /\\.js$/.test(x.name ) ){\n"
-"print(\" >>>>>>>>>>>>>>> skipping \" + x.name);\n"
-"return;\n"
-"}\n"
-"\n"
-"params[ i % n ].push( x.name );\n"
-"++i;\n"
-"}\n"
-");\n"
-"\n"
-"// randomize ordering of the serialTests\n"
-"params[ 0 ] = Array.shuffle( params[ 0 ] );\n"
-"\n"
-"for( var i in params ) {\n"
-"params[ i ].unshift( i );\n"
-"}\n"
-"\n"
-"return params;\n"
-"}\n"
-"\n"
-"// runs a set of test files\n"
-"// first argument is an identifier for this tester, remaining arguments are file names\n"
-"ParallelTester.fileTester = function() {\n"
-"var args = argumentsToArray( arguments );\n"
-"var suite = args.shift();\n"
-"args.forEach(\n"
-"function( x ) {\n"
-"print(\" S\" + suite + \" Test : \" + x + \" ...\");\n"
-"var time = Date.timeFunc( function() { load(x); }, 1);\n"
-"print(\" S\" + suite + \" Test : \" + x + \" \" + time + \"ms\" );\n"
-"}\n"
-");\n"
-"}\n"
-"\n"
-"// params: array of arrays, each element of which consists of a function followed\n"
-"// by zero or more arguments to that function. Each function and its arguments will\n"
-"// be called in a separate thread.\n"
-"// msg: failure message\n"
-"// newScopes: if true, each thread starts in a fresh scope\n"
-"assert.parallelTests = function( params, msg, newScopes ) {\n"
-"newScopes = newScopes || false;\n"
-"var wrapper = function( fun, argv ) {\n"
-"eval (\n"
-"\"var z = function() {\" +\n"
-"\"var __parallelTests__fun = \" + fun.toString() + \";\" +\n"
-"\"var __parallelTests__argv = \" + tojson( argv ) + \";\" +\n"
-"\"var __parallelTests__passed = false;\" +\n"
-"\"try {\" +\n"
-"\"__parallelTests__fun.apply( 0, __parallelTests__argv );\" +\n"
-"\"__parallelTests__passed = true;\" +\n"
-"\"} catch ( e ) {\" +\n"
-"\"print( e );\" +\n"
-"\"}\" +\n"
-"\"return __parallelTests__passed;\" +\n"
-"\"}\"\n"
-");\n"
-"return z;\n"
-"}\n"
-"var runners = new Array();\n"
-"for( var i in params ) {\n"
-"var param = params[ i ];\n"
-"var test = param.shift();\n"
-"var t;\n"
-"if ( newScopes )\n"
-"t = new ScopedThread( wrapper( test, param ) );\n"
-"else\n"
-"t = new Thread( wrapper( test, param ) );\n"
-"runners.push( t );\n"
-"}\n"
-"\n"
-"runners.forEach( function( x ) { x.start(); } );\n"
-"var nFailed = 0;\n"
-"// v8 doesn't like it if we exit before all threads are joined (SERVER-529)\n"
-"runners.forEach( function( x ) { if( !x.returnData() ) { ++nFailed; } } );\n"
-"assert.eq( 0, nFailed, msg );\n"
-"}\n"
-"}\n"
-"\n"
-"tojsononeline = function( x ){\n"
-"return tojson( x , \" \" , true );\n"
-"}\n"
-"\n"
-"tojson = function( x, indent , nolint ){\n"
-"if ( x === null )\n"
-"return \"null\";\n"
-"\n"
-"if ( x === undefined )\n"
-"return \"undefined\";\n"
-"\n"
-"if (!indent)\n"
-"indent = \"\";\n"
-"\n"
-"switch ( typeof x ) {\n"
-"case \"string\": {\n"
-"var s = \"\\\"\";\n"
-"for ( var i=0; i<x.length; i++ ){\n"
-"switch (x[i]){\n"
-"case '\"': s += '\\\\\"'; break;\n"
-"case '\\\\': s += '\\\\\\\\'; break;\n"
-"case '\\b': s += '\\\\b'; break;\n"
-"case '\\f': s += '\\\\f'; break;\n"
-"case '\\n': s += '\\\\n'; break;\n"
-"case '\\r': s += '\\\\r'; break;\n"
-"case '\\t': s += '\\\\t'; break;\n"
-"\n"
-"default: {\n"
-"var code = x.charCodeAt(i);\n"
-"if (code < 0x20){\n"
-"s += (code < 0x10 ? '\\\\u000' : '\\\\u00') + code.toString(16);\n"
-"} else {\n"
-"s += x[i];\n"
-"}\n"
-"}\n"
-"}\n"
-"}\n"
-"return s + \"\\\"\";\n"
-"}\n"
-"case \"number\":\n"
-"case \"boolean\":\n"
-"return \"\" + x;\n"
-"case \"object\":{\n"
-"var s = tojsonObject( x, indent , nolint );\n"
-"if ( ( nolint == null || nolint == true ) && s.length < 80 && ( indent == null || indent.length == 0 ) ){\n"
-"s = s.replace( /[\\s\\r\\n ]+/gm , \" \" );\n"
-"}\n"
-"return s;\n"
-"}\n"
-"case \"function\":\n"
-"return x.toString();\n"
-"default:\n"
-"throw \"tojson can't handle type \" + ( typeof x );\n"
-"}\n"
-"\n"
-"}\n"
-"\n"
-"tojsonObject = function( x, indent , nolint ){\n"
-"var lineEnding = nolint ? \" \" : \"\\n\";\n"
-"var tabSpace = nolint ? \"\" : \"\\t\";\n"
-"\n"
-"assert.eq( ( typeof x ) , \"object\" , \"tojsonObject needs object, not [\" + ( typeof x ) + \"]\" );\n"
-"\n"
-"if (!indent)\n"
-"indent = \"\";\n"
-"\n"
-"if ( typeof( x.tojson ) == \"function\" && x.tojson != tojson ) {\n"
-"return x.tojson(indent,nolint);\n"
-"}\n"
-"\n"
-"if ( x.constructor && typeof( x.constructor.tojson ) == \"function\" && x.constructor.tojson != tojson ) {\n"
-"return x.constructor.tojson( x, indent , nolint );\n"
-"}\n"
-"\n"
-"if ( x.toString() == \"[object MaxKey]\" )\n"
-"return \"{ $maxKey : 1 }\";\n"
-"if ( x.toString() == \"[object MinKey]\" )\n"
-"return \"{ $minKey : 1 }\";\n"
-"\n"
-"var s = \"{\" + lineEnding;\n"
-"\n"
-"// push one level of indent\n"
-"indent += tabSpace;\n"
-"\n"
-"var total = 0;\n"
-"for ( var k in x ) total++;\n"
-"if ( total == 0 ) {\n"
-"s += indent + lineEnding;\n"
-"}\n"
-"\n"
-"var keys = x;\n"
-"if ( typeof( x._simpleKeys ) == \"function\" )\n"
-"keys = x._simpleKeys();\n"
-"var num = 1;\n"
-"for ( var k in keys ){\n"
-"\n"
-"var val = x[k];\n"
-"if ( val == DB.prototype || val == DBCollection.prototype )\n"
-"continue;\n"
-"\n"
-"s += indent + \"\\\"\" + k + \"\\\" : \" + tojson( val, indent , nolint );\n"
-"if (num != total) {\n"
-"s += \",\";\n"
-"num++;\n"
-"}\n"
-"s += lineEnding;\n"
-"}\n"
-"\n"
-"// pop one level of indent\n"
-"indent = indent.substring(1);\n"
-"return s + indent + \"}\";\n"
-"}\n"
-"\n"
-"shellPrint = function( x ){\n"
-"it = x;\n"
-"if ( x != undefined )\n"
-"shellPrintHelper( x );\n"
-"\n"
-"if ( db ){\n"
-"var e = db.getPrevError();\n"
-"if ( e.err ) {\n"
-"if( e.nPrev <= 1 )\n"
-"print( \"error on last call: \" + tojson( e.err ) );\n"
-"else\n"
-"print( \"an error \" + tojson(e.err) + \" occurred \" + e.nPrev + \" operations back in the command invocation\" );\n"
-"}\n"
-"db.resetError();\n"
-"}\n"
-"}\n"
-"\n"
-"printjson = function(x){\n"
-"print( tojson( x ) );\n"
-"}\n"
-"\n"
-"printjsononeline = function(x){\n"
-"print( tojsononeline( x ) );\n"
-"}\n"
-"\n"
-"shellPrintHelper = function (x) {\n"
-"\n"
-"if (typeof (x) == \"undefined\") {\n"
-"\n"
-"if (typeof (db) != \"undefined\" && db.getLastError) {\n"
-"// explicit w:1 so that replset getLastErrorDefaults aren't used here which would be bad.\n"
-"var e = db.getLastError(1);\n"
-"if (e != null)\n"
-"print(e);\n"
-"}\n"
-"\n"
-"return;\n"
-"}\n"
-"\n"
-"if (x == __magicNoPrint)\n"
-"return;\n"
-"\n"
-"if (x == null) {\n"
-"print(\"null\");\n"
-"return;\n"
-"}\n"
-"\n"
-"if (typeof x != \"object\")\n"
-"return print(x);\n"
-"\n"
-"var p = x.shellPrint;\n"
-"if (typeof p == \"function\")\n"
-"return x.shellPrint();\n"
-"\n"
-"var p = x.tojson;\n"
-"if (typeof p == \"function\")\n"
-"print(x.tojson());\n"
-"else\n"
-"print(tojson(x));\n"
-"}\n"
-"\n"
-"shellAutocomplete = function (/*prefix*/){ // outer scope function called on init. Actual function at end\n"
-"\n"
-"var universalMethods = \"constructor prototype toString valueOf toLocaleString hasOwnProperty propertyIsEnumerable\".split(' ');\n"
-"\n"
-"var builtinMethods = {}; // uses constructor objects as keys\n"
-"builtinMethods[Array] = \"length concat join pop push reverse shift slice sort splice unshift indexOf lastIndexOf every filter forEach map some\".split(' ');\n"
-"builtinMethods[Boolean] = \"\".split(' '); // nothing more than universal methods\n"
-"builtinMethods[Date] = \"getDate getDay getFullYear getHours getMilliseconds getMinutes getMonth getSeconds getTime getTimezoneOffset getUTCDate getUTCDay getUTCFullYear getUTCHours getUTCMilliseconds getUTCMinutes getUTCMonth getUTCSeconds getYear parse setDate setFullYear setHours setMilliseconds setMinutes setMonth setSeconds setTime setUTCDate setUTCFullYear setUTCHours setUTCMilliseconds setUTCMinutes setUTCMonth setUTCSeconds setYear toDateString toGMTString toLocaleDateString toLocaleTimeString toTimeString toUTCString UTC\".split(' ');\n"
-"builtinMethods[Math] = \"E LN2 LN10 LOG2E LOG10E PI SQRT1_2 SQRT2 abs acos asin atan atan2 ceil cos exp floor log max min pow random round sin sqrt tan\".split(' ');\n"
-"builtinMethods[Number] = \"MAX_VALUE MIN_VALUE NEGATIVE_INFINITY POSITIVE_INFINITY toExponential toFixed toPrecision\".split(' ');\n"
-"builtinMethods[RegExp] = \"global ignoreCase lastIndex multiline source compile exec test\".split(' ');\n"
-"builtinMethods[String] = \"length charAt charCodeAt concat fromCharCode indexOf lastIndexOf match replace search slice split substr substring toLowerCase toUpperCase\".split(' ');\n"
-"builtinMethods[Function] = \"call apply\".split(' ');\n"
-"builtinMethods[Object] = \"bsonsize\".split(' ');\n"
-"\n"
-"builtinMethods[Mongo] = \"find update insert remove\".split(' ');\n"
-"builtinMethods[BinData] = \"hex base64 length subtype\".split(' ');\n"
-"builtinMethods[NumberLong] = \"toNumber\".split(' ');\n"
-"\n"
-"var extraGlobals = \"Infinity NaN undefined null true false decodeURI decodeURIComponent encodeURI encodeURIComponent escape eval isFinite isNaN parseFloat parseInt unescape Array Boolean Date Math Number RegExp String print load gc MinKey MaxKey Mongo NumberLong ObjectId DBPointer UUID BinData Map\".split(' ');\n"
-"\n"
-"var isPrivate = function(name){\n"
-"if (shellAutocomplete.showPrivate) return false;\n"
-"if (name == '_id') return false;\n"
-"if (name[0] == '_') return true;\n"
-"if (name[name.length-1] == '_') return true; // some native functions have an extra name_ method\n"
-"return false;\n"
-"}\n"
-"\n"
-"var customComplete = function(obj){\n"
-"try {\n"
-"if(obj.__proto__.constructor.autocomplete){\n"
-"var ret = obj.constructor.autocomplete(obj);\n"
-"if (ret.constructor != Array){\n"
-"print(\"\\nautocompleters must return real Arrays\");\n"
-"return [];\n"
-"}\n"
-"return ret;\n"
-"} else {\n"
-"return [];\n"
-"}\n"
-"} catch (e) {\n"
-"// print(e); // uncomment if debugging custom completers\n"
-"return [];\n"
-"}\n"
-"}\n"
-"\n"
-"var worker = function( prefix ){\n"
-"var global = (function(){return this;}).call(); // trick to get global object\n"
-"\n"
-"var curObj = global;\n"
-"var parts = prefix.split('.');\n"
-"for (var p=0; p < parts.length - 1; p++){ // doesn't include last part\n"
-"curObj = curObj[parts[p]];\n"
-"if (curObj == null)\n"
-"return [];\n"
-"}\n"
-"\n"
-"var lastPrefix = parts[parts.length-1] || '';\n"
-"var begining = parts.slice(0, parts.length-1).join('.');\n"
-"if (begining.length)\n"
-"begining += '.';\n"
-"\n"
-"var possibilities = new Array().concat(\n"
-"universalMethods,\n"
-"Object.keySet(curObj),\n"
-"Object.keySet(curObj.__proto__),\n"
-"builtinMethods[curObj] || [], // curObj is a builtin constructor\n"
-"builtinMethods[curObj.__proto__.constructor] || [], // curObj is made from a builtin constructor\n"
-"curObj == global ? extraGlobals : [],\n"
-"customComplete(curObj)\n"
-");\n"
-"\n"
-"var ret = [];\n"
-"for (var i=0; i < possibilities.length; i++){\n"
-"var p = possibilities[i];\n"
-"if (typeof(curObj[p]) == \"undefined\" && curObj != global) continue; // extraGlobals aren't in the global object\n"
-"if (p.length == 0 || p.length < lastPrefix.length) continue;\n"
-"if (isPrivate(p)) continue;\n"
-"if (p.match(/^[0-9]+$/)) continue; // don't array number indexes\n"
-"if (p.substr(0, lastPrefix.length) != lastPrefix) continue;\n"
-"\n"
-"var completion = begining + p;\n"
-"if(curObj[p] && curObj[p].constructor == Function && p != 'constructor')\n"
-"completion += '(';\n"
-"\n"
-"ret.push(completion);\n"
-"}\n"
-"\n"
-"return ret;\n"
-"}\n"
-"\n"
-"// this is the actual function that gets assigned to shellAutocomplete\n"
-"return function( prefix ){\n"
-"try {\n"
-"__autocomplete__ = worker(prefix).sort();\n"
-"}catch (e){\n"
-"print(\"exception durring autocomplete: \" + tojson(e.message));\n"
-"__autocomplete__ = [];\n"
-"}\n"
-"}\n"
-"}();\n"
-"\n"
-"shellAutocomplete.showPrivate = false; // toggle to show (useful when working on internals)\n"
-"\n"
-"shellHelper = function( command , rest , shouldPrint ){\n"
-"command = command.trim();\n"
-"var args = rest.trim().replace(/;$/,\"\").split( \"\\s+\" );\n"
-"\n"
-"if ( ! shellHelper[command] )\n"
-"throw \"no command [\" + command + \"]\";\n"
-"\n"
-"var res = shellHelper[command].apply( null , args );\n"
-"if ( shouldPrint ){\n"
-"shellPrintHelper( res );\n"
-"}\n"
-"return res;\n"
-"}\n"
-"\n"
-"shellHelper.use = function (dbname) {\n"
-"var s = \"\" + dbname;\n"
-"if (s == \"\") {\n"
-"print(\"bad use parameter\");\n"
-"return;\n"
-"}\n"
-"db = db.getMongo().getDB(dbname);\n"
-"print(\"switched to db \" + db.getName());\n"
-"}\n"
-"\n"
-"shellHelper.it = function(){\n"
-"if ( typeof( ___it___ ) == \"undefined\" || ___it___ == null ){\n"
-"print( \"no cursor\" );\n"
-"return;\n"
-"}\n"
-"shellPrintHelper( ___it___ );\n"
-"}\n"
-"\n"
-"shellHelper.show = function (what) {\n"
-"assert(typeof what == \"string\");\n"
-"\n"
-"if (what == \"profile\") {\n"
-"if (db.system.profile.count() == 0) {\n"
-"print(\"db.system.profile is empty\");\n"
-"print(\"Use db.setProfilingLevel(2) will enable profiling\");\n"
-"print(\"Use db.system.profile.find() to show raw profile entries\");\n"
-"}\n"
-"else {\n"
-"print();\n"
-"db.system.profile.find({ millis: { $gt: 0} }).sort({ $natural: -1 }).limit(5).forEach(function (x) { print(\"\" + x.millis + \"ms \" + String(x.ts).substring(0, 24)); print(x.info); print(\"\\n\"); })\n"
-"}\n"
-"return \"\";\n"
-"}\n"
-"\n"
-"if (what == \"users\") {\n"
-"db.system.users.find().forEach(printjson);\n"
-"return \"\";\n"
-"}\n"
-"\n"
-"if (what == \"collections\" || what == \"tables\") {\n"
-"db.getCollectionNames().forEach(function (x) { print(x) });\n"
-"return \"\";\n"
-"}\n"
-"\n"
-"if (what == \"dbs\") {\n"
-"var dbs = db.getMongo().getDBs();\n"
-"var size = {};\n"
-"dbs.databases.forEach(function (x) { size[x.name] = x.sizeOnDisk; });\n"
-"var names = dbs.databases.map(function (z) { return z.name; }).sort();\n"
-"names.forEach(function (n) {\n"
-"if (size[n] > 1) {\n"
-"print(n + \"\\t\" + size[n] / 1024 / 1024 / 1024 + \"GB\");\n"
-"} else {\n"
-"print(n + \"\\t(empty)\");\n"
-"}\n"
-"});\n"
-"//db.getMongo().getDBNames().sort().forEach(function (x) { print(x) });\n"
-"return \"\";\n"
-"}\n"
-"\n"
-"throw \"don't know how to show [\" + what + \"]\";\n"
-"\n"
-"}\n"
-"\n"
-"if ( typeof( Map ) == \"undefined\" ){\n"
-"Map = function(){\n"
-"this._data = {};\n"
-"}\n"
-"}\n"
-"\n"
-"Map.hash = function( val ){\n"
-"if ( ! val )\n"
-"return val;\n"
-"\n"
-"switch ( typeof( val ) ){\n"
-"case 'string':\n"
-"case 'number':\n"
-"case 'date':\n"
-"return val.toString();\n"
-"case 'object':\n"
-"case 'array':\n"
-"var s = \"\";\n"
-"for ( var k in val ){\n"
-"s += k + val[k];\n"
-"}\n"
-"return s;\n"
-"}\n"
-"\n"
-"throw \"can't hash : \" + typeof( val );\n"
-"}\n"
-"\n"
-"Map.prototype.put = function( key , value ){\n"
-"var o = this._get( key );\n"
-"var old = o.value;\n"
-"o.value = value;\n"
-"return old;\n"
-"}\n"
-"\n"
-"Map.prototype.get = function( key ){\n"
-"return this._get( key ).value;\n"
-"}\n"
-"\n"
-"Map.prototype._get = function( key ){\n"
-"var h = Map.hash( key );\n"
-"var a = this._data[h];\n"
-"if ( ! a ){\n"
-"a = [];\n"
-"this._data[h] = a;\n"
-"}\n"
-"\n"
-"for ( var i=0; i<a.length; i++ ){\n"
-"if ( friendlyEqual( key , a[i].key ) ){\n"
-"return a[i];\n"
-"}\n"
-"}\n"
-"var o = { key : key , value : null };\n"
-"a.push( o );\n"
-"return o;\n"
-"}\n"
-"\n"
-"Map.prototype.values = function(){\n"
-"var all = [];\n"
-"for ( var k in this._data ){\n"
-"this._data[k].forEach( function(z){ all.push( z.value ); } );\n"
-"}\n"
-"return all;\n"
-"}\n"
-"\n"
-"if ( typeof( gc ) == \"undefined\" ){\n"
-"gc = function(){\n"
-"print( \"warning: using noop gc()\" );\n"
-"}\n"
-"}\n"
-"\n"
-"\n"
-"Math.sigFig = function( x , N ){\n"
-"if ( ! N ){\n"
-"N = 3;\n"
-"}\n"
-"var p = Math.pow( 10, N - Math.ceil( Math.log( Math.abs(x) ) / Math.log( 10 )) );\n"
-"return Math.round(x*p)/p;\n"
-"}\n"
-"\n"
-"Random = function() {}\n"
-"\n"
-"// set random seed\n"
-"Random.srand = function( s ) { _srand( s ); }\n"
-"\n"
-"// random number 0 <= r < 1\n"
-"Random.rand = function() { return _rand(); }\n"
-"\n"
-"// random integer 0 <= r < n\n"
-"Random.randInt = function( n ) { return Math.floor( Random.rand() * n ); }\n"
-"\n"
-"Random.setRandomSeed = function( s ) {\n"
-"s = s || new Date().getTime();\n"
-"print( \"setting random seed: \" + s );\n"
-"Random.srand( s );\n"
-"}\n"
-"\n"
-"// generate a random value from the exponential distribution with the specified mean\n"
-"Random.genExp = function( mean ) {\n"
-"return -Math.log( Random.rand() ) * mean;\n"
-"}\n"
-"\n"
-"Geo = {};\n"
-"Geo.distance = function( a , b ){\n"
-"var ax = null;\n"
-"var ay = null;\n"
-"var bx = null;\n"
-"var by = null;\n"
-"\n"
-"for ( var key in a ){\n"
-"if ( ax == null )\n"
-"ax = a[key];\n"
-"else if ( ay == null )\n"
-"ay = a[key];\n"
-"}\n"
-"\n"
-"for ( var key in b ){\n"
-"if ( bx == null )\n"
-"bx = b[key];\n"
-"else if ( by == null )\n"
-"by = b[key];\n"
-"}\n"
-"\n"
-"return Math.sqrt( Math.pow( by - ay , 2 ) +\n"
-"Math.pow( bx - ax , 2 ) );\n"
-"}\n"
-"\n"
-"Geo.sphereDistance = function( a , b ){\n"
-"var ax = null;\n"
-"var ay = null;\n"
-"var bx = null;\n"
-"var by = null;\n"
-"\n"
-"// TODO swap order of x and y when done on server\n"
-"for ( var key in a ){\n"
-"if ( ax == null )\n"
-"ax = a[key] * (Math.PI/180);\n"
-"else if ( ay == null )\n"
-"ay = a[key] * (Math.PI/180);\n"
-"}\n"
-"\n"
-"for ( var key in b ){\n"
-"if ( bx == null )\n"
-"bx = b[key] * (Math.PI/180);\n"
-"else if ( by == null )\n"
-"by = b[key] * (Math.PI/180);\n"
-"}\n"
-"\n"
-"var sin_x1=Math.sin(ax), cos_x1=Math.cos(ax);\n"
-"var sin_y1=Math.sin(ay), cos_y1=Math.cos(ay);\n"
-"var sin_x2=Math.sin(bx), cos_x2=Math.cos(bx);\n"
-"var sin_y2=Math.sin(by), cos_y2=Math.cos(by);\n"
-"\n"
-"var cross_prod =\n"
-"(cos_y1*cos_x1 * cos_y2*cos_x2) +\n"
-"(cos_y1*sin_x1 * cos_y2*sin_x2) +\n"
-"(sin_y1 * sin_y2);\n"
-"\n"
-"if (cross_prod >= 1 || cross_prod <= -1){\n"
-"// fun with floats\n"
-"assert( Math.abs(cross_prod)-1 < 1e-6 );\n"
-"return cross_prod > 0 ? 0 : Math.PI;\n"
-"}\n"
-"\n"
-"return Math.acos(cross_prod);\n"
-"}\n"
-"\n"
-"rs = function () { return \"try rs.help()\"; }\n"
-"\n"
-"rs.help = function () {\n"
-"print(\"\\trs.status() { replSetGetStatus : 1 } checks repl set status\");\n"
-"print(\"\\trs.initiate() { replSetInitiate : null } initiates set with default settings\");\n"
-"print(\"\\trs.initiate(cfg) { replSetInitiate : cfg } initiates set with configuration cfg\");\n"
-"print(\"\\trs.conf() get the current configuration object from local.system.replset\");\n"
-"print(\"\\trs.reconfig(cfg) updates the configuration of a running replica set with cfg (disconnects)\");\n"
-"print(\"\\trs.add(hostportstr) add a new member to the set with default attributes (disconnects)\");\n"
-"print(\"\\trs.add(membercfgobj) add a new member to the set with extra attributes (disconnects)\");\n"
-"print(\"\\trs.addArb(hostportstr) add a new member which is arbiterOnly:true (disconnects)\");\n"
-"print(\"\\trs.stepDown([secs]) step down as primary (momentarily) (disconnects)\");\n"
-"print(\"\\trs.freeze(secs) make a node ineligible to become primary for the time specified\");\n"
-"print(\"\\trs.remove(hostportstr) remove a host from the replica set (disconnects)\");\n"
-"print(\"\\trs.slaveOk() shorthand for db.getMongo().setSlaveOk()\");\n"
-"print();\n"
-"print(\"\\tdb.isMaster() check who is primary\");\n"
-"print();\n"
-"print(\"\\treconfiguration helpers disconnect from the database so the shell will display\");\n"
-"print(\"\\tan error, even if the command succeeds.\");\n"
-"print(\"\\tsee also http://<mongod_host>:28017/_replSet for additional diagnostic info\");\n"
-"}\n"
-"rs.slaveOk = function () { return db.getMongo().setSlaveOk(); }\n"
-"rs.status = function () { return db._adminCommand(\"replSetGetStatus\"); }\n"
-"rs.isMaster = function () { return db.isMaster(); }\n"
-"rs.initiate = function (c) { return db._adminCommand({ replSetInitiate: c }); }\n"
-"rs.reconfig = function (cfg) {\n"
-"cfg.version = rs.conf().version + 1;\n"
-"var res = null;\n"
-"try {\n"
-"res = db.adminCommand({ replSetReconfig: cfg });\n"
-"}\n"
-"catch (e) {\n"
-"print(\"shell got exception during reconfig: \" + e);\n"
-"print(\"in some circumstances, the primary steps down and closes connections on a reconfig\");\n"
-"}\n"
-"return res;\n"
-"}\n"
-"rs.add = function (hostport, arb) {\n"
-"var cfg = hostport;\n"
-"\n"
-"var local = db.getSisterDB(\"local\");\n"
-"assert(local.system.replset.count() <= 1, \"error: local.system.replset has unexpected contents\");\n"
-"var c = local.system.replset.findOne();\n"
-"assert(c, \"no config object retrievable from local.system.replset\");\n"
-"\n"
-"c.version++;\n"
-"\n"
-"var max = 0;\n"
-"for (var i in c.members)\n"
-"if (c.members[i]._id > max) max = c.members[i]._id;\n"
-"if (isString(hostport)) {\n"
-"cfg = { _id: max + 1, host: hostport };\n"
-"if (arb)\n"
-"cfg.arbiterOnly = true;\n"
-"}\n"
-"c.members.push(cfg);\n"
-"var res = null;\n"
-"try {\n"
-"res = db.adminCommand({ replSetReconfig: c });\n"
-"}\n"
-"catch (e) {\n"
-"print(\"shell got exception during reconfig: \" + e);\n"
-"print(\"in some circumstances, the primary steps down and closes connections on a reconfig\");\n"
-"}\n"
-"return res;\n"
-"}\n"
-"rs.stepDown = function (secs) { return db._adminCommand({ replSetStepDown:secs||60}); }\n"
-"rs.freeze = function (secs) { return db._adminCommand({replSetFreeze:secs}); }\n"
-"rs.addArb = function (hn) { return this.add(hn, true); }\n"
-"rs.conf = function () { return db.getSisterDB(\"local\").system.replset.findOne(); }\n"
-"\n"
-"rs.remove = function (hn) {\n"
-"var local = db.getSisterDB(\"local\");\n"
-"assert(local.system.replset.count() <= 1, \"error: local.system.replset has unexpected contents\");\n"
-"var c = local.system.replset.findOne();\n"
-"assert(c, \"no config object retrievable from local.system.replset\");\n"
-"c.version++;\n"
-"\n"
-"for (var i in c.members) {\n"
-"if (c.members[i].host == hn) {\n"
-"c.members.splice(i, 1);\n"
-"return db._adminCommand({ replSetReconfig : c});\n"
-"}\n"
-"}\n"
-"\n"
-"return \"error: couldn't find \"+hn+\" in \"+tojson(c.members);\n"
-"};\n"
-"\n"
-"help = shellHelper.help = function (x) {\n"
-"if (x == \"mr\") {\n"
-"print(\"\\nSee also http://www.mongodb.org/display/DOCS/MapReduce\");\n"
-"print(\"\\nfunction mapf() {\");\n"
-"print(\" // 'this' holds current document to inspect\");\n"
-"print(\" emit(key, value);\");\n"
-"print(\"}\");\n"
-"print(\"\\nfunction reducef(key,value_array) {\");\n"
-"print(\" return reduced_value;\");\n"
-"print(\"}\");\n"
-"print(\"\\ndb.mycollection.mapReduce(mapf, reducef[, options])\");\n"
-"print(\"\\noptions\");\n"
-"print(\"{[query : <query filter object>]\");\n"
-"print(\" [, sort : <sort the query. useful for optimization>]\");\n"
-"print(\" [, limit : <number of objects to return from collection>]\");\n"
-"print(\" [, out : <output-collection name>]\");\n"
-"print(\" [, keeptemp: <true|false>]\");\n"
-"print(\" [, finalize : <finalizefunction>]\");\n"
-"print(\" [, scope : <object where fields go into javascript global scope >]\");\n"
-"print(\" [, verbose : true]}\\n\");\n"
-"return;\n"
-"} else if (x == \"connect\") {\n"
-"print(\"\\nNormally one specifies the server on the mongo shell command line. Run mongo --help to see those options.\");\n"
-"print(\"Additional connections may be opened:\\n\");\n"
-"print(\" var x = new Mongo('host[:port]');\");\n"
-"print(\" var mydb = x.getDB('mydb');\");\n"
-"print(\" or\");\n"
-"print(\" var mydb = connect('host[:port]/mydb');\");\n"
-"print(\"\\nNote: the REPL prompt only auto-reports getLastError() for the shell command line connection.\\n\");\n"
-"return;\n"
-"}\n"
-"else if (x == \"misc\") {\n"
-"print(\"\\tb = new BinData(subtype,base64str) create a BSON BinData value\");\n"
-"print(\"\\tb.subtype() the BinData subtype (0..255)\");\n"
-"print(\"\\tb.length() length of the BinData data in bytes\");\n"
-"print(\"\\tb.hex() the data as a hex encoded string\");\n"
-"print(\"\\tb.base64() the data as a base 64 encoded string\");\n"
-"print(\"\\tb.toString()\");\n"
-"print();\n"
-"print(\"\\to = new ObjectId() create a new ObjectId\");\n"
-"print(\"\\to.getTimestamp() return timestamp derived from first 32 bits of the OID\");\n"
-"print(\"\\to.isObjectId()\");\n"
-"print(\"\\to.toString()\");\n"
-"print(\"\\to.equals(otherid)\");\n"
-"return;\n"
-"}\n"
-"else if (x == \"admin\") {\n"
-"print(\"\\tls([path]) list files\");\n"
-"print(\"\\tpwd() returns current directory\");\n"
-"print(\"\\tlistFiles([path]) returns file list\");\n"
-"print(\"\\thostname() returns name of this host\");\n"
-"print(\"\\tcat(fname) returns contents of text file as a string\");\n"
-"print(\"\\tremoveFile(f) delete a file or directory\");\n"
-"print(\"\\tload(jsfilename) load and execute a .js file\");\n"
-"print(\"\\trun(program[, args...]) spawn a program and wait for its completion\");\n"
-"print(\"\\tsleep(m) sleep m milliseconds\");\n"
-"print(\"\\tgetMemInfo() diagnostic\");\n"
-"return;\n"
-"}\n"
-"else if (x == \"test\") {\n"
-"print(\"\\tstartMongodEmpty(args) DELETES DATA DIR and then starts mongod\");\n"
-"print(\"\\t returns a connection to the new server\");\n"
-"print(\"\\tstartMongodTest(port,dir,options)\");\n"
-"print(\"\\t DELETES DATA DIR\");\n"
-"print(\"\\t automatically picks port #s starting at 27000 and increasing\");\n"
-"print(\"\\t or you can specify the port as the first arg\");\n"
-"print(\"\\t dir is /data/db/<port>/ if not specified as the 2nd arg\");\n"
-"print(\"\\t returns a connection to the new server\");\n"
-"print(\"\\tresetDbpath(dirpathstr) deletes everything under the dir specified including subdirs\");\n"
-"print(\"\\tstopMongoProgram(port[, signal])\");\n"
-"return;\n"
-"}\n"
-"else if (x == \"\") {\n"
-"print(\"\\t\" + \"db.help() help on db methods\");\n"
-"print(\"\\t\" + \"db.mycoll.help() help on collection methods\");\n"
-"print(\"\\t\" + \"rs.help() help on replica set methods\");\n"
-"print(\"\\t\" + \"help connect connecting to a db help\");\n"
-"print(\"\\t\" + \"help admin administrative help\");\n"
-"print(\"\\t\" + \"help misc misc things to know\");\n"
-"print(\"\\t\" + \"help mr mapreduce help\");\n"
-"print();\n"
-"print(\"\\t\" + \"show dbs show database names\");\n"
-"print(\"\\t\" + \"show collections show collections in current database\");\n"
-"print(\"\\t\" + \"show users show users in current database\");\n"
-"print(\"\\t\" + \"show profile show most recent system.profile entries with time >= 1ms\");\n"
-"print(\"\\t\" + \"use <db_name> set current database\");\n"
-"print(\"\\t\" + \"db.foo.find() list objects in collection foo\");\n"
-"print(\"\\t\" + \"db.foo.find( { a : 1 } ) list objects in foo where a == 1\");\n"
-"print(\"\\t\" + \"it result of the last line evaluated; use to further iterate\");\n"
-"print(\"\\t\" + \"DBQuery.shellBatchSize = x set default number of items to display on shell\");\n"
-"print(\"\\t\" + \"exit quit the mongo shell\");\n"
-"}\n"
-"else\n"
-"print(\"unknown help option\");\n"
-"}\n"
-;
-extern const JSFile utils;
-const JSFile utils = { "shell/utils.js" , _jscode_raw_utils };
-const StringData _jscode_raw_db =
-"// db.js\n"
-"\n"
-"if ( typeof DB == \"undefined\" ){\n"
-"DB = function( mongo , name ){\n"
-"this._mongo = mongo;\n"
-"this._name = name;\n"
-"}\n"
-"}\n"
-"\n"
-"DB.prototype.getMongo = function(){\n"
-"assert( this._mongo , \"why no mongo!\" );\n"
-"return this._mongo;\n"
-"}\n"
-"\n"
-"DB.prototype.getSiblingDB = function( name ){\n"
-"return this.getMongo().getDB( name );\n"
-"}\n"
-"\n"
-"DB.prototype.getSisterDB = DB.prototype.getSiblingDB;\n"
-"\n"
-"DB.prototype.getName = function(){\n"
-"return this._name;\n"
-"}\n"
-"\n"
-"DB.prototype.stats = function(){\n"
-"return this.runCommand( { dbstats : 1 } );\n"
-"}\n"
-"\n"
-"DB.prototype.getCollection = function( name ){\n"
-"return new DBCollection( this._mongo , this , name , this._name + \".\" + name );\n"
-"}\n"
-"\n"
-"DB.prototype.commandHelp = function( name ){\n"
-"var c = {};\n"
-"c[name] = 1;\n"
-"c.help = true;\n"
-"return this.runCommand( c ).help;\n"
-"}\n"
-"\n"
-"DB.prototype.runCommand = function( obj ){\n"
-"if ( typeof( obj ) == \"string\" ){\n"
-"var n = {};\n"
-"n[obj] = 1;\n"
-"obj = n;\n"
-"}\n"
-"return this.getCollection( \"$cmd\" ).findOne( obj );\n"
-"}\n"
-"\n"
-"DB.prototype._dbCommand = DB.prototype.runCommand;\n"
-"\n"
-"DB.prototype.adminCommand = function( obj ){\n"
-"if ( this._name == \"admin\" )\n"
-"return this.runCommand( obj );\n"
-"return this.getSiblingDB( \"admin\" ).runCommand( obj );\n"
-"}\n"
-"\n"
-"DB.prototype._adminCommand = DB.prototype.adminCommand; // alias old name\n"
-"\n"
-"DB.prototype.addUser = function( username , pass, readOnly ){\n"
-"readOnly = readOnly || false;\n"
-"var c = this.getCollection( \"system.users\" );\n"
-"\n"
-"var u = c.findOne( { user : username } ) || { user : username };\n"
-"u.readOnly = readOnly;\n"
-"u.pwd = hex_md5( username + \":mongo:\" + pass );\n"
-"print( tojson( u ) );\n"
-"\n"
-"c.save( u );\n"
-"}\n"
-"\n"
-"DB.prototype.removeUser = function( username ){\n"
-"this.getCollection( \"system.users\" ).remove( { user : username } );\n"
-"}\n"
-"\n"
-"DB.prototype.__pwHash = function( nonce, username, pass ) {\n"
-"return hex_md5( nonce + username + hex_md5( username + \":mongo:\" + pass ) );\n"
-"}\n"
-"\n"
-"DB.prototype.auth = function( username , pass ){\n"
-"var n = this.runCommand( { getnonce : 1 } );\n"
-"\n"
-"var a = this.runCommand(\n"
-"{\n"
-"authenticate : 1 ,\n"
-"user : username ,\n"
-"nonce : n.nonce ,\n"
-"key : this.__pwHash( n.nonce, username, pass )\n"
-"}\n"
-");\n"
-"\n"
-"return a.ok;\n"
-"}\n"
-"\n"
-"/**\n"
-"Create a new collection in the database. Normally, collection creation is automatic. You would\n"
-"use this function if you wish to specify special options on creation.\n"
-"\n"
-"If the collection already exists, no action occurs.\n"
-"\n"
-"<p>Options:</p>\n"
-"<ul>\n"
-"<li>\n"
-"size: desired initial extent size for the collection. Must be <= 1000000000.\n"
-"for fixed size (capped) collections, this size is the total/max size of the\n"
-"collection.\n"
-"</li>\n"
-"<li>\n"
-"capped: if true, this is a capped collection (where old data rolls out).\n"
-"</li>\n"
-"<li> max: maximum number of objects if capped (optional).</li>\n"
-"</ul>\n"
-"\n"
-"<p>Example: </p>\n"
-"\n"
-"<code>db.createCollection(\"movies\", { size: 10 * 1024 * 1024, capped:true } );</code>\n"
-"\n"
-"* @param {String} name Name of new collection to create\n"
-"* @param {Object} options Object with options for call. Options are listed above.\n"
-"* @return SOMETHING_FIXME\n"
-"*/\n"
-"DB.prototype.createCollection = function(name, opt) {\n"
-"var options = opt || {};\n"
-"var cmd = { create: name, capped: options.capped, size: options.size, max: options.max };\n"
-"var res = this._dbCommand(cmd);\n"
-"return res;\n"
-"}\n"
-"\n"
-"/**\n"
-"* @deprecated use getProfilingStatus\n"
-"* Returns the current profiling level of this database\n"
-"* @return SOMETHING_FIXME or null on error\n"
-"*/\n"
-"DB.prototype.getProfilingLevel = function() {\n"
-"var res = this._dbCommand( { profile: -1 } );\n"
-"return res ? res.was : null;\n"
-"}\n"
-"\n"
-"/**\n"
-"* @return the current profiling status\n"
-"* example { was : 0, slowms : 100 }\n"
-"* @return SOMETHING_FIXME or null on error\n"
-"*/\n"
-"DB.prototype.getProfilingStatus = function() {\n"
-"var res = this._dbCommand( { profile: -1 } );\n"
-"if ( ! res.ok )\n"
-"throw \"profile command failed: \" + tojson( res );\n"
-"delete res.ok\n"
-"return res;\n"
-"}\n"
-"\n"
-"\n"
-"/**\n"
-"Erase the entire database. (!)\n"
-"\n"
-"* @return Object returned has member ok set to true if operation succeeds, false otherwise.\n"
-"*/\n"
-"DB.prototype.dropDatabase = function() {\n"
-"if ( arguments.length )\n"
-"throw \"dropDatabase doesn't take arguments\";\n"
-"return this._dbCommand( { dropDatabase: 1 } );\n"
-"}\n"
-"\n"
-"\n"
-"DB.prototype.shutdownServer = function() {\n"
-"if( \"admin\" != this._name ){\n"
-"return \"shutdown command only works with the admin database; try 'use admin'\";\n"
-"}\n"
-"\n"
-"try {\n"
-"var res = this._dbCommand(\"shutdown\");\n"
-"if( res )\n"
-"throw \"shutdownServer failed: \" + res.errmsg;\n"
-"throw \"shutdownServer failed\";\n"
-"}\n"
-"catch ( e ){\n"
-"assert( tojson( e ).indexOf( \"error doing query: failed\" ) >= 0 , \"unexpected error: \" + tojson( e ) );\n"
-"print( \"server should be down...\" );\n"
-"}\n"
-"}\n"
-"\n"
-"/**\n"
-"Clone database on another server to here.\n"
-"<p>\n"
-"Generally, you should dropDatabase() first as otherwise the cloned information will MERGE\n"
-"into whatever data is already present in this database. (That is however a valid way to use\n"
-"clone if you are trying to do something intentionally, such as union three non-overlapping\n"
-"databases into one.)\n"
-"<p>\n"
-"This is a low level administrative function will is not typically used.\n"
-"\n"
-"* @param {String} from Where to clone from (dbhostname[:port]). May not be this database\n"
-"(self) as you cannot clone to yourself.\n"
-"* @return Object returned has member ok set to true if operation succeeds, false otherwise.\n"
-"* See also: db.copyDatabase()\n"
-"*/\n"
-"DB.prototype.cloneDatabase = function(from) {\n"
-"assert( isString(from) && from.length );\n"
-"//this.resetIndexCache();\n"
-"return this._dbCommand( { clone: from } );\n"
-"}\n"
-"\n"
-"\n"
-"/**\n"
-"Clone collection on another server to here.\n"
-"<p>\n"
-"Generally, you should drop() first as otherwise the cloned information will MERGE\n"
-"into whatever data is already present in this collection. (That is however a valid way to use\n"
-"clone if you are trying to do something intentionally, such as union three non-overlapping\n"
-"collections into one.)\n"
-"<p>\n"
-"This is a low level administrative function is not typically used.\n"
-"\n"
-"* @param {String} from mongod instance from which to clnoe (dbhostname:port). May\n"
-"not be this mongod instance, as clone from self is not allowed.\n"
-"* @param {String} collection name of collection to clone.\n"
-"* @param {Object} query query specifying which elements of collection are to be cloned.\n"
-"* @return Object returned has member ok set to true if operation succeeds, false otherwise.\n"
-"* See also: db.cloneDatabase()\n"
-"*/\n"
-"DB.prototype.cloneCollection = function(from, collection, query) {\n"
-"assert( isString(from) && from.length );\n"
-"assert( isString(collection) && collection.length );\n"
-"collection = this._name + \".\" + collection;\n"
-"query = query || {};\n"
-"//this.resetIndexCache();\n"
-"return this._dbCommand( { cloneCollection:collection, from:from, query:query } );\n"
-"}\n"
-"\n"
-"\n"
-"/**\n"
-"Copy database from one server or name to another server or name.\n"
-"\n"
-"Generally, you should dropDatabase() first as otherwise the copied information will MERGE\n"
-"into whatever data is already present in this database (and you will get duplicate objects\n"
-"in collections potentially.)\n"
-"\n"
-"For security reasons this function only works when executed on the \"admin\" db. However,\n"
-"if you have access to said db, you can copy any database from one place to another.\n"
-"\n"
-"This method provides a way to \"rename\" a database by copying it to a new db name and\n"
-"location. Additionally, it effectively provides a repair facility.\n"
-"\n"
-"* @param {String} fromdb database name from which to copy.\n"
-"* @param {String} todb database name to copy to.\n"
-"* @param {String} fromhost hostname of the database (and optionally, \":port\") from which to\n"
-"copy the data. default if unspecified is to copy from self.\n"
-"* @return Object returned has member ok set to true if operation succeeds, false otherwise.\n"
-"* See also: db.clone()\n"
-"*/\n"
-"DB.prototype.copyDatabase = function(fromdb, todb, fromhost, username, password) {\n"
-"assert( isString(fromdb) && fromdb.length );\n"
-"assert( isString(todb) && todb.length );\n"
-"fromhost = fromhost || \"\";\n"
-"if ( username && password ) {\n"
-"var n = this._adminCommand( { copydbgetnonce : 1, fromhost:fromhost } );\n"
-"return this._adminCommand( { copydb:1, fromhost:fromhost, fromdb:fromdb, todb:todb, username:username, nonce:n.nonce, key:this.__pwHash( n.nonce, username, password ) } );\n"
-"} else {\n"
-"return this._adminCommand( { copydb:1, fromhost:fromhost, fromdb:fromdb, todb:todb } );\n"
-"}\n"
-"}\n"
-"\n"
-"/**\n"
-"Repair database.\n"
-"\n"
-"* @return Object returned has member ok set to true if operation succeeds, false otherwise.\n"
-"*/\n"
-"DB.prototype.repairDatabase = function() {\n"
-"return this._dbCommand( { repairDatabase: 1 } );\n"
-"}\n"
-"\n"
-"\n"
-"DB.prototype.help = function() {\n"
-"print(\"DB methods:\");\n"
-"print(\"\\tdb.addUser(username, password[, readOnly=false])\");\n"
-"print(\"\\tdb.auth(username, password)\");\n"
-"print(\"\\tdb.cloneDatabase(fromhost)\");\n"
-"print(\"\\tdb.commandHelp(name) returns the help for the command\");\n"
-"print(\"\\tdb.copyDatabase(fromdb, todb, fromhost)\");\n"
-"print(\"\\tdb.createCollection(name, { size : ..., capped : ..., max : ... } )\");\n"
-"print(\"\\tdb.currentOp() displays the current operation in the db\");\n"
-"print(\"\\tdb.dropDatabase()\");\n"
-"print(\"\\tdb.eval(func, args) run code server-side\");\n"
-"print(\"\\tdb.getCollection(cname) same as db['cname'] or db.cname\");\n"
-"print(\"\\tdb.getCollectionNames()\");\n"
-"print(\"\\tdb.getLastError() - just returns the err msg string\");\n"
-"print(\"\\tdb.getLastErrorObj() - return full status object\");\n"
-"print(\"\\tdb.getMongo() get the server connection object\");\n"
-"print(\"\\tdb.getMongo().setSlaveOk() allow this connection to read from the nonmaster member of a replica pair\");\n"
-"print(\"\\tdb.getName()\");\n"
-"print(\"\\tdb.getPrevError()\");\n"
-"print(\"\\tdb.getProfilingLevel() - deprecated\");\n"
-"print(\"\\tdb.getProfilingStatus() - returns if profiling is on and slow threshold \");\n"
-"print(\"\\tdb.getReplicationInfo()\");\n"
-"print(\"\\tdb.getSiblingDB(name) get the db at the same server as this one\");\n"
-"print(\"\\tdb.isMaster() check replica primary status\");\n"
-"print(\"\\tdb.killOp(opid) kills the current operation in the db\");\n"
-"print(\"\\tdb.listCommands() lists all the db commands\");\n"
-"print(\"\\tdb.printCollectionStats()\");\n"
-"print(\"\\tdb.printReplicationInfo()\");\n"
-"print(\"\\tdb.printSlaveReplicationInfo()\");\n"
-"print(\"\\tdb.printShardingStatus()\");\n"
-"print(\"\\tdb.removeUser(username)\");\n"
-"print(\"\\tdb.repairDatabase()\");\n"
-"print(\"\\tdb.resetError()\");\n"
-"print(\"\\tdb.runCommand(cmdObj) run a database command. if cmdObj is a string, turns it into { cmdObj : 1 }\");\n"
-"print(\"\\tdb.serverStatus()\");\n"
-"print(\"\\tdb.setProfilingLevel(level,<slowms>) 0=off 1=slow 2=all\");\n"
-"print(\"\\tdb.shutdownServer()\");\n"
-"print(\"\\tdb.stats()\");\n"
-"print(\"\\tdb.version() current version of the server\");\n"
-"print(\"\\tdb.getMongo().setSlaveOk() allow queries on a replication slave server\");\n"
-"\n"
-"return __magicNoPrint;\n"
-"}\n"
-"\n"
-"DB.prototype.printCollectionStats = function(){\n"
-"var mydb = this;\n"
-"this.getCollectionNames().forEach(\n"
-"function(z){\n"
-"print( z );\n"
-"printjson( mydb.getCollection(z).stats() );\n"
-"print( \"---\" );\n"
-"}\n"
-");\n"
-"}\n"
-"\n"
-"/**\n"
-"* <p> Set profiling level for your db. Profiling gathers stats on query performance. </p>\n"
-"*\n"
-"* <p>Default is off, and resets to off on a database restart -- so if you want it on,\n"
-"* turn it on periodically. </p>\n"
-"*\n"
-"* <p>Levels :</p>\n"
-"* <ul>\n"
-"* <li>0=off</li>\n"
-"* <li>1=log very slow operations; optional argument slowms specifies slowness threshold</li>\n"
-"* <li>2=log all</li>\n"
-"* @param {String} level Desired level of profiling\n"
-"* @param {String} slowms For slow logging, query duration that counts as slow (default 100ms)\n"
-"* @return SOMETHING_FIXME or null on error\n"
-"*/\n"
-"DB.prototype.setProfilingLevel = function(level,slowms) {\n"
-"\n"
-"if (level < 0 || level > 2) {\n"
-"throw { dbSetProfilingException : \"input level \" + level + \" is out of range [0..2]\" };\n"
-"}\n"
-"\n"
-"var cmd = { profile: level };\n"
-"if ( slowms )\n"
-"cmd[\"slowms\"] = slowms;\n"
-"return this._dbCommand( cmd );\n"
-"}\n"
-"\n"
-"\n"
-"/**\n"
-"* <p> Evaluate a js expression at the database server.</p>\n"
-"*\n"
-"* <p>Useful if you need to touch a lot of data lightly; in such a scenario\n"
-"* the network transfer of the data could be a bottleneck. A good example\n"
-"* is \"select count(*)\" -- can be done server side via this mechanism.\n"
-"* </p>\n"
-"*\n"
-"* <p>\n"
-"* If the eval fails, an exception is thrown of the form:\n"
-"* </p>\n"
-"* <code>{ dbEvalException: { retval: functionReturnValue, ok: num [, errno: num] [, errmsg: str] } }</code>\n"
-"*\n"
-"* <p>Example: </p>\n"
-"* <code>print( \"mycount: \" + db.eval( function(){db.mycoll.find({},{_id:ObjId()}).length();} );</code>\n"
-"*\n"
-"* @param {Function} jsfunction Javascript function to run on server. Note this it not a closure, but rather just \"code\".\n"
-"* @return result of your function, or null if error\n"
-"*\n"
-"*/\n"
-"DB.prototype.eval = function(jsfunction) {\n"
-"var cmd = { $eval : jsfunction };\n"
-"if ( arguments.length > 1 ) {\n"
-"cmd.args = argumentsToArray( arguments ).slice(1);\n"
-"}\n"
-"\n"
-"var res = this._dbCommand( cmd );\n"
-"\n"
-"if (!res.ok)\n"
-"throw tojson( res );\n"
-"\n"
-"return res.retval;\n"
-"}\n"
-"\n"
-"DB.prototype.dbEval = DB.prototype.eval;\n"
-"\n"
-"\n"
-"/**\n"
-"*\n"
-"* <p>\n"
-"* Similar to SQL group by. For example: </p>\n"
-"*\n"
-"* <code>select a,b,sum(c) csum from coll where active=1 group by a,b</code>\n"
-"*\n"
-"* <p>\n"
-"* corresponds to the following in 10gen:\n"
-"* </p>\n"
-"*\n"
-"* <code>\n"
-"db.group(\n"
-"{\n"
-"ns: \"coll\",\n"
-"key: { a:true, b:true },\n"
-"// keyf: ...,\n"
-"cond: { active:1 },\n"
-"reduce: function(obj,prev) { prev.csum += obj.c; } ,\n"
-"initial: { csum: 0 }\n"
-"});\n"
-"</code>\n"
-"*\n"
-"*\n"
-"* <p>\n"
-"* An array of grouped items is returned. The array must fit in RAM, thus this function is not\n"
-"* suitable when the return set is extremely large.\n"
-"* </p>\n"
-"* <p>\n"
-"* To order the grouped data, simply sort it client side upon return.\n"
-"* <p>\n"
-"Defaults\n"
-"cond may be null if you want to run against all rows in the collection\n"
-"keyf is a function which takes an object and returns the desired key. set either key or keyf (not both).\n"
-"* </p>\n"
-"*/\n"
-"DB.prototype.groupeval = function(parmsObj) {\n"
-"\n"
-"var groupFunction = function() {\n"
-"var parms = args[0];\n"
-"var c = db[parms.ns].find(parms.cond||{});\n"
-"var map = new Map();\n"
-"var pks = parms.key ? Object.keySet( parms.key ) : null;\n"
-"var pkl = pks ? pks.length : 0;\n"
-"var key = {};\n"
-"\n"
-"while( c.hasNext() ) {\n"
-"var obj = c.next();\n"
-"if ( pks ) {\n"
-"for( var i=0; i<pkl; i++ ){\n"
-"var k = pks[i];\n"
-"key[k] = obj[k];\n"
-"}\n"
-"}\n"
-"else {\n"
-"key = parms.$keyf(obj);\n"
-"}\n"
-"\n"
-"var aggObj = map.get(key);\n"
-"if( aggObj == null ) {\n"
-"var newObj = Object.extend({}, key); // clone\n"
-"aggObj = Object.extend(newObj, parms.initial)\n"
-"map.put( key , aggObj );\n"
-"}\n"
-"parms.$reduce(obj, aggObj);\n"
-"}\n"
-"\n"
-"return map.values();\n"
-"}\n"
-"\n"
-"return this.eval(groupFunction, this._groupFixParms( parmsObj ));\n"
-"}\n"
-"\n"
-"DB.prototype.groupcmd = function( parmsObj ){\n"
-"var ret = this.runCommand( { \"group\" : this._groupFixParms( parmsObj ) } );\n"
-"if ( ! ret.ok ){\n"
-"throw \"group command failed: \" + tojson( ret );\n"
-"}\n"
-"return ret.retval;\n"
-"}\n"
-"\n"
-"DB.prototype.group = DB.prototype.groupcmd;\n"
-"\n"
-"DB.prototype._groupFixParms = function( parmsObj ){\n"
-"var parms = Object.extend({}, parmsObj);\n"
-"\n"
-"if( parms.reduce ) {\n"
-"parms.$reduce = parms.reduce; // must have $ to pass to db\n"
-"delete parms.reduce;\n"
-"}\n"
-"\n"
-"if( parms.keyf ) {\n"
-"parms.$keyf = parms.keyf;\n"
-"delete parms.keyf;\n"
-"}\n"
-"\n"
-"return parms;\n"
-"}\n"
-"\n"
-"DB.prototype.resetError = function(){\n"
-"return this.runCommand( { reseterror : 1 } );\n"
-"}\n"
-"\n"
-"DB.prototype.forceError = function(){\n"
-"return this.runCommand( { forceerror : 1 } );\n"
-"}\n"
-"\n"
-"DB.prototype.getLastError = function( w , wtimeout ){\n"
-"var res = this.getLastErrorObj( w , wtimeout );\n"
-"if ( ! res.ok )\n"
-"throw \"getlasterror failed: \" + tojson( res );\n"
-"return res.err;\n"
-"}\n"
-"DB.prototype.getLastErrorObj = function( w , wtimeout ){\n"
-"var cmd = { getlasterror : 1 };\n"
-"if ( w ){\n"
-"cmd.w = w;\n"
-"if ( wtimeout )\n"
-"cmd.wtimeout = wtimeout;\n"
-"}\n"
-"var res = this.runCommand( cmd );\n"
-"\n"
-"if ( ! res.ok )\n"
-"throw \"getlasterror failed: \" + tojson( res );\n"
-"return res;\n"
-"}\n"
-"DB.prototype.getLastErrorCmd = DB.prototype.getLastErrorObj;\n"
-"\n"
-"\n"
-"/* Return the last error which has occurred, even if not the very last error.\n"
-"\n"
-"Returns:\n"
-"{ err : <error message>, nPrev : <how_many_ops_back_occurred>, ok : 1 }\n"
-"\n"
-"result.err will be null if no error has occurred.\n"
-"*/\n"
-"DB.prototype.getPrevError = function(){\n"
-"return this.runCommand( { getpreverror : 1 } );\n"
-"}\n"
-"\n"
-"DB.prototype.getCollectionNames = function(){\n"
-"var all = [];\n"
-"\n"
-"var nsLength = this._name.length + 1;\n"
-"\n"
-"var c = this.getCollection( \"system.namespaces\" ).find();\n"
-"while ( c.hasNext() ){\n"
-"var name = c.next().name;\n"
-"\n"
-"if ( name.indexOf( \"$\" ) >= 0 && name.indexOf( \".oplog.$\" ) < 0 )\n"
-"continue;\n"
-"\n"
-"all.push( name.substring( nsLength ) );\n"
-"}\n"
-"\n"
-"return all.sort();\n"
-"}\n"
-"\n"
-"DB.prototype.tojson = function(){\n"
-"return this._name;\n"
-"}\n"
-"\n"
-"DB.prototype.toString = function(){\n"
-"return this._name;\n"
-"}\n"
-"\n"
-"DB.prototype.isMaster = function () { return this.runCommand(\"isMaster\"); }\n"
-"\n"
-"DB.prototype.currentOp = function(){\n"
-"return db.$cmd.sys.inprog.findOne();\n"
-"}\n"
-"DB.prototype.currentOP = DB.prototype.currentOp;\n"
-"\n"
-"DB.prototype.killOp = function(op) {\n"
-"if( !op )\n"
-"throw \"no opNum to kill specified\";\n"
-"return db.$cmd.sys.killop.findOne({'op':op});\n"
-"}\n"
-"DB.prototype.killOP = DB.prototype.killOp;\n"
-"\n"
-"DB.tsToSeconds = function(x){\n"
-"if ( x.t && x.i )\n"
-"return x.t / 1000;\n"
-"return x / 4294967296; // low 32 bits are ordinal #s within a second\n"
-"}\n"
-"\n"
-"/**\n"
-"Get a replication log information summary.\n"
-"<p>\n"
-"This command is for the database/cloud administer and not applicable to most databases.\n"
-"It is only used with the local database. One might invoke from the JS shell:\n"
-"<pre>\n"
-"use local\n"
-"db.getReplicationInfo();\n"
-"</pre>\n"
-"It is assumed that this database is a replication master -- the information returned is\n"
-"about the operation log stored at local.oplog.$main on the replication master. (It also\n"
-"works on a machine in a replica pair: for replica pairs, both machines are \"masters\" from\n"
-"an internal database perspective.\n"
-"<p>\n"
-"* @return Object timeSpan: time span of the oplog from start to end if slave is more out\n"
-"* of date than that, it can't recover without a complete resync\n"
-"*/\n"
-"DB.prototype.getReplicationInfo = function() {\n"
-"var db = this.getSiblingDB(\"local\");\n"
-"\n"
-"var result = { };\n"
-"var oplog;\n"
-"if (db.system.namespaces.findOne({name:\"local.oplog.rs\"}) != null) {\n"
-"oplog = 'oplog.rs';\n"
-"}\n"
-"else if (db.system.namespaces.findOne({name:\"local.oplog.$main\"}) != null) {\n"
-"oplog = 'oplog.$main';\n"
-"}\n"
-"else {\n"
-"result.errmsg = \"neither master/slave nor replica set replication detected\";\n"
-"return result;\n"
-"}\n"
-"\n"
-"var ol_entry = db.system.namespaces.findOne({name:\"local.\"+oplog});\n"
-"if( ol_entry && ol_entry.options ) {\n"
-"result.logSizeMB = ol_entry.options.size / ( 1024 * 1024 );\n"
-"} else {\n"
-"result.errmsg = \"local.\"+oplog+\", or its options, not found in system.namespaces collection\";\n"
-"return result;\n"
-"}\n"
-"ol = db.getCollection(oplog);\n"
-"\n"
-"result.usedMB = ol.stats().size / ( 1024 * 1024 );\n"
-"result.usedMB = Math.ceil( result.usedMB * 100 ) / 100;\n"
-"\n"
-"var firstc = ol.find().sort({$natural:1}).limit(1);\n"
-"var lastc = ol.find().sort({$natural:-1}).limit(1);\n"
-"if( !firstc.hasNext() || !lastc.hasNext() ) {\n"
-"result.errmsg = \"objects not found in local.oplog.$main -- is this a new and empty db instance?\";\n"
-"result.oplogMainRowCount = ol.count();\n"
-"return result;\n"
-"}\n"
-"\n"
-"var first = firstc.next();\n"
-"var last = lastc.next();\n"
-"{\n"
-"var tfirst = first.ts;\n"
-"var tlast = last.ts;\n"
-"\n"
-"if( tfirst && tlast ) {\n"
-"tfirst = DB.tsToSeconds( tfirst );\n"
-"tlast = DB.tsToSeconds( tlast );\n"
-"result.timeDiff = tlast - tfirst;\n"
-"result.timeDiffHours = Math.round(result.timeDiff / 36)/100;\n"
-"result.tFirst = (new Date(tfirst*1000)).toString();\n"
-"result.tLast = (new Date(tlast*1000)).toString();\n"
-"result.now = Date();\n"
-"}\n"
-"else {\n"
-"result.errmsg = \"ts element not found in oplog objects\";\n"
-"}\n"
-"}\n"
-"\n"
-"return result;\n"
-"};\n"
-"\n"
-"DB.prototype.printReplicationInfo = function() {\n"
-"var result = this.getReplicationInfo();\n"
-"if( result.errmsg ) {\n"
-"print(tojson(result));\n"
-"return;\n"
-"}\n"
-"print(\"configured oplog size: \" + result.logSizeMB + \"MB\");\n"
-"print(\"log length start to end: \" + result.timeDiff + \"secs (\" + result.timeDiffHours + \"hrs)\");\n"
-"print(\"oplog first event time: \" + result.tFirst);\n"
-"print(\"oplog last event time: \" + result.tLast);\n"
-"print(\"now: \" + result.now);\n"
-"}\n"
-"\n"
-"DB.prototype.printSlaveReplicationInfo = function() {\n"
-"function getReplLag(st) {\n"
-"var now = new Date();\n"
-"print(\"\\t syncedTo: \" + st.toString() );\n"
-"var ago = (now-st)/1000;\n"
-"var hrs = Math.round(ago/36)/100;\n"
-"print(\"\\t\\t = \" + Math.round(ago) + \"secs ago (\" + hrs + \"hrs)\");\n"
-"};\n"
-"\n"
-"function g(x) {\n"
-"assert( x , \"how could this be null (printSlaveReplicationInfo gx)\" )\n"
-"print(\"source: \" + x.host);\n"
-"if ( x.syncedTo ){\n"
-"var st = new Date( DB.tsToSeconds( x.syncedTo ) * 1000 );\n"
-"getReplLag(st);\n"
-"}\n"
-"else {\n"
-"print( \"\\t doing initial sync\" );\n"
-"}\n"
-"};\n"
-"\n"
-"function r(x) {\n"
-"assert( x , \"how could this be null (printSlaveReplicationInfo rx)\" );\n"
-"if ( x.state == 1 ) {\n"
-"return;\n"
-"}\n"
-"\n"
-"print(\"source: \" + x.name);\n"
-"if ( x.optime ) {\n"
-"getReplLag(x.optimeDate);\n"
-"}\n"
-"else {\n"
-"print( \"\\t no replication info, yet. State: \" + x.stateStr );\n"
-"}\n"
-"};\n"
-"\n"
-"var L = this.getSiblingDB(\"local\");\n"
-"if( L.sources.count() != 0 ) {\n"
-"L.sources.find().forEach(g);\n"
-"}\n"
-"else if (L.system.replset.count() != 0) {\n"
-"var status = this.adminCommand({'replSetGetStatus' : 1});\n"
-"status.members.forEach(r);\n"
-"}\n"
-"else {\n"
-"print(\"local.sources is empty; is this db a --slave?\");\n"
-"return;\n"
-"}\n"
-"}\n"
-"\n"
-"DB.prototype.serverBuildInfo = function(){\n"
-"return this._adminCommand( \"buildinfo\" );\n"
-"}\n"
-"\n"
-"DB.prototype.serverStatus = function(){\n"
-"return this._adminCommand( \"serverStatus\" );\n"
-"}\n"
-"\n"
-"DB.prototype.serverCmdLineOpts = function(){\n"
-"return this._adminCommand( \"getCmdLineOpts\" );\n"
-"}\n"
-"\n"
-"DB.prototype.version = function(){\n"
-"return this.serverBuildInfo().version;\n"
-"}\n"
-"\n"
-"DB.prototype.listCommands = function(){\n"
-"var x = this.runCommand( \"listCommands\" );\n"
-"for ( var name in x.commands ){\n"
-"var c = x.commands[name];\n"
-"\n"
-"var s = name + \": \";\n"
-"\n"
-"switch ( c.lockType ){\n"
-"case -1: s += \"read-lock\"; break;\n"
-"case 0: s += \"no-lock\"; break;\n"
-"case 1: s += \"write-lock\"; break;\n"
-"default: s += c.lockType;\n"
-"}\n"
-"\n"
-"if (c.adminOnly) s += \" adminOnly \";\n"
-"if (c.adminOnly) s += \" slaveOk \";\n"
-"\n"
-"s += \"\\n \";\n"
-"s += c.help.replace(/\\n/g, '\\n ');\n"
-"s += \"\\n\";\n"
-"\n"
-"print( s );\n"
-"}\n"
-"}\n"
-"\n"
-"DB.prototype.printShardingStatus = function(){\n"
-"printShardingStatus( this.getSiblingDB( \"config\" ) );\n"
-"}\n"
-"\n"
-"DB.autocomplete = function(obj){\n"
-"var colls = obj.getCollectionNames();\n"
-"var ret=[];\n"
-"for (var i=0; i<colls.length; i++){\n"
-"if (colls[i].match(/^[a-zA-Z0-9_.\\$]+$/))\n"
-"ret.push(colls[i]);\n"
-"}\n"
-"return ret;\n"
-"}\n"
-;
-extern const JSFile db;
-const JSFile db = { "shell/db.js" , _jscode_raw_db };
-const StringData _jscode_raw_mongo =
-"// mongo.js\n"
-"\n"
-"// NOTE 'Mongo' may be defined here or in MongoJS.cpp. Add code to init, not to this constructor.\n"
-"if ( typeof Mongo == \"undefined\" ){\n"
-"Mongo = function( host ){\n"
-"this.init( host );\n"
-"}\n"
-"}\n"
-"\n"
-"if ( ! Mongo.prototype ){\n"
-"throw \"Mongo.prototype not defined\";\n"
-"}\n"
-"\n"
-"if ( ! Mongo.prototype.find )\n"
-"Mongo.prototype.find = function( ns , query , fields , limit , skip ){ throw \"find not implemented\"; }\n"
-"if ( ! Mongo.prototype.insert )\n"
-"Mongo.prototype.insert = function( ns , obj ){ throw \"insert not implemented\"; }\n"
-"if ( ! Mongo.prototype.remove )\n"
-"Mongo.prototype.remove = function( ns , pattern ){ throw \"remove not implemented;\" }\n"
-"if ( ! Mongo.prototype.update )\n"
-"Mongo.prototype.update = function( ns , query , obj , upsert ){ throw \"update not implemented;\" }\n"
-"\n"
-"if ( typeof mongoInject == \"function\" ){\n"
-"mongoInject( Mongo.prototype );\n"
-"}\n"
-"\n"
-"Mongo.prototype.setSlaveOk = function() {\n"
-"this.slaveOk = true;\n"
-"}\n"
-"\n"
-"Mongo.prototype.getDB = function( name ){\n"
-"return new DB( this , name );\n"
-"}\n"
-"\n"
-"Mongo.prototype.getDBs = function(){\n"
-"var res = this.getDB( \"admin\" ).runCommand( { \"listDatabases\" : 1 } );\n"
-"if ( ! res.ok )\n"
-"throw \"listDatabases failed:\" + tojson( res );\n"
-"return res;\n"
-"}\n"
-"\n"
-"Mongo.prototype.adminCommand = function( cmd ){\n"
-"return this.getDB( \"admin\" ).runCommand( cmd );\n"
-"}\n"
-"\n"
-"Mongo.prototype.getDBNames = function(){\n"
-"return this.getDBs().databases.map(\n"
-"function(z){\n"
-"return z.name;\n"
-"}\n"
-");\n"
-"}\n"
-"\n"
-"Mongo.prototype.getCollection = function(ns){\n"
-"var idx = ns.indexOf( \".\" );\n"
-"if ( idx < 0 )\n"
-"throw \"need . in ns\";\n"
-"var db = ns.substring( 0 , idx );\n"
-"var c = ns.substring( idx + 1 );\n"
-"return this.getDB( db ).getCollection( c );\n"
-"}\n"
-"\n"
-"Mongo.prototype.toString = function(){\n"
-"return \"connection to \" + this.host;\n"
-"}\n"
-"Mongo.prototype.tojson = Mongo.prototype.toString;\n"
-"\n"
-"connect = function( url , user , pass ){\n"
-"chatty( \"connecting to: \" + url )\n"
-"\n"
-"if ( user && ! pass )\n"
-"throw \"you specified a user and not a password. either you need a password, or you're using the old connect api\";\n"
-"\n"
-"var idx = url.lastIndexOf( \"/\" );\n"
-"\n"
-"var db;\n"
-"\n"
-"if ( idx < 0 )\n"
-"db = new Mongo().getDB( url );\n"
-"else\n"
-"db = new Mongo( url.substring( 0 , idx ) ).getDB( url.substring( idx + 1 ) );\n"
-"\n"
-"if ( user && pass ){\n"
-"if ( ! db.auth( user , pass ) ){\n"
-"throw \"couldn't login\";\n"
-"}\n"
-"}\n"
-"\n"
-"return db;\n"
-"}\n"
-;
-extern const JSFile mongo;
-const JSFile mongo = { "shell/mongo.js" , _jscode_raw_mongo };
-const StringData _jscode_raw_mr =
-"// mr.js\n"
-"\n"
-"MR = {};\n"
-"\n"
-"MR.init = function(){\n"
-"$max = 0;\n"
-"$arr = [];\n"
-"emit = MR.emit;\n"
-"$numEmits = 0;\n"
-"$numReduces = 0;\n"
-"$numReducesToDB = 0;\n"
-"gc(); // this is just so that keep memory size sane\n"
-"}\n"
-"\n"
-"MR.cleanup = function(){\n"
-"MR.init();\n"
-"gc();\n"
-"}\n"
-"\n"
-"MR.emit = function(k,v){\n"
-"$numEmits++;\n"
-"var num = nativeHelper.apply( get_num_ , [ k ] );\n"
-"var data = $arr[num];\n"
-"if ( ! data ){\n"
-"data = { key : k , values : new Array(1000) , count : 0 };\n"
-"$arr[num] = data;\n"
-"}\n"
-"data.values[data.count++] = v;\n"
-"$max = Math.max( $max , data.count );\n"
-"}\n"
-"\n"
-"MR.doReduce = function( useDB ){\n"
-"$numReduces++;\n"
-"if ( useDB )\n"
-"$numReducesToDB++;\n"
-"$max = 0;\n"
-"for ( var i=0; i<$arr.length; i++){\n"
-"var data = $arr[i];\n"
-"if ( ! data )\n"
-"continue;\n"
-"\n"
-"if ( useDB ){\n"
-"var x = tempcoll.findOne( { _id : data.key } );\n"
-"if ( x ){\n"
-"data.values[data.count++] = x.value;\n"
-"}\n"
-"}\n"
-"\n"
-"var r = $reduce( data.key , data.values.slice( 0 , data.count ) );\n"
-"if ( r && r.length && r[0] ){\n"
-"data.values = r;\n"
-"data.count = r.length;\n"
-"}\n"
-"else{\n"
-"data.values[0] = r;\n"
-"data.count = 1;\n"
-"}\n"
-"\n"
-"$max = Math.max( $max , data.count );\n"
-"\n"
-"if ( useDB ){\n"
-"if ( data.count == 1 ){\n"
-"tempcoll.save( { _id : data.key , value : data.values[0] } );\n"
-"}\n"
-"else {\n"
-"tempcoll.save( { _id : data.key , value : data.values.slice( 0 , data.count ) } );\n"
-"}\n"
-"}\n"
-"}\n"
-"}\n"
-"\n"
-"MR.check = function(){\n"
-"if ( $max < 2000 && $arr.length < 1000 ){\n"
-"return 0;\n"
-"}\n"
-"MR.doReduce();\n"
-"if ( $max < 2000 && $arr.length < 1000 ){\n"
-"return 1;\n"
-"}\n"
-"MR.doReduce( true );\n"
-"$arr = [];\n"
-"$max = 0;\n"
-"reset_num();\n"
-"gc();\n"
-"return 2;\n"
-"}\n"
-"\n"
-"MR.finalize = function(){\n"
-"tempcoll.find().forEach(\n"
-"function(z){\n"
-"z.value = $finalize( z._id , z.value );\n"
-"tempcoll.save( z );\n"
-"}\n"
-");\n"
-"}\n"
-;
-extern const JSFile mr;
-const JSFile mr = { "shell/mr.js" , _jscode_raw_mr };
-const StringData _jscode_raw_query =
-"// query.js\n"
-"\n"
-"if ( typeof DBQuery == \"undefined\" ){\n"
-"DBQuery = function( mongo , db , collection , ns , query , fields , limit , skip , batchSize ){\n"
-"\n"
-"this._mongo = mongo; // 0\n"
-"this._db = db; // 1\n"
-"this._collection = collection; // 2\n"
-"this._ns = ns; // 3\n"
-"\n"
-"this._query = query || {}; // 4\n"
-"this._fields = fields; // 5\n"
-"this._limit = limit || 0; // 6\n"
-"this._skip = skip || 0; // 7\n"
-"this._batchSize = batchSize || 0;\n"
-"\n"
-"this._cursor = null;\n"
-"this._numReturned = 0;\n"
-"this._special = false;\n"
-"this._prettyShell = false;\n"
-"}\n"
-"print( \"DBQuery probably won't have array access \" );\n"
-"}\n"
-"\n"
-"DBQuery.prototype.help = function () {\n"
-"print(\"find() modifiers\")\n"
-"print(\"\\t.sort( {...} )\")\n"
-"print(\"\\t.limit( n )\")\n"
-"print(\"\\t.skip( n )\")\n"
-"print(\"\\t.count() - total # of objects matching query, ignores skip,limit\")\n"
-"print(\"\\t.size() - total # of objects cursor would return, honors skip,limit\")\n"
-"print(\"\\t.explain([verbose])\")\n"
-"print(\"\\t.hint(...)\")\n"
-"print(\"\\t.showDiskLoc() - adds a $diskLoc field to each returned object\")\n"
-"print(\"\\nCursor methods\");\n"
-"print(\"\\t.forEach( func )\")\n"
-"print(\"\\t.print() - output to console in full pretty format\")\n"
-"print(\"\\t.map( func )\")\n"
-"print(\"\\t.hasNext()\")\n"
-"print(\"\\t.next()\")\n"
-"}\n"
-"\n"
-"DBQuery.prototype.clone = function(){\n"
-"var q = new DBQuery( this._mongo , this._db , this._collection , this._ns ,\n"
-"this._query , this._fields ,\n"
-"this._limit , this._skip , this._batchSize );\n"
-"q._special = this._special;\n"
-"return q;\n"
-"}\n"
-"\n"
-"DBQuery.prototype._ensureSpecial = function(){\n"
-"if ( this._special )\n"
-"return;\n"
-"\n"
-"var n = { query : this._query };\n"
-"this._query = n;\n"
-"this._special = true;\n"
-"}\n"
-"\n"
-"DBQuery.prototype._checkModify = function(){\n"
-"if ( this._cursor )\n"
-"throw \"query already executed\";\n"
-"}\n"
-"\n"
-"DBQuery.prototype._exec = function(){\n"
-"if ( ! this._cursor ){\n"
-"assert.eq( 0 , this._numReturned );\n"
-"this._cursor = this._mongo.find( this._ns , this._query , this._fields , this._limit , this._skip , this._batchSize );\n"
-"this._cursorSeen = 0;\n"
-"}\n"
-"return this._cursor;\n"
-"}\n"
-"\n"
-"DBQuery.prototype.limit = function( limit ){\n"
-"this._checkModify();\n"
-"this._limit = limit;\n"
-"return this;\n"
-"}\n"
-"\n"
-"DBQuery.prototype.batchSize = function( batchSize ){\n"
-"this._checkModify();\n"
-"this._batchSize = batchSize;\n"
-"return this;\n"
-"}\n"
-"\n"
-"\n"
-"DBQuery.prototype.skip = function( skip ){\n"
-"this._checkModify();\n"
-"this._skip = skip;\n"
-"return this;\n"
-"}\n"
-"\n"
-"DBQuery.prototype.hasNext = function(){\n"
-"this._exec();\n"
-"\n"
-"if ( this._limit > 0 && this._cursorSeen >= this._limit )\n"
-"return false;\n"
-"var o = this._cursor.hasNext();\n"
-"return o;\n"
-"}\n"
-"\n"
-"DBQuery.prototype.next = function(){\n"
-"this._exec();\n"
-"\n"
-"var o = this._cursor.hasNext();\n"
-"if ( o )\n"
-"this._cursorSeen++;\n"
-"else\n"
-"throw \"error hasNext: \" + o;\n"
-"\n"
-"var ret = this._cursor.next();\n"
-"if ( ret.$err && this._numReturned == 0 && ! this.hasNext() )\n"
-"throw \"error: \" + tojson( ret );\n"
-"\n"
-"this._numReturned++;\n"
-"return ret;\n"
-"}\n"
-"\n"
-"DBQuery.prototype.objsLeftInBatch = function(){\n"
-"this._exec();\n"
-"\n"
-"var ret = this._cursor.objsLeftInBatch();\n"
-"if ( ret.$err )\n"
-"throw \"error: \" + tojson( ret );\n"
-"\n"
-"return ret;\n"
-"}\n"
-"\n"
-"DBQuery.prototype.toArray = function(){\n"
-"if ( this._arr )\n"
-"return this._arr;\n"
-"\n"
-"var a = [];\n"
-"while ( this.hasNext() )\n"
-"a.push( this.next() );\n"
-"this._arr = a;\n"
-"return a;\n"
-"}\n"
-"\n"
-"DBQuery.prototype.count = function( applySkipLimit ){\n"
-"var cmd = { count: this._collection.getName() };\n"
-"if ( this._query ){\n"
-"if ( this._special )\n"
-"cmd.query = this._query.query;\n"
-"else\n"
-"cmd.query = this._query;\n"
-"}\n"
-"cmd.fields = this._fields || {};\n"
-"\n"
-"if ( applySkipLimit ){\n"
-"if ( this._limit )\n"
-"cmd.limit = this._limit;\n"
-"if ( this._skip )\n"
-"cmd.skip = this._skip;\n"
-"}\n"
-"\n"
-"var res = this._db.runCommand( cmd );\n"
-"if( res && res.n != null ) return res.n;\n"
-"throw \"count failed: \" + tojson( res );\n"
-"}\n"
-"\n"
-"DBQuery.prototype.size = function(){\n"
-"return this.count( true );\n"
-"}\n"
-"\n"
-"DBQuery.prototype.countReturn = function(){\n"
-"var c = this.count();\n"
-"\n"
-"if ( this._skip )\n"
-"c = c - this._skip;\n"
-"\n"
-"if ( this._limit > 0 && this._limit < c )\n"
-"return this._limit;\n"
-"\n"
-"return c;\n"
-"}\n"
-"\n"
-"/**\n"
-"* iterative count - only for testing\n"
-"*/\n"
-"DBQuery.prototype.itcount = function(){\n"
-"var num = 0;\n"
-"while ( this.hasNext() ){\n"
-"num++;\n"
-"this.next();\n"
-"}\n"
-"return num;\n"
-"}\n"
-"\n"
-"DBQuery.prototype.length = function(){\n"
-"return this.toArray().length;\n"
-"}\n"
-"\n"
-"DBQuery.prototype._addSpecial = function( name , value ){\n"
-"this._ensureSpecial();\n"
-"this._query[name] = value;\n"
-"return this;\n"
-"}\n"
-"\n"
-"DBQuery.prototype.sort = function( sortBy ){\n"
-"return this._addSpecial( \"orderby\" , sortBy );\n"
-"}\n"
-"\n"
-"DBQuery.prototype.hint = function( hint ){\n"
-"return this._addSpecial( \"$hint\" , hint );\n"
-"}\n"
-"\n"
-"DBQuery.prototype.min = function( min ) {\n"
-"return this._addSpecial( \"$min\" , min );\n"
-"}\n"
-"\n"
-"DBQuery.prototype.max = function( max ) {\n"
-"return this._addSpecial( \"$max\" , max );\n"
-"}\n"
-"\n"
-"DBQuery.prototype.showDiskLoc = function() {\n"
-"return this._addSpecial( \"$showDiskLoc\" , true);\n"
-"}\n"
-"\n"
-"DBQuery.prototype.forEach = function( func ){\n"
-"while ( this.hasNext() )\n"
-"func( this.next() );\n"
-"}\n"
-"\n"
-"DBQuery.prototype.map = function( func ){\n"
-"var a = [];\n"
-"while ( this.hasNext() )\n"
-"a.push( func( this.next() ) );\n"
-"return a;\n"
-"}\n"
-"\n"
-"DBQuery.prototype.arrayAccess = function( idx ){\n"
-"return this.toArray()[idx];\n"
-"}\n"
-"\n"
-"DBQuery.prototype.explain = function (verbose) {\n"
-"/* verbose=true --> include allPlans, oldPlan fields */\n"
-"var n = this.clone();\n"
-"n._ensureSpecial();\n"
-"n._query.$explain = true;\n"
-"n._limit = Math.abs(n._limit) * -1;\n"
-"var e = n.next();\n"
-"\n"
-"function cleanup(obj){\n"
-"if (typeof(obj) != 'object'){\n"
-"return;\n"
-"}\n"
-"\n"
-"delete obj.allPlans;\n"
-"delete obj.oldPlan;\n"
-"\n"
-"if (typeof(obj.length) == 'number'){\n"
-"for (var i=0; i < obj.length; i++){\n"
-"cleanup(obj[i]);\n"
-"}\n"
-"}\n"
-"\n"
-"if (obj.shards){\n"
-"for (var key in obj.shards){\n"
-"cleanup(obj.shards[key]);\n"
-"}\n"
-"}\n"
-"\n"
-"if (obj.clauses){\n"
-"cleanup(obj.clauses);\n"
-"}\n"
-"}\n"
-"\n"
-"if (!verbose)\n"
-"cleanup(e);\n"
-"\n"
-"return e;\n"
-"}\n"
-"\n"
-"DBQuery.prototype.snapshot = function(){\n"
-"this._ensureSpecial();\n"
-"this._query.$snapshot = true;\n"
-"return this;\n"
-"}\n"
-"\n"
-"DBQuery.prototype.pretty = function(){\n"
-"this._prettyShell = true;\n"
-"return this;\n"
-"}\n"
-"\n"
-"DBQuery.prototype.shellPrint = function(){\n"
-"try {\n"
-"var n = 0;\n"
-"while ( this.hasNext() && n < DBQuery.shellBatchSize ){\n"
-"var s = this._prettyShell ? tojson( this.next() ) : tojson( this.next() , \"\" , true );\n"
-"print( s );\n"
-"n++;\n"
-"}\n"
-"if ( this.hasNext() ){\n"
-"print( \"has more\" );\n"
-"___it___ = this;\n"
-"}\n"
-"else {\n"
-"___it___ = null;\n"
-"}\n"
-"}\n"
-"catch ( e ){\n"
-"print( e );\n"
-"}\n"
-"\n"
-"}\n"
-"\n"
-"DBQuery.prototype.toString = function(){\n"
-"return \"DBQuery: \" + this._ns + \" -> \" + tojson( this.query );\n"
-"}\n"
-"\n"
-"DBQuery.shellBatchSize = 20;\n"
-;
-extern const JSFile query;
-const JSFile query = { "shell/query.js" , _jscode_raw_query };
-const StringData _jscode_raw_collection =
-"// @file collection.js - DBCollection support in the mongo shell\n"
-"// db.colName is a DBCollection object\n"
-"// or db[\"colName\"]\n"
-"\n"
-"if ( ( typeof DBCollection ) == \"undefined\" ){\n"
-"DBCollection = function( mongo , db , shortName , fullName ){\n"
-"this._mongo = mongo;\n"
-"this._db = db;\n"
-"this._shortName = shortName;\n"
-"this._fullName = fullName;\n"
-"\n"
-"this.verify();\n"
-"}\n"
-"}\n"
-"\n"
-"DBCollection.prototype.verify = function(){\n"
-"assert( this._fullName , \"no fullName\" );\n"
-"assert( this._shortName , \"no shortName\" );\n"
-"assert( this._db , \"no db\" );\n"
-"\n"
-"assert.eq( this._fullName , this._db._name + \".\" + this._shortName , \"name mismatch\" );\n"
-"\n"
-"assert( this._mongo , \"no mongo in DBCollection\" );\n"
-"}\n"
-"\n"
-"DBCollection.prototype.getName = function(){\n"
-"return this._shortName;\n"
-"}\n"
-"\n"
-"DBCollection.prototype.help = function () {\n"
-"var shortName = this.getName();\n"
-"print(\"DBCollection help\");\n"
-"print(\"\\tdb.\" + shortName + \".find().help() - show DBCursor help\");\n"
-"print(\"\\tdb.\" + shortName + \".count()\");\n"
-"print(\"\\tdb.\" + shortName + \".dataSize()\");\n"
-"print(\"\\tdb.\" + shortName + \".distinct( key ) - eg. db.\" + shortName + \".distinct( 'x' )\");\n"
-"print(\"\\tdb.\" + shortName + \".drop() drop the collection\");\n"
-"print(\"\\tdb.\" + shortName + \".dropIndex(name)\");\n"
-"print(\"\\tdb.\" + shortName + \".dropIndexes()\");\n"
-"print(\"\\tdb.\" + shortName + \".ensureIndex(keypattern[,options]) - options is an object with these possible fields: name, unique, dropDups\");\n"
-"print(\"\\tdb.\" + shortName + \".reIndex()\");\n"
-"print(\"\\tdb.\" + shortName + \".find([query],[fields]) - query is an optional query filter. fields is optional set of fields to return.\");\n"
-"print(\"\\t e.g. db.\" + shortName + \".find( {x:77} , {name:1, x:1} )\");\n"
-"print(\"\\tdb.\" + shortName + \".find(...).count()\");\n"
-"print(\"\\tdb.\" + shortName + \".find(...).limit(n)\");\n"
-"print(\"\\tdb.\" + shortName + \".find(...).skip(n)\");\n"
-"print(\"\\tdb.\" + shortName + \".find(...).sort(...)\");\n"
-"print(\"\\tdb.\" + shortName + \".findOne([query])\");\n"
-"print(\"\\tdb.\" + shortName + \".findAndModify( { update : ... , remove : bool [, query: {}, sort: {}, 'new': false] } )\");\n"
-"print(\"\\tdb.\" + shortName + \".getDB() get DB object associated with collection\");\n"
-"print(\"\\tdb.\" + shortName + \".getIndexes()\");\n"
-"print(\"\\tdb.\" + shortName + \".group( { key : ..., initial: ..., reduce : ...[, cond: ...] } )\");\n"
-"print(\"\\tdb.\" + shortName + \".mapReduce( mapFunction , reduceFunction , <optional params> )\");\n"
-"print(\"\\tdb.\" + shortName + \".remove(query)\");\n"
-"print(\"\\tdb.\" + shortName + \".renameCollection( newName , <dropTarget> ) renames the collection.\");\n"
-"print(\"\\tdb.\" + shortName + \".runCommand( name , <options> ) runs a db command with the given name where the first param is the collection name\");\n"
-"print(\"\\tdb.\" + shortName + \".save(obj)\");\n"
-"print(\"\\tdb.\" + shortName + \".stats()\");\n"
-"print(\"\\tdb.\" + shortName + \".storageSize() - includes free space allocated to this collection\");\n"
-"print(\"\\tdb.\" + shortName + \".totalIndexSize() - size in bytes of all the indexes\");\n"
-"print(\"\\tdb.\" + shortName + \".totalSize() - storage allocated for all data and indexes\");\n"
-"print(\"\\tdb.\" + shortName + \".update(query, object[, upsert_bool, multi_bool])\");\n"
-"print(\"\\tdb.\" + shortName + \".validate() - SLOW\");\n"
-"print(\"\\tdb.\" + shortName + \".getShardVersion() - only for use with sharding\");\n"
-"return __magicNoPrint;\n"
-"}\n"
-"\n"
-"DBCollection.prototype.getFullName = function(){\n"
-"return this._fullName;\n"
-"}\n"
-"DBCollection.prototype.getMongo = function(){\n"
-"return this._db.getMongo();\n"
-"}\n"
-"DBCollection.prototype.getDB = function(){\n"
-"return this._db;\n"
-"}\n"
-"\n"
-"DBCollection.prototype._dbCommand = function( cmd , params ){\n"
-"if ( typeof( cmd ) == \"object\" )\n"
-"return this._db._dbCommand( cmd );\n"
-"\n"
-"var c = {};\n"
-"c[cmd] = this.getName();\n"
-"if ( params )\n"
-"Object.extend( c , params );\n"
-"return this._db._dbCommand( c );\n"
-"}\n"
-"\n"
-"DBCollection.prototype.runCommand = DBCollection.prototype._dbCommand;\n"
-"\n"
-"DBCollection.prototype._massageObject = function( q ){\n"
-"if ( ! q )\n"
-"return {};\n"
-"\n"
-"var type = typeof q;\n"
-"\n"
-"if ( type == \"function\" )\n"
-"return { $where : q };\n"
-"\n"
-"if ( q.isObjectId )\n"
-"return { _id : q };\n"
-"\n"
-"if ( type == \"object\" )\n"
-"return q;\n"
-"\n"
-"if ( type == \"string\" ){\n"
-"if ( q.length == 24 )\n"
-"return { _id : q };\n"
-"\n"
-"return { $where : q };\n"
-"}\n"
-"\n"
-"throw \"don't know how to massage : \" + type;\n"
-"\n"
-"}\n"
-"\n"
-"\n"
-"DBCollection.prototype._validateObject = function( o ){\n"
-"if ( o._ensureSpecial && o._checkModify )\n"
-"throw \"can't save a DBQuery object\";\n"
-"}\n"
-"\n"
-"DBCollection._allowedFields = { $id : 1 , $ref : 1 };\n"
-"\n"
-"DBCollection.prototype._validateForStorage = function( o ){\n"
-"this._validateObject( o );\n"
-"for ( var k in o ){\n"
-"if ( k.indexOf( \".\" ) >= 0 ) {\n"
-"throw \"can't have . in field names [\" + k + \"]\" ;\n"
-"}\n"
-"\n"
-"if ( k.indexOf( \"$\" ) == 0 && ! DBCollection._allowedFields[k] ) {\n"
-"throw \"field names cannot start with $ [\" + k + \"]\";\n"
-"}\n"
-"\n"
-"if ( o[k] !== null && typeof( o[k] ) === \"object\" ) {\n"
-"this._validateForStorage( o[k] );\n"
-"}\n"
-"}\n"
-"};\n"
-"\n"
-"\n"
-"DBCollection.prototype.find = function( query , fields , limit , skip ){\n"
-"return new DBQuery( this._mongo , this._db , this ,\n"
-"this._fullName , this._massageObject( query ) , fields , limit , skip );\n"
-"}\n"
-"\n"
-"DBCollection.prototype.findOne = function( query , fields ){\n"
-"var cursor = this._mongo.find( this._fullName , this._massageObject( query ) || {} , fields , -1 , 0 , 0 );\n"
-"if ( ! cursor.hasNext() )\n"
-"return null;\n"
-"var ret = cursor.next();\n"
-"if ( cursor.hasNext() ) throw \"findOne has more than 1 result!\";\n"
-"if ( ret.$err )\n"
-"throw \"error \" + tojson( ret );\n"
-"return ret;\n"
-"}\n"
-"\n"
-"DBCollection.prototype.insert = function( obj , _allow_dot ){\n"
-"if ( ! obj )\n"
-"throw \"no object passed to insert!\";\n"
-"if ( ! _allow_dot ) {\n"
-"this._validateForStorage( obj );\n"
-"}\n"
-"if ( typeof( obj._id ) == \"undefined\" ){\n"
-"var tmp = obj; // don't want to modify input\n"
-"obj = {_id: new ObjectId()};\n"
-"for (var key in tmp){\n"
-"obj[key] = tmp[key];\n"
-"}\n"
-"}\n"
-"this._mongo.insert( this._fullName , obj );\n"
-"this._lastID = obj._id;\n"
-"}\n"
-"\n"
-"DBCollection.prototype.remove = function( t , justOne ){\n"
-"this._mongo.remove( this._fullName , this._massageObject( t ) , justOne ? true : false );\n"
-"}\n"
-"\n"
-"DBCollection.prototype.update = function( query , obj , upsert , multi ){\n"
-"assert( query , \"need a query\" );\n"
-"assert( obj , \"need an object\" );\n"
-"\n"
-"var firstKey = null;\n"
-"for (var k in obj) { firstKey = k; break; }\n"
-"\n"
-"if (firstKey != null && firstKey[0] == '$') {\n"
-"// for mods we only validate partially, for example keys may have dots\n"
-"this._validateObject( obj );\n"
-"} else {\n"
-"// we're basically inserting a brand new object, do full validation\n"
-"this._validateForStorage( obj );\n"
-"}\n"
-"this._mongo.update( this._fullName , query , obj , upsert ? true : false , multi ? true : false );\n"
-"}\n"
-"\n"
-"DBCollection.prototype.save = function( obj ){\n"
-"if ( obj == null || typeof( obj ) == \"undefined\" )\n"
-"throw \"can't save a null\";\n"
-"\n"
-"if ( typeof( obj._id ) == \"undefined\" ){\n"
-"obj._id = new ObjectId();\n"
-"return this.insert( obj );\n"
-"}\n"
-"else {\n"
-"return this.update( { _id : obj._id } , obj , true );\n"
-"}\n"
-"}\n"
-"\n"
-"DBCollection.prototype._genIndexName = function( keys ){\n"
-"var name = \"\";\n"
-"for ( var k in keys ){\n"
-"var v = keys[k];\n"
-"if ( typeof v == \"function\" )\n"
-"continue;\n"
-"\n"
-"if ( name.length > 0 )\n"
-"name += \"_\";\n"
-"name += k + \"_\";\n"
-"\n"
-"if ( typeof v == \"number\" )\n"
-"name += v;\n"
-"}\n"
-"return name;\n"
-"}\n"
-"\n"
-"DBCollection.prototype._indexSpec = function( keys, options ) {\n"
-"var ret = { ns : this._fullName , key : keys , name : this._genIndexName( keys ) };\n"
-"\n"
-"if ( ! options ){\n"
-"}\n"
-"else if ( typeof ( options ) == \"string\" )\n"
-"ret.name = options;\n"
-"else if ( typeof ( options ) == \"boolean\" )\n"
-"ret.unique = true;\n"
-"else if ( typeof ( options ) == \"object\" ){\n"
-"if ( options.length ){\n"
-"var nb = 0;\n"
-"for ( var i=0; i<options.length; i++ ){\n"
-"if ( typeof ( options[i] ) == \"string\" )\n"
-"ret.name = options[i];\n"
-"else if ( typeof( options[i] ) == \"boolean\" ){\n"
-"if ( options[i] ){\n"
-"if ( nb == 0 )\n"
-"ret.unique = true;\n"
-"if ( nb == 1 )\n"
-"ret.dropDups = true;\n"
-"}\n"
-"nb++;\n"
-"}\n"
-"}\n"
-"}\n"
-"else {\n"
-"Object.extend( ret , options );\n"
-"}\n"
-"}\n"
-"else {\n"
-"throw \"can't handle: \" + typeof( options );\n"
-"}\n"
-"/*\n"
-"return ret;\n"
-"\n"
-"var name;\n"
-"var nTrue = 0;\n"
-"\n"
-"if ( ! isObject( options ) ) {\n"
-"options = [ options ];\n"
-"}\n"
-"\n"
-"if ( options.length ){\n"
-"for( var i = 0; i < options.length; ++i ) {\n"
-"var o = options[ i ];\n"
-"if ( isString( o ) ) {\n"
-"ret.name = o;\n"
-"} else if ( typeof( o ) == \"boolean\" ) {\n"
-"if ( o ) {\n"
-"++nTrue;\n"
-"}\n"
-"}\n"
-"}\n"
-"if ( nTrue > 0 ) {\n"
-"ret.unique = true;\n"
-"}\n"
-"if ( nTrue > 1 ) {\n"
-"ret.dropDups = true;\n"
-"}\n"
-"}\n"
-"*/\n"
-"return ret;\n"
-"}\n"
-"\n"
-"DBCollection.prototype.createIndex = function( keys , options ){\n"
-"var o = this._indexSpec( keys, options );\n"
-"this._db.getCollection( \"system.indexes\" ).insert( o , true );\n"
-"}\n"
-"\n"
-"DBCollection.prototype.ensureIndex = function( keys , options ){\n"
-"var name = this._indexSpec( keys, options ).name;\n"
-"this._indexCache = this._indexCache || {};\n"
-"if ( this._indexCache[ name ] ){\n"
-"return;\n"
-"}\n"
-"\n"
-"this.createIndex( keys , options );\n"
-"if ( this.getDB().getLastError() == \"\" ) {\n"
-"this._indexCache[name] = true;\n"
-"}\n"
-"}\n"
-"\n"
-"DBCollection.prototype.resetIndexCache = function(){\n"
-"this._indexCache = {};\n"
-"}\n"
-"\n"
-"DBCollection.prototype.reIndex = function() {\n"
-"return this._db.runCommand({ reIndex: this.getName() });\n"
-"}\n"
-"\n"
-"DBCollection.prototype.dropIndexes = function(){\n"
-"this.resetIndexCache();\n"
-"\n"
-"var res = this._db.runCommand( { deleteIndexes: this.getName(), index: \"*\" } );\n"
-"assert( res , \"no result from dropIndex result\" );\n"
-"if ( res.ok )\n"
-"return res;\n"
-"\n"
-"if ( res.errmsg.match( /not found/ ) )\n"
-"return res;\n"
-"\n"
-"throw \"error dropping indexes : \" + tojson( res );\n"
-"}\n"
-"\n"
-"\n"
-"DBCollection.prototype.drop = function(){\n"
-"if ( arguments.length > 0 )\n"
-"throw \"drop takes no argument\";\n"
-"this.resetIndexCache();\n"
-"var ret = this._db.runCommand( { drop: this.getName() } );\n"
-"if ( ! ret.ok ){\n"
-"if ( ret.errmsg == \"ns not found\" )\n"
-"return false;\n"
-"throw \"drop failed: \" + tojson( ret );\n"
-"}\n"
-"return true;\n"
-"}\n"
-"\n"
-"DBCollection.prototype.findAndModify = function(args){\n"
-"var cmd = { findandmodify: this.getName() };\n"
-"for (var key in args){\n"
-"cmd[key] = args[key];\n"
-"}\n"
-"\n"
-"var ret = this._db.runCommand( cmd );\n"
-"if ( ! ret.ok ){\n"
-"if (ret.errmsg == \"No matching object found\"){\n"
-"return null;\n"
-"}\n"
-"throw \"findAndModifyFailed failed: \" + tojson( ret.errmsg );\n"
-"}\n"
-"return ret.value;\n"
-"}\n"
-"\n"
-"DBCollection.prototype.renameCollection = function( newName , dropTarget ){\n"
-"return this._db._adminCommand( { renameCollection : this._fullName ,\n"
-"to : this._db._name + \".\" + newName ,\n"
-"dropTarget : dropTarget } )\n"
-"}\n"
-"\n"
-"DBCollection.prototype.validate = function() {\n"
-"var res = this._db.runCommand( { validate: this.getName() } );\n"
-"\n"
-"res.valid = false;\n"
-"\n"
-"var raw = res.result || res.raw;\n"
-"\n"
-"if ( raw ){\n"
-"var str = \"-\" + tojson( raw );\n"
-"res.valid = ! ( str.match( /exception/ ) || str.match( /corrupt/ ) );\n"
-"\n"
-"var p = /lastExtentSize:(\\d+)/;\n"
-"var r = p.exec( str );\n"
-"if ( r ){\n"
-"res.lastExtentSize = Number( r[1] );\n"
-"}\n"
-"}\n"
-"\n"
-"return res;\n"
-"}\n"
-"\n"
-"DBCollection.prototype.getShardVersion = function(){\n"
-"return this._db._adminCommand( { getShardVersion : this._fullName } );\n"
-"}\n"
-"\n"
-"DBCollection.prototype.getIndexes = function(){\n"
-"return this.getDB().getCollection( \"system.indexes\" ).find( { ns : this.getFullName() } ).toArray();\n"
-"}\n"
-"\n"
-"DBCollection.prototype.getIndices = DBCollection.prototype.getIndexes;\n"
-"DBCollection.prototype.getIndexSpecs = DBCollection.prototype.getIndexes;\n"
-"\n"
-"DBCollection.prototype.getIndexKeys = function(){\n"
-"return this.getIndexes().map(\n"
-"function(i){\n"
-"return i.key;\n"
-"}\n"
-");\n"
-"}\n"
-"\n"
-"\n"
-"DBCollection.prototype.count = function( x ){\n"
-"return this.find( x ).count();\n"
-"}\n"
-"\n"
-"/**\n"
-"* Drop free lists. Normally not used.\n"
-"* Note this only does the collection itself, not the namespaces of its indexes (see cleanAll).\n"
-"*/\n"
-"DBCollection.prototype.clean = function() {\n"
-"return this._dbCommand( { clean: this.getName() } );\n"
-"}\n"
-"\n"
-"\n"
-"\n"
-"/**\n"
-"* <p>Drop a specified index.</p>\n"
-"*\n"
-"* <p>\n"
-"* Name is the name of the index in the system.indexes name field. (Run db.system.indexes.find() to\n"
-"* see example data.)\n"
-"* </p>\n"
-"*\n"
-"* <p>Note : alpha: space is not reclaimed </p>\n"
-"* @param {String} name of index to delete.\n"
-"* @return A result object. result.ok will be true if successful.\n"
-"*/\n"
-"DBCollection.prototype.dropIndex = function(index) {\n"
-"assert(index , \"need to specify index to dropIndex\" );\n"
-"\n"
-"if ( ! isString( index ) && isObject( index ) )\n"
-"index = this._genIndexName( index );\n"
-"\n"
-"var res = this._dbCommand( \"deleteIndexes\" ,{ index: index } );\n"
-"this.resetIndexCache();\n"
-"return res;\n"
-"}\n"
-"\n"
-"DBCollection.prototype.copyTo = function( newName ){\n"
-"return this.getDB().eval(\n"
-"function( collName , newName ){\n"
-"var from = db[collName];\n"
-"var to = db[newName];\n"
-"to.ensureIndex( { _id : 1 } );\n"
-"var count = 0;\n"
-"\n"
-"var cursor = from.find();\n"
-"while ( cursor.hasNext() ){\n"
-"var o = cursor.next();\n"
-"count++;\n"
-"to.save( o );\n"
-"}\n"
-"\n"
-"return count;\n"
-"} , this.getName() , newName\n"
-");\n"
-"}\n"
-"\n"
-"DBCollection.prototype.getCollection = function( subName ){\n"
-"return this._db.getCollection( this._shortName + \".\" + subName );\n"
-"}\n"
-"\n"
-"DBCollection.prototype.stats = function( scale ){\n"
-"return this._db.runCommand( { collstats : this._shortName , scale : scale } );\n"
-"}\n"
-"\n"
-"DBCollection.prototype.dataSize = function(){\n"
-"return this.stats().size;\n"
-"}\n"
-"\n"
-"DBCollection.prototype.storageSize = function(){\n"
-"return this.stats().storageSize;\n"
-"}\n"
-"\n"
-"DBCollection.prototype.totalIndexSize = function( verbose ){\n"
-"var stats = this.stats();\n"
-"if (verbose){\n"
-"for (var ns in stats.indexSizes){\n"
-"print( ns + \"\\t\" + stats.indexSizes[ns] );\n"
-"}\n"
-"}\n"
-"return stats.totalIndexSize;\n"
-"}\n"
-"\n"
-"\n"
-"DBCollection.prototype.totalSize = function(){\n"
-"var total = this.storageSize();\n"
-"var mydb = this._db;\n"
-"var shortName = this._shortName;\n"
-"this.getIndexes().forEach(\n"
-"function( spec ){\n"
-"var coll = mydb.getCollection( shortName + \".$\" + spec.name );\n"
-"var mysize = coll.storageSize();\n"
-"//print( coll + \"\\t\" + mysize + \"\\t\" + tojson( coll.validate() ) );\n"
-"total += coll.dataSize();\n"
-"}\n"
-");\n"
-"return total;\n"
-"}\n"
-"\n"
-"\n"
-"DBCollection.prototype.convertToCapped = function( bytes ){\n"
-"if ( ! bytes )\n"
-"throw \"have to specify # of bytes\";\n"
-"return this._dbCommand( { convertToCapped : this._shortName , size : bytes } )\n"
-"}\n"
-"\n"
-"DBCollection.prototype.exists = function(){\n"
-"return this._db.system.namespaces.findOne( { name : this._fullName } );\n"
-"}\n"
-"\n"
-"DBCollection.prototype.isCapped = function(){\n"
-"var e = this.exists();\n"
-"return ( e && e.options && e.options.capped ) ? true : false;\n"
-"}\n"
-"\n"
-"DBCollection.prototype.distinct = function( keyString , query ){\n"
-"var res = this._dbCommand( { distinct : this._shortName , key : keyString , query : query || {} } );\n"
-"if ( ! res.ok )\n"
-"throw \"distinct failed: \" + tojson( res );\n"
-"return res.values;\n"
-"}\n"
-"\n"
-"DBCollection.prototype.group = function( params ){\n"
-"params.ns = this._shortName;\n"
-"return this._db.group( params );\n"
-"}\n"
-"\n"
-"DBCollection.prototype.groupcmd = function( params ){\n"
-"params.ns = this._shortName;\n"
-"return this._db.groupcmd( params );\n"
-"}\n"
-"\n"
-"MapReduceResult = function( db , o ){\n"
-"Object.extend( this , o );\n"
-"this._o = o;\n"
-"this._keys = Object.keySet( o );\n"
-"this._db = db;\n"
-"this._coll = this._db.getCollection( this.result );\n"
-"}\n"
-"\n"
-"MapReduceResult.prototype._simpleKeys = function(){\n"
-"return this._o;\n"
-"}\n"
-"\n"
-"MapReduceResult.prototype.find = function(){\n"
-"if ( this.results )\n"
-"return this.results;\n"
-"return DBCollection.prototype.find.apply( this._coll , arguments );\n"
-"}\n"
-"\n"
-"MapReduceResult.prototype.drop = function(){\n"
-"return this._coll.drop();\n"
-"}\n"
-"\n"
-"/**\n"
-"* just for debugging really\n"
-"*/\n"
-"MapReduceResult.prototype.convertToSingleObject = function(){\n"
-"var z = {};\n"
-"this._coll.find().forEach( function(a){ z[a._id] = a.value; } );\n"
-"return z;\n"
-"}\n"
-"\n"
-"/**\n"
-"* @param optional object of optional fields;\n"
-"*/\n"
-"DBCollection.prototype.mapReduce = function( map , reduce , optionsOrOutString ){\n"
-"var c = { mapreduce : this._shortName , map : map , reduce : reduce };\n"
-"assert( optionsOrOutString , \"need to an optionsOrOutString\" )\n"
-"\n"
-"if ( typeof( optionsOrOutString ) == \"string\" )\n"
-"c[\"out\"] = optionsOrOutString;\n"
-"else\n"
-"Object.extend( c , optionsOrOutString );\n"
-"\n"
-"var raw = this._db.runCommand( c );\n"
-"if ( ! raw.ok )\n"
-"throw \"map reduce failed: \" + tojson( raw );\n"
-"return new MapReduceResult( this._db , raw );\n"
-"\n"
-"}\n"
-"\n"
-"DBCollection.prototype.toString = function(){\n"
-"return this.getFullName();\n"
-"}\n"
-"\n"
-"DBCollection.prototype.toString = function(){\n"
-"return this.getFullName();\n"
-"}\n"
-"\n"
-"\n"
-"DBCollection.prototype.tojson = DBCollection.prototype.toString;\n"
-"\n"
-"DBCollection.prototype.shellPrint = DBCollection.prototype.toString;\n"
-"\n"
-"DBCollection.autocomplete = function(obj){\n"
-"var colls = DB.autocomplete(obj.getDB());\n"
-"var ret = [];\n"
-"for (var i=0; i<colls.length; i++){\n"
-"var c = colls[i];\n"
-"if (c.length <= obj.getName().length) continue;\n"
-"if (c.slice(0,obj.getName().length+1) != obj.getName()+'.') continue;\n"
-"\n"
-"ret.push(c.slice(obj.getName().length+1));\n"
-"}\n"
-"return ret;\n"
-"}\n"
-;
-extern const JSFile collection;
-const JSFile collection = { "shell/collection.js" , _jscode_raw_collection };
-} // namespace JSFiles
+ struct JSFile { const char* name; const StringData& source; };
+ namespace JSFiles {
+ const StringData _jscode_raw_utils =
+ "__quiet = false;\n"
+ "__magicNoPrint = { __magicNoPrint : 1111 }\n"
+ "\n"
+ "chatty = function(s){\n"
+ "if ( ! __quiet )\n"
+ "print( s );\n"
+ "}\n"
+ "\n"
+ "friendlyEqual = function( a , b ){\n"
+ "if ( a == b )\n"
+ "return true;\n"
+ "\n"
+ "if ( tojson( a ) == tojson( b ) )\n"
+ "return true;\n"
+ "\n"
+ "return false;\n"
+ "}\n"
+ "\n"
+ "printStackTrace = function(){\n"
+ "try{\n"
+ "throw new Error(\"Printing Stack Trace\");\n"
+ "} catch (e) {\n"
+ "print(e.stack);\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "doassert = function (msg) {\n"
+ "if (msg.indexOf(\"assert\") == 0)\n"
+ "print(msg);\n"
+ "else\n"
+ "print(\"assert: \" + msg);\n"
+ "printStackTrace();\n"
+ "throw msg;\n"
+ "}\n"
+ "\n"
+ "assert = function( b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "\n"
+ "if ( b )\n"
+ "return;\n"
+ "\n"
+ "doassert( msg == undefined ? \"assert failed\" : \"assert failed : \" + msg );\n"
+ "}\n"
+ "\n"
+ "assert.automsg = function( b ) {\n"
+ "assert( eval( b ), b );\n"
+ "}\n"
+ "\n"
+ "assert._debug = false;\n"
+ "\n"
+ "assert.eq = function( a , b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "\n"
+ "if ( a == b )\n"
+ "return;\n"
+ "\n"
+ "if ( ( a != null && b != null ) && friendlyEqual( a , b ) )\n"
+ "return;\n"
+ "\n"
+ "doassert( \"[\" + tojson( a ) + \"] != [\" + tojson( b ) + \"] are not equal : \" + msg );\n"
+ "}\n"
+ "\n"
+ "assert.eq.automsg = function( a, b ) {\n"
+ "assert.eq( eval( a ), eval( b ), \"[\" + a + \"] != [\" + b + \"]\" );\n"
+ "}\n"
+ "\n"
+ "assert.neq = function( a , b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "if ( a != b )\n"
+ "return;\n"
+ "\n"
+ "doassert( \"[\" + a + \"] != [\" + b + \"] are equal : \" + msg );\n"
+ "}\n"
+ "\n"
+ "assert.repeat = function( f, msg, timeout, interval ) {\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "\n"
+ "var start = new Date();\n"
+ "timeout = timeout || 30000;\n"
+ "interval = interval || 200;\n"
+ "var last;\n"
+ "while( 1 ) {\n"
+ "\n"
+ "if ( typeof( f ) == \"string\" ){\n"
+ "if ( eval( f ) )\n"
+ "return;\n"
+ "}\n"
+ "else {\n"
+ "if ( f() )\n"
+ "return;\n"
+ "}\n"
+ "\n"
+ "if ( ( new Date() ).getTime() - start.getTime() > timeout )\n"
+ "break;\n"
+ "sleep( interval );\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "assert.soon = function( f, msg, timeout /*ms*/, interval ) {\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "\n"
+ "var start = new Date();\n"
+ "timeout = timeout || 30000;\n"
+ "interval = interval || 200;\n"
+ "var last;\n"
+ "while( 1 ) {\n"
+ "\n"
+ "if ( typeof( f ) == \"string\" ){\n"
+ "if ( eval( f ) )\n"
+ "return;\n"
+ "}\n"
+ "else {\n"
+ "if ( f() )\n"
+ "return;\n"
+ "}\n"
+ "\n"
+ "if ( ( new Date() ).getTime() - start.getTime() > timeout )\n"
+ "doassert( \"assert.soon failed: \" + f + \", msg:\" + msg );\n"
+ "sleep( interval );\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "assert.throws = function( func , params , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "try {\n"
+ "func.apply( null , params );\n"
+ "}\n"
+ "catch ( e ){\n"
+ "return e;\n"
+ "}\n"
+ "\n"
+ "doassert( \"did not throw exception: \" + msg );\n"
+ "}\n"
+ "\n"
+ "assert.throws.automsg = function( func, params ) {\n"
+ "assert.throws( func, params, func.toString() );\n"
+ "}\n"
+ "\n"
+ "assert.commandWorked = function( res , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "\n"
+ "if ( res.ok == 1 )\n"
+ "return;\n"
+ "\n"
+ "doassert( \"command failed: \" + tojson( res ) + \" : \" + msg );\n"
+ "}\n"
+ "\n"
+ "assert.commandFailed = function( res , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "\n"
+ "if ( res.ok == 0 )\n"
+ "return;\n"
+ "\n"
+ "doassert( \"command worked when it should have failed: \" + tojson( res ) + \" : \" + msg );\n"
+ "}\n"
+ "\n"
+ "assert.isnull = function( what , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "\n"
+ "if ( what == null )\n"
+ "return;\n"
+ "\n"
+ "doassert( \"supposed to null (\" + ( msg || \"\" ) + \") was: \" + tojson( what ) );\n"
+ "}\n"
+ "\n"
+ "assert.lt = function( a , b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "\n"
+ "if ( a < b )\n"
+ "return;\n"
+ "doassert( a + \" is not less than \" + b + \" : \" + msg );\n"
+ "}\n"
+ "\n"
+ "assert.gt = function( a , b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "\n"
+ "if ( a > b )\n"
+ "return;\n"
+ "doassert( a + \" is not greater than \" + b + \" : \" + msg );\n"
+ "}\n"
+ "\n"
+ "assert.lte = function( a , b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "\n"
+ "if ( a <= b )\n"
+ "return;\n"
+ "doassert( a + \" is not less than or eq \" + b + \" : \" + msg );\n"
+ "}\n"
+ "\n"
+ "assert.gte = function( a , b , msg ){\n"
+ "if ( assert._debug && msg ) print( \"in assert for: \" + msg );\n"
+ "\n"
+ "if ( a >= b )\n"
+ "return;\n"
+ "doassert( a + \" is not greater than or eq \" + b + \" : \" + msg );\n"
+ "}\n"
+ "\n"
+ "\n"
+ "assert.close = function( a , b , msg , places ){\n"
+ "if (places === undefined) {\n"
+ "places = 4;\n"
+ "}\n"
+ "if (Math.round((a - b) * Math.pow(10, places)) === 0) {\n"
+ "return;\n"
+ "}\n"
+ "doassert( a + \" is not equal to \" + b + \" within \" + places +\n"
+ "\" places, diff: \" + (a-b) + \" : \" + msg );\n"
+ "};\n"
+ "\n"
+ "Object.extend = function( dst , src , deep ){\n"
+ "for ( var k in src ){\n"
+ "var v = src[k];\n"
+ "if ( deep && typeof(v) == \"object\" ){\n"
+ "if ( \"floatApprox\" in v ) { // convert NumberLong properly\n"
+ "eval( \"v = \" + tojson( v ) );\n"
+ "} else {\n"
+ "v = Object.extend( typeof ( v.length ) == \"number\" ? [] : {} , v , true );\n"
+ "}\n"
+ "}\n"
+ "dst[k] = v;\n"
+ "}\n"
+ "return dst;\n"
+ "}\n"
+ "\n"
+ "argumentsToArray = function( a ){\n"
+ "var arr = [];\n"
+ "for ( var i=0; i<a.length; i++ )\n"
+ "arr[i] = a[i];\n"
+ "return arr;\n"
+ "}\n"
+ "\n"
+ "isString = function( x ){\n"
+ "return typeof( x ) == \"string\";\n"
+ "}\n"
+ "\n"
+ "isNumber = function(x){\n"
+ "return typeof( x ) == \"number\";\n"
+ "}\n"
+ "\n"
+ "isObject = function( x ){\n"
+ "return typeof( x ) == \"object\";\n"
+ "}\n"
+ "\n"
+ "String.prototype.trim = function() {\n"
+ "return this.replace(/^\\s+|\\s+$/g,\"\");\n"
+ "}\n"
+ "String.prototype.ltrim = function() {\n"
+ "return this.replace(/^\\s+/,\"\");\n"
+ "}\n"
+ "String.prototype.rtrim = function() {\n"
+ "return this.replace(/\\s+$/,\"\");\n"
+ "}\n"
+ "\n"
+ "Number.prototype.zeroPad = function(width) {\n"
+ "var str = this + '';\n"
+ "while (str.length < width)\n"
+ "str = '0' + str;\n"
+ "return str;\n"
+ "}\n"
+ "\n"
+ "Date.timeFunc = function( theFunc , numTimes ){\n"
+ "\n"
+ "var start = new Date();\n"
+ "\n"
+ "numTimes = numTimes || 1;\n"
+ "for ( var i=0; i<numTimes; i++ ){\n"
+ "theFunc.apply( null , argumentsToArray( arguments ).slice( 2 ) );\n"
+ "}\n"
+ "\n"
+ "return (new Date()).getTime() - start.getTime();\n"
+ "}\n"
+ "\n"
+ "Date.prototype.tojson = function(){\n"
+ "\n"
+ "var UTC = Date.printAsUTC ? 'UTC' : '';\n"
+ "\n"
+ "var year = this['get'+UTC+'FullYear']().zeroPad(4);\n"
+ "var month = (this['get'+UTC+'Month']() + 1).zeroPad(2);\n"
+ "var date = this['get'+UTC+'Date']().zeroPad(2);\n"
+ "var hour = this['get'+UTC+'Hours']().zeroPad(2);\n"
+ "var minute = this['get'+UTC+'Minutes']().zeroPad(2);\n"
+ "var sec = this['get'+UTC+'Seconds']().zeroPad(2)\n"
+ "\n"
+ "if (this['get'+UTC+'Milliseconds']())\n"
+ "sec += '.' + this['get'+UTC+'Milliseconds']().zeroPad(3)\n"
+ "\n"
+ "var ofs = 'Z';\n"
+ "if (!Date.printAsUTC){\n"
+ "var ofsmin = this.getTimezoneOffset();\n"
+ "if (ofsmin != 0){\n"
+ "ofs = ofsmin > 0 ? '-' : '+'; // This is correct\n"
+ "ofs += (ofsmin/60).zeroPad(2)\n"
+ "ofs += (ofsmin%60).zeroPad(2)\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "return 'ISODate(\"'+year+'-'+month+'-'+date+'T'+hour+':'+minute+':'+sec+ofs+'\")';\n"
+ "}\n"
+ "\n"
+ "Date.printAsUTC = true;\n"
+ "\n"
+ "\n"
+ "ISODate = function(isoDateStr){\n"
+ "if (!isoDateStr)\n"
+ "return new Date();\n"
+ "\n"
+ "var isoDateRegex = /(\\d{4})-?(\\d{2})-?(\\d{2})([T ](\\d{2})(:?(\\d{2})(:?(\\d{2}(\\.\\d+)?))?)?(Z|([+-])(\\d{2}):?(\\d{2})?)?)?/;\n"
+ "var res = isoDateRegex.exec(isoDateStr);\n"
+ "\n"
+ "if (!res)\n"
+ "throw \"invalid ISO date\";\n"
+ "\n"
+ "var year = parseInt(res[1],10) || 1970; // this should always be present\n"
+ "var month = (parseInt(res[2],10) || 1) - 1;\n"
+ "var date = parseInt(res[3],10) || 0;\n"
+ "var hour = parseInt(res[5],10) || 0;\n"
+ "var min = parseInt(res[7],10) || 0;\n"
+ "var sec = parseFloat(res[9]) || 0;\n"
+ "var ms = Math.round((sec%1) * 1000)\n"
+ "sec -= ms/1000\n"
+ "\n"
+ "var time = Date.UTC(year, month, date, hour, min, sec, ms);\n"
+ "\n"
+ "if (res[11] && res[11] != 'Z'){\n"
+ "var ofs = 0;\n"
+ "ofs += (parseInt(res[13],10) || 0) * 60*60*1000; // hours\n"
+ "ofs += (parseInt(res[14],10) || 0) * 60*1000; // mins\n"
+ "if (res[12] == '+') // if ahead subtract\n"
+ "ofs *= -1;\n"
+ "\n"
+ "time += ofs\n"
+ "}\n"
+ "\n"
+ "return new Date(time);\n"
+ "}\n"
+ "\n"
+ "RegExp.prototype.tojson = RegExp.prototype.toString;\n"
+ "\n"
+ "Array.contains = function( a , x ){\n"
+ "for ( var i=0; i<a.length; i++ ){\n"
+ "if ( a[i] == x )\n"
+ "return true;\n"
+ "}\n"
+ "return false;\n"
+ "}\n"
+ "\n"
+ "Array.unique = function( a ){\n"
+ "var u = [];\n"
+ "for ( var i=0; i<a.length; i++){\n"
+ "var o = a[i];\n"
+ "if ( ! Array.contains( u , o ) ){\n"
+ "u.push( o );\n"
+ "}\n"
+ "}\n"
+ "return u;\n"
+ "}\n"
+ "\n"
+ "Array.shuffle = function( arr ){\n"
+ "for ( var i=0; i<arr.length-1; i++ ){\n"
+ "var pos = i+Random.randInt(arr.length-i);\n"
+ "var save = arr[i];\n"
+ "arr[i] = arr[pos];\n"
+ "arr[pos] = save;\n"
+ "}\n"
+ "return arr;\n"
+ "}\n"
+ "\n"
+ "\n"
+ "Array.tojson = function( a , indent ){\n"
+ "if (!indent)\n"
+ "indent = \"\";\n"
+ "\n"
+ "if (a.length == 0) {\n"
+ "return \"[ ]\";\n"
+ "}\n"
+ "\n"
+ "var s = \"[\\n\";\n"
+ "indent += \"\\t\";\n"
+ "for ( var i=0; i<a.length; i++){\n"
+ "s += indent + tojson( a[i], indent );\n"
+ "if ( i < a.length - 1 ){\n"
+ "s += \",\\n\";\n"
+ "}\n"
+ "}\n"
+ "if ( a.length == 0 ) {\n"
+ "s += indent;\n"
+ "}\n"
+ "\n"
+ "indent = indent.substring(1);\n"
+ "s += \"\\n\"+indent+\"]\";\n"
+ "return s;\n"
+ "}\n"
+ "\n"
+ "Array.fetchRefs = function( arr , coll ){\n"
+ "var n = [];\n"
+ "for ( var i=0; i<arr.length; i ++){\n"
+ "var z = arr[i];\n"
+ "if ( coll && coll != z.getCollection() )\n"
+ "continue;\n"
+ "n.push( z.fetch() );\n"
+ "}\n"
+ "\n"
+ "return n;\n"
+ "}\n"
+ "\n"
+ "Array.sum = function( arr ){\n"
+ "if ( arr.length == 0 )\n"
+ "return null;\n"
+ "var s = arr[0];\n"
+ "for ( var i=1; i<arr.length; i++ )\n"
+ "s += arr[i];\n"
+ "return s;\n"
+ "}\n"
+ "\n"
+ "Array.avg = function( arr ){\n"
+ "if ( arr.length == 0 )\n"
+ "return null;\n"
+ "return Array.sum( arr ) / arr.length;\n"
+ "}\n"
+ "\n"
+ "Array.stdDev = function( arr ){\n"
+ "var avg = Array.avg( arr );\n"
+ "var sum = 0;\n"
+ "\n"
+ "for ( var i=0; i<arr.length; i++ ){\n"
+ "sum += Math.pow( arr[i] - avg , 2 );\n"
+ "}\n"
+ "\n"
+ "return Math.sqrt( sum / arr.length );\n"
+ "}\n"
+ "\n"
+ "//these two are helpers for Array.sort(func)\n"
+ "compare = function(l, r){ return (l == r ? 0 : (l < r ? -1 : 1)); }\n"
+ "\n"
+ "// arr.sort(compareOn('name'))\n"
+ "compareOn = function(field){\n"
+ "return function(l, r) { return compare(l[field], r[field]); }\n"
+ "}\n"
+ "\n"
+ "Object.keySet = function( o ) {\n"
+ "var ret = new Array();\n"
+ "for( i in o ) {\n"
+ "if ( !( i in o.__proto__ && o[ i ] === o.__proto__[ i ] ) ) {\n"
+ "ret.push( i );\n"
+ "}\n"
+ "}\n"
+ "return ret;\n"
+ "}\n"
+ "\n"
+ "if ( ! NumberLong.prototype ) {\n"
+ "NumberLong.prototype = {}\n"
+ "}\n"
+ "\n"
+ "NumberLong.prototype.tojson = function() {\n"
+ "return this.toString();\n"
+ "}\n"
+ "\n"
+ "if ( ! ObjectId.prototype )\n"
+ "ObjectId.prototype = {}\n"
+ "\n"
+ "ObjectId.prototype.toString = function(){\n"
+ "return this.str;\n"
+ "}\n"
+ "\n"
+ "ObjectId.prototype.tojson = function(){\n"
+ "return \"ObjectId(\\\"\" + this.str + \"\\\")\";\n"
+ "}\n"
+ "\n"
+ "ObjectId.prototype.isObjectId = true;\n"
+ "\n"
+ "ObjectId.prototype.getTimestamp = function(){\n"
+ "return new Date(parseInt(this.toString().slice(0,8), 16)*1000);\n"
+ "}\n"
+ "\n"
+ "ObjectId.prototype.equals = function( other){\n"
+ "return this.str == other.str;\n"
+ "}\n"
+ "\n"
+ "if ( typeof( DBPointer ) != \"undefined\" ){\n"
+ "DBPointer.prototype.fetch = function(){\n"
+ "assert( this.ns , \"need a ns\" );\n"
+ "assert( this.id , \"need an id\" );\n"
+ "\n"
+ "return db[ this.ns ].findOne( { _id : this.id } );\n"
+ "}\n"
+ "\n"
+ "DBPointer.prototype.tojson = function(indent){\n"
+ "return tojson({\"ns\" : this.ns, \"id\" : this.id}, indent);\n"
+ "}\n"
+ "\n"
+ "DBPointer.prototype.getCollection = function(){\n"
+ "return this.ns;\n"
+ "}\n"
+ "\n"
+ "DBPointer.prototype.toString = function(){\n"
+ "return \"DBPointer \" + this.ns + \":\" + this.id;\n"
+ "}\n"
+ "}\n"
+ "else {\n"
+ "print( \"warning: no DBPointer\" );\n"
+ "}\n"
+ "\n"
+ "if ( typeof( DBRef ) != \"undefined\" ){\n"
+ "DBRef.prototype.fetch = function(){\n"
+ "assert( this.$ref , \"need a ns\" );\n"
+ "assert( this.$id , \"need an id\" );\n"
+ "\n"
+ "return db[ this.$ref ].findOne( { _id : this.$id } );\n"
+ "}\n"
+ "\n"
+ "DBRef.prototype.tojson = function(indent){\n"
+ "return tojson({\"$ref\" : this.$ref, \"$id\" : this.$id}, indent);\n"
+ "}\n"
+ "\n"
+ "DBRef.prototype.getCollection = function(){\n"
+ "return this.$ref;\n"
+ "}\n"
+ "\n"
+ "DBRef.prototype.toString = function(){\n"
+ "return this.tojson();\n"
+ "}\n"
+ "}\n"
+ "else {\n"
+ "print( \"warning: no DBRef\" );\n"
+ "}\n"
+ "\n"
+ "if ( typeof( BinData ) != \"undefined\" ){\n"
+ "BinData.prototype.tojson = function () {\n"
+ "//return \"BinData type: \" + this.type + \" len: \" + this.len;\n"
+ "return this.toString();\n"
+ "}\n"
+ "}\n"
+ "else {\n"
+ "print( \"warning: no BinData class\" );\n"
+ "}\n"
+ "\n"
+ "if ( typeof( UUID ) != \"undefined\" ){\n"
+ "UUID.prototype.tojson = function () {\n"
+ "return this.toString();\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "if ( typeof _threadInject != \"undefined\" ){\n"
+ "print( \"fork() available!\" );\n"
+ "\n"
+ "Thread = function(){\n"
+ "this.init.apply( this, arguments );\n"
+ "}\n"
+ "_threadInject( Thread.prototype );\n"
+ "\n"
+ "ScopedThread = function() {\n"
+ "this.init.apply( this, arguments );\n"
+ "}\n"
+ "ScopedThread.prototype = new Thread( function() {} );\n"
+ "_scopedThreadInject( ScopedThread.prototype );\n"
+ "\n"
+ "fork = function() {\n"
+ "var t = new Thread( function() {} );\n"
+ "Thread.apply( t, arguments );\n"
+ "return t;\n"
+ "}\n"
+ "\n"
+ "// Helper class to generate a list of events which may be executed by a ParallelTester\n"
+ "EventGenerator = function( me, collectionName, mean ) {\n"
+ "this.mean = mean;\n"
+ "this.events = new Array( me, collectionName );\n"
+ "}\n"
+ "\n"
+ "EventGenerator.prototype._add = function( action ) {\n"
+ "this.events.push( [ Random.genExp( this.mean ), action ] );\n"
+ "}\n"
+ "\n"
+ "EventGenerator.prototype.addInsert = function( obj ) {\n"
+ "this._add( \"t.insert( \" + tojson( obj ) + \" )\" );\n"
+ "}\n"
+ "\n"
+ "EventGenerator.prototype.addRemove = function( obj ) {\n"
+ "this._add( \"t.remove( \" + tojson( obj ) + \" )\" );\n"
+ "}\n"
+ "\n"
+ "EventGenerator.prototype.addUpdate = function( objOld, objNew ) {\n"
+ "this._add( \"t.update( \" + tojson( objOld ) + \", \" + tojson( objNew ) + \" )\" );\n"
+ "}\n"
+ "\n"
+ "EventGenerator.prototype.addCheckCount = function( count, query, shouldPrint, checkQuery ) {\n"
+ "query = query || {};\n"
+ "shouldPrint = shouldPrint || false;\n"
+ "checkQuery = checkQuery || false;\n"
+ "var action = \"assert.eq( \" + count + \", t.count( \" + tojson( query ) + \" ) );\"\n"
+ "if ( checkQuery ) {\n"
+ "action += \" assert.eq( \" + count + \", t.find( \" + tojson( query ) + \" ).toArray().length );\"\n"
+ "}\n"
+ "if ( shouldPrint ) {\n"
+ "action += \" print( me + ' ' + \" + count + \" );\";\n"
+ "}\n"
+ "this._add( action );\n"
+ "}\n"
+ "\n"
+ "EventGenerator.prototype.getEvents = function() {\n"
+ "return this.events;\n"
+ "}\n"
+ "\n"
+ "EventGenerator.dispatch = function() {\n"
+ "var args = argumentsToArray( arguments );\n"
+ "var me = args.shift();\n"
+ "var collectionName = args.shift();\n"
+ "var m = new Mongo( db.getMongo().host );\n"
+ "var t = m.getDB( \"test\" )[ collectionName ];\n"
+ "for( var i in args ) {\n"
+ "sleep( args[ i ][ 0 ] );\n"
+ "eval( args[ i ][ 1 ] );\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "// Helper class for running tests in parallel. It assembles a set of tests\n"
+ "// and then calls assert.parallelests to run them.\n"
+ "ParallelTester = function() {\n"
+ "this.params = new Array();\n"
+ "}\n"
+ "\n"
+ "ParallelTester.prototype.add = function( fun, args ) {\n"
+ "args = args || [];\n"
+ "args.unshift( fun );\n"
+ "this.params.push( args );\n"
+ "}\n"
+ "\n"
+ "ParallelTester.prototype.run = function( msg, newScopes ) {\n"
+ "newScopes = newScopes || false;\n"
+ "assert.parallelTests( this.params, msg, newScopes );\n"
+ "}\n"
+ "\n"
+ "// creates lists of tests from jstests dir in a format suitable for use by\n"
+ "// ParallelTester.fileTester. The lists will be in random order.\n"
+ "// n: number of lists to split these tests into\n"
+ "ParallelTester.createJstestsLists = function( n ) {\n"
+ "var params = new Array();\n"
+ "for( var i = 0; i < n; ++i ) {\n"
+ "params.push( [] );\n"
+ "}\n"
+ "\n"
+ "var makeKeys = function( a ) {\n"
+ "var ret = {};\n"
+ "for( var i in a ) {\n"
+ "ret[ a[ i ] ] = 1;\n"
+ "}\n"
+ "return ret;\n"
+ "}\n"
+ "\n"
+ "// some tests can't run in parallel with most others\n"
+ "var skipTests = makeKeys( [ \"jstests/dbadmin.js\",\n"
+ "\"jstests/repair.js\",\n"
+ "\"jstests/cursor8.js\",\n"
+ "\"jstests/recstore.js\",\n"
+ "\"jstests/extent.js\",\n"
+ "\"jstests/indexb.js\",\n"
+ "\"jstests/profile1.js\",\n"
+ "\"jstests/mr3.js\",\n"
+ "\"jstests/indexh.js\",\n"
+ "\"jstests/apitest_db.js\",\n"
+ "\"jstests/evalb.js\",\n"
+ "\"jstests/evald.js\",\n"
+ "\"jstests/evalf.js\",\n"
+ "\"jstests/killop.js\",\n"
+ "\"jstests/run_program1.js\",\n"
+ "\"jstests/notablescan.js\"] );\n"
+ "\n"
+ "// some tests can't be run in parallel with each other\n"
+ "var serialTestsArr = [ \"jstests/fsync.js\",\n"
+ "\"jstests/fsync2.js\" ];\n"
+ "var serialTests = makeKeys( serialTestsArr );\n"
+ "\n"
+ "params[ 0 ] = serialTestsArr;\n"
+ "\n"
+ "var files = listFiles(\"jstests\");\n"
+ "files = Array.shuffle( files );\n"
+ "\n"
+ "var i = 0;\n"
+ "files.forEach(\n"
+ "function(x) {\n"
+ "\n"
+ "if ( ( /[\\/\\\\]_/.test(x.name) ) ||\n"
+ "( ! /\\.js$/.test(x.name ) ) ||\n"
+ "( x.name in skipTests ) ||\n"
+ "( x.name in serialTests ) ||\n"
+ "! /\\.js$/.test(x.name ) ){\n"
+ "print(\" >>>>>>>>>>>>>>> skipping \" + x.name);\n"
+ "return;\n"
+ "}\n"
+ "\n"
+ "params[ i % n ].push( x.name );\n"
+ "++i;\n"
+ "}\n"
+ ");\n"
+ "\n"
+ "// randomize ordering of the serialTests\n"
+ "params[ 0 ] = Array.shuffle( params[ 0 ] );\n"
+ "\n"
+ "for( var i in params ) {\n"
+ "params[ i ].unshift( i );\n"
+ "}\n"
+ "\n"
+ "return params;\n"
+ "}\n"
+ "\n"
+ "// runs a set of test files\n"
+ "// first argument is an identifier for this tester, remaining arguments are file names\n"
+ "ParallelTester.fileTester = function() {\n"
+ "var args = argumentsToArray( arguments );\n"
+ "var suite = args.shift();\n"
+ "args.forEach(\n"
+ "function( x ) {\n"
+ "print(\" S\" + suite + \" Test : \" + x + \" ...\");\n"
+ "var time = Date.timeFunc( function() { load(x); }, 1);\n"
+ "print(\" S\" + suite + \" Test : \" + x + \" \" + time + \"ms\" );\n"
+ "}\n"
+ ");\n"
+ "}\n"
+ "\n"
+ "// params: array of arrays, each element of which consists of a function followed\n"
+ "// by zero or more arguments to that function. Each function and its arguments will\n"
+ "// be called in a separate thread.\n"
+ "// msg: failure message\n"
+ "// newScopes: if true, each thread starts in a fresh scope\n"
+ "assert.parallelTests = function( params, msg, newScopes ) {\n"
+ "newScopes = newScopes || false;\n"
+ "var wrapper = function( fun, argv ) {\n"
+ "eval (\n"
+ "\"var z = function() {\" +\n"
+ "\"var __parallelTests__fun = \" + fun.toString() + \";\" +\n"
+ "\"var __parallelTests__argv = \" + tojson( argv ) + \";\" +\n"
+ "\"var __parallelTests__passed = false;\" +\n"
+ "\"try {\" +\n"
+ "\"__parallelTests__fun.apply( 0, __parallelTests__argv );\" +\n"
+ "\"__parallelTests__passed = true;\" +\n"
+ "\"} catch ( e ) {\" +\n"
+ "\"print( e );\" +\n"
+ "\"}\" +\n"
+ "\"return __parallelTests__passed;\" +\n"
+ "\"}\"\n"
+ ");\n"
+ "return z;\n"
+ "}\n"
+ "var runners = new Array();\n"
+ "for( var i in params ) {\n"
+ "var param = params[ i ];\n"
+ "var test = param.shift();\n"
+ "var t;\n"
+ "if ( newScopes )\n"
+ "t = new ScopedThread( wrapper( test, param ) );\n"
+ "else\n"
+ "t = new Thread( wrapper( test, param ) );\n"
+ "runners.push( t );\n"
+ "}\n"
+ "\n"
+ "runners.forEach( function( x ) { x.start(); } );\n"
+ "var nFailed = 0;\n"
+ "// v8 doesn't like it if we exit before all threads are joined (SERVER-529)\n"
+ "runners.forEach( function( x ) { if( !x.returnData() ) { ++nFailed; } } );\n"
+ "assert.eq( 0, nFailed, msg );\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "tojsononeline = function( x ){\n"
+ "return tojson( x , \" \" , true );\n"
+ "}\n"
+ "\n"
+ "tojson = function( x, indent , nolint ){\n"
+ "if ( x === null )\n"
+ "return \"null\";\n"
+ "\n"
+ "if ( x === undefined )\n"
+ "return \"undefined\";\n"
+ "\n"
+ "if (!indent)\n"
+ "indent = \"\";\n"
+ "\n"
+ "switch ( typeof x ) {\n"
+ "case \"string\": {\n"
+ "var s = \"\\\"\";\n"
+ "for ( var i=0; i<x.length; i++ ){\n"
+ "switch (x[i]){\n"
+ "case '\"': s += '\\\\\"'; break;\n"
+ "case '\\\\': s += '\\\\\\\\'; break;\n"
+ "case '\\b': s += '\\\\b'; break;\n"
+ "case '\\f': s += '\\\\f'; break;\n"
+ "case '\\n': s += '\\\\n'; break;\n"
+ "case '\\r': s += '\\\\r'; break;\n"
+ "case '\\t': s += '\\\\t'; break;\n"
+ "\n"
+ "default: {\n"
+ "var code = x.charCodeAt(i);\n"
+ "if (code < 0x20){\n"
+ "s += (code < 0x10 ? '\\\\u000' : '\\\\u00') + code.toString(16);\n"
+ "} else {\n"
+ "s += x[i];\n"
+ "}\n"
+ "}\n"
+ "}\n"
+ "}\n"
+ "return s + \"\\\"\";\n"
+ "}\n"
+ "case \"number\":\n"
+ "case \"boolean\":\n"
+ "return \"\" + x;\n"
+ "case \"object\":{\n"
+ "var s = tojsonObject( x, indent , nolint );\n"
+ "if ( ( nolint == null || nolint == true ) && s.length < 80 && ( indent == null || indent.length == 0 ) ){\n"
+ "s = s.replace( /[\\s\\r\\n ]+/gm , \" \" );\n"
+ "}\n"
+ "return s;\n"
+ "}\n"
+ "case \"function\":\n"
+ "return x.toString();\n"
+ "default:\n"
+ "throw \"tojson can't handle type \" + ( typeof x );\n"
+ "}\n"
+ "\n"
+ "}\n"
+ "\n"
+ "tojsonObject = function( x, indent , nolint ){\n"
+ "var lineEnding = nolint ? \" \" : \"\\n\";\n"
+ "var tabSpace = nolint ? \"\" : \"\\t\";\n"
+ "\n"
+ "assert.eq( ( typeof x ) , \"object\" , \"tojsonObject needs object, not [\" + ( typeof x ) + \"]\" );\n"
+ "\n"
+ "if (!indent)\n"
+ "indent = \"\";\n"
+ "\n"
+ "if ( typeof( x.tojson ) == \"function\" && x.tojson != tojson ) {\n"
+ "return x.tojson(indent,nolint);\n"
+ "}\n"
+ "\n"
+ "if ( x.constructor && typeof( x.constructor.tojson ) == \"function\" && x.constructor.tojson != tojson ) {\n"
+ "return x.constructor.tojson( x, indent , nolint );\n"
+ "}\n"
+ "\n"
+ "if ( x.toString() == \"[object MaxKey]\" )\n"
+ "return \"{ $maxKey : 1 }\";\n"
+ "if ( x.toString() == \"[object MinKey]\" )\n"
+ "return \"{ $minKey : 1 }\";\n"
+ "\n"
+ "var s = \"{\" + lineEnding;\n"
+ "\n"
+ "// push one level of indent\n"
+ "indent += tabSpace;\n"
+ "\n"
+ "var total = 0;\n"
+ "for ( var k in x ) total++;\n"
+ "if ( total == 0 ) {\n"
+ "s += indent + lineEnding;\n"
+ "}\n"
+ "\n"
+ "var keys = x;\n"
+ "if ( typeof( x._simpleKeys ) == \"function\" )\n"
+ "keys = x._simpleKeys();\n"
+ "var num = 1;\n"
+ "for ( var k in keys ){\n"
+ "\n"
+ "var val = x[k];\n"
+ "if ( val == DB.prototype || val == DBCollection.prototype )\n"
+ "continue;\n"
+ "\n"
+ "s += indent + \"\\\"\" + k + \"\\\" : \" + tojson( val, indent , nolint );\n"
+ "if (num != total) {\n"
+ "s += \",\";\n"
+ "num++;\n"
+ "}\n"
+ "s += lineEnding;\n"
+ "}\n"
+ "\n"
+ "// pop one level of indent\n"
+ "indent = indent.substring(1);\n"
+ "return s + indent + \"}\";\n"
+ "}\n"
+ "\n"
+ "shellPrint = function( x ){\n"
+ "it = x;\n"
+ "if ( x != undefined )\n"
+ "shellPrintHelper( x );\n"
+ "\n"
+ "if ( db ){\n"
+ "var e = db.getPrevError();\n"
+ "if ( e.err ) {\n"
+ "if( e.nPrev <= 1 )\n"
+ "print( \"error on last call: \" + tojson( e.err ) );\n"
+ "else\n"
+ "print( \"an error \" + tojson(e.err) + \" occurred \" + e.nPrev + \" operations back in the command invocation\" );\n"
+ "}\n"
+ "db.resetError();\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "printjson = function(x){\n"
+ "print( tojson( x ) );\n"
+ "}\n"
+ "\n"
+ "printjsononeline = function(x){\n"
+ "print( tojsononeline( x ) );\n"
+ "}\n"
+ "\n"
+ "shellPrintHelper = function (x) {\n"
+ "\n"
+ "if (typeof (x) == \"undefined\") {\n"
+ "\n"
+ "if (typeof (db) != \"undefined\" && db.getLastError) {\n"
+ "// explicit w:1 so that replset getLastErrorDefaults aren't used here which would be bad.\n"
+ "var e = db.getLastError(1);\n"
+ "if (e != null)\n"
+ "print(e);\n"
+ "}\n"
+ "\n"
+ "return;\n"
+ "}\n"
+ "\n"
+ "if (x == __magicNoPrint)\n"
+ "return;\n"
+ "\n"
+ "if (x == null) {\n"
+ "print(\"null\");\n"
+ "return;\n"
+ "}\n"
+ "\n"
+ "if (typeof x != \"object\")\n"
+ "return print(x);\n"
+ "\n"
+ "var p = x.shellPrint;\n"
+ "if (typeof p == \"function\")\n"
+ "return x.shellPrint();\n"
+ "\n"
+ "var p = x.tojson;\n"
+ "if (typeof p == \"function\")\n"
+ "print(x.tojson());\n"
+ "else\n"
+ "print(tojson(x));\n"
+ "}\n"
+ "\n"
+ "shellAutocomplete = function (/*prefix*/){ // outer scope function called on init. Actual function at end\n"
+ "\n"
+ "var universalMethods = \"constructor prototype toString valueOf toLocaleString hasOwnProperty propertyIsEnumerable\".split(' ');\n"
+ "\n"
+ "var builtinMethods = {}; // uses constructor objects as keys\n"
+ "builtinMethods[Array] = \"length concat join pop push reverse shift slice sort splice unshift indexOf lastIndexOf every filter forEach map some\".split(' ');\n"
+ "builtinMethods[Boolean] = \"\".split(' '); // nothing more than universal methods\n"
+ "builtinMethods[Date] = \"getDate getDay getFullYear getHours getMilliseconds getMinutes getMonth getSeconds getTime getTimezoneOffset getUTCDate getUTCDay getUTCFullYear getUTCHours getUTCMilliseconds getUTCMinutes getUTCMonth getUTCSeconds getYear parse setDate setFullYear setHours setMilliseconds setMinutes setMonth setSeconds setTime setUTCDate setUTCFullYear setUTCHours setUTCMilliseconds setUTCMinutes setUTCMonth setUTCSeconds setYear toDateString toGMTString toLocaleDateString toLocaleTimeString toTimeString toUTCString UTC\".split(' ');\n"
+ "builtinMethods[Math] = \"E LN2 LN10 LOG2E LOG10E PI SQRT1_2 SQRT2 abs acos asin atan atan2 ceil cos exp floor log max min pow random round sin sqrt tan\".split(' ');\n"
+ "builtinMethods[Number] = \"MAX_VALUE MIN_VALUE NEGATIVE_INFINITY POSITIVE_INFINITY toExponential toFixed toPrecision\".split(' ');\n"
+ "builtinMethods[RegExp] = \"global ignoreCase lastIndex multiline source compile exec test\".split(' ');\n"
+ "builtinMethods[String] = \"length charAt charCodeAt concat fromCharCode indexOf lastIndexOf match replace search slice split substr substring toLowerCase toUpperCase\".split(' ');\n"
+ "builtinMethods[Function] = \"call apply\".split(' ');\n"
+ "builtinMethods[Object] = \"bsonsize\".split(' ');\n"
+ "\n"
+ "builtinMethods[Mongo] = \"find update insert remove\".split(' ');\n"
+ "builtinMethods[BinData] = \"hex base64 length subtype\".split(' ');\n"
+ "builtinMethods[NumberLong] = \"toNumber\".split(' ');\n"
+ "\n"
+ "var extraGlobals = \"Infinity NaN undefined null true false decodeURI decodeURIComponent encodeURI encodeURIComponent escape eval isFinite isNaN parseFloat parseInt unescape Array Boolean Date Math Number RegExp String print load gc MinKey MaxKey Mongo NumberLong ObjectId DBPointer UUID BinData Map\".split(' ');\n"
+ "\n"
+ "var isPrivate = function(name){\n"
+ "if (shellAutocomplete.showPrivate) return false;\n"
+ "if (name == '_id') return false;\n"
+ "if (name[0] == '_') return true;\n"
+ "if (name[name.length-1] == '_') return true; // some native functions have an extra name_ method\n"
+ "return false;\n"
+ "}\n"
+ "\n"
+ "var customComplete = function(obj){\n"
+ "try {\n"
+ "if(obj.__proto__.constructor.autocomplete){\n"
+ "var ret = obj.constructor.autocomplete(obj);\n"
+ "if (ret.constructor != Array){\n"
+ "print(\"\\nautocompleters must return real Arrays\");\n"
+ "return [];\n"
+ "}\n"
+ "return ret;\n"
+ "} else {\n"
+ "return [];\n"
+ "}\n"
+ "} catch (e) {\n"
+ "// print(e); // uncomment if debugging custom completers\n"
+ "return [];\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "var worker = function( prefix ){\n"
+ "var global = (function(){return this;}).call(); // trick to get global object\n"
+ "\n"
+ "var curObj = global;\n"
+ "var parts = prefix.split('.');\n"
+ "for (var p=0; p < parts.length - 1; p++){ // doesn't include last part\n"
+ "curObj = curObj[parts[p]];\n"
+ "if (curObj == null)\n"
+ "return [];\n"
+ "}\n"
+ "\n"
+ "var lastPrefix = parts[parts.length-1] || '';\n"
+ "var begining = parts.slice(0, parts.length-1).join('.');\n"
+ "if (begining.length)\n"
+ "begining += '.';\n"
+ "\n"
+ "var possibilities = new Array().concat(\n"
+ "universalMethods,\n"
+ "Object.keySet(curObj),\n"
+ "Object.keySet(curObj.__proto__),\n"
+ "builtinMethods[curObj] || [], // curObj is a builtin constructor\n"
+ "builtinMethods[curObj.__proto__.constructor] || [], // curObj is made from a builtin constructor\n"
+ "curObj == global ? extraGlobals : [],\n"
+ "customComplete(curObj)\n"
+ ");\n"
+ "\n"
+ "var ret = [];\n"
+ "for (var i=0; i < possibilities.length; i++){\n"
+ "var p = possibilities[i];\n"
+ "if (typeof(curObj[p]) == \"undefined\" && curObj != global) continue; // extraGlobals aren't in the global object\n"
+ "if (p.length == 0 || p.length < lastPrefix.length) continue;\n"
+ "if (isPrivate(p)) continue;\n"
+ "if (p.match(/^[0-9]+$/)) continue; // don't array number indexes\n"
+ "if (p.substr(0, lastPrefix.length) != lastPrefix) continue;\n"
+ "\n"
+ "var completion = begining + p;\n"
+ "if(curObj[p] && curObj[p].constructor == Function && p != 'constructor')\n"
+ "completion += '(';\n"
+ "\n"
+ "ret.push(completion);\n"
+ "}\n"
+ "\n"
+ "return ret;\n"
+ "}\n"
+ "\n"
+ "// this is the actual function that gets assigned to shellAutocomplete\n"
+ "return function( prefix ){\n"
+ "try {\n"
+ "__autocomplete__ = worker(prefix).sort();\n"
+ "}catch (e){\n"
+ "print(\"exception durring autocomplete: \" + tojson(e.message));\n"
+ "__autocomplete__ = [];\n"
+ "}\n"
+ "}\n"
+ "}();\n"
+ "\n"
+ "shellAutocomplete.showPrivate = false; // toggle to show (useful when working on internals)\n"
+ "\n"
+ "shellHelper = function( command , rest , shouldPrint ){\n"
+ "command = command.trim();\n"
+ "var args = rest.trim().replace(/;$/,\"\").split( \"\\s+\" );\n"
+ "\n"
+ "if ( ! shellHelper[command] )\n"
+ "throw \"no command [\" + command + \"]\";\n"
+ "\n"
+ "var res = shellHelper[command].apply( null , args );\n"
+ "if ( shouldPrint ){\n"
+ "shellPrintHelper( res );\n"
+ "}\n"
+ "return res;\n"
+ "}\n"
+ "\n"
+ "shellHelper.use = function (dbname) {\n"
+ "var s = \"\" + dbname;\n"
+ "if (s == \"\") {\n"
+ "print(\"bad use parameter\");\n"
+ "return;\n"
+ "}\n"
+ "db = db.getMongo().getDB(dbname);\n"
+ "print(\"switched to db \" + db.getName());\n"
+ "}\n"
+ "\n"
+ "shellHelper.it = function(){\n"
+ "if ( typeof( ___it___ ) == \"undefined\" || ___it___ == null ){\n"
+ "print( \"no cursor\" );\n"
+ "return;\n"
+ "}\n"
+ "shellPrintHelper( ___it___ );\n"
+ "}\n"
+ "\n"
+ "shellHelper.show = function (what) {\n"
+ "assert(typeof what == \"string\");\n"
+ "\n"
+ "if (what == \"profile\") {\n"
+ "if (db.system.profile.count() == 0) {\n"
+ "print(\"db.system.profile is empty\");\n"
+ "print(\"Use db.setProfilingLevel(2) will enable profiling\");\n"
+ "print(\"Use db.system.profile.find() to show raw profile entries\");\n"
+ "}\n"
+ "else {\n"
+ "print();\n"
+ "db.system.profile.find({ millis: { $gt: 0} }).sort({ $natural: -1 }).limit(5).forEach(function (x) { print(\"\" + x.millis + \"ms \" + String(x.ts).substring(0, 24)); print(x.info); print(\"\\n\"); })\n"
+ "}\n"
+ "return \"\";\n"
+ "}\n"
+ "\n"
+ "if (what == \"users\") {\n"
+ "db.system.users.find().forEach(printjson);\n"
+ "return \"\";\n"
+ "}\n"
+ "\n"
+ "if (what == \"collections\" || what == \"tables\") {\n"
+ "db.getCollectionNames().forEach(function (x) { print(x) });\n"
+ "return \"\";\n"
+ "}\n"
+ "\n"
+ "if (what == \"dbs\") {\n"
+ "var dbs = db.getMongo().getDBs();\n"
+ "var size = {};\n"
+ "dbs.databases.forEach(function (x) { size[x.name] = x.sizeOnDisk; });\n"
+ "var names = dbs.databases.map(function (z) { return z.name; }).sort();\n"
+ "names.forEach(function (n) {\n"
+ "if (size[n] > 1) {\n"
+ "print(n + \"\\t\" + size[n] / 1024 / 1024 / 1024 + \"GB\");\n"
+ "} else {\n"
+ "print(n + \"\\t(empty)\");\n"
+ "}\n"
+ "});\n"
+ "//db.getMongo().getDBNames().sort().forEach(function (x) { print(x) });\n"
+ "return \"\";\n"
+ "}\n"
+ "\n"
+ "throw \"don't know how to show [\" + what + \"]\";\n"
+ "\n"
+ "}\n"
+ "\n"
+ "if ( typeof( Map ) == \"undefined\" ){\n"
+ "Map = function(){\n"
+ "this._data = {};\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "Map.hash = function( val ){\n"
+ "if ( ! val )\n"
+ "return val;\n"
+ "\n"
+ "switch ( typeof( val ) ){\n"
+ "case 'string':\n"
+ "case 'number':\n"
+ "case 'date':\n"
+ "return val.toString();\n"
+ "case 'object':\n"
+ "case 'array':\n"
+ "var s = \"\";\n"
+ "for ( var k in val ){\n"
+ "s += k + val[k];\n"
+ "}\n"
+ "return s;\n"
+ "}\n"
+ "\n"
+ "throw \"can't hash : \" + typeof( val );\n"
+ "}\n"
+ "\n"
+ "Map.prototype.put = function( key , value ){\n"
+ "var o = this._get( key );\n"
+ "var old = o.value;\n"
+ "o.value = value;\n"
+ "return old;\n"
+ "}\n"
+ "\n"
+ "Map.prototype.get = function( key ){\n"
+ "return this._get( key ).value;\n"
+ "}\n"
+ "\n"
+ "Map.prototype._get = function( key ){\n"
+ "var h = Map.hash( key );\n"
+ "var a = this._data[h];\n"
+ "if ( ! a ){\n"
+ "a = [];\n"
+ "this._data[h] = a;\n"
+ "}\n"
+ "\n"
+ "for ( var i=0; i<a.length; i++ ){\n"
+ "if ( friendlyEqual( key , a[i].key ) ){\n"
+ "return a[i];\n"
+ "}\n"
+ "}\n"
+ "var o = { key : key , value : null };\n"
+ "a.push( o );\n"
+ "return o;\n"
+ "}\n"
+ "\n"
+ "Map.prototype.values = function(){\n"
+ "var all = [];\n"
+ "for ( var k in this._data ){\n"
+ "this._data[k].forEach( function(z){ all.push( z.value ); } );\n"
+ "}\n"
+ "return all;\n"
+ "}\n"
+ "\n"
+ "if ( typeof( gc ) == \"undefined\" ){\n"
+ "gc = function(){\n"
+ "print( \"warning: using noop gc()\" );\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "\n"
+ "Math.sigFig = function( x , N ){\n"
+ "if ( ! N ){\n"
+ "N = 3;\n"
+ "}\n"
+ "var p = Math.pow( 10, N - Math.ceil( Math.log( Math.abs(x) ) / Math.log( 10 )) );\n"
+ "return Math.round(x*p)/p;\n"
+ "}\n"
+ "\n"
+ "Random = function() {}\n"
+ "\n"
+ "// set random seed\n"
+ "Random.srand = function( s ) { _srand( s ); }\n"
+ "\n"
+ "// random number 0 <= r < 1\n"
+ "Random.rand = function() { return _rand(); }\n"
+ "\n"
+ "// random integer 0 <= r < n\n"
+ "Random.randInt = function( n ) { return Math.floor( Random.rand() * n ); }\n"
+ "\n"
+ "Random.setRandomSeed = function( s ) {\n"
+ "s = s || new Date().getTime();\n"
+ "print( \"setting random seed: \" + s );\n"
+ "Random.srand( s );\n"
+ "}\n"
+ "\n"
+ "// generate a random value from the exponential distribution with the specified mean\n"
+ "Random.genExp = function( mean ) {\n"
+ "return -Math.log( Random.rand() ) * mean;\n"
+ "}\n"
+ "\n"
+ "Geo = {};\n"
+ "Geo.distance = function( a , b ){\n"
+ "var ax = null;\n"
+ "var ay = null;\n"
+ "var bx = null;\n"
+ "var by = null;\n"
+ "\n"
+ "for ( var key in a ){\n"
+ "if ( ax == null )\n"
+ "ax = a[key];\n"
+ "else if ( ay == null )\n"
+ "ay = a[key];\n"
+ "}\n"
+ "\n"
+ "for ( var key in b ){\n"
+ "if ( bx == null )\n"
+ "bx = b[key];\n"
+ "else if ( by == null )\n"
+ "by = b[key];\n"
+ "}\n"
+ "\n"
+ "return Math.sqrt( Math.pow( by - ay , 2 ) +\n"
+ "Math.pow( bx - ax , 2 ) );\n"
+ "}\n"
+ "\n"
+ "Geo.sphereDistance = function( a , b ){\n"
+ "var ax = null;\n"
+ "var ay = null;\n"
+ "var bx = null;\n"
+ "var by = null;\n"
+ "\n"
+ "// TODO swap order of x and y when done on server\n"
+ "for ( var key in a ){\n"
+ "if ( ax == null )\n"
+ "ax = a[key] * (Math.PI/180);\n"
+ "else if ( ay == null )\n"
+ "ay = a[key] * (Math.PI/180);\n"
+ "}\n"
+ "\n"
+ "for ( var key in b ){\n"
+ "if ( bx == null )\n"
+ "bx = b[key] * (Math.PI/180);\n"
+ "else if ( by == null )\n"
+ "by = b[key] * (Math.PI/180);\n"
+ "}\n"
+ "\n"
+ "var sin_x1=Math.sin(ax), cos_x1=Math.cos(ax);\n"
+ "var sin_y1=Math.sin(ay), cos_y1=Math.cos(ay);\n"
+ "var sin_x2=Math.sin(bx), cos_x2=Math.cos(bx);\n"
+ "var sin_y2=Math.sin(by), cos_y2=Math.cos(by);\n"
+ "\n"
+ "var cross_prod =\n"
+ "(cos_y1*cos_x1 * cos_y2*cos_x2) +\n"
+ "(cos_y1*sin_x1 * cos_y2*sin_x2) +\n"
+ "(sin_y1 * sin_y2);\n"
+ "\n"
+ "if (cross_prod >= 1 || cross_prod <= -1){\n"
+ "// fun with floats\n"
+ "assert( Math.abs(cross_prod)-1 < 1e-6 );\n"
+ "return cross_prod > 0 ? 0 : Math.PI;\n"
+ "}\n"
+ "\n"
+ "return Math.acos(cross_prod);\n"
+ "}\n"
+ "\n"
+ "rs = function () { return \"try rs.help()\"; }\n"
+ "\n"
+ "rs.help = function () {\n"
+ "print(\"\\trs.status() { replSetGetStatus : 1 } checks repl set status\");\n"
+ "print(\"\\trs.initiate() { replSetInitiate : null } initiates set with default settings\");\n"
+ "print(\"\\trs.initiate(cfg) { replSetInitiate : cfg } initiates set with configuration cfg\");\n"
+ "print(\"\\trs.conf() get the current configuration object from local.system.replset\");\n"
+ "print(\"\\trs.reconfig(cfg) updates the configuration of a running replica set with cfg (disconnects)\");\n"
+ "print(\"\\trs.add(hostportstr) add a new member to the set with default attributes (disconnects)\");\n"
+ "print(\"\\trs.add(membercfgobj) add a new member to the set with extra attributes (disconnects)\");\n"
+ "print(\"\\trs.addArb(hostportstr) add a new member which is arbiterOnly:true (disconnects)\");\n"
+ "print(\"\\trs.stepDown([secs]) step down as primary (momentarily) (disconnects)\");\n"
+ "print(\"\\trs.freeze(secs) make a node ineligible to become primary for the time specified\");\n"
+ "print(\"\\trs.remove(hostportstr) remove a host from the replica set (disconnects)\");\n"
+ "print(\"\\trs.slaveOk() shorthand for db.getMongo().setSlaveOk()\");\n"
+ "print();\n"
+ "print(\"\\tdb.isMaster() check who is primary\");\n"
+ "print();\n"
+ "print(\"\\treconfiguration helpers disconnect from the database so the shell will display\");\n"
+ "print(\"\\tan error, even if the command succeeds.\");\n"
+ "print(\"\\tsee also http://<mongod_host>:28017/_replSet for additional diagnostic info\");\n"
+ "}\n"
+ "rs.slaveOk = function () { return db.getMongo().setSlaveOk(); }\n"
+ "rs.status = function () { return db._adminCommand(\"replSetGetStatus\"); }\n"
+ "rs.isMaster = function () { return db.isMaster(); }\n"
+ "rs.initiate = function (c) { return db._adminCommand({ replSetInitiate: c }); }\n"
+ "rs.reconfig = function (cfg) {\n"
+ "cfg.version = rs.conf().version + 1;\n"
+ "var res = null;\n"
+ "try {\n"
+ "res = db.adminCommand({ replSetReconfig: cfg });\n"
+ "}\n"
+ "catch (e) {\n"
+ "print(\"shell got exception during reconfig: \" + e);\n"
+ "print(\"in some circumstances, the primary steps down and closes connections on a reconfig\");\n"
+ "}\n"
+ "return res;\n"
+ "}\n"
+ "rs.add = function (hostport, arb) {\n"
+ "var cfg = hostport;\n"
+ "\n"
+ "var local = db.getSisterDB(\"local\");\n"
+ "assert(local.system.replset.count() <= 1, \"error: local.system.replset has unexpected contents\");\n"
+ "var c = local.system.replset.findOne();\n"
+ "assert(c, \"no config object retrievable from local.system.replset\");\n"
+ "\n"
+ "c.version++;\n"
+ "\n"
+ "var max = 0;\n"
+ "for (var i in c.members)\n"
+ "if (c.members[i]._id > max) max = c.members[i]._id;\n"
+ "if (isString(hostport)) {\n"
+ "cfg = { _id: max + 1, host: hostport };\n"
+ "if (arb)\n"
+ "cfg.arbiterOnly = true;\n"
+ "}\n"
+ "c.members.push(cfg);\n"
+ "var res = null;\n"
+ "try {\n"
+ "res = db.adminCommand({ replSetReconfig: c });\n"
+ "}\n"
+ "catch (e) {\n"
+ "print(\"shell got exception during reconfig: \" + e);\n"
+ "print(\"in some circumstances, the primary steps down and closes connections on a reconfig\");\n"
+ "}\n"
+ "return res;\n"
+ "}\n"
+ "rs.stepDown = function (secs) { return db._adminCommand({ replSetStepDown:secs||60}); }\n"
+ "rs.freeze = function (secs) { return db._adminCommand({replSetFreeze:secs}); }\n"
+ "rs.addArb = function (hn) { return this.add(hn, true); }\n"
+ "rs.conf = function () { return db.getSisterDB(\"local\").system.replset.findOne(); }\n"
+ "\n"
+ "rs.remove = function (hn) {\n"
+ "var local = db.getSisterDB(\"local\");\n"
+ "assert(local.system.replset.count() <= 1, \"error: local.system.replset has unexpected contents\");\n"
+ "var c = local.system.replset.findOne();\n"
+ "assert(c, \"no config object retrievable from local.system.replset\");\n"
+ "c.version++;\n"
+ "\n"
+ "for (var i in c.members) {\n"
+ "if (c.members[i].host == hn) {\n"
+ "c.members.splice(i, 1);\n"
+ "return db._adminCommand({ replSetReconfig : c});\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "return \"error: couldn't find \"+hn+\" in \"+tojson(c.members);\n"
+ "};\n"
+ "\n"
+ "help = shellHelper.help = function (x) {\n"
+ "if (x == \"mr\") {\n"
+ "print(\"\\nSee also http://www.mongodb.org/display/DOCS/MapReduce\");\n"
+ "print(\"\\nfunction mapf() {\");\n"
+ "print(\" // 'this' holds current document to inspect\");\n"
+ "print(\" emit(key, value);\");\n"
+ "print(\"}\");\n"
+ "print(\"\\nfunction reducef(key,value_array) {\");\n"
+ "print(\" return reduced_value;\");\n"
+ "print(\"}\");\n"
+ "print(\"\\ndb.mycollection.mapReduce(mapf, reducef[, options])\");\n"
+ "print(\"\\noptions\");\n"
+ "print(\"{[query : <query filter object>]\");\n"
+ "print(\" [, sort : <sort the query. useful for optimization>]\");\n"
+ "print(\" [, limit : <number of objects to return from collection>]\");\n"
+ "print(\" [, out : <output-collection name>]\");\n"
+ "print(\" [, keeptemp: <true|false>]\");\n"
+ "print(\" [, finalize : <finalizefunction>]\");\n"
+ "print(\" [, scope : <object where fields go into javascript global scope >]\");\n"
+ "print(\" [, verbose : true]}\\n\");\n"
+ "return;\n"
+ "} else if (x == \"connect\") {\n"
+ "print(\"\\nNormally one specifies the server on the mongo shell command line. Run mongo --help to see those options.\");\n"
+ "print(\"Additional connections may be opened:\\n\");\n"
+ "print(\" var x = new Mongo('host[:port]');\");\n"
+ "print(\" var mydb = x.getDB('mydb');\");\n"
+ "print(\" or\");\n"
+ "print(\" var mydb = connect('host[:port]/mydb');\");\n"
+ "print(\"\\nNote: the REPL prompt only auto-reports getLastError() for the shell command line connection.\\n\");\n"
+ "return;\n"
+ "}\n"
+ "else if (x == \"misc\") {\n"
+ "print(\"\\tb = new BinData(subtype,base64str) create a BSON BinData value\");\n"
+ "print(\"\\tb.subtype() the BinData subtype (0..255)\");\n"
+ "print(\"\\tb.length() length of the BinData data in bytes\");\n"
+ "print(\"\\tb.hex() the data as a hex encoded string\");\n"
+ "print(\"\\tb.base64() the data as a base 64 encoded string\");\n"
+ "print(\"\\tb.toString()\");\n"
+ "print();\n"
+ "print(\"\\to = new ObjectId() create a new ObjectId\");\n"
+ "print(\"\\to.getTimestamp() return timestamp derived from first 32 bits of the OID\");\n"
+ "print(\"\\to.isObjectId()\");\n"
+ "print(\"\\to.toString()\");\n"
+ "print(\"\\to.equals(otherid)\");\n"
+ "return;\n"
+ "}\n"
+ "else if (x == \"admin\") {\n"
+ "print(\"\\tls([path]) list files\");\n"
+ "print(\"\\tpwd() returns current directory\");\n"
+ "print(\"\\tlistFiles([path]) returns file list\");\n"
+ "print(\"\\thostname() returns name of this host\");\n"
+ "print(\"\\tcat(fname) returns contents of text file as a string\");\n"
+ "print(\"\\tremoveFile(f) delete a file or directory\");\n"
+ "print(\"\\tload(jsfilename) load and execute a .js file\");\n"
+ "print(\"\\trun(program[, args...]) spawn a program and wait for its completion\");\n"
+ "print(\"\\tsleep(m) sleep m milliseconds\");\n"
+ "print(\"\\tgetMemInfo() diagnostic\");\n"
+ "return;\n"
+ "}\n"
+ "else if (x == \"test\") {\n"
+ "print(\"\\tstartMongodEmpty(args) DELETES DATA DIR and then starts mongod\");\n"
+ "print(\"\\t returns a connection to the new server\");\n"
+ "print(\"\\tstartMongodTest(port,dir,options)\");\n"
+ "print(\"\\t DELETES DATA DIR\");\n"
+ "print(\"\\t automatically picks port #s starting at 27000 and increasing\");\n"
+ "print(\"\\t or you can specify the port as the first arg\");\n"
+ "print(\"\\t dir is /data/db/<port>/ if not specified as the 2nd arg\");\n"
+ "print(\"\\t returns a connection to the new server\");\n"
+ "print(\"\\tresetDbpath(dirpathstr) deletes everything under the dir specified including subdirs\");\n"
+ "print(\"\\tstopMongoProgram(port[, signal])\");\n"
+ "return;\n"
+ "}\n"
+ "else if (x == \"\") {\n"
+ "print(\"\\t\" + \"db.help() help on db methods\");\n"
+ "print(\"\\t\" + \"db.mycoll.help() help on collection methods\");\n"
+ "print(\"\\t\" + \"rs.help() help on replica set methods\");\n"
+ "print(\"\\t\" + \"help connect connecting to a db help\");\n"
+ "print(\"\\t\" + \"help admin administrative help\");\n"
+ "print(\"\\t\" + \"help misc misc things to know\");\n"
+ "print(\"\\t\" + \"help mr mapreduce help\");\n"
+ "print();\n"
+ "print(\"\\t\" + \"show dbs show database names\");\n"
+ "print(\"\\t\" + \"show collections show collections in current database\");\n"
+ "print(\"\\t\" + \"show users show users in current database\");\n"
+ "print(\"\\t\" + \"show profile show most recent system.profile entries with time >= 1ms\");\n"
+ "print(\"\\t\" + \"use <db_name> set current database\");\n"
+ "print(\"\\t\" + \"db.foo.find() list objects in collection foo\");\n"
+ "print(\"\\t\" + \"db.foo.find( { a : 1 } ) list objects in foo where a == 1\");\n"
+ "print(\"\\t\" + \"it result of the last line evaluated; use to further iterate\");\n"
+ "print(\"\\t\" + \"DBQuery.shellBatchSize = x set default number of items to display on shell\");\n"
+ "print(\"\\t\" + \"exit quit the mongo shell\");\n"
+ "}\n"
+ "else\n"
+ "print(\"unknown help option\");\n"
+ "}\n"
+ ;
+ extern const JSFile utils;
+ const JSFile utils = { "shell/utils.js" , _jscode_raw_utils };
+ const StringData _jscode_raw_db =
+ "// db.js\n"
+ "\n"
+ "if ( typeof DB == \"undefined\" ){\n"
+ "DB = function( mongo , name ){\n"
+ "this._mongo = mongo;\n"
+ "this._name = name;\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "DB.prototype.getMongo = function(){\n"
+ "assert( this._mongo , \"why no mongo!\" );\n"
+ "return this._mongo;\n"
+ "}\n"
+ "\n"
+ "DB.prototype.getSiblingDB = function( name ){\n"
+ "return this.getMongo().getDB( name );\n"
+ "}\n"
+ "\n"
+ "DB.prototype.getSisterDB = DB.prototype.getSiblingDB;\n"
+ "\n"
+ "DB.prototype.getName = function(){\n"
+ "return this._name;\n"
+ "}\n"
+ "\n"
+ "DB.prototype.stats = function(){\n"
+ "return this.runCommand( { dbstats : 1 } );\n"
+ "}\n"
+ "\n"
+ "DB.prototype.getCollection = function( name ){\n"
+ "return new DBCollection( this._mongo , this , name , this._name + \".\" + name );\n"
+ "}\n"
+ "\n"
+ "DB.prototype.commandHelp = function( name ){\n"
+ "var c = {};\n"
+ "c[name] = 1;\n"
+ "c.help = true;\n"
+ "return this.runCommand( c ).help;\n"
+ "}\n"
+ "\n"
+ "DB.prototype.runCommand = function( obj ){\n"
+ "if ( typeof( obj ) == \"string\" ){\n"
+ "var n = {};\n"
+ "n[obj] = 1;\n"
+ "obj = n;\n"
+ "}\n"
+ "return this.getCollection( \"$cmd\" ).findOne( obj );\n"
+ "}\n"
+ "\n"
+ "DB.prototype._dbCommand = DB.prototype.runCommand;\n"
+ "\n"
+ "DB.prototype.adminCommand = function( obj ){\n"
+ "if ( this._name == \"admin\" )\n"
+ "return this.runCommand( obj );\n"
+ "return this.getSiblingDB( \"admin\" ).runCommand( obj );\n"
+ "}\n"
+ "\n"
+ "DB.prototype._adminCommand = DB.prototype.adminCommand; // alias old name\n"
+ "\n"
+ "DB.prototype.addUser = function( username , pass, readOnly ){\n"
+ "readOnly = readOnly || false;\n"
+ "var c = this.getCollection( \"system.users\" );\n"
+ "\n"
+ "var u = c.findOne( { user : username } ) || { user : username };\n"
+ "u.readOnly = readOnly;\n"
+ "u.pwd = hex_md5( username + \":mongo:\" + pass );\n"
+ "print( tojson( u ) );\n"
+ "\n"
+ "c.save( u );\n"
+ "}\n"
+ "\n"
+ "DB.prototype.removeUser = function( username ){\n"
+ "this.getCollection( \"system.users\" ).remove( { user : username } );\n"
+ "}\n"
+ "\n"
+ "DB.prototype.__pwHash = function( nonce, username, pass ) {\n"
+ "return hex_md5( nonce + username + hex_md5( username + \":mongo:\" + pass ) );\n"
+ "}\n"
+ "\n"
+ "DB.prototype.auth = function( username , pass ){\n"
+ "var n = this.runCommand( { getnonce : 1 } );\n"
+ "\n"
+ "var a = this.runCommand(\n"
+ "{\n"
+ "authenticate : 1 ,\n"
+ "user : username ,\n"
+ "nonce : n.nonce ,\n"
+ "key : this.__pwHash( n.nonce, username, pass )\n"
+ "}\n"
+ ");\n"
+ "\n"
+ "return a.ok;\n"
+ "}\n"
+ "\n"
+ "/**\n"
+ "Create a new collection in the database. Normally, collection creation is automatic. You would\n"
+ "use this function if you wish to specify special options on creation.\n"
+ "\n"
+ "If the collection already exists, no action occurs.\n"
+ "\n"
+ "<p>Options:</p>\n"
+ "<ul>\n"
+ "<li>\n"
+ "size: desired initial extent size for the collection. Must be <= 1000000000.\n"
+ "for fixed size (capped) collections, this size is the total/max size of the\n"
+ "collection.\n"
+ "</li>\n"
+ "<li>\n"
+ "capped: if true, this is a capped collection (where old data rolls out).\n"
+ "</li>\n"
+ "<li> max: maximum number of objects if capped (optional).</li>\n"
+ "</ul>\n"
+ "\n"
+ "<p>Example: </p>\n"
+ "\n"
+ "<code>db.createCollection(\"movies\", { size: 10 * 1024 * 1024, capped:true } );</code>\n"
+ "\n"
+ "* @param {String} name Name of new collection to create\n"
+ "* @param {Object} options Object with options for call. Options are listed above.\n"
+ "* @return SOMETHING_FIXME\n"
+ "*/\n"
+ "DB.prototype.createCollection = function(name, opt) {\n"
+ "var options = opt || {};\n"
+ "var cmd = { create: name, capped: options.capped, size: options.size, max: options.max };\n"
+ "var res = this._dbCommand(cmd);\n"
+ "return res;\n"
+ "}\n"
+ "\n"
+ "/**\n"
+ "* @deprecated use getProfilingStatus\n"
+ "* Returns the current profiling level of this database\n"
+ "* @return SOMETHING_FIXME or null on error\n"
+ "*/\n"
+ "DB.prototype.getProfilingLevel = function() {\n"
+ "var res = this._dbCommand( { profile: -1 } );\n"
+ "return res ? res.was : null;\n"
+ "}\n"
+ "\n"
+ "/**\n"
+ "* @return the current profiling status\n"
+ "* example { was : 0, slowms : 100 }\n"
+ "* @return SOMETHING_FIXME or null on error\n"
+ "*/\n"
+ "DB.prototype.getProfilingStatus = function() {\n"
+ "var res = this._dbCommand( { profile: -1 } );\n"
+ "if ( ! res.ok )\n"
+ "throw \"profile command failed: \" + tojson( res );\n"
+ "delete res.ok\n"
+ "return res;\n"
+ "}\n"
+ "\n"
+ "\n"
+ "/**\n"
+ "Erase the entire database. (!)\n"
+ "\n"
+ "* @return Object returned has member ok set to true if operation succeeds, false otherwise.\n"
+ "*/\n"
+ "DB.prototype.dropDatabase = function() {\n"
+ "if ( arguments.length )\n"
+ "throw \"dropDatabase doesn't take arguments\";\n"
+ "return this._dbCommand( { dropDatabase: 1 } );\n"
+ "}\n"
+ "\n"
+ "\n"
+ "DB.prototype.shutdownServer = function() {\n"
+ "if( \"admin\" != this._name ){\n"
+ "return \"shutdown command only works with the admin database; try 'use admin'\";\n"
+ "}\n"
+ "\n"
+ "try {\n"
+ "var res = this._dbCommand(\"shutdown\");\n"
+ "if( res )\n"
+ "throw \"shutdownServer failed: \" + res.errmsg;\n"
+ "throw \"shutdownServer failed\";\n"
+ "}\n"
+ "catch ( e ){\n"
+ "assert( tojson( e ).indexOf( \"error doing query: failed\" ) >= 0 , \"unexpected error: \" + tojson( e ) );\n"
+ "print( \"server should be down...\" );\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "/**\n"
+ "Clone database on another server to here.\n"
+ "<p>\n"
+ "Generally, you should dropDatabase() first as otherwise the cloned information will MERGE\n"
+ "into whatever data is already present in this database. (That is however a valid way to use\n"
+ "clone if you are trying to do something intentionally, such as union three non-overlapping\n"
+ "databases into one.)\n"
+ "<p>\n"
+ "This is a low level administrative function will is not typically used.\n"
+ "\n"
+ "* @param {String} from Where to clone from (dbhostname[:port]). May not be this database\n"
+ "(self) as you cannot clone to yourself.\n"
+ "* @return Object returned has member ok set to true if operation succeeds, false otherwise.\n"
+ "* See also: db.copyDatabase()\n"
+ "*/\n"
+ "DB.prototype.cloneDatabase = function(from) {\n"
+ "assert( isString(from) && from.length );\n"
+ "//this.resetIndexCache();\n"
+ "return this._dbCommand( { clone: from } );\n"
+ "}\n"
+ "\n"
+ "\n"
+ "/**\n"
+ "Clone collection on another server to here.\n"
+ "<p>\n"
+ "Generally, you should drop() first as otherwise the cloned information will MERGE\n"
+ "into whatever data is already present in this collection. (That is however a valid way to use\n"
+ "clone if you are trying to do something intentionally, such as union three non-overlapping\n"
+ "collections into one.)\n"
+ "<p>\n"
+ "This is a low level administrative function is not typically used.\n"
+ "\n"
+ "* @param {String} from mongod instance from which to clnoe (dbhostname:port). May\n"
+ "not be this mongod instance, as clone from self is not allowed.\n"
+ "* @param {String} collection name of collection to clone.\n"
+ "* @param {Object} query query specifying which elements of collection are to be cloned.\n"
+ "* @return Object returned has member ok set to true if operation succeeds, false otherwise.\n"
+ "* See also: db.cloneDatabase()\n"
+ "*/\n"
+ "DB.prototype.cloneCollection = function(from, collection, query) {\n"
+ "assert( isString(from) && from.length );\n"
+ "assert( isString(collection) && collection.length );\n"
+ "collection = this._name + \".\" + collection;\n"
+ "query = query || {};\n"
+ "//this.resetIndexCache();\n"
+ "return this._dbCommand( { cloneCollection:collection, from:from, query:query } );\n"
+ "}\n"
+ "\n"
+ "\n"
+ "/**\n"
+ "Copy database from one server or name to another server or name.\n"
+ "\n"
+ "Generally, you should dropDatabase() first as otherwise the copied information will MERGE\n"
+ "into whatever data is already present in this database (and you will get duplicate objects\n"
+ "in collections potentially.)\n"
+ "\n"
+ "For security reasons this function only works when executed on the \"admin\" db. However,\n"
+ "if you have access to said db, you can copy any database from one place to another.\n"
+ "\n"
+ "This method provides a way to \"rename\" a database by copying it to a new db name and\n"
+ "location. Additionally, it effectively provides a repair facility.\n"
+ "\n"
+ "* @param {String} fromdb database name from which to copy.\n"
+ "* @param {String} todb database name to copy to.\n"
+ "* @param {String} fromhost hostname of the database (and optionally, \":port\") from which to\n"
+ "copy the data. default if unspecified is to copy from self.\n"
+ "* @return Object returned has member ok set to true if operation succeeds, false otherwise.\n"
+ "* See also: db.clone()\n"
+ "*/\n"
+ "DB.prototype.copyDatabase = function(fromdb, todb, fromhost, username, password) {\n"
+ "assert( isString(fromdb) && fromdb.length );\n"
+ "assert( isString(todb) && todb.length );\n"
+ "fromhost = fromhost || \"\";\n"
+ "if ( username && password ) {\n"
+ "var n = this._adminCommand( { copydbgetnonce : 1, fromhost:fromhost } );\n"
+ "return this._adminCommand( { copydb:1, fromhost:fromhost, fromdb:fromdb, todb:todb, username:username, nonce:n.nonce, key:this.__pwHash( n.nonce, username, password ) } );\n"
+ "} else {\n"
+ "return this._adminCommand( { copydb:1, fromhost:fromhost, fromdb:fromdb, todb:todb } );\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "/**\n"
+ "Repair database.\n"
+ "\n"
+ "* @return Object returned has member ok set to true if operation succeeds, false otherwise.\n"
+ "*/\n"
+ "DB.prototype.repairDatabase = function() {\n"
+ "return this._dbCommand( { repairDatabase: 1 } );\n"
+ "}\n"
+ "\n"
+ "\n"
+ "DB.prototype.help = function() {\n"
+ "print(\"DB methods:\");\n"
+ "print(\"\\tdb.addUser(username, password[, readOnly=false])\");\n"
+ "print(\"\\tdb.auth(username, password)\");\n"
+ "print(\"\\tdb.cloneDatabase(fromhost)\");\n"
+ "print(\"\\tdb.commandHelp(name) returns the help for the command\");\n"
+ "print(\"\\tdb.copyDatabase(fromdb, todb, fromhost)\");\n"
+ "print(\"\\tdb.createCollection(name, { size : ..., capped : ..., max : ... } )\");\n"
+ "print(\"\\tdb.currentOp() displays the current operation in the db\");\n"
+ "print(\"\\tdb.dropDatabase()\");\n"
+ "print(\"\\tdb.eval(func, args) run code server-side\");\n"
+ "print(\"\\tdb.getCollection(cname) same as db['cname'] or db.cname\");\n"
+ "print(\"\\tdb.getCollectionNames()\");\n"
+ "print(\"\\tdb.getLastError() - just returns the err msg string\");\n"
+ "print(\"\\tdb.getLastErrorObj() - return full status object\");\n"
+ "print(\"\\tdb.getMongo() get the server connection object\");\n"
+ "print(\"\\tdb.getMongo().setSlaveOk() allow this connection to read from the nonmaster member of a replica pair\");\n"
+ "print(\"\\tdb.getName()\");\n"
+ "print(\"\\tdb.getPrevError()\");\n"
+ "print(\"\\tdb.getProfilingLevel() - deprecated\");\n"
+ "print(\"\\tdb.getProfilingStatus() - returns if profiling is on and slow threshold \");\n"
+ "print(\"\\tdb.getReplicationInfo()\");\n"
+ "print(\"\\tdb.getSiblingDB(name) get the db at the same server as this one\");\n"
+ "print(\"\\tdb.isMaster() check replica primary status\");\n"
+ "print(\"\\tdb.killOp(opid) kills the current operation in the db\");\n"
+ "print(\"\\tdb.listCommands() lists all the db commands\");\n"
+ "print(\"\\tdb.printCollectionStats()\");\n"
+ "print(\"\\tdb.printReplicationInfo()\");\n"
+ "print(\"\\tdb.printSlaveReplicationInfo()\");\n"
+ "print(\"\\tdb.printShardingStatus()\");\n"
+ "print(\"\\tdb.removeUser(username)\");\n"
+ "print(\"\\tdb.repairDatabase()\");\n"
+ "print(\"\\tdb.resetError()\");\n"
+ "print(\"\\tdb.runCommand(cmdObj) run a database command. if cmdObj is a string, turns it into { cmdObj : 1 }\");\n"
+ "print(\"\\tdb.serverStatus()\");\n"
+ "print(\"\\tdb.setProfilingLevel(level,<slowms>) 0=off 1=slow 2=all\");\n"
+ "print(\"\\tdb.shutdownServer()\");\n"
+ "print(\"\\tdb.stats()\");\n"
+ "print(\"\\tdb.version() current version of the server\");\n"
+ "print(\"\\tdb.getMongo().setSlaveOk() allow queries on a replication slave server\");\n"
+ "\n"
+ "return __magicNoPrint;\n"
+ "}\n"
+ "\n"
+ "DB.prototype.printCollectionStats = function(){\n"
+ "var mydb = this;\n"
+ "this.getCollectionNames().forEach(\n"
+ "function(z){\n"
+ "print( z );\n"
+ "printjson( mydb.getCollection(z).stats() );\n"
+ "print( \"---\" );\n"
+ "}\n"
+ ");\n"
+ "}\n"
+ "\n"
+ "/**\n"
+ "* <p> Set profiling level for your db. Profiling gathers stats on query performance. </p>\n"
+ "*\n"
+ "* <p>Default is off, and resets to off on a database restart -- so if you want it on,\n"
+ "* turn it on periodically. </p>\n"
+ "*\n"
+ "* <p>Levels :</p>\n"
+ "* <ul>\n"
+ "* <li>0=off</li>\n"
+ "* <li>1=log very slow operations; optional argument slowms specifies slowness threshold</li>\n"
+ "* <li>2=log all</li>\n"
+ "* @param {String} level Desired level of profiling\n"
+ "* @param {String} slowms For slow logging, query duration that counts as slow (default 100ms)\n"
+ "* @return SOMETHING_FIXME or null on error\n"
+ "*/\n"
+ "DB.prototype.setProfilingLevel = function(level,slowms) {\n"
+ "\n"
+ "if (level < 0 || level > 2) {\n"
+ "throw { dbSetProfilingException : \"input level \" + level + \" is out of range [0..2]\" };\n"
+ "}\n"
+ "\n"
+ "var cmd = { profile: level };\n"
+ "if ( slowms )\n"
+ "cmd[\"slowms\"] = slowms;\n"
+ "return this._dbCommand( cmd );\n"
+ "}\n"
+ "\n"
+ "\n"
+ "/**\n"
+ "* <p> Evaluate a js expression at the database server.</p>\n"
+ "*\n"
+ "* <p>Useful if you need to touch a lot of data lightly; in such a scenario\n"
+ "* the network transfer of the data could be a bottleneck. A good example\n"
+ "* is \"select count(*)\" -- can be done server side via this mechanism.\n"
+ "* </p>\n"
+ "*\n"
+ "* <p>\n"
+ "* If the eval fails, an exception is thrown of the form:\n"
+ "* </p>\n"
+ "* <code>{ dbEvalException: { retval: functionReturnValue, ok: num [, errno: num] [, errmsg: str] } }</code>\n"
+ "*\n"
+ "* <p>Example: </p>\n"
+ "* <code>print( \"mycount: \" + db.eval( function(){db.mycoll.find({},{_id:ObjId()}).length();} );</code>\n"
+ "*\n"
+ "* @param {Function} jsfunction Javascript function to run on server. Note this it not a closure, but rather just \"code\".\n"
+ "* @return result of your function, or null if error\n"
+ "*\n"
+ "*/\n"
+ "DB.prototype.eval = function(jsfunction) {\n"
+ "var cmd = { $eval : jsfunction };\n"
+ "if ( arguments.length > 1 ) {\n"
+ "cmd.args = argumentsToArray( arguments ).slice(1);\n"
+ "}\n"
+ "\n"
+ "var res = this._dbCommand( cmd );\n"
+ "\n"
+ "if (!res.ok)\n"
+ "throw tojson( res );\n"
+ "\n"
+ "return res.retval;\n"
+ "}\n"
+ "\n"
+ "DB.prototype.dbEval = DB.prototype.eval;\n"
+ "\n"
+ "\n"
+ "/**\n"
+ "*\n"
+ "* <p>\n"
+ "* Similar to SQL group by. For example: </p>\n"
+ "*\n"
+ "* <code>select a,b,sum(c) csum from coll where active=1 group by a,b</code>\n"
+ "*\n"
+ "* <p>\n"
+ "* corresponds to the following in 10gen:\n"
+ "* </p>\n"
+ "*\n"
+ "* <code>\n"
+ "db.group(\n"
+ "{\n"
+ "ns: \"coll\",\n"
+ "key: { a:true, b:true },\n"
+ "// keyf: ...,\n"
+ "cond: { active:1 },\n"
+ "reduce: function(obj,prev) { prev.csum += obj.c; } ,\n"
+ "initial: { csum: 0 }\n"
+ "});\n"
+ "</code>\n"
+ "*\n"
+ "*\n"
+ "* <p>\n"
+ "* An array of grouped items is returned. The array must fit in RAM, thus this function is not\n"
+ "* suitable when the return set is extremely large.\n"
+ "* </p>\n"
+ "* <p>\n"
+ "* To order the grouped data, simply sort it client side upon return.\n"
+ "* <p>\n"
+ "Defaults\n"
+ "cond may be null if you want to run against all rows in the collection\n"
+ "keyf is a function which takes an object and returns the desired key. set either key or keyf (not both).\n"
+ "* </p>\n"
+ "*/\n"
+ "DB.prototype.groupeval = function(parmsObj) {\n"
+ "\n"
+ "var groupFunction = function() {\n"
+ "var parms = args[0];\n"
+ "var c = db[parms.ns].find(parms.cond||{});\n"
+ "var map = new Map();\n"
+ "var pks = parms.key ? Object.keySet( parms.key ) : null;\n"
+ "var pkl = pks ? pks.length : 0;\n"
+ "var key = {};\n"
+ "\n"
+ "while( c.hasNext() ) {\n"
+ "var obj = c.next();\n"
+ "if ( pks ) {\n"
+ "for( var i=0; i<pkl; i++ ){\n"
+ "var k = pks[i];\n"
+ "key[k] = obj[k];\n"
+ "}\n"
+ "}\n"
+ "else {\n"
+ "key = parms.$keyf(obj);\n"
+ "}\n"
+ "\n"
+ "var aggObj = map.get(key);\n"
+ "if( aggObj == null ) {\n"
+ "var newObj = Object.extend({}, key); // clone\n"
+ "aggObj = Object.extend(newObj, parms.initial)\n"
+ "map.put( key , aggObj );\n"
+ "}\n"
+ "parms.$reduce(obj, aggObj);\n"
+ "}\n"
+ "\n"
+ "return map.values();\n"
+ "}\n"
+ "\n"
+ "return this.eval(groupFunction, this._groupFixParms( parmsObj ));\n"
+ "}\n"
+ "\n"
+ "DB.prototype.groupcmd = function( parmsObj ){\n"
+ "var ret = this.runCommand( { \"group\" : this._groupFixParms( parmsObj ) } );\n"
+ "if ( ! ret.ok ){\n"
+ "throw \"group command failed: \" + tojson( ret );\n"
+ "}\n"
+ "return ret.retval;\n"
+ "}\n"
+ "\n"
+ "DB.prototype.group = DB.prototype.groupcmd;\n"
+ "\n"
+ "DB.prototype._groupFixParms = function( parmsObj ){\n"
+ "var parms = Object.extend({}, parmsObj);\n"
+ "\n"
+ "if( parms.reduce ) {\n"
+ "parms.$reduce = parms.reduce; // must have $ to pass to db\n"
+ "delete parms.reduce;\n"
+ "}\n"
+ "\n"
+ "if( parms.keyf ) {\n"
+ "parms.$keyf = parms.keyf;\n"
+ "delete parms.keyf;\n"
+ "}\n"
+ "\n"
+ "return parms;\n"
+ "}\n"
+ "\n"
+ "DB.prototype.resetError = function(){\n"
+ "return this.runCommand( { reseterror : 1 } );\n"
+ "}\n"
+ "\n"
+ "DB.prototype.forceError = function(){\n"
+ "return this.runCommand( { forceerror : 1 } );\n"
+ "}\n"
+ "\n"
+ "DB.prototype.getLastError = function( w , wtimeout ){\n"
+ "var res = this.getLastErrorObj( w , wtimeout );\n"
+ "if ( ! res.ok )\n"
+ "throw \"getlasterror failed: \" + tojson( res );\n"
+ "return res.err;\n"
+ "}\n"
+ "DB.prototype.getLastErrorObj = function( w , wtimeout ){\n"
+ "var cmd = { getlasterror : 1 };\n"
+ "if ( w ){\n"
+ "cmd.w = w;\n"
+ "if ( wtimeout )\n"
+ "cmd.wtimeout = wtimeout;\n"
+ "}\n"
+ "var res = this.runCommand( cmd );\n"
+ "\n"
+ "if ( ! res.ok )\n"
+ "throw \"getlasterror failed: \" + tojson( res );\n"
+ "return res;\n"
+ "}\n"
+ "DB.prototype.getLastErrorCmd = DB.prototype.getLastErrorObj;\n"
+ "\n"
+ "\n"
+ "/* Return the last error which has occurred, even if not the very last error.\n"
+ "\n"
+ "Returns:\n"
+ "{ err : <error message>, nPrev : <how_many_ops_back_occurred>, ok : 1 }\n"
+ "\n"
+ "result.err will be null if no error has occurred.\n"
+ "*/\n"
+ "DB.prototype.getPrevError = function(){\n"
+ "return this.runCommand( { getpreverror : 1 } );\n"
+ "}\n"
+ "\n"
+ "DB.prototype.getCollectionNames = function(){\n"
+ "var all = [];\n"
+ "\n"
+ "var nsLength = this._name.length + 1;\n"
+ "\n"
+ "var c = this.getCollection( \"system.namespaces\" ).find();\n"
+ "while ( c.hasNext() ){\n"
+ "var name = c.next().name;\n"
+ "\n"
+ "if ( name.indexOf( \"$\" ) >= 0 && name.indexOf( \".oplog.$\" ) < 0 )\n"
+ "continue;\n"
+ "\n"
+ "all.push( name.substring( nsLength ) );\n"
+ "}\n"
+ "\n"
+ "return all.sort();\n"
+ "}\n"
+ "\n"
+ "DB.prototype.tojson = function(){\n"
+ "return this._name;\n"
+ "}\n"
+ "\n"
+ "DB.prototype.toString = function(){\n"
+ "return this._name;\n"
+ "}\n"
+ "\n"
+ "DB.prototype.isMaster = function () { return this.runCommand(\"isMaster\"); }\n"
+ "\n"
+ "DB.prototype.currentOp = function(){\n"
+ "return db.$cmd.sys.inprog.findOne();\n"
+ "}\n"
+ "DB.prototype.currentOP = DB.prototype.currentOp;\n"
+ "\n"
+ "DB.prototype.killOp = function(op) {\n"
+ "if( !op )\n"
+ "throw \"no opNum to kill specified\";\n"
+ "return db.$cmd.sys.killop.findOne({'op':op});\n"
+ "}\n"
+ "DB.prototype.killOP = DB.prototype.killOp;\n"
+ "\n"
+ "DB.tsToSeconds = function(x){\n"
+ "if ( x.t && x.i )\n"
+ "return x.t / 1000;\n"
+ "return x / 4294967296; // low 32 bits are ordinal #s within a second\n"
+ "}\n"
+ "\n"
+ "/**\n"
+ "Get a replication log information summary.\n"
+ "<p>\n"
+ "This command is for the database/cloud administer and not applicable to most databases.\n"
+ "It is only used with the local database. One might invoke from the JS shell:\n"
+ "<pre>\n"
+ "use local\n"
+ "db.getReplicationInfo();\n"
+ "</pre>\n"
+ "It is assumed that this database is a replication master -- the information returned is\n"
+ "about the operation log stored at local.oplog.$main on the replication master. (It also\n"
+ "works on a machine in a replica pair: for replica pairs, both machines are \"masters\" from\n"
+ "an internal database perspective.\n"
+ "<p>\n"
+ "* @return Object timeSpan: time span of the oplog from start to end if slave is more out\n"
+ "* of date than that, it can't recover without a complete resync\n"
+ "*/\n"
+ "DB.prototype.getReplicationInfo = function() {\n"
+ "var db = this.getSiblingDB(\"local\");\n"
+ "\n"
+ "var result = { };\n"
+ "var oplog;\n"
+ "if (db.system.namespaces.findOne({name:\"local.oplog.rs\"}) != null) {\n"
+ "oplog = 'oplog.rs';\n"
+ "}\n"
+ "else if (db.system.namespaces.findOne({name:\"local.oplog.$main\"}) != null) {\n"
+ "oplog = 'oplog.$main';\n"
+ "}\n"
+ "else {\n"
+ "result.errmsg = \"neither master/slave nor replica set replication detected\";\n"
+ "return result;\n"
+ "}\n"
+ "\n"
+ "var ol_entry = db.system.namespaces.findOne({name:\"local.\"+oplog});\n"
+ "if( ol_entry && ol_entry.options ) {\n"
+ "result.logSizeMB = ol_entry.options.size / ( 1024 * 1024 );\n"
+ "} else {\n"
+ "result.errmsg = \"local.\"+oplog+\", or its options, not found in system.namespaces collection\";\n"
+ "return result;\n"
+ "}\n"
+ "ol = db.getCollection(oplog);\n"
+ "\n"
+ "result.usedMB = ol.stats().size / ( 1024 * 1024 );\n"
+ "result.usedMB = Math.ceil( result.usedMB * 100 ) / 100;\n"
+ "\n"
+ "var firstc = ol.find().sort({$natural:1}).limit(1);\n"
+ "var lastc = ol.find().sort({$natural:-1}).limit(1);\n"
+ "if( !firstc.hasNext() || !lastc.hasNext() ) {\n"
+ "result.errmsg = \"objects not found in local.oplog.$main -- is this a new and empty db instance?\";\n"
+ "result.oplogMainRowCount = ol.count();\n"
+ "return result;\n"
+ "}\n"
+ "\n"
+ "var first = firstc.next();\n"
+ "var last = lastc.next();\n"
+ "{\n"
+ "var tfirst = first.ts;\n"
+ "var tlast = last.ts;\n"
+ "\n"
+ "if( tfirst && tlast ) {\n"
+ "tfirst = DB.tsToSeconds( tfirst );\n"
+ "tlast = DB.tsToSeconds( tlast );\n"
+ "result.timeDiff = tlast - tfirst;\n"
+ "result.timeDiffHours = Math.round(result.timeDiff / 36)/100;\n"
+ "result.tFirst = (new Date(tfirst*1000)).toString();\n"
+ "result.tLast = (new Date(tlast*1000)).toString();\n"
+ "result.now = Date();\n"
+ "}\n"
+ "else {\n"
+ "result.errmsg = \"ts element not found in oplog objects\";\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "return result;\n"
+ "};\n"
+ "\n"
+ "DB.prototype.printReplicationInfo = function() {\n"
+ "var result = this.getReplicationInfo();\n"
+ "if( result.errmsg ) {\n"
+ "print(tojson(result));\n"
+ "return;\n"
+ "}\n"
+ "print(\"configured oplog size: \" + result.logSizeMB + \"MB\");\n"
+ "print(\"log length start to end: \" + result.timeDiff + \"secs (\" + result.timeDiffHours + \"hrs)\");\n"
+ "print(\"oplog first event time: \" + result.tFirst);\n"
+ "print(\"oplog last event time: \" + result.tLast);\n"
+ "print(\"now: \" + result.now);\n"
+ "}\n"
+ "\n"
+ "DB.prototype.printSlaveReplicationInfo = function() {\n"
+ "function getReplLag(st) {\n"
+ "var now = new Date();\n"
+ "print(\"\\t syncedTo: \" + st.toString() );\n"
+ "var ago = (now-st)/1000;\n"
+ "var hrs = Math.round(ago/36)/100;\n"
+ "print(\"\\t\\t = \" + Math.round(ago) + \"secs ago (\" + hrs + \"hrs)\");\n"
+ "};\n"
+ "\n"
+ "function g(x) {\n"
+ "assert( x , \"how could this be null (printSlaveReplicationInfo gx)\" )\n"
+ "print(\"source: \" + x.host);\n"
+ "if ( x.syncedTo ){\n"
+ "var st = new Date( DB.tsToSeconds( x.syncedTo ) * 1000 );\n"
+ "getReplLag(st);\n"
+ "}\n"
+ "else {\n"
+ "print( \"\\t doing initial sync\" );\n"
+ "}\n"
+ "};\n"
+ "\n"
+ "function r(x) {\n"
+ "assert( x , \"how could this be null (printSlaveReplicationInfo rx)\" );\n"
+ "if ( x.state == 1 ) {\n"
+ "return;\n"
+ "}\n"
+ "\n"
+ "print(\"source: \" + x.name);\n"
+ "if ( x.optime ) {\n"
+ "getReplLag(x.optimeDate);\n"
+ "}\n"
+ "else {\n"
+ "print( \"\\t no replication info, yet. State: \" + x.stateStr );\n"
+ "}\n"
+ "};\n"
+ "\n"
+ "var L = this.getSiblingDB(\"local\");\n"
+ "if( L.sources.count() != 0 ) {\n"
+ "L.sources.find().forEach(g);\n"
+ "}\n"
+ "else if (L.system.replset.count() != 0) {\n"
+ "var status = this.adminCommand({'replSetGetStatus' : 1});\n"
+ "status.members.forEach(r);\n"
+ "}\n"
+ "else {\n"
+ "print(\"local.sources is empty; is this db a --slave?\");\n"
+ "return;\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "DB.prototype.serverBuildInfo = function(){\n"
+ "return this._adminCommand( \"buildinfo\" );\n"
+ "}\n"
+ "\n"
+ "DB.prototype.serverStatus = function(){\n"
+ "return this._adminCommand( \"serverStatus\" );\n"
+ "}\n"
+ "\n"
+ "DB.prototype.serverCmdLineOpts = function(){\n"
+ "return this._adminCommand( \"getCmdLineOpts\" );\n"
+ "}\n"
+ "\n"
+ "DB.prototype.version = function(){\n"
+ "return this.serverBuildInfo().version;\n"
+ "}\n"
+ "\n"
+ "DB.prototype.listCommands = function(){\n"
+ "var x = this.runCommand( \"listCommands\" );\n"
+ "for ( var name in x.commands ){\n"
+ "var c = x.commands[name];\n"
+ "\n"
+ "var s = name + \": \";\n"
+ "\n"
+ "switch ( c.lockType ){\n"
+ "case -1: s += \"read-lock\"; break;\n"
+ "case 0: s += \"no-lock\"; break;\n"
+ "case 1: s += \"write-lock\"; break;\n"
+ "default: s += c.lockType;\n"
+ "}\n"
+ "\n"
+ "if (c.adminOnly) s += \" adminOnly \";\n"
+ "if (c.adminOnly) s += \" slaveOk \";\n"
+ "\n"
+ "s += \"\\n \";\n"
+ "s += c.help.replace(/\\n/g, '\\n ');\n"
+ "s += \"\\n\";\n"
+ "\n"
+ "print( s );\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "DB.prototype.printShardingStatus = function(){\n"
+ "printShardingStatus( this.getSiblingDB( \"config\" ) );\n"
+ "}\n"
+ "\n"
+ "DB.autocomplete = function(obj){\n"
+ "var colls = obj.getCollectionNames();\n"
+ "var ret=[];\n"
+ "for (var i=0; i<colls.length; i++){\n"
+ "if (colls[i].match(/^[a-zA-Z0-9_.\\$]+$/))\n"
+ "ret.push(colls[i]);\n"
+ "}\n"
+ "return ret;\n"
+ "}\n"
+ ;
+ extern const JSFile db;
+ const JSFile db = { "shell/db.js" , _jscode_raw_db };
+ const StringData _jscode_raw_mongo =
+ "// mongo.js\n"
+ "\n"
+ "// NOTE 'Mongo' may be defined here or in MongoJS.cpp. Add code to init, not to this constructor.\n"
+ "if ( typeof Mongo == \"undefined\" ){\n"
+ "Mongo = function( host ){\n"
+ "this.init( host );\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "if ( ! Mongo.prototype ){\n"
+ "throw \"Mongo.prototype not defined\";\n"
+ "}\n"
+ "\n"
+ "if ( ! Mongo.prototype.find )\n"
+ "Mongo.prototype.find = function( ns , query , fields , limit , skip ){ throw \"find not implemented\"; }\n"
+ "if ( ! Mongo.prototype.insert )\n"
+ "Mongo.prototype.insert = function( ns , obj ){ throw \"insert not implemented\"; }\n"
+ "if ( ! Mongo.prototype.remove )\n"
+ "Mongo.prototype.remove = function( ns , pattern ){ throw \"remove not implemented;\" }\n"
+ "if ( ! Mongo.prototype.update )\n"
+ "Mongo.prototype.update = function( ns , query , obj , upsert ){ throw \"update not implemented;\" }\n"
+ "\n"
+ "if ( typeof mongoInject == \"function\" ){\n"
+ "mongoInject( Mongo.prototype );\n"
+ "}\n"
+ "\n"
+ "Mongo.prototype.setSlaveOk = function() {\n"
+ "this.slaveOk = true;\n"
+ "}\n"
+ "\n"
+ "Mongo.prototype.getDB = function( name ){\n"
+ "return new DB( this , name );\n"
+ "}\n"
+ "\n"
+ "Mongo.prototype.getDBs = function(){\n"
+ "var res = this.getDB( \"admin\" ).runCommand( { \"listDatabases\" : 1 } );\n"
+ "if ( ! res.ok )\n"
+ "throw \"listDatabases failed:\" + tojson( res );\n"
+ "return res;\n"
+ "}\n"
+ "\n"
+ "Mongo.prototype.adminCommand = function( cmd ){\n"
+ "return this.getDB( \"admin\" ).runCommand( cmd );\n"
+ "}\n"
+ "\n"
+ "Mongo.prototype.getDBNames = function(){\n"
+ "return this.getDBs().databases.map(\n"
+ "function(z){\n"
+ "return z.name;\n"
+ "}\n"
+ ");\n"
+ "}\n"
+ "\n"
+ "Mongo.prototype.getCollection = function(ns){\n"
+ "var idx = ns.indexOf( \".\" );\n"
+ "if ( idx < 0 )\n"
+ "throw \"need . in ns\";\n"
+ "var db = ns.substring( 0 , idx );\n"
+ "var c = ns.substring( idx + 1 );\n"
+ "return this.getDB( db ).getCollection( c );\n"
+ "}\n"
+ "\n"
+ "Mongo.prototype.toString = function(){\n"
+ "return \"connection to \" + this.host;\n"
+ "}\n"
+ "Mongo.prototype.tojson = Mongo.prototype.toString;\n"
+ "\n"
+ "connect = function( url , user , pass ){\n"
+ "chatty( \"connecting to: \" + url )\n"
+ "\n"
+ "if ( user && ! pass )\n"
+ "throw \"you specified a user and not a password. either you need a password, or you're using the old connect api\";\n"
+ "\n"
+ "var idx = url.lastIndexOf( \"/\" );\n"
+ "\n"
+ "var db;\n"
+ "\n"
+ "if ( idx < 0 )\n"
+ "db = new Mongo().getDB( url );\n"
+ "else\n"
+ "db = new Mongo( url.substring( 0 , idx ) ).getDB( url.substring( idx + 1 ) );\n"
+ "\n"
+ "if ( user && pass ){\n"
+ "if ( ! db.auth( user , pass ) ){\n"
+ "throw \"couldn't login\";\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "return db;\n"
+ "}\n"
+ ;
+ extern const JSFile mongo;
+ const JSFile mongo = { "shell/mongo.js" , _jscode_raw_mongo };
+ const StringData _jscode_raw_mr =
+ "// mr.js\n"
+ "\n"
+ "MR = {};\n"
+ "\n"
+ "MR.init = function(){\n"
+ "$max = 0;\n"
+ "$arr = [];\n"
+ "emit = MR.emit;\n"
+ "$numEmits = 0;\n"
+ "$numReduces = 0;\n"
+ "$numReducesToDB = 0;\n"
+ "gc(); // this is just so that keep memory size sane\n"
+ "}\n"
+ "\n"
+ "MR.cleanup = function(){\n"
+ "MR.init();\n"
+ "gc();\n"
+ "}\n"
+ "\n"
+ "MR.emit = function(k,v){\n"
+ "$numEmits++;\n"
+ "var num = nativeHelper.apply( get_num_ , [ k ] );\n"
+ "var data = $arr[num];\n"
+ "if ( ! data ){\n"
+ "data = { key : k , values : new Array(1000) , count : 0 };\n"
+ "$arr[num] = data;\n"
+ "}\n"
+ "data.values[data.count++] = v;\n"
+ "$max = Math.max( $max , data.count );\n"
+ "}\n"
+ "\n"
+ "MR.doReduce = function( useDB ){\n"
+ "$numReduces++;\n"
+ "if ( useDB )\n"
+ "$numReducesToDB++;\n"
+ "$max = 0;\n"
+ "for ( var i=0; i<$arr.length; i++){\n"
+ "var data = $arr[i];\n"
+ "if ( ! data )\n"
+ "continue;\n"
+ "\n"
+ "if ( useDB ){\n"
+ "var x = tempcoll.findOne( { _id : data.key } );\n"
+ "if ( x ){\n"
+ "data.values[data.count++] = x.value;\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "var r = $reduce( data.key , data.values.slice( 0 , data.count ) );\n"
+ "if ( r && r.length && r[0] ){\n"
+ "data.values = r;\n"
+ "data.count = r.length;\n"
+ "}\n"
+ "else{\n"
+ "data.values[0] = r;\n"
+ "data.count = 1;\n"
+ "}\n"
+ "\n"
+ "$max = Math.max( $max , data.count );\n"
+ "\n"
+ "if ( useDB ){\n"
+ "if ( data.count == 1 ){\n"
+ "tempcoll.save( { _id : data.key , value : data.values[0] } );\n"
+ "}\n"
+ "else {\n"
+ "tempcoll.save( { _id : data.key , value : data.values.slice( 0 , data.count ) } );\n"
+ "}\n"
+ "}\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "MR.check = function(){\n"
+ "if ( $max < 2000 && $arr.length < 1000 ){\n"
+ "return 0;\n"
+ "}\n"
+ "MR.doReduce();\n"
+ "if ( $max < 2000 && $arr.length < 1000 ){\n"
+ "return 1;\n"
+ "}\n"
+ "MR.doReduce( true );\n"
+ "$arr = [];\n"
+ "$max = 0;\n"
+ "reset_num();\n"
+ "gc();\n"
+ "return 2;\n"
+ "}\n"
+ "\n"
+ "MR.finalize = function(){\n"
+ "tempcoll.find().forEach(\n"
+ "function(z){\n"
+ "z.value = $finalize( z._id , z.value );\n"
+ "tempcoll.save( z );\n"
+ "}\n"
+ ");\n"
+ "}\n"
+ ;
+ extern const JSFile mr;
+ const JSFile mr = { "shell/mr.js" , _jscode_raw_mr };
+ const StringData _jscode_raw_query =
+ "// query.js\n"
+ "\n"
+ "if ( typeof DBQuery == \"undefined\" ){\n"
+ "DBQuery = function( mongo , db , collection , ns , query , fields , limit , skip , batchSize ){\n"
+ "\n"
+ "this._mongo = mongo; // 0\n"
+ "this._db = db; // 1\n"
+ "this._collection = collection; // 2\n"
+ "this._ns = ns; // 3\n"
+ "\n"
+ "this._query = query || {}; // 4\n"
+ "this._fields = fields; // 5\n"
+ "this._limit = limit || 0; // 6\n"
+ "this._skip = skip || 0; // 7\n"
+ "this._batchSize = batchSize || 0;\n"
+ "\n"
+ "this._cursor = null;\n"
+ "this._numReturned = 0;\n"
+ "this._special = false;\n"
+ "this._prettyShell = false;\n"
+ "}\n"
+ "print( \"DBQuery probably won't have array access \" );\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.help = function () {\n"
+ "print(\"find() modifiers\")\n"
+ "print(\"\\t.sort( {...} )\")\n"
+ "print(\"\\t.limit( n )\")\n"
+ "print(\"\\t.skip( n )\")\n"
+ "print(\"\\t.count() - total # of objects matching query, ignores skip,limit\")\n"
+ "print(\"\\t.size() - total # of objects cursor would return, honors skip,limit\")\n"
+ "print(\"\\t.explain([verbose])\")\n"
+ "print(\"\\t.hint(...)\")\n"
+ "print(\"\\t.showDiskLoc() - adds a $diskLoc field to each returned object\")\n"
+ "print(\"\\nCursor methods\");\n"
+ "print(\"\\t.forEach( func )\")\n"
+ "print(\"\\t.print() - output to console in full pretty format\")\n"
+ "print(\"\\t.map( func )\")\n"
+ "print(\"\\t.hasNext()\")\n"
+ "print(\"\\t.next()\")\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.clone = function(){\n"
+ "var q = new DBQuery( this._mongo , this._db , this._collection , this._ns ,\n"
+ "this._query , this._fields ,\n"
+ "this._limit , this._skip , this._batchSize );\n"
+ "q._special = this._special;\n"
+ "return q;\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype._ensureSpecial = function(){\n"
+ "if ( this._special )\n"
+ "return;\n"
+ "\n"
+ "var n = { query : this._query };\n"
+ "this._query = n;\n"
+ "this._special = true;\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype._checkModify = function(){\n"
+ "if ( this._cursor )\n"
+ "throw \"query already executed\";\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype._exec = function(){\n"
+ "if ( ! this._cursor ){\n"
+ "assert.eq( 0 , this._numReturned );\n"
+ "this._cursor = this._mongo.find( this._ns , this._query , this._fields , this._limit , this._skip , this._batchSize );\n"
+ "this._cursorSeen = 0;\n"
+ "}\n"
+ "return this._cursor;\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.limit = function( limit ){\n"
+ "this._checkModify();\n"
+ "this._limit = limit;\n"
+ "return this;\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.batchSize = function( batchSize ){\n"
+ "this._checkModify();\n"
+ "this._batchSize = batchSize;\n"
+ "return this;\n"
+ "}\n"
+ "\n"
+ "\n"
+ "DBQuery.prototype.skip = function( skip ){\n"
+ "this._checkModify();\n"
+ "this._skip = skip;\n"
+ "return this;\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.hasNext = function(){\n"
+ "this._exec();\n"
+ "\n"
+ "if ( this._limit > 0 && this._cursorSeen >= this._limit )\n"
+ "return false;\n"
+ "var o = this._cursor.hasNext();\n"
+ "return o;\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.next = function(){\n"
+ "this._exec();\n"
+ "\n"
+ "var o = this._cursor.hasNext();\n"
+ "if ( o )\n"
+ "this._cursorSeen++;\n"
+ "else\n"
+ "throw \"error hasNext: \" + o;\n"
+ "\n"
+ "var ret = this._cursor.next();\n"
+ "if ( ret.$err && this._numReturned == 0 && ! this.hasNext() )\n"
+ "throw \"error: \" + tojson( ret );\n"
+ "\n"
+ "this._numReturned++;\n"
+ "return ret;\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.objsLeftInBatch = function(){\n"
+ "this._exec();\n"
+ "\n"
+ "var ret = this._cursor.objsLeftInBatch();\n"
+ "if ( ret.$err )\n"
+ "throw \"error: \" + tojson( ret );\n"
+ "\n"
+ "return ret;\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.toArray = function(){\n"
+ "if ( this._arr )\n"
+ "return this._arr;\n"
+ "\n"
+ "var a = [];\n"
+ "while ( this.hasNext() )\n"
+ "a.push( this.next() );\n"
+ "this._arr = a;\n"
+ "return a;\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.count = function( applySkipLimit ){\n"
+ "var cmd = { count: this._collection.getName() };\n"
+ "if ( this._query ){\n"
+ "if ( this._special )\n"
+ "cmd.query = this._query.query;\n"
+ "else\n"
+ "cmd.query = this._query;\n"
+ "}\n"
+ "cmd.fields = this._fields || {};\n"
+ "\n"
+ "if ( applySkipLimit ){\n"
+ "if ( this._limit )\n"
+ "cmd.limit = this._limit;\n"
+ "if ( this._skip )\n"
+ "cmd.skip = this._skip;\n"
+ "}\n"
+ "\n"
+ "var res = this._db.runCommand( cmd );\n"
+ "if( res && res.n != null ) return res.n;\n"
+ "throw \"count failed: \" + tojson( res );\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.size = function(){\n"
+ "return this.count( true );\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.countReturn = function(){\n"
+ "var c = this.count();\n"
+ "\n"
+ "if ( this._skip )\n"
+ "c = c - this._skip;\n"
+ "\n"
+ "if ( this._limit > 0 && this._limit < c )\n"
+ "return this._limit;\n"
+ "\n"
+ "return c;\n"
+ "}\n"
+ "\n"
+ "/**\n"
+ "* iterative count - only for testing\n"
+ "*/\n"
+ "DBQuery.prototype.itcount = function(){\n"
+ "var num = 0;\n"
+ "while ( this.hasNext() ){\n"
+ "num++;\n"
+ "this.next();\n"
+ "}\n"
+ "return num;\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.length = function(){\n"
+ "return this.toArray().length;\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype._addSpecial = function( name , value ){\n"
+ "this._ensureSpecial();\n"
+ "this._query[name] = value;\n"
+ "return this;\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.sort = function( sortBy ){\n"
+ "return this._addSpecial( \"orderby\" , sortBy );\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.hint = function( hint ){\n"
+ "return this._addSpecial( \"$hint\" , hint );\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.min = function( min ) {\n"
+ "return this._addSpecial( \"$min\" , min );\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.max = function( max ) {\n"
+ "return this._addSpecial( \"$max\" , max );\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.showDiskLoc = function() {\n"
+ "return this._addSpecial( \"$showDiskLoc\" , true);\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.forEach = function( func ){\n"
+ "while ( this.hasNext() )\n"
+ "func( this.next() );\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.map = function( func ){\n"
+ "var a = [];\n"
+ "while ( this.hasNext() )\n"
+ "a.push( func( this.next() ) );\n"
+ "return a;\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.arrayAccess = function( idx ){\n"
+ "return this.toArray()[idx];\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.explain = function (verbose) {\n"
+ "/* verbose=true --> include allPlans, oldPlan fields */\n"
+ "var n = this.clone();\n"
+ "n._ensureSpecial();\n"
+ "n._query.$explain = true;\n"
+ "n._limit = Math.abs(n._limit) * -1;\n"
+ "var e = n.next();\n"
+ "\n"
+ "function cleanup(obj){\n"
+ "if (typeof(obj) != 'object'){\n"
+ "return;\n"
+ "}\n"
+ "\n"
+ "delete obj.allPlans;\n"
+ "delete obj.oldPlan;\n"
+ "\n"
+ "if (typeof(obj.length) == 'number'){\n"
+ "for (var i=0; i < obj.length; i++){\n"
+ "cleanup(obj[i]);\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "if (obj.shards){\n"
+ "for (var key in obj.shards){\n"
+ "cleanup(obj.shards[key]);\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "if (obj.clauses){\n"
+ "cleanup(obj.clauses);\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "if (!verbose)\n"
+ "cleanup(e);\n"
+ "\n"
+ "return e;\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.snapshot = function(){\n"
+ "this._ensureSpecial();\n"
+ "this._query.$snapshot = true;\n"
+ "return this;\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.pretty = function(){\n"
+ "this._prettyShell = true;\n"
+ "return this;\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.shellPrint = function(){\n"
+ "try {\n"
+ "var n = 0;\n"
+ "while ( this.hasNext() && n < DBQuery.shellBatchSize ){\n"
+ "var s = this._prettyShell ? tojson( this.next() ) : tojson( this.next() , \"\" , true );\n"
+ "print( s );\n"
+ "n++;\n"
+ "}\n"
+ "if ( this.hasNext() ){\n"
+ "print( \"has more\" );\n"
+ "___it___ = this;\n"
+ "}\n"
+ "else {\n"
+ "___it___ = null;\n"
+ "}\n"
+ "}\n"
+ "catch ( e ){\n"
+ "print( e );\n"
+ "}\n"
+ "\n"
+ "}\n"
+ "\n"
+ "DBQuery.prototype.toString = function(){\n"
+ "return \"DBQuery: \" + this._ns + \" -> \" + tojson( this.query );\n"
+ "}\n"
+ "\n"
+ "DBQuery.shellBatchSize = 20;\n"
+ ;
+ extern const JSFile query;
+ const JSFile query = { "shell/query.js" , _jscode_raw_query };
+ const StringData _jscode_raw_collection =
+ "// @file collection.js - DBCollection support in the mongo shell\n"
+ "// db.colName is a DBCollection object\n"
+ "// or db[\"colName\"]\n"
+ "\n"
+ "if ( ( typeof DBCollection ) == \"undefined\" ){\n"
+ "DBCollection = function( mongo , db , shortName , fullName ){\n"
+ "this._mongo = mongo;\n"
+ "this._db = db;\n"
+ "this._shortName = shortName;\n"
+ "this._fullName = fullName;\n"
+ "\n"
+ "this.verify();\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.verify = function(){\n"
+ "assert( this._fullName , \"no fullName\" );\n"
+ "assert( this._shortName , \"no shortName\" );\n"
+ "assert( this._db , \"no db\" );\n"
+ "\n"
+ "assert.eq( this._fullName , this._db._name + \".\" + this._shortName , \"name mismatch\" );\n"
+ "\n"
+ "assert( this._mongo , \"no mongo in DBCollection\" );\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.getName = function(){\n"
+ "return this._shortName;\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.help = function () {\n"
+ "var shortName = this.getName();\n"
+ "print(\"DBCollection help\");\n"
+ "print(\"\\tdb.\" + shortName + \".find().help() - show DBCursor help\");\n"
+ "print(\"\\tdb.\" + shortName + \".count()\");\n"
+ "print(\"\\tdb.\" + shortName + \".dataSize()\");\n"
+ "print(\"\\tdb.\" + shortName + \".distinct( key ) - eg. db.\" + shortName + \".distinct( 'x' )\");\n"
+ "print(\"\\tdb.\" + shortName + \".drop() drop the collection\");\n"
+ "print(\"\\tdb.\" + shortName + \".dropIndex(name)\");\n"
+ "print(\"\\tdb.\" + shortName + \".dropIndexes()\");\n"
+ "print(\"\\tdb.\" + shortName + \".ensureIndex(keypattern[,options]) - options is an object with these possible fields: name, unique, dropDups\");\n"
+ "print(\"\\tdb.\" + shortName + \".reIndex()\");\n"
+ "print(\"\\tdb.\" + shortName + \".find([query],[fields]) - query is an optional query filter. fields is optional set of fields to return.\");\n"
+ "print(\"\\t e.g. db.\" + shortName + \".find( {x:77} , {name:1, x:1} )\");\n"
+ "print(\"\\tdb.\" + shortName + \".find(...).count()\");\n"
+ "print(\"\\tdb.\" + shortName + \".find(...).limit(n)\");\n"
+ "print(\"\\tdb.\" + shortName + \".find(...).skip(n)\");\n"
+ "print(\"\\tdb.\" + shortName + \".find(...).sort(...)\");\n"
+ "print(\"\\tdb.\" + shortName + \".findOne([query])\");\n"
+ "print(\"\\tdb.\" + shortName + \".findAndModify( { update : ... , remove : bool [, query: {}, sort: {}, 'new': false] } )\");\n"
+ "print(\"\\tdb.\" + shortName + \".getDB() get DB object associated with collection\");\n"
+ "print(\"\\tdb.\" + shortName + \".getIndexes()\");\n"
+ "print(\"\\tdb.\" + shortName + \".group( { key : ..., initial: ..., reduce : ...[, cond: ...] } )\");\n"
+ "print(\"\\tdb.\" + shortName + \".mapReduce( mapFunction , reduceFunction , <optional params> )\");\n"
+ "print(\"\\tdb.\" + shortName + \".remove(query)\");\n"
+ "print(\"\\tdb.\" + shortName + \".renameCollection( newName , <dropTarget> ) renames the collection.\");\n"
+ "print(\"\\tdb.\" + shortName + \".runCommand( name , <options> ) runs a db command with the given name where the first param is the collection name\");\n"
+ "print(\"\\tdb.\" + shortName + \".save(obj)\");\n"
+ "print(\"\\tdb.\" + shortName + \".stats()\");\n"
+ "print(\"\\tdb.\" + shortName + \".storageSize() - includes free space allocated to this collection\");\n"
+ "print(\"\\tdb.\" + shortName + \".totalIndexSize() - size in bytes of all the indexes\");\n"
+ "print(\"\\tdb.\" + shortName + \".totalSize() - storage allocated for all data and indexes\");\n"
+ "print(\"\\tdb.\" + shortName + \".update(query, object[, upsert_bool, multi_bool])\");\n"
+ "print(\"\\tdb.\" + shortName + \".validate() - SLOW\");\n"
+ "print(\"\\tdb.\" + shortName + \".getShardVersion() - only for use with sharding\");\n"
+ "return __magicNoPrint;\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.getFullName = function(){\n"
+ "return this._fullName;\n"
+ "}\n"
+ "DBCollection.prototype.getMongo = function(){\n"
+ "return this._db.getMongo();\n"
+ "}\n"
+ "DBCollection.prototype.getDB = function(){\n"
+ "return this._db;\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype._dbCommand = function( cmd , params ){\n"
+ "if ( typeof( cmd ) == \"object\" )\n"
+ "return this._db._dbCommand( cmd );\n"
+ "\n"
+ "var c = {};\n"
+ "c[cmd] = this.getName();\n"
+ "if ( params )\n"
+ "Object.extend( c , params );\n"
+ "return this._db._dbCommand( c );\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.runCommand = DBCollection.prototype._dbCommand;\n"
+ "\n"
+ "DBCollection.prototype._massageObject = function( q ){\n"
+ "if ( ! q )\n"
+ "return {};\n"
+ "\n"
+ "var type = typeof q;\n"
+ "\n"
+ "if ( type == \"function\" )\n"
+ "return { $where : q };\n"
+ "\n"
+ "if ( q.isObjectId )\n"
+ "return { _id : q };\n"
+ "\n"
+ "if ( type == \"object\" )\n"
+ "return q;\n"
+ "\n"
+ "if ( type == \"string\" ){\n"
+ "if ( q.length == 24 )\n"
+ "return { _id : q };\n"
+ "\n"
+ "return { $where : q };\n"
+ "}\n"
+ "\n"
+ "throw \"don't know how to massage : \" + type;\n"
+ "\n"
+ "}\n"
+ "\n"
+ "\n"
+ "DBCollection.prototype._validateObject = function( o ){\n"
+ "if ( o._ensureSpecial && o._checkModify )\n"
+ "throw \"can't save a DBQuery object\";\n"
+ "}\n"
+ "\n"
+ "DBCollection._allowedFields = { $id : 1 , $ref : 1 };\n"
+ "\n"
+ "DBCollection.prototype._validateForStorage = function( o ){\n"
+ "this._validateObject( o );\n"
+ "for ( var k in o ){\n"
+ "if ( k.indexOf( \".\" ) >= 0 ) {\n"
+ "throw \"can't have . in field names [\" + k + \"]\" ;\n"
+ "}\n"
+ "\n"
+ "if ( k.indexOf( \"$\" ) == 0 && ! DBCollection._allowedFields[k] ) {\n"
+ "throw \"field names cannot start with $ [\" + k + \"]\";\n"
+ "}\n"
+ "\n"
+ "if ( o[k] !== null && typeof( o[k] ) === \"object\" ) {\n"
+ "this._validateForStorage( o[k] );\n"
+ "}\n"
+ "}\n"
+ "};\n"
+ "\n"
+ "\n"
+ "DBCollection.prototype.find = function( query , fields , limit , skip ){\n"
+ "return new DBQuery( this._mongo , this._db , this ,\n"
+ "this._fullName , this._massageObject( query ) , fields , limit , skip );\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.findOne = function( query , fields ){\n"
+ "var cursor = this._mongo.find( this._fullName , this._massageObject( query ) || {} , fields , -1 , 0 , 0 );\n"
+ "if ( ! cursor.hasNext() )\n"
+ "return null;\n"
+ "var ret = cursor.next();\n"
+ "if ( cursor.hasNext() ) throw \"findOne has more than 1 result!\";\n"
+ "if ( ret.$err )\n"
+ "throw \"error \" + tojson( ret );\n"
+ "return ret;\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.insert = function( obj , _allow_dot ){\n"
+ "if ( ! obj )\n"
+ "throw \"no object passed to insert!\";\n"
+ "if ( ! _allow_dot ) {\n"
+ "this._validateForStorage( obj );\n"
+ "}\n"
+ "if ( typeof( obj._id ) == \"undefined\" ){\n"
+ "var tmp = obj; // don't want to modify input\n"
+ "obj = {_id: new ObjectId()};\n"
+ "for (var key in tmp){\n"
+ "obj[key] = tmp[key];\n"
+ "}\n"
+ "}\n"
+ "this._mongo.insert( this._fullName , obj );\n"
+ "this._lastID = obj._id;\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.remove = function( t , justOne ){\n"
+ "this._mongo.remove( this._fullName , this._massageObject( t ) , justOne ? true : false );\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.update = function( query , obj , upsert , multi ){\n"
+ "assert( query , \"need a query\" );\n"
+ "assert( obj , \"need an object\" );\n"
+ "\n"
+ "var firstKey = null;\n"
+ "for (var k in obj) { firstKey = k; break; }\n"
+ "\n"
+ "if (firstKey != null && firstKey[0] == '$') {\n"
+ "// for mods we only validate partially, for example keys may have dots\n"
+ "this._validateObject( obj );\n"
+ "} else {\n"
+ "// we're basically inserting a brand new object, do full validation\n"
+ "this._validateForStorage( obj );\n"
+ "}\n"
+ "this._mongo.update( this._fullName , query , obj , upsert ? true : false , multi ? true : false );\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.save = function( obj ){\n"
+ "if ( obj == null || typeof( obj ) == \"undefined\" )\n"
+ "throw \"can't save a null\";\n"
+ "\n"
+ "if ( typeof( obj._id ) == \"undefined\" ){\n"
+ "obj._id = new ObjectId();\n"
+ "return this.insert( obj );\n"
+ "}\n"
+ "else {\n"
+ "return this.update( { _id : obj._id } , obj , true );\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype._genIndexName = function( keys ){\n"
+ "var name = \"\";\n"
+ "for ( var k in keys ){\n"
+ "var v = keys[k];\n"
+ "if ( typeof v == \"function\" )\n"
+ "continue;\n"
+ "\n"
+ "if ( name.length > 0 )\n"
+ "name += \"_\";\n"
+ "name += k + \"_\";\n"
+ "\n"
+ "if ( typeof v == \"number\" )\n"
+ "name += v;\n"
+ "}\n"
+ "return name;\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype._indexSpec = function( keys, options ) {\n"
+ "var ret = { ns : this._fullName , key : keys , name : this._genIndexName( keys ) };\n"
+ "\n"
+ "if ( ! options ){\n"
+ "}\n"
+ "else if ( typeof ( options ) == \"string\" )\n"
+ "ret.name = options;\n"
+ "else if ( typeof ( options ) == \"boolean\" )\n"
+ "ret.unique = true;\n"
+ "else if ( typeof ( options ) == \"object\" ){\n"
+ "if ( options.length ){\n"
+ "var nb = 0;\n"
+ "for ( var i=0; i<options.length; i++ ){\n"
+ "if ( typeof ( options[i] ) == \"string\" )\n"
+ "ret.name = options[i];\n"
+ "else if ( typeof( options[i] ) == \"boolean\" ){\n"
+ "if ( options[i] ){\n"
+ "if ( nb == 0 )\n"
+ "ret.unique = true;\n"
+ "if ( nb == 1 )\n"
+ "ret.dropDups = true;\n"
+ "}\n"
+ "nb++;\n"
+ "}\n"
+ "}\n"
+ "}\n"
+ "else {\n"
+ "Object.extend( ret , options );\n"
+ "}\n"
+ "}\n"
+ "else {\n"
+ "throw \"can't handle: \" + typeof( options );\n"
+ "}\n"
+ "/*\n"
+ "return ret;\n"
+ "\n"
+ "var name;\n"
+ "var nTrue = 0;\n"
+ "\n"
+ "if ( ! isObject( options ) ) {\n"
+ "options = [ options ];\n"
+ "}\n"
+ "\n"
+ "if ( options.length ){\n"
+ "for( var i = 0; i < options.length; ++i ) {\n"
+ "var o = options[ i ];\n"
+ "if ( isString( o ) ) {\n"
+ "ret.name = o;\n"
+ "} else if ( typeof( o ) == \"boolean\" ) {\n"
+ "if ( o ) {\n"
+ "++nTrue;\n"
+ "}\n"
+ "}\n"
+ "}\n"
+ "if ( nTrue > 0 ) {\n"
+ "ret.unique = true;\n"
+ "}\n"
+ "if ( nTrue > 1 ) {\n"
+ "ret.dropDups = true;\n"
+ "}\n"
+ "}\n"
+ "*/\n"
+ "return ret;\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.createIndex = function( keys , options ){\n"
+ "var o = this._indexSpec( keys, options );\n"
+ "this._db.getCollection( \"system.indexes\" ).insert( o , true );\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.ensureIndex = function( keys , options ){\n"
+ "var name = this._indexSpec( keys, options ).name;\n"
+ "this._indexCache = this._indexCache || {};\n"
+ "if ( this._indexCache[ name ] ){\n"
+ "return;\n"
+ "}\n"
+ "\n"
+ "this.createIndex( keys , options );\n"
+ "if ( this.getDB().getLastError() == \"\" ) {\n"
+ "this._indexCache[name] = true;\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.resetIndexCache = function(){\n"
+ "this._indexCache = {};\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.reIndex = function() {\n"
+ "return this._db.runCommand({ reIndex: this.getName() });\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.dropIndexes = function(){\n"
+ "this.resetIndexCache();\n"
+ "\n"
+ "var res = this._db.runCommand( { deleteIndexes: this.getName(), index: \"*\" } );\n"
+ "assert( res , \"no result from dropIndex result\" );\n"
+ "if ( res.ok )\n"
+ "return res;\n"
+ "\n"
+ "if ( res.errmsg.match( /not found/ ) )\n"
+ "return res;\n"
+ "\n"
+ "throw \"error dropping indexes : \" + tojson( res );\n"
+ "}\n"
+ "\n"
+ "\n"
+ "DBCollection.prototype.drop = function(){\n"
+ "if ( arguments.length > 0 )\n"
+ "throw \"drop takes no argument\";\n"
+ "this.resetIndexCache();\n"
+ "var ret = this._db.runCommand( { drop: this.getName() } );\n"
+ "if ( ! ret.ok ){\n"
+ "if ( ret.errmsg == \"ns not found\" )\n"
+ "return false;\n"
+ "throw \"drop failed: \" + tojson( ret );\n"
+ "}\n"
+ "return true;\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.findAndModify = function(args){\n"
+ "var cmd = { findandmodify: this.getName() };\n"
+ "for (var key in args){\n"
+ "cmd[key] = args[key];\n"
+ "}\n"
+ "\n"
+ "var ret = this._db.runCommand( cmd );\n"
+ "if ( ! ret.ok ){\n"
+ "if (ret.errmsg == \"No matching object found\"){\n"
+ "return null;\n"
+ "}\n"
+ "throw \"findAndModifyFailed failed: \" + tojson( ret.errmsg );\n"
+ "}\n"
+ "return ret.value;\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.renameCollection = function( newName , dropTarget ){\n"
+ "return this._db._adminCommand( { renameCollection : this._fullName ,\n"
+ "to : this._db._name + \".\" + newName ,\n"
+ "dropTarget : dropTarget } )\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.validate = function() {\n"
+ "var res = this._db.runCommand( { validate: this.getName() } );\n"
+ "\n"
+ "res.valid = false;\n"
+ "\n"
+ "var raw = res.result || res.raw;\n"
+ "\n"
+ "if ( raw ){\n"
+ "var str = \"-\" + tojson( raw );\n"
+ "res.valid = ! ( str.match( /exception/ ) || str.match( /corrupt/ ) );\n"
+ "\n"
+ "var p = /lastExtentSize:(\\d+)/;\n"
+ "var r = p.exec( str );\n"
+ "if ( r ){\n"
+ "res.lastExtentSize = Number( r[1] );\n"
+ "}\n"
+ "}\n"
+ "\n"
+ "return res;\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.getShardVersion = function(){\n"
+ "return this._db._adminCommand( { getShardVersion : this._fullName } );\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.getIndexes = function(){\n"
+ "return this.getDB().getCollection( \"system.indexes\" ).find( { ns : this.getFullName() } ).toArray();\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.getIndices = DBCollection.prototype.getIndexes;\n"
+ "DBCollection.prototype.getIndexSpecs = DBCollection.prototype.getIndexes;\n"
+ "\n"
+ "DBCollection.prototype.getIndexKeys = function(){\n"
+ "return this.getIndexes().map(\n"
+ "function(i){\n"
+ "return i.key;\n"
+ "}\n"
+ ");\n"
+ "}\n"
+ "\n"
+ "\n"
+ "DBCollection.prototype.count = function( x ){\n"
+ "return this.find( x ).count();\n"
+ "}\n"
+ "\n"
+ "/**\n"
+ "* Drop free lists. Normally not used.\n"
+ "* Note this only does the collection itself, not the namespaces of its indexes (see cleanAll).\n"
+ "*/\n"
+ "DBCollection.prototype.clean = function() {\n"
+ "return this._dbCommand( { clean: this.getName() } );\n"
+ "}\n"
+ "\n"
+ "\n"
+ "\n"
+ "/**\n"
+ "* <p>Drop a specified index.</p>\n"
+ "*\n"
+ "* <p>\n"
+ "* Name is the name of the index in the system.indexes name field. (Run db.system.indexes.find() to\n"
+ "* see example data.)\n"
+ "* </p>\n"
+ "*\n"
+ "* <p>Note : alpha: space is not reclaimed </p>\n"
+ "* @param {String} name of index to delete.\n"
+ "* @return A result object. result.ok will be true if successful.\n"
+ "*/\n"
+ "DBCollection.prototype.dropIndex = function(index) {\n"
+ "assert(index , \"need to specify index to dropIndex\" );\n"
+ "\n"
+ "if ( ! isString( index ) && isObject( index ) )\n"
+ "index = this._genIndexName( index );\n"
+ "\n"
+ "var res = this._dbCommand( \"deleteIndexes\" ,{ index: index } );\n"
+ "this.resetIndexCache();\n"
+ "return res;\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.copyTo = function( newName ){\n"
+ "return this.getDB().eval(\n"
+ "function( collName , newName ){\n"
+ "var from = db[collName];\n"
+ "var to = db[newName];\n"
+ "to.ensureIndex( { _id : 1 } );\n"
+ "var count = 0;\n"
+ "\n"
+ "var cursor = from.find();\n"
+ "while ( cursor.hasNext() ){\n"
+ "var o = cursor.next();\n"
+ "count++;\n"
+ "to.save( o );\n"
+ "}\n"
+ "\n"
+ "return count;\n"
+ "} , this.getName() , newName\n"
+ ");\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.getCollection = function( subName ){\n"
+ "return this._db.getCollection( this._shortName + \".\" + subName );\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.stats = function( scale ){\n"
+ "return this._db.runCommand( { collstats : this._shortName , scale : scale } );\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.dataSize = function(){\n"
+ "return this.stats().size;\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.storageSize = function(){\n"
+ "return this.stats().storageSize;\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.totalIndexSize = function( verbose ){\n"
+ "var stats = this.stats();\n"
+ "if (verbose){\n"
+ "for (var ns in stats.indexSizes){\n"
+ "print( ns + \"\\t\" + stats.indexSizes[ns] );\n"
+ "}\n"
+ "}\n"
+ "return stats.totalIndexSize;\n"
+ "}\n"
+ "\n"
+ "\n"
+ "DBCollection.prototype.totalSize = function(){\n"
+ "var total = this.storageSize();\n"
+ "var mydb = this._db;\n"
+ "var shortName = this._shortName;\n"
+ "this.getIndexes().forEach(\n"
+ "function( spec ){\n"
+ "var coll = mydb.getCollection( shortName + \".$\" + spec.name );\n"
+ "var mysize = coll.storageSize();\n"
+ "//print( coll + \"\\t\" + mysize + \"\\t\" + tojson( coll.validate() ) );\n"
+ "total += coll.dataSize();\n"
+ "}\n"
+ ");\n"
+ "return total;\n"
+ "}\n"
+ "\n"
+ "\n"
+ "DBCollection.prototype.convertToCapped = function( bytes ){\n"
+ "if ( ! bytes )\n"
+ "throw \"have to specify # of bytes\";\n"
+ "return this._dbCommand( { convertToCapped : this._shortName , size : bytes } )\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.exists = function(){\n"
+ "return this._db.system.namespaces.findOne( { name : this._fullName } );\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.isCapped = function(){\n"
+ "var e = this.exists();\n"
+ "return ( e && e.options && e.options.capped ) ? true : false;\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.distinct = function( keyString , query ){\n"
+ "var res = this._dbCommand( { distinct : this._shortName , key : keyString , query : query || {} } );\n"
+ "if ( ! res.ok )\n"
+ "throw \"distinct failed: \" + tojson( res );\n"
+ "return res.values;\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.group = function( params ){\n"
+ "params.ns = this._shortName;\n"
+ "return this._db.group( params );\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.groupcmd = function( params ){\n"
+ "params.ns = this._shortName;\n"
+ "return this._db.groupcmd( params );\n"
+ "}\n"
+ "\n"
+ "MapReduceResult = function( db , o ){\n"
+ "Object.extend( this , o );\n"
+ "this._o = o;\n"
+ "this._keys = Object.keySet( o );\n"
+ "this._db = db;\n"
+ "this._coll = this._db.getCollection( this.result );\n"
+ "}\n"
+ "\n"
+ "MapReduceResult.prototype._simpleKeys = function(){\n"
+ "return this._o;\n"
+ "}\n"
+ "\n"
+ "MapReduceResult.prototype.find = function(){\n"
+ "if ( this.results )\n"
+ "return this.results;\n"
+ "return DBCollection.prototype.find.apply( this._coll , arguments );\n"
+ "}\n"
+ "\n"
+ "MapReduceResult.prototype.drop = function(){\n"
+ "return this._coll.drop();\n"
+ "}\n"
+ "\n"
+ "/**\n"
+ "* just for debugging really\n"
+ "*/\n"
+ "MapReduceResult.prototype.convertToSingleObject = function(){\n"
+ "var z = {};\n"
+ "this._coll.find().forEach( function(a){ z[a._id] = a.value; } );\n"
+ "return z;\n"
+ "}\n"
+ "\n"
+ "/**\n"
+ "* @param optional object of optional fields;\n"
+ "*/\n"
+ "DBCollection.prototype.mapReduce = function( map , reduce , optionsOrOutString ){\n"
+ "var c = { mapreduce : this._shortName , map : map , reduce : reduce };\n"
+ "assert( optionsOrOutString , \"need to an optionsOrOutString\" )\n"
+ "\n"
+ "if ( typeof( optionsOrOutString ) == \"string\" )\n"
+ "c[\"out\"] = optionsOrOutString;\n"
+ "else\n"
+ "Object.extend( c , optionsOrOutString );\n"
+ "\n"
+ "var raw = this._db.runCommand( c );\n"
+ "if ( ! raw.ok )\n"
+ "throw \"map reduce failed: \" + tojson( raw );\n"
+ "return new MapReduceResult( this._db , raw );\n"
+ "\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.toString = function(){\n"
+ "return this.getFullName();\n"
+ "}\n"
+ "\n"
+ "DBCollection.prototype.toString = function(){\n"
+ "return this.getFullName();\n"
+ "}\n"
+ "\n"
+ "\n"
+ "DBCollection.prototype.tojson = DBCollection.prototype.toString;\n"
+ "\n"
+ "DBCollection.prototype.shellPrint = DBCollection.prototype.toString;\n"
+ "\n"
+ "DBCollection.autocomplete = function(obj){\n"
+ "var colls = DB.autocomplete(obj.getDB());\n"
+ "var ret = [];\n"
+ "for (var i=0; i<colls.length; i++){\n"
+ "var c = colls[i];\n"
+ "if (c.length <= obj.getName().length) continue;\n"
+ "if (c.slice(0,obj.getName().length+1) != obj.getName()+'.') continue;\n"
+ "\n"
+ "ret.push(c.slice(obj.getName().length+1));\n"
+ "}\n"
+ "return ret;\n"
+ "}\n"
+ ;
+ extern const JSFile collection;
+ const JSFile collection = { "shell/collection.js" , _jscode_raw_collection };
+ } // namespace JSFiles
} // namespace mongo
diff --git a/shell/shell_utils.cpp b/shell/shell_utils.cpp
index 981e2508577..d0d4429db5f 100644
--- a/shell/shell_utils.cpp
+++ b/shell/shell_utils.cpp
@@ -50,10 +50,10 @@
#include "../util/file.h"
namespace mongo {
-
+
DBClientWithCommands *latestConn = 0;
extern bool dbexitCalled;
-
+
#ifdef _WIN32
inline int close(int fd) { return _close(fd); }
inline int read(int fd, void* buf, size_t size) { return _read(fd, buf, size); }
@@ -64,29 +64,29 @@ namespace mongo {
namespace shellUtils {
Scope* theScope = 0;
-
+
std::string _dbConnect;
std::string _dbAuth;
-
+
const char *argv0 = 0;
void RecordMyLocation( const char *_argv0 ) { argv0 = _argv0; }
-
+
// helpers
-
+
BSONObj makeUndefined() {
BSONObjBuilder b;
b.appendUndefined( "" );
return b.obj();
}
const BSONObj undefined_ = makeUndefined();
-
+
BSONObj encapsulate( const BSONObj &obj ) {
return BSON( "" << obj );
}
-
+
// real methods
- mongo::BSONObj JSSleep(const mongo::BSONObj &args){
+ mongo::BSONObj JSSleep(const mongo::BSONObj &args) {
assert( args.nFields() == 1 );
assert( args.firstElement().isNumber() );
int ms = int( args.firstElement().number() );
@@ -107,52 +107,52 @@ namespace mongo {
return undefined_;
}
- BSONObj JSGetMemInfo( const BSONObj& args ){
+ BSONObj JSGetMemInfo( const BSONObj& args ) {
ProcessInfo pi;
uassert( 10258 , "processinfo not supported" , pi.supported() );
-
+
BSONObjBuilder e;
e.append( "virtual" , pi.getVirtualMemorySize() );
e.append( "resident" , pi.getResidentSize() );
-
+
BSONObjBuilder b;
b.append( "ret" , e.obj() );
-
+
return b.obj();
}
#ifndef MONGO_SAFE_SHELL
- BSONObj listFiles(const BSONObj& _args){
+ BSONObj listFiles(const BSONObj& _args) {
static BSONObj cd = BSON( "0" << "." );
BSONObj args = _args.isEmpty() ? cd : _args;
uassert( 10257 , "need to specify 1 argument to listFiles" , args.nFields() == 1 );
-
+
BSONObjBuilder lst;
-
+
string rootname = args.firstElement().valuestrsafe();
path root( rootname );
stringstream ss;
ss << "listFiles: no such directory: " << rootname;
string msg = ss.str();
uassert( 12581, msg.c_str(), boost::filesystem::exists( root ) );
-
+
directory_iterator end;
directory_iterator i( root);
-
+
int num =0;
- while ( i != end ){
+ while ( i != end ) {
path p = *i;
BSONObjBuilder b;
b << "name" << p.string();
b.appendBool( "isDirectory", is_directory( p ) );
- if ( ! is_directory( p ) ){
- try {
+ if ( ! is_directory( p ) ) {
+ try {
b.append( "size" , (double)file_size( p ) );
}
- catch ( ... ){
+ catch ( ... ) {
i++;
continue;
}
@@ -165,16 +165,16 @@ namespace mongo {
num++;
i++;
}
-
+
BSONObjBuilder ret;
ret.appendArray( "", lst.done() );
return ret.obj();
}
- BSONObj ls(const BSONObj& args) {
+ BSONObj ls(const BSONObj& args) {
BSONObj o = listFiles(args);
if( !o.isEmpty() ) {
- for( BSONObj::iterator i = o.firstElement().Obj().begin(); i.more(); ) {
+ for( BSONObj::iterator i = o.firstElement().Obj().begin(); i.more(); ) {
BSONObj f = i.next().Obj();
cout << f["name"].String();
if( f["isDirectory"].trueValue() ) cout << '/';
@@ -185,38 +185,38 @@ namespace mongo {
return BSONObj();
}
- BSONObj cd(const BSONObj& args) {
+ BSONObj cd(const BSONObj& args) {
#if defined(_WIN32)
std::wstring dir = toWideString( args.firstElement().String().c_str() );
if( SetCurrentDirectory(dir.c_str()) )
return BSONObj();
#else
string dir = args.firstElement().String();
-/* if( chdir(dir.c_str) ) == 0 )
- return BSONObj();
- */
+ /* if( chdir(dir.c_str) ) == 0 )
+ return BSONObj();
+ */
if( 1 ) return BSON(""<<"implementation not done for posix");
#endif
return BSON( "" << "change directory failed" );
}
- BSONObj pwd(const BSONObj&) {
+ BSONObj pwd(const BSONObj&) {
boost::filesystem::path p = boost::filesystem::current_path();
return BSON( "" << p.string() );
}
- BSONObj hostname(const BSONObj&) {
+ BSONObj hostname(const BSONObj&) {
return BSON( "" << getHostName() );
}
- static BSONElement oneArg(const BSONObj& args) {
+ static BSONElement oneArg(const BSONObj& args) {
uassert( 12597 , "need to specify 1 argument" , args.nFields() == 1 );
return args.firstElement();
}
const int CANT_OPEN_FILE = 13300;
- BSONObj cat(const BSONObj& args){
+ BSONObj cat(const BSONObj& args) {
BSONElement e = oneArg(args);
stringstream ss;
ifstream f(e.valuestrsafe());
@@ -235,7 +235,7 @@ namespace mongo {
return BSON( "" << ss.str() );
}
- BSONObj md5sumFile(const BSONObj& args){
+ BSONObj md5sumFile(const BSONObj& args) {
BSONElement e = oneArg(args);
stringstream ss;
FILE* f = fopen(e.valuestrsafe(), "rb");
@@ -256,17 +256,17 @@ namespace mongo {
return BSON( "" << digestToString( d ) );
}
- BSONObj mkdir(const BSONObj& args){
+ BSONObj mkdir(const BSONObj& args) {
boost::filesystem::create_directories(args.firstElement().String());
return BSON( "" << true );
}
- BSONObj removeFile(const BSONObj& args){
+ BSONObj removeFile(const BSONObj& args) {
BSONElement e = oneArg(args);
bool found = false;
-
+
path root( args.firstElement().valuestrsafe() );
- if ( boost::filesystem::exists( root ) ){
+ if ( boost::filesystem::exists( root ) ) {
found = true;
boost::filesystem::remove_all( root );
}
@@ -280,31 +280,31 @@ namespace mongo {
* @param args - [ name, byte index ]
* In this initial implementation, all bits in the specified byte are flipped.
*/
- BSONObj fuzzFile(const BSONObj& args){
+ BSONObj fuzzFile(const BSONObj& args) {
uassert( 13619, "fuzzFile takes 2 arguments", args.nFields() == 2 );
shared_ptr< File > f( new File() );
f->open( args.getStringField( "0" ) );
uassert( 13620, "couldn't open file to fuzz", !f->bad() && f->is_open() );
-
+
char c;
f->read( args.getIntField( "1" ), &c, 1 );
c = ~c;
f->write( args.getIntField( "1" ), &c, 1 );
return undefined_;
- // f close is implicit
- }
-
+ // f close is implicit
+ }
+
map< int, pair< pid_t, int > > dbs;
map< pid_t, int > shells;
#ifdef _WIN32
map< pid_t, HANDLE > handles;
#endif
-
+
mongo::mutex mongoProgramOutputMutex("mongoProgramOutputMutex");
stringstream mongoProgramOutput_;
- void goingAwaySoon() {
+ void goingAwaySoon() {
mongo::mutex::scoped_lock lk( mongoProgramOutputMutex );
mongo::dbexitCalled = true;
}
@@ -320,7 +320,7 @@ namespace mongo {
cout << buf.str() << endl;
mongoProgramOutput_ << buf.str() << endl;
}
-
+
// only returns last 100000 characters
BSONObj RawMongoProgramOutput( const BSONObj &args ) {
mongo::mutex::scoped_lock lk( mongoProgramOutputMutex );
@@ -336,7 +336,7 @@ namespace mongo {
mongoProgramOutput_.str( "" );
return undefined_;
}
-
+
class ProgramRunner {
vector<string> argv_;
int port_;
@@ -346,13 +346,13 @@ namespace mongo {
pid_t pid() const { return pid_; }
int port() const { return port_; }
- boost::filesystem::path find(string prog) {
+ boost::filesystem::path find(string prog) {
boost::filesystem::path p = prog;
#ifdef _WIN32
p = change_extension(p, ".exe");
#endif
- if( boost::filesystem::exists(p) ){
+ if( boost::filesystem::exists(p) ) {
#ifndef _WIN32
p = boost::filesystem::initial_path() / p;
#endif
@@ -371,23 +371,23 @@ namespace mongo {
if( boost::filesystem::exists(t) ) return t;
}
}
- } catch(...) { }
+ }
+ catch(...) { }
{
boost::filesystem::path t = boost::filesystem::initial_path() / p;
if( boost::filesystem::exists(t) ) return t;
}
return p; // not found; might find via system path
- }
+ }
- ProgramRunner( const BSONObj &args , bool isMongoProgram=true)
- {
+ ProgramRunner( const BSONObj &args , bool isMongoProgram=true) {
assert( !args.isEmpty() );
string program( args.firstElement().valuestrsafe() );
assert( !program.empty() );
boost::filesystem::path programPath = find(program);
- if (isMongoProgram){
+ if (isMongoProgram) {
#if 0
if (program == "mongos") {
argv_.push_back("valgrind");
@@ -401,9 +401,9 @@ namespace mongo {
}
argv_.push_back( programPath.native_file_string() );
-
+
port_ = -1;
-
+
BSONObjIterator j( args );
j.next(); // skip program name (handled above)
while(j.more()) {
@@ -413,7 +413,8 @@ namespace mongo {
stringstream ss;
ss << e.number();
str = ss.str();
- } else {
+ }
+ else {
assert( e.type() == mongo::String );
str = e.valuestr();
}
@@ -423,7 +424,7 @@ namespace mongo {
port_ = strtol( str.c_str(), 0, 10 );
argv_.push_back(str);
}
-
+
if ( program != "mongod" && program != "mongos" && program != "mongobridge" )
port_ = 0;
else {
@@ -431,19 +432,19 @@ namespace mongo {
cout << "error: a port number is expected when running mongod (etc.) from the shell" << endl;
assert( port_ > 0 );
}
- if ( port_ > 0 && dbs.count( port_ ) != 0 ){
+ if ( port_ > 0 && dbs.count( port_ ) != 0 ) {
cerr << "count for port: " << port_ << " is not 0 is: " << dbs.count( port_ ) << endl;
- assert( dbs.count( port_ ) == 0 );
+ assert( dbs.count( port_ ) == 0 );
}
}
-
+
void start() {
int pipeEnds[ 2 ];
assert( pipe( pipeEnds ) != -1 );
-
+
fflush( 0 );
launch_process(pipeEnds[1]); //sets pid_
-
+
{
stringstream ss;
ss << "shell: started program";
@@ -459,52 +460,54 @@ namespace mongo {
shells.insert( make_pair( pid_, pipeEnds[ 1 ] ) );
pipe_ = pipeEnds[ 0 ];
}
-
+
// Continue reading output
void operator()() {
try {
- // This assumes there aren't any 0's in the mongo program output.
- // Hope that's ok.
- const unsigned bufSize = 64000;
- char buf[ bufSize ];
- char temp[ bufSize ];
- char *start = buf;
- while( 1 ) {
- int lenToRead = ( bufSize - 1 ) - ( start - buf );
- assert( lenToRead > 0 );
- int ret = read( pipe_, (void *)start, lenToRead );
- if( mongo::dbexitCalled )
- break;
- assert( ret != -1 );
- start[ ret ] = '\0';
- if ( strlen( start ) != unsigned( ret ) )
- writeMongoProgramOutputLine( port_, pid_, "WARNING: mongod wrote null bytes to output" );
- char *last = buf;
- for( char *i = strchr( buf, '\n' ); i; last = i + 1, i = strchr( last, '\n' ) ) {
- *i = '\0';
- writeMongoProgramOutputLine( port_, pid_, last );
- }
- if ( ret == 0 ) {
- if ( *last )
+ // This assumes there aren't any 0's in the mongo program output.
+ // Hope that's ok.
+ const unsigned bufSize = 64000;
+ char buf[ bufSize ];
+ char temp[ bufSize ];
+ char *start = buf;
+ while( 1 ) {
+ int lenToRead = ( bufSize - 1 ) - ( start - buf );
+ assert( lenToRead > 0 );
+ int ret = read( pipe_, (void *)start, lenToRead );
+ if( mongo::dbexitCalled )
+ break;
+ assert( ret != -1 );
+ start[ ret ] = '\0';
+ if ( strlen( start ) != unsigned( ret ) )
+ writeMongoProgramOutputLine( port_, pid_, "WARNING: mongod wrote null bytes to output" );
+ char *last = buf;
+ for( char *i = strchr( buf, '\n' ); i; last = i + 1, i = strchr( last, '\n' ) ) {
+ *i = '\0';
writeMongoProgramOutputLine( port_, pid_, last );
- close( pipe_ );
- break;
- }
- if ( last != buf ) {
- strcpy( temp, last );
- strcpy( buf, temp );
- } else {
- assert( strlen( buf ) < bufSize );
+ }
+ if ( ret == 0 ) {
+ if ( *last )
+ writeMongoProgramOutputLine( port_, pid_, last );
+ close( pipe_ );
+ break;
+ }
+ if ( last != buf ) {
+ strcpy( temp, last );
+ strcpy( buf, temp );
+ }
+ else {
+ assert( strlen( buf ) < bufSize );
+ }
+ start = buf + strlen( buf );
}
- start = buf + strlen( buf );
- }
- } catch(...) {
+ }
+ catch(...) {
}
}
- void launch_process(int child_stdout){
+ void launch_process(int child_stdout) {
#ifdef _WIN32
stringstream ss;
- for( unsigned i=0; i < argv_.size(); i++ ){
+ for( unsigned i=0; i < argv_.size(); i++ ) {
if (i) ss << ' ';
if (argv_[i].find(' ') == string::npos)
ss << argv_[i];
@@ -513,7 +516,7 @@ namespace mongo {
}
string args = ss.str();
-
+
boost::scoped_array<TCHAR> args_tchar (new TCHAR[args.size() + 1]);
size_t i;
for(i=0; i < args.size(); i++)
@@ -545,24 +548,23 @@ namespace mongo {
pid_ = pi.dwProcessId;
handles.insert( make_pair( pid_, pi.hProcess ) );
-
+
#else
pid_ = fork();
assert( pid_ != -1 );
-
+
if ( pid_ == 0 ) {
// DON'T ASSERT IN THIS BLOCK - very bad things will happen
const char** argv = new const char* [argv_.size()+1]; // don't need to free - in child
- for (unsigned i=0; i < argv_.size(); i++){
+ for (unsigned i=0; i < argv_.size(); i++) {
argv[i] = argv_[i].c_str();
}
argv[argv_.size()] = 0;
-
+
if ( dup2( child_stdout, STDOUT_FILENO ) == -1 ||
- dup2( child_stdout, STDERR_FILENO ) == -1 )
- {
+ dup2( child_stdout, STDERR_FILENO ) == -1 ) {
cout << "Unable to dup2 child output: " << errnoWithDescription() << endl;
::_Exit(-1); //do not pass go, do not call atexit handlers
}
@@ -574,7 +576,7 @@ namespace mongo {
env[1] = NULL;
// Heap-check for mongos only. 'argv[0]' must be in the path format.
- if ( argv_[0].find("mongos") != string::npos){
+ if ( argv_[0].find("mongos") != string::npos) {
execvpe( argv[ 0 ], const_cast<char**>(argv) , const_cast<char**>(env) );
}
#endif // HEAP_CHECKING
@@ -588,9 +590,9 @@ namespace mongo {
#endif
}
};
-
+
//returns true if process exited
- bool wait_for_pid(pid_t pid, bool block=true, int* exit_code=NULL){
+ bool wait_for_pid(pid_t pid, bool block=true, int* exit_code=NULL) {
#ifdef _WIN32
assert(handles.count(pid));
HANDLE h = handles[pid];
@@ -599,13 +601,14 @@ namespace mongo {
WaitForSingleObject(h, INFINITE);
DWORD tmp;
- if(GetExitCodeProcess(h, &tmp)){
+ if(GetExitCodeProcess(h, &tmp)) {
CloseHandle(h);
handles.erase(pid);
if (exit_code)
*exit_code = tmp;
return true;
- }else{
+ }
+ else {
return false;
}
#else
@@ -614,11 +617,11 @@ namespace mongo {
if (exit_code)
*exit_code = WEXITSTATUS(tmp);
return ret;
-
+
#endif
}
- BSONObj WaitProgram( const BSONObj& a ){
+ BSONObj WaitProgram( const BSONObj& a ) {
int pid = a.firstElement().numberInt();
BSONObj x = BSON( "" << wait_for_pid( pid ) );
shells.erase( pid );
@@ -641,7 +644,8 @@ namespace mongo {
wait_for_pid( r.pid(), true, &exit_code );
if ( r.port() > 0 ) {
dbs.erase( r.port() );
- } else {
+ }
+ else {
shells.erase( r.pid() );
}
return BSON( string( "" ) << exit_code );
@@ -663,10 +667,10 @@ namespace mongo {
assert( !path.empty() );
if ( boost::filesystem::exists( path ) )
boost::filesystem::remove_all( path );
- boost::filesystem::create_directory( path );
+ boost::filesystem::create_directory( path );
return undefined_;
}
-
+
void copyDir( const path &from, const path &to ) {
directory_iterator end;
directory_iterator i( from );
@@ -677,14 +681,15 @@ namespace mongo {
path newDir = to / p.leaf();
boost::filesystem::create_directory( newDir );
copyDir( p, newDir );
- } else {
+ }
+ else {
boost::filesystem::copy_file( p, to / p.leaf() );
}
}
++i;
- }
+ }
}
-
+
// NOTE target dbpath will be cleared first
BSONObj CopyDbpath( const BSONObj &a ) {
assert( a.nFields() == 2 );
@@ -700,24 +705,26 @@ namespace mongo {
return undefined_;
}
- inline void kill_wrapper(pid_t pid, int sig, int port){
+ inline void kill_wrapper(pid_t pid, int sig, int port) {
#ifdef _WIN32
- if (sig == SIGKILL || port == 0){
+ if (sig == SIGKILL || port == 0) {
assert( handles.count(pid) );
TerminateProcess(handles[pid], 1); // returns failure for "zombie" processes.
- }else{
+ }
+ else {
DBClientConnection conn;
conn.connect("127.0.0.1:" + BSONObjBuilder::numStr(port));
try {
conn.simpleCommand("admin", NULL, "shutdown");
- } catch (...) {
+ }
+ catch (...) {
//Do nothing. This command never returns data to the client and the driver doesn't like that.
}
}
#else
int x = kill( pid, sig );
- if ( x ){
- if ( errno == ESRCH ){
+ if ( x ) {
+ if ( errno == ESRCH ) {
}
else {
cout << "killFailed: " << errnoWithDescription() << endl;
@@ -726,8 +733,8 @@ namespace mongo {
}
#endif
- }
-
+ }
+
int killDb( int port, pid_t _pid, int signal ) {
pid_t pid;
int exitCode = 0;
@@ -737,12 +744,13 @@ namespace mongo {
return 0;
}
pid = dbs[ port ].first;
- } else {
+ }
+ else {
pid = _pid;
}
-
+
kill_wrapper( pid, signal, port );
-
+
int i = 0;
for( ; i < 130; ++i ) {
if ( i == 30 ) {
@@ -751,7 +759,7 @@ namespace mongo {
now[ 20 ] = 0;
cout << now << " process on port " << port << ", with pid " << pid << " not terminated, sending sigkill" << endl;
kill_wrapper( pid, SIGKILL, port );
- }
+ }
if(wait_for_pid(pid, false, &exitCode))
break;
sleepmillis( 1000 );
@@ -767,7 +775,8 @@ namespace mongo {
if ( port > 0 ) {
close( dbs[ port ].second );
dbs.erase( port );
- } else {
+ }
+ else {
close( shells[ pid ] );
shells.erase( pid );
}
@@ -776,7 +785,7 @@ namespace mongo {
if ( i > 4 || signal == SIGKILL ) {
sleepmillis( 4000 ); // allow operating system to reclaim resources
}
-
+
return exitCode;
}
@@ -791,7 +800,7 @@ namespace mongo {
}
return ret;
}
-
+
/** stopMongoProgram(port[, signal]) */
BSONObj StopMongoProgram( const BSONObj &a ) {
assert( a.nFields() == 1 || a.nFields() == 2 );
@@ -800,23 +809,23 @@ namespace mongo {
int code = killDb( port, 0, getSignal( a ) );
cout << "shell: stopped mongo program on port " << port << endl;
return BSON( "" << code );
- }
-
+ }
+
BSONObj StopMongoProgramByPid( const BSONObj &a ) {
assert( a.nFields() == 1 || a.nFields() == 2 );
assert( a.firstElement().isNumber() );
- int pid = int( a.firstElement().number() );
+ int pid = int( a.firstElement().number() );
int code = killDb( 0, pid, getSignal( a ) );
cout << "shell: stopped mongo program on pid " << pid << endl;
return BSON( "" << code );
}
-
+
void KillMongoProgramInstances() {
vector< int > ports;
for( map< int, pair< pid_t, int > >::iterator i = dbs.begin(); i != dbs.end(); ++i )
ports.push_back( i->first );
for( vector< int >::iterator i = ports.begin(); i != ports.end(); ++i )
- killDb( *i, 0, SIGTERM );
+ killDb( *i, 0, SIGTERM );
vector< pid_t > pids;
for( map< pid_t, int >::iterator i = shells.begin(); i != shells.end(); ++i )
pids.push_back( i->first );
@@ -826,7 +835,7 @@ namespace mongo {
#else // ndef MONGO_SAFE_SHELL
void KillMongoProgramInstances() {}
#endif
-
+
MongoProgramScope::~MongoProgramScope() {
DESTRUCTOR_GUARD(
KillMongoProgramInstances();
@@ -835,14 +844,14 @@ namespace mongo {
}
unsigned _randomSeed;
-
+
BSONObj JSSrand( const BSONObj &a ) {
uassert( 12518, "srand requires a single numeric argument",
- a.nFields() == 1 && a.firstElement().isNumber() );
+ a.nFields() == 1 && a.firstElement().isNumber() );
_randomSeed = (unsigned)a.firstElement().numberLong(); // grab least significant digits
return undefined_;
}
-
+
BSONObj JSRand( const BSONObj &a ) {
uassert( 12519, "rand accepts no arguments", a.nFields() == 0 );
unsigned r;
@@ -854,7 +863,7 @@ namespace mongo {
return BSON( "" << double( r ) / ( double( RAND_MAX ) + 1 ) );
}
- BSONObj isWindows(const BSONObj& a){
+ BSONObj isWindows(const BSONObj& a) {
uassert( 13006, "isWindows accepts no arguments", a.nFields() == 0 );
#ifdef _WIN32
return BSON( "" << true );
@@ -863,7 +872,7 @@ namespace mongo {
#endif
}
- BSONObj getHostName(const BSONObj& a){
+ BSONObj getHostName(const BSONObj& a) {
uassert( 13411, "getHostName accepts no arguments", a.nFields() == 0 );
char buf[260]; // HOST_NAME_MAX is usually 255
assert(gethostname(buf, 260) == 0);
@@ -871,8 +880,8 @@ namespace mongo {
return BSON("" << buf);
}
-
- void installShellUtils( Scope& scope ){
+
+ void installShellUtils( Scope& scope ) {
theScope = &scope;
scope.injectNative( "sleep" , JSSleep );
scope.injectNative( "quit", Quit );
@@ -888,8 +897,8 @@ namespace mongo {
scope.injectNative( "run", RunProgram );
scope.injectNative( "runMongoProgram", RunMongoProgram );
scope.injectNative( "stopMongod", StopMongoProgram );
- scope.injectNative( "stopMongoProgram", StopMongoProgram );
- scope.injectNative( "stopMongoProgramByPid", StopMongoProgramByPid );
+ scope.injectNative( "stopMongoProgram", StopMongoProgram );
+ scope.injectNative( "stopMongoProgramByPid", StopMongoProgramByPid );
scope.injectNative( "rawMongoProgramOutput", RawMongoProgramOutput );
scope.injectNative( "clearRawMongoProgramOutput", ClearRawMongoProgramOutput );
scope.injectNative( "waitProgram" , WaitProgram );
@@ -914,7 +923,7 @@ namespace mongo {
scope.externalSetup();
mongo::shellUtils::installShellUtils( scope );
scope.execSetup(JSFiles::servers);
-
+
if ( !_dbConnect.empty() ) {
uassert( 12513, "connect failed", scope.exec( _dbConnect , "(connect)" , false , true , false ) );
if ( !_dbAuth.empty() ) {
@@ -923,7 +932,7 @@ namespace mongo {
}
}
}
-
+
// connstr, myuris
map< string, set<string> > _allMyUris;
mongo::mutex _allMyUrisMutex("_allMyUrisMutex");
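The restyled wait_for_pid() above is the usual two-platform wait: WaitForSingleObject/GetExitCodeProcess on Windows, waitpid on everything else. A minimal sketch of the POSIX side on its own, with hypothetical naming (not the shell's actual implementation):

    #include <sys/types.h>
    #include <sys/wait.h>

    // Returns true if the process has exited; optionally reports its exit code.
    // block=false maps to WNOHANG, matching the non-blocking poll in killDb().
    bool wait_for_pid_sketch(pid_t pid, bool block = true, int* exit_code = nullptr) {
        int status = 0;
        pid_t ret = waitpid(pid, &status, block ? 0 : WNOHANG);
        if (ret != pid)
            return false;                 // still running (or already reaped)
        if (exit_code && WIFEXITED(status))
            *exit_code = WEXITSTATUS(status);
        return true;
    }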
diff --git a/shell/utils.h b/shell/utils.h
index 0a7c5c3d2f7..03b3f97d7ae 100644
--- a/shell/utils.h
+++ b/shell/utils.h
@@ -21,7 +21,7 @@
#include "../scripting/engine.h"
namespace mongo {
-
+
namespace shellUtils {
extern std::string _dbConnect;
@@ -31,7 +31,7 @@ namespace mongo {
void RecordMyLocation( const char *_argv0 );
void installShellUtils( Scope& scope );
-
+
// Scoped management of mongo program instances. Simple implementation:
// destructor kills all mongod instances created by the shell.
struct MongoProgramScope {
@@ -39,7 +39,7 @@ namespace mongo {
~MongoProgramScope();
};
void KillMongoProgramInstances();
-
+
void initScope( Scope &scope );
void onConnect( DBClientWithCommands &c );
}
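MongoProgramScope, declared above, is a plain RAII guard: constructing one is free, and the destructor (wrapped in DESTRUCTOR_GUARD in the .cpp hunk) tears down every process the shell spawned. The same shape as a generic, self-contained sketch with hypothetical names:

    #include <functional>
    #include <utility>

    struct ScopeGuard {
        explicit ScopeGuard(std::function<void()> f) : f_(std::move(f)) {}
        ~ScopeGuard() {
            // Mirrors DESTRUCTOR_GUARD: cleanup must never throw out of a dtor.
            try { if (f_) f_(); } catch (...) {}
        }
        std::function<void()> f_;
    };

    // Usage: ScopeGuard killAll([] { /* KillMongoProgramInstances(); */ });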
diff --git a/tools/bridge.cpp b/tools/bridge.cpp
index 26c0322bbcb..86dea0a83b8 100644
--- a/tools/bridge.cpp
+++ b/tools/bridge.cpp
@@ -45,7 +45,7 @@ public:
mp_.shutdown();
break;
}
-
+
int oldId = m.header()->id;
if ( m.operation() == dbQuery || m.operation() == dbMsg || m.operation() == dbGetMore ) {
bool exhaust = false;
@@ -63,15 +63,18 @@ public:
if ( qr->cursorId ) {
response.reset();
dest.port().recv( response );
- mp_.reply( m, response ); // m argument is ignored anyway
- } else {
+ mp_.reply( m, response ); // m argument is ignored anyway
+ }
+ else {
exhaust = false;
}
}
- } else {
+ }
+ else {
dest.port().say( m, oldId );
}
- } catch ( ... ) {
+ }
+ catch ( ... ) {
log() << "caught exception in Forwarder, continuing" << endl;
}
}
@@ -94,7 +97,7 @@ public:
auto_ptr< MyListener > listener;
-#if !defined(_WIN32)
+#if !defined(_WIN32)
void cleanup( int sig ) {
ListeningSockets::get()->closeAll();
for ( set<MessagingPort*>::iterator i = ports.begin(); i != ports.end(); i++ )
@@ -136,7 +139,7 @@ void check( bool b ) {
int main( int argc, char **argv ) {
static StaticObserver staticObserver;
-
+
setupSignals();
check( argc == 5 );
@@ -145,9 +148,11 @@ int main( int argc, char **argv ) {
check( i % 2 != 0 );
if ( strcmp( argv[ i ], "--port" ) == 0 ) {
port = strtol( argv[ ++i ], 0, 10 );
- } else if ( strcmp( argv[ i ], "--dest" ) == 0 ) {
+ }
+ else if ( strcmp( argv[ i ], "--dest" ) == 0 ) {
destUri = argv[ ++i ];
- } else {
+ }
+ else {
check( false );
}
}
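The bridge's option loop restyled above pairs each flag with the argument that follows it and aborts on anything unrecognized. The parsing idiom in isolation (flag names kept; check() reduced to a bool result):

    #include <cstdlib>
    #include <cstring>
    #include <string>

    bool parseBridgeArgs(int argc, char** argv, int& port, std::string& destUri) {
        for (int i = 1; i < argc; ++i) {
            if (std::strcmp(argv[i], "--port") == 0 && i + 1 < argc)
                port = std::strtol(argv[++i], nullptr, 10);
            else if (std::strcmp(argv[i], "--dest") == 0 && i + 1 < argc)
                destUri = argv[++i];
            else
                return false;             // unknown flag: mirrors check(false)
        }
        return port != 0 && !destUri.empty();
    }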
diff --git a/tools/bsondump.cpp b/tools/bsondump.cpp
index 131335e3efd..506a4e7d3b9 100644
--- a/tools/bsondump.cpp
+++ b/tools/bsondump.cpp
@@ -35,23 +35,23 @@ class BSONDump : public BSONTool {
enum OutputType { JSON , DEBUG } _type;
public:
-
- BSONDump() : BSONTool( "bsondump", NONE ){
+
+ BSONDump() : BSONTool( "bsondump", NONE ) {
add_options()
- ("type" , po::value<string>()->default_value("json") , "type of output: json,debug" )
- ;
+ ("type" , po::value<string>()->default_value("json") , "type of output: json,debug" )
+ ;
add_hidden_options()
- ("file" , po::value<string>() , ".bson file" )
- ;
+ ("file" , po::value<string>() , ".bson file" )
+ ;
addPositionArg( "file" , 1 );
_noconnection = true;
}
-
+
virtual void printExtraHelp(ostream& out) {
out << "usage: " << _name << " [options] <bson filename>" << endl;
}
-
- virtual int doRun(){
+
+ virtual int doRun() {
{
string t = getParam( "type" );
if ( t == "json" )
@@ -63,64 +63,64 @@ public:
return 1;
}
}
-
+
path root = getParam( "file" );
if ( root == "" ) {
printExtraHelp(cout);
return 1;
}
-
+
processFile( root );
return 0;
}
-
- bool debug( const BSONObj& o , int depth=0){
+
+ bool debug( const BSONObj& o , int depth=0) {
string prefix = "";
- for ( int i=0; i<depth; i++ ){
+ for ( int i=0; i<depth; i++ ) {
prefix += "\t\t\t";
}
-
+
int read = 4;
try {
cout << prefix << "--- new object ---\n";
cout << prefix << "\t size : " << o.objsize() << "\n";
BSONObjIterator i(o);
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
cout << prefix << "\t\t " << e.fieldName() << "\n" << prefix << "\t\t\t type:" << setw(3) << e.type() << " size: " << e.size() << endl;
- if ( ( read + e.size() ) > o.objsize() ){
+ if ( ( read + e.size() ) > o.objsize() ) {
cout << prefix << " SIZE DOES NOT WORK" << endl;
return false;
}
read += e.size();
try {
e.validate();
- if ( e.isABSONObj() ){
+ if ( e.isABSONObj() ) {
if ( ! debug( e.Obj() , depth + 1 ) )
return false;
}
- else if ( e.type() == String && ! isValidUTF8( e.valuestr() ) ){
+ else if ( e.type() == String && ! isValidUTF8( e.valuestr() ) ) {
cout << prefix << "\t\t\t" << "bad utf8 String!" << endl;
}
- else if ( logLevel > 0 ){
+ else if ( logLevel > 0 ) {
cout << prefix << "\t\t\t" << e << endl;
}
-
+
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
cout << prefix << "\t\t\t bad value: " << e.what() << endl;
}
}
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
cout << prefix << "\t" << e.what() << endl;
}
return true;
}
- virtual void gotObject( const BSONObj& o ){
- switch ( _type ){
+ virtual void gotObject( const BSONObj& o ) {
+ switch ( _type ) {
case JSON:
cout << o.jsonString( TenGen ) << endl;
break;
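bsondump's debug() above walks an object element by element, keeping a running byte count and bailing out ("SIZE DOES NOT WORK") whenever an element's declared size would overrun the object's declared size. The same length-framing check, sketched over a raw little-endian length-prefixed buffer rather than real BSON:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Each record: uint32 length (including its own 4 header bytes), then payload.
    bool validateFraming(const uint8_t* buf, size_t total) {
        size_t read = 0;
        while (read + 4 <= total) {
            uint32_t len;
            std::memcpy(&len, buf + read, 4);
            if (len < 4 || read + len > total)
                return false;             // the "SIZE DOES NOT WORK" case
            read += len;
        }
        return read == total;             // no trailing garbage allowed
    }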
diff --git a/tools/dump.cpp b/tools/dump.cpp
index 2c820e44379..4038f099be7 100644
--- a/tools/dump.cpp
+++ b/tools/dump.cpp
@@ -28,16 +28,16 @@ namespace po = boost::program_options;
class Dump : public Tool {
public:
- Dump() : Tool( "dump" , ALL , "*" , "*" , false ){
+ Dump() : Tool( "dump" , ALL , "*" , "*" , false ) {
add_options()
- ("out,o", po::value<string>()->default_value("dump"), "output directory or \"-\" for stdout")
- ("query,q", po::value<string>() , "json query" )
- ("oplog", "Use oplog for point-in-time snapshotting" )
- ;
+ ("out,o", po::value<string>()->default_value("dump"), "output directory or \"-\" for stdout")
+ ("query,q", po::value<string>() , "json query" )
+ ("oplog", "Use oplog for point-in-time snapshotting" )
+ ;
}
// This is a functor that writes a BSONObj to a file
- struct Writer{
+ struct Writer {
Writer(ostream& out, ProgressMeter* m) :_out(out), _m(m) {}
void operator () (const BSONObj& obj) {
@@ -68,11 +68,12 @@ public:
Writer writer(out, m);
// use low-latency "exhaust" mode if going over the network
- if (typeid(connBase) == typeid(DBClientConnection&)){
+ if (typeid(connBase) == typeid(DBClientConnection&)) {
DBClientConnection& conn = static_cast<DBClientConnection&>(connBase);
boost::function<void(const BSONObj&)> castedWriter(writer); // needed for overload resolution
conn.query( castedWriter, coll.c_str() , q , NULL, queryOptions | QueryOption_Exhaust);
- } else {
+ }
+ else {
//This branch should only be taken with DBDirectClient which doesn't support exhaust mode
scoped_ptr<DBClientCursor> cursor(connBase.query( coll.c_str() , q , 0 , 0 , 0 , queryOptions ));
while ( cursor->more() ) {
@@ -83,7 +84,7 @@ public:
void writeCollectionFile( const string coll , path outputFile ) {
cout << "\t" << coll << " to " << outputFile.string() << endl;
-
+
ofstream out;
out.open( outputFile.string().c_str() , ios_base::out | ios_base::binary );
assertStreamGood( 10262 , "couldn't open file" , out );
@@ -107,7 +108,7 @@ public:
create_directories( outdir );
string sns = db + ".system.namespaces";
-
+
auto_ptr<DBClientCursor> cursor = conn( true ).query( sns.c_str() , Query() , 0 , 0 , 0 , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
while ( cursor->more() ) {
BSONObj obj = cursor->next();
@@ -125,9 +126,9 @@ public:
}
}
-
- int run(){
-
+
+ int run() {
+
{
string q = getParam("query");
if ( q.size() )
@@ -137,20 +138,21 @@ public:
string opLogName = "";
unsigned long long opLogStart = 0;
if (hasParam("oplog")) {
- if (hasParam("query") || hasParam("db") || hasParam("collection")){
+ if (hasParam("query") || hasParam("db") || hasParam("collection")) {
cout << "oplog mode is only supported on full dumps" << endl;
return -1;
}
-
+
BSONObj isMaster;
conn("true").simpleCommand("admin", &isMaster, "isMaster");
if (isMaster.hasField("hosts")) { // if connected to replica set member
opLogName = "local.oplog.rs";
- } else {
+ }
+ else {
opLogName = "local.oplog.$main";
- if ( ! isMaster["ismaster"].trueValue() ){
+ if ( ! isMaster["ismaster"].trueValue() ) {
cout << "oplog mode is only supported on master or replica set member" << endl;
return -1;
}
@@ -161,12 +163,12 @@ public:
cout << "No operations in oplog. Please ensure you are connecting to a master." << endl;
return -1;
}
-
+
assert(op["ts"].type() == Timestamp);
opLogStart = op["ts"]._numberLong();
}
-
+
// check if we're outputting to stdout
string out = getParam("out");
@@ -184,7 +186,7 @@ public:
path root( out );
string db = _db;
- if ( db == "*" ){
+ if ( db == "*" ) {
cout << "all dbs" << endl;
auth( "admin" );
@@ -209,7 +211,7 @@ public:
go( db , root / db );
}
- if (!opLogName.empty()){
+ if (!opLogName.empty()) {
BSONObjBuilder b;
b.appendTimestamp("$gt", opLogStart);
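The doCollection() branch restyled above picks the low-latency exhaust path only when the connection really is a network DBClientConnection; DBDirectClient falls through to a plain cursor. The typeid dispatch reduced to a standalone sketch with hypothetical types:

    #include <iostream>
    #include <typeinfo>

    struct ClientBase { virtual ~ClientBase() {} };   // polymorphic, so typeid sees the dynamic type
    struct NetworkClient : ClientBase {};
    struct DirectClient  : ClientBase {};

    void dump(ClientBase& c) {
        if (typeid(c) == typeid(NetworkClient))
            std::cout << "exhaust mode\n";            // streaming, fewer round trips
        else
            std::cout << "plain cursor\n";            // direct client: no exhaust support
    }

    int main() { NetworkClient n; DirectClient d; dump(n); dump(d); }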
diff --git a/tools/export.cpp b/tools/export.cpp
index f6fd3bed4fd..b1b0f5f1ac9 100644
--- a/tools/export.cpp
+++ b/tools/export.cpp
@@ -33,34 +33,34 @@ namespace po = boost::program_options;
class Export : public Tool {
public:
- Export() : Tool( "export" ){
+ Export() : Tool( "export" ) {
addFieldOptions();
add_options()
- ("query,q" , po::value<string>() , "query filter, as a JSON string" )
- ("csv","export to csv instead of json")
- ("out,o", po::value<string>(), "output file; if not specified, stdout is used")
- ("jsonArray", "output to a json array rather than one object per line")
- ;
+ ("query,q" , po::value<string>() , "query filter, as a JSON string" )
+ ("csv","export to csv instead of json")
+ ("out,o", po::value<string>(), "output file; if not specified, stdout is used")
+ ("jsonArray", "output to a json array rather than one object per line")
+ ;
_usesstdout = false;
}
-
- int run(){
+
+ int run() {
string ns;
const bool csv = hasParam( "csv" );
const bool jsonArray = hasParam( "jsonArray" );
ostream *outPtr = &cout;
string outfile = getParam( "out" );
auto_ptr<ofstream> fileStream;
- if ( hasParam( "out" ) ){
+ if ( hasParam( "out" ) ) {
size_t idx = outfile.rfind( "/" );
- if ( idx != string::npos ){
+ if ( idx != string::npos ) {
string dir = outfile.substr( 0 , idx + 1 );
create_directories( dir );
}
ofstream * s = new ofstream( outfile.c_str() , ios_base::out );
fileStream.reset( s );
outPtr = s;
- if ( ! s->good() ){
+ if ( ! s->good() ) {
cerr << "couldn't open [" << outfile << "]" << endl;
return -1;
}
@@ -72,20 +72,21 @@ public:
try {
ns = getNS();
- } catch (...) {
+ }
+ catch (...) {
printHelp(cerr);
return 1;
}
auth();
- if ( hasParam( "fields" ) || csv ){
+ if ( hasParam( "fields" ) || csv ) {
needFields();
fieldsToReturn = &_fieldsObj;
}
- if ( csv && _fields.size() == 0 ){
+ if ( csv && _fields.size() == 0 ) {
cerr << "csv mode requires a field list" << endl;
return -1;
}
@@ -96,15 +97,15 @@ public:
auto_ptr<DBClientCursor> cursor = conn().query( ns.c_str() , q , 0 , 0 , fieldsToReturn , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
- if ( csv ){
- for ( vector<string>::iterator i=_fields.begin(); i != _fields.end(); i++ ){
+ if ( csv ) {
+ for ( vector<string>::iterator i=_fields.begin(); i != _fields.end(); i++ ) {
if ( i != _fields.begin() )
out << ",";
out << *i;
}
out << endl;
}
-
+
if (jsonArray)
out << '[';
@@ -112,12 +113,12 @@ public:
while ( cursor->more() ) {
num++;
BSONObj obj = cursor->next();
- if ( csv ){
- for ( vector<string>::iterator i=_fields.begin(); i != _fields.end(); i++ ){
+ if ( csv ) {
+ for ( vector<string>::iterator i=_fields.begin(); i != _fields.end(); i++ ) {
if ( i != _fields.begin() )
out << ",";
const BSONElement & e = obj.getFieldDotted(i->c_str());
- if ( ! e.eoo() ){
+ if ( ! e.eoo() ) {
out << e.jsonString( Strict , false );
}
}
@@ -136,7 +137,7 @@ public:
if (jsonArray)
out << ']' << endl;
-
+
cerr << "exported " << num << " records" << endl;
return 0;
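Both CSV loops in the export tool use the same join idiom: print a comma before every field except the first, for the header row and for each record alike. On its own, in standard C++ with no BSON involved:

    #include <iostream>
    #include <string>
    #include <vector>

    void printCsvRow(const std::vector<std::string>& fields) {
        for (auto i = fields.begin(); i != fields.end(); ++i) {
            if (i != fields.begin())
                std::cout << ",";
            std::cout << *i;              // note: real CSV output would also need quoting
        }
        std::cout << "\n";
    }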
diff --git a/tools/files.cpp b/tools/files.cpp
index 0d43e08c924..b44e7fe3e62 100644
--- a/tools/files.cpp
+++ b/tools/files.cpp
@@ -33,21 +33,21 @@ namespace po = boost::program_options;
class Files : public Tool {
public:
- Files() : Tool( "files" ){
+ Files() : Tool( "files" ) {
add_options()
- ( "local,l", po::value<string>(), "local filename for put|get (default is to use the same name as 'gridfs filename')")
- ( "type,t", po::value<string>(), "MIME type for put (default is to omit)")
- ( "replace,r", "Remove other files with same name after PUT")
- ;
+ ( "local,l", po::value<string>(), "local filename for put|get (default is to use the same name as 'gridfs filename')")
+ ( "type,t", po::value<string>(), "MIME type for put (default is to omit)")
+ ( "replace,r", "Remove other files with same name after PUT")
+ ;
add_hidden_options()
- ( "command" , po::value<string>() , "command (list|search|put|get)" )
- ( "file" , po::value<string>() , "filename for get|put" )
- ;
+ ( "command" , po::value<string>() , "command (list|search|put|get)" )
+ ( "file" , po::value<string>() , "filename for get|put" )
+ ;
addPositionArg( "command" , 1 );
addPositionArg( "file" , 2 );
}
- virtual void printExtraHelp( ostream & out ){
+ virtual void printExtraHelp( ostream & out ) {
out << "usage: " << _name << " [options] command [gridfs filename]" << endl;
out << "command:" << endl;
out << " one of (list|search|put|get)" << endl;
@@ -60,20 +60,20 @@ public:
out << " delete - delete all files with filename 'gridfs filename'" << endl;
}
- void display( GridFS * grid , BSONObj obj ){
+ void display( GridFS * grid , BSONObj obj ) {
auto_ptr<DBClientCursor> c = grid->list( obj );
- while ( c->more() ){
+ while ( c->more() ) {
BSONObj obj = c->next();
cout
- << obj["filename"].str() << "\t"
- << (long)obj["length"].number()
- << endl;
+ << obj["filename"].str() << "\t"
+ << (long)obj["length"].number()
+ << endl;
}
}
- int run(){
+ int run() {
string cmd = getParam( "command" );
- if ( cmd.size() == 0 ){
+ if ( cmd.size() == 0 ) {
cerr << "ERROR: need command" << endl << endl;
printHelp(cout);
return -1;
@@ -84,7 +84,7 @@ public:
string filename = getParam( "file" );
- if ( cmd == "list" ){
+ if ( cmd == "list" ) {
BSONObjBuilder b;
if ( filename.size() )
b.appendRegex( "filename" , ( (string)"^" + filename ) );
@@ -92,22 +92,22 @@ public:
return 0;
}
- if ( filename.size() == 0 ){
+ if ( filename.size() == 0 ) {
cerr << "ERROR: need a filename" << endl << endl;
printHelp(cout);
return -1;
}
- if ( cmd == "search" ){
+ if ( cmd == "search" ) {
BSONObjBuilder b;
b.appendRegex( "filename" , filename );
display( &g , b.obj() );
return 0;
}
- if ( cmd == "get" ){
+ if ( cmd == "get" ) {
GridFile f = g.findFile( filename );
- if ( ! f.exists() ){
+ if ( ! f.exists() ) {
cerr << "ERROR: file not found" << endl;
return -2;
}
@@ -121,16 +121,16 @@ public:
return 0;
}
- if ( cmd == "put" ){
+ if ( cmd == "put" ) {
const string& infile = getParam("local", filename);
const string& type = getParam("type", "");
BSONObj file = g.storeFile(infile, filename, type);
cout << "added file: " << file << endl;
- if (hasParam("replace")){
+ if (hasParam("replace")) {
auto_ptr<DBClientCursor> cursor = conn().query(_db+".fs.files", BSON("filename" << filename << "_id" << NE << file["_id"] ));
- while (cursor->more()){
+ while (cursor->more()) {
BSONObj o = cursor->nextSafe();
conn().remove(_db+".fs.files", BSON("_id" << o["_id"]));
conn().remove(_db+".fs.chunks", BSON("_id" << o["_id"]));
@@ -144,7 +144,7 @@ public:
return 0;
}
- if ( cmd == "delete" ){
+ if ( cmd == "delete" ) {
g.removeFile(filename);
conn().getLastError();
cout << "done!" << endl;
diff --git a/tools/import.cpp b/tools/import.cpp
index 0d7da9b8101..a3d9166ccf7 100644
--- a/tools/import.cpp
+++ b/tools/import.cpp
@@ -33,7 +33,7 @@ using namespace mongo;
namespace po = boost::program_options;
class Import : public Tool {
-
+
enum Type { JSON , CSV , TSV };
Type _type;
@@ -44,36 +44,36 @@ class Import : public Tool {
bool _doimport;
bool _jsonArray;
vector<string> _upsertFields;
-
- void _append( BSONObjBuilder& b , const string& fieldName , const string& data ){
+
+ void _append( BSONObjBuilder& b , const string& fieldName , const string& data ) {
if ( b.appendAsNumber( fieldName , data ) )
return;
-
+
if ( _ignoreBlanks && data.size() == 0 )
return;
// TODO: other types?
b.append( fieldName , data );
}
-
- BSONObj parseLine( char * line ){
+
+ BSONObj parseLine( char * line ) {
uassert(13289, "Invalid UTF8 character detected", isValidUTF8(line));
- if ( _type == JSON ){
+ if ( _type == JSON ) {
char * end = ( line + strlen( line ) ) - 1;
- while ( isspace(*end) ){
+ while ( isspace(*end) ) {
*end = 0;
end--;
}
return fromjson( line );
}
-
+
BSONObjBuilder b;
unsigned int pos=0;
- while ( line[0] ){
+ while ( line[0] ) {
string name;
- if ( pos < _fields.size() ){
+ if ( pos < _fields.size() ) {
name = _fields[pos];
}
else {
@@ -82,76 +82,81 @@ class Import : public Tool {
name = ss.str();
}
pos++;
-
+
bool done = false;
string data;
char * end;
- if ( _type == CSV && line[0] == '"' ){
+ if ( _type == CSV && line[0] == '"' ) {
line++; //skip first '"'
while (true) {
end = strchr( line , '"' );
- if (!end){
+ if (!end) {
data += line;
done = true;
break;
- } else if (end[1] == '"') {
+ }
+ else if (end[1] == '"') {
// two '"'s get appended as one
data.append(line, end-line+1); //include '"'
line = end+2; //skip both '"'s
- } else if (end[-1] == '\\') {
+ }
+ else if (end[-1] == '\\') {
// "\\\"" gets appended as '"'
data.append(line, end-line-1); //exclude '\\'
data.append("\"");
line = end+1; //skip the '"'
- } else {
+ }
+ else {
data.append(line, end-line);
line = end+2; //skip '"' and ','
break;
}
}
- } else {
+ }
+ else {
end = strstr( line , _sep );
- if ( ! end ){
+ if ( ! end ) {
done = true;
data = string( line );
- } else {
+ }
+ else {
data = string( line , end - line );
line = end+1;
}
}
- if ( _headerLine ){
+ if ( _headerLine ) {
while ( isspace( data[0] ) )
data = data.substr( 1 );
_fields.push_back( data );
}
else
_append( b , name , data );
-
+
if ( done )
break;
}
return b.obj();
}
-
+
public:
- Import() : Tool( "import" ){
+ Import() : Tool( "import" ) {
addFieldOptions();
add_options()
- ("ignoreBlanks","if given, empty fields in csv and tsv will be ignored")
- ("type",po::value<string>() , "type of file to import. default: json (json,csv,tsv)")
- ("file",po::value<string>() , "file to import from; if not specified stdin is used" )
- ("drop", "drop collection first " )
- ("headerline","CSV,TSV only - use first line as headers")
- ("upsert", "insert or update objects that already exist" )
- ("upsertFields", po::value<string>(), "comma-separated fields for the query part of the upsert. You should make sure this is indexed" )
- ("stopOnError", "stop importing at first error rather than continuing" )
- ("jsonArray", "load a json array, not one item per line. Currently limited to 4MB." )
- ;
+ ("ignoreBlanks","if given, empty fields in csv and tsv will be ignored")
+ ("type",po::value<string>() , "type of file to import. default: json (json,csv,tsv)")
+ ("file",po::value<string>() , "file to import from; if not specified stdin is used" )
+ ("drop", "drop collection first " )
+ ("headerline","CSV,TSV only - use first line as headers")
+ ("upsert", "insert or update objects that already exist" )
+ ("upsertFields", po::value<string>(), "comma-separated fields for the query part of the upsert. You should make sure this is indexed" )
+ ("stopOnError", "stop importing at first error rather than continuing" )
+ ("jsonArray", "load a json array, not one item per line. Currently limited to 4MB." )
+ ;
add_hidden_options()
- ("noimport", "don't actually import. useful for benchmarking parser" )
- ;
+ ("noimport", "don't actually import. useful for benchmarking parser" )
+ ;
addPositionArg( "file" , 1 );
_type = JSON;
_ignoreBlanks = false;
@@ -160,8 +165,8 @@ public:
_doimport = true;
_jsonArray = false;
}
-
- int run(){
+
+ int run() {
string filename = getParam( "file" );
long long fileSize = -1;
@@ -169,8 +174,8 @@ public:
ifstream file( filename.c_str() , ios_base::in);
- if ( filename.size() > 0 && filename != "-" ){
- if ( ! exists( filename ) ){
+ if ( filename.size() > 0 && filename != "-" ) {
+ if ( ! exists( filename ) ) {
cerr << "file doesn't exist: " << filename << endl;
return -1;
}
@@ -182,53 +187,55 @@ public:
if (!isMaster()) {
return -1;
}
-
+
string ns;
try {
ns = getNS();
- } catch (...) {
+ }
+ catch (...) {
printHelp(cerr);
return -1;
}
-
+
log(1) << "ns: " << ns << endl;
-
+
auth();
- if ( hasParam( "drop" ) ){
+ if ( hasParam( "drop" ) ) {
cout << "dropping: " << ns << endl;
conn().dropCollection( ns.c_str() );
}
- if ( hasParam( "ignoreBlanks" ) ){
+ if ( hasParam( "ignoreBlanks" ) ) {
_ignoreBlanks = true;
}
- if ( hasParam( "upsert" ) || hasParam( "upsertFields" )){
+ if ( hasParam( "upsert" ) || hasParam( "upsertFields" )) {
_upsert = true;
string uf = getParam("upsertFields");
- if (uf.empty()){
+ if (uf.empty()) {
_upsertFields.push_back("_id");
- } else {
+ }
+ else {
StringSplitter(uf.c_str(), ",").split(_upsertFields);
}
}
- if ( hasParam( "noimport" ) ){
+ if ( hasParam( "noimport" ) ) {
_doimport = false;
}
- if ( hasParam( "type" ) ){
+ if ( hasParam( "type" ) ) {
string type = getParam( "type" );
if ( type == "json" )
_type = JSON;
- else if ( type == "csv" ){
+ else if ( type == "csv" ) {
_type = CSV;
_sep = ",";
}
- else if ( type == "tsv" ){
+ else if ( type == "tsv" ) {
_type = TSV;
_sep = "\t";
}
@@ -237,21 +244,21 @@ public:
return -1;
}
}
-
- if ( _type == CSV || _type == TSV ){
+
+ if ( _type == CSV || _type == TSV ) {
_headerLine = hasParam( "headerline" );
if ( ! _headerLine )
needFields();
}
- if (_type == JSON && hasParam("jsonArray")){
+ if (_type == JSON && hasParam("jsonArray")) {
_jsonArray = true;
}
int errors = 0;
-
+
int num = 0;
-
+
time_t start = time(0);
log(1) << "filesize: " << fileSize << endl;
@@ -259,37 +266,39 @@ public:
const int BUF_SIZE = 1024 * 1024 * 4;
boost::scoped_array<char> line(new char[BUF_SIZE+2]);
char * buf = line.get();
- while ( _jsonArray || in->rdstate() == 0 ){
- if (_jsonArray){
- if (buf == line.get()){ //first pass
+ while ( _jsonArray || in->rdstate() == 0 ) {
+ if (_jsonArray) {
+ if (buf == line.get()) { //first pass
in->read(buf, BUF_SIZE);
uassert(13295, "JSONArray file too large", (in->rdstate() & ios_base::eofbit));
buf[ in->gcount() ] = '\0';
}
- } else {
+ }
+ else {
buf = line.get();
in->getline( buf , BUF_SIZE );
log(1) << "got line:" << buf << endl;
}
uassert( 10263 , "unknown error reading file" ,
- (!(in->rdstate() & ios_base::badbit)) &&
- (!(in->rdstate() & ios_base::failbit) || (in->rdstate() & ios_base::eofbit)) );
+ (!(in->rdstate() & ios_base::badbit)) &&
+ (!(in->rdstate() & ios_base::failbit) || (in->rdstate() & ios_base::eofbit)) );
int len = 0;
- if (strncmp("\xEF\xBB\xBF", buf, 3) == 0){ // UTF-8 BOM (notepad is stupid)
+ if (strncmp("\xEF\xBB\xBF", buf, 3) == 0) { // UTF-8 BOM (notepad is stupid)
buf += 3;
len += 3;
}
- if (_jsonArray){
+ if (_jsonArray) {
while (buf[0] != '{' && buf[0] != '\0') {
len++;
buf++;
}
if (buf[0] == '\0')
break;
- } else {
- while (isspace( buf[0] )){
+ }
+ else {
+ while (isspace( buf[0] )) {
len++;
buf++;
}
@@ -300,24 +309,26 @@ public:
try {
BSONObj o;
- if (_jsonArray){
+ if (_jsonArray) {
int jslen;
o = fromjson(buf, &jslen);
len += jslen;
buf += jslen;
- } else {
+ }
+ else {
o = parseLine( buf );
}
- if ( _headerLine ){
+ if ( _headerLine ) {
_headerLine = false;
- } else if (_doimport) {
+ }
+ else if (_doimport) {
bool doUpsert = _upsert;
BSONObjBuilder b;
- if (_upsert){
- for (vector<string>::const_iterator it=_upsertFields.begin(), end=_upsertFields.end(); it!=end; ++it){
+ if (_upsert) {
+ for (vector<string>::const_iterator it=_upsertFields.begin(), end=_upsertFields.end(); it!=end; ++it) {
BSONElement e = o.getFieldDotted(it->c_str());
- if (e.eoo()){
+ if (e.eoo()) {
doUpsert = false;
break;
}
@@ -325,25 +336,26 @@ public:
}
}
- if (doUpsert){
+ if (doUpsert) {
conn().update(ns, Query(b.obj()), o, true);
- } else {
+ }
+ else {
conn().insert( ns.c_str() , o );
}
}
num++;
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
cout << "exception:" << e.what() << endl;
cout << buf << endl;
errors++;
-
+
if (hasParam("stopOnError") || _jsonArray)
break;
}
- if ( pm.hit( len + 1 ) ){
+ if ( pm.hit( len + 1 ) ) {
cout << "\t\t\t" << num << "\t" << ( num / ( time(0) - start ) ) << "/second" << endl;
}
}
@@ -351,10 +363,10 @@ public:
cout << "imported " << num << " objects" << endl;
conn().getLastError();
-
+
if ( errors == 0 )
return 0;
-
+
cerr << "encountered " << errors << " error" << ( errors == 1 ? "" : "s" ) << endl;
return -1;
}
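The restyled CSV branch of parseLine() is the interesting part of this hunk: inside a quoted field, "" is an escaped quote, \" is also treated as an escaped quote, and a lone " closes the field. A standalone sketch of that state machine over a std::string (simplified to the doubled-quote escape only; the original also handles the backslash form):

    #include <cstddef>
    #include <string>

    // Parses a field starting just after the opening '"'; returns the unescaped
    // data and advances pos past the closing quote plus one separator char,
    // matching the original's line = end + 2.
    std::string parseQuotedField(const std::string& line, size_t& pos) {
        std::string data;
        while (pos < line.size()) {
            if (line[pos] != '"') { data += line[pos++]; continue; }
            if (pos + 1 < line.size() && line[pos + 1] == '"') {
                data += '"';              // "" collapses to one literal quote
                pos += 2;
            }
            else {
                pos += 2;                 // skip the closing '"' and the ','
                break;
            }
        }
        return data;
    }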
diff --git a/tools/restore.cpp b/tools/restore.cpp
index 368efeb0890..9a18c00e93a 100644
--- a/tools/restore.cpp
+++ b/tools/restore.cpp
@@ -36,20 +36,20 @@ namespace {
class Restore : public BSONTool {
public:
-
+
bool _drop;
string _curns;
string _curdb;
- Restore() : BSONTool( "restore" ) , _drop(false){
+ Restore() : BSONTool( "restore" ) , _drop(false) {
add_options()
- ("drop" , "drop each collection before import" )
- ("oplogReplay" , "replay oplog for point-in-time restore")
- ;
+ ("drop" , "drop each collection before import" )
+ ("oplogReplay" , "replay oplog for point-in-time restore")
+ ;
add_hidden_options()
- ("dir", po::value<string>()->default_value("dump"), "directory to restore from")
- ("indexesLast" , "wait to add indexes (now default)") // left in for backwards compatibility
- ;
+ ("dir", po::value<string>()->default_value("dump"), "directory to restore from")
+ ("indexesLast" , "wait to add indexes (now default)") // left in for backwards compatibility
+ ;
addPositionArg("dir", 1);
}
@@ -57,7 +57,7 @@ public:
out << "usage: " << _name << " [options] [directory or filename to restore from]" << endl;
}
- virtual int doRun(){
+ virtual int doRun() {
auth();
path root = getParam("dir");
@@ -65,32 +65,32 @@ public:
if (!isMaster()) {
return -1;
}
-
+
_drop = hasParam( "drop" );
bool doOplog = hasParam( "oplogReplay" );
- if (doOplog){
+ if (doOplog) {
// fail early if errors
- if (_db != ""){
+ if (_db != "") {
cout << "Can only replay oplog on full restore" << endl;
return -1;
}
- if ( ! exists(root / "oplog.bson") ){
+ if ( ! exists(root / "oplog.bson") ) {
cout << "No oplog file to replay. Make sure you run mongodump with --oplog." << endl;
return -1;
}
BSONObj out;
- if (! conn().simpleCommand("admin", &out, "buildinfo")){
+ if (! conn().simpleCommand("admin", &out, "buildinfo")) {
cout << "buildinfo command failed: " << out["errmsg"].String() << endl;
return -1;
}
StringData version = out["version"].valuestr();
- if (versionCmp(version, "1.7.4-pre-") < 0){
+ if (versionCmp(version, "1.7.4-pre-") < 0) {
cout << "Can only replay oplog to server version >= 1.7.4" << endl;
return -1;
}
@@ -108,7 +108,7 @@ public:
drillDown(root, _db != "", _coll != "", true);
conn().getLastError();
- if (doOplog){
+ if (doOplog) {
out() << "\t Replaying oplog" << endl;
_curns = OPLOG_SENTINEL;
processFile( root / "oplog.bson" );
@@ -174,7 +174,7 @@ public:
log() << root.string() << endl;
- if ( root.leaf() == "system.profile.bson" ){
+ if ( root.leaf() == "system.profile.bson" ) {
log() << "\t skipping" << endl;
return;
}
@@ -182,23 +182,24 @@ public:
string ns;
if (use_db) {
ns += _db;
- }
+ }
else {
string dir = root.branch_path().string();
if ( dir.find( "/" ) == string::npos )
ns += dir;
else
ns += dir.substr( dir.find_last_of( "/" ) + 1 );
-
+
if ( ns.size() == 0 )
ns = "test";
}
-
+
assert( ns.size() );
if (use_coll) {
ns += "." + _coll;
- } else {
+ }
+ else {
string l = root.leaf();
l = l.substr( 0 , l.find_last_of( "." ) );
ns += "." + l;
@@ -206,17 +207,17 @@ public:
out() << "\t going into namespace [" << ns << "]" << endl;
- if ( _drop ){
+ if ( _drop ) {
out() << "\t dropping" << endl;
conn().dropCollection( ns );
}
-
+
_curns = ns.c_str();
_curdb = NamespaceString(_curns).db;
processFile( root );
}
- virtual void gotObject( const BSONObj& obj ){
+ virtual void gotObject( const BSONObj& obj ) {
if (_curns == OPLOG_SENTINEL) { // intentional ptr compare
if (obj["op"].valuestr()[0] == 'n') // skip no-ops
return;
@@ -227,7 +228,7 @@ public:
BSONObj cmd = BSON( "applyOps" << BSON_ARRAY( obj ) );
BSONObj out;
conn().runCommand(db, cmd, out);
- }
+ }
else if ( endsWith( _curns.c_str() , ".system.indexes" )) {
/* Index construction is slightly special: when restoring
indexes, we must ensure that the ns attribute is
@@ -237,13 +238,14 @@ public:
data. */
BSONObjBuilder bo;
BSONObjIterator i(obj);
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
if (strcmp(e.fieldName(), "ns") == 0) {
NamespaceString n(e.String());
string s = _curdb + "." + n.coll;
bo.append("ns", s);
- } else {
+ }
+ else {
bo.append(e);
}
}
@@ -257,13 +259,13 @@ public:
cerr << "To resume index restoration, run " << _name << " on file" << _fileName << " manually." << endl;
abort();
}
- }
+ }
else {
conn().insert( _curns , obj );
}
}
-
+
};
int main( int argc , char ** argv ) {
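When restore replays system.indexes entries, it copies every field of the index spec verbatim except "ns", which must be rebuilt against the database actually being restored into. The rewrite-one-key-copy-the-rest pattern, sketched over a plain map instead of a BSONObjBuilder:

    #include <map>
    #include <string>

    std::map<std::string, std::string>
    rewriteNs(const std::map<std::string, std::string>& spec,
              const std::string& targetDb) {
        std::map<std::string, std::string> out;
        for (const auto& kv : spec) {
            if (kv.first == "ns") {
                // keep the collection part, swap in the target db
                std::string coll = kv.second.substr(kv.second.find('.') + 1);
                out["ns"] = targetDb + "." + coll;
            }
            else {
                out.insert(kv);           // all other fields pass through untouched
            }
        }
        return out;
    }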
diff --git a/tools/sniffer.cpp b/tools/sniffer.cpp
index 52b2eba9bd3..0422f87399e 100644
--- a/tools/sniffer.cpp
+++ b/tools/sniffer.cpp
@@ -157,11 +157,11 @@ map< Connection, map< long long, long long > > mapCursor;
void processMessage( Connection& c , Message& d );
-void got_packet(u_char *args, const struct pcap_pkthdr *header, const u_char *packet){
+void got_packet(u_char *args, const struct pcap_pkthdr *header, const u_char *packet) {
const struct sniff_ip* ip = (struct sniff_ip*)(packet + captureHeaderSize);
int size_ip = IP_HL(ip)*4;
- if ( size_ip < 20 ){
+ if ( size_ip < 20 ) {
cerr << "*** Invalid IP header length: " << size_ip << " bytes" << endl;
return;
}
@@ -170,13 +170,13 @@ void got_packet(u_char *args, const struct pcap_pkthdr *header, const u_char *pa
const struct sniff_tcp* tcp = (struct sniff_tcp*)(packet + captureHeaderSize + size_ip);
int size_tcp = TH_OFF(tcp)*4;
- if (size_tcp < 20){
+ if (size_tcp < 20) {
cerr << "*** Invalid TCP header length: " << size_tcp << " bytes" << endl;
return;
}
if ( ! ( serverPorts.count( ntohs( tcp->th_sport ) ) ||
- serverPorts.count( ntohs( tcp->th_dport ) ) ) ){
+ serverPorts.count( ntohs( tcp->th_dport ) ) ) ) {
return;
}
@@ -199,7 +199,8 @@ void got_packet(u_char *args, const struct pcap_pkthdr *header, const u_char *pa
if ( expectedSeq[ c ] != ntohl( tcp->th_seq ) ) {
cerr << "Warning: sequence # mismatch, there may be dropped packets" << endl;
}
- } else {
+ }
+ else {
seen[ c ] = true;
}
@@ -223,7 +224,8 @@ void got_packet(u_char *args, const struct pcap_pkthdr *header, const u_char *pa
messageBuilder[ c ]->appendBuf( (void*)payload, size_payload );
return;
}
- } else {
+ }
+ else {
bytesRemainingInMessage[ c ] -= size_payload;
messageBuilder[ c ]->appendBuf( (void*)payload, size_payload );
if ( bytesRemainingInMessage[ c ] < 0 ) {
@@ -264,70 +266,71 @@ public:
}
};
-void processMessage( Connection& c , Message& m ){
+void processMessage( Connection& c , Message& m ) {
AuditingDbMessage d(m);
-
+
if ( m.operation() == mongo::opReply )
out() << " - " << (unsigned)m.header()->responseTo;
out() << endl;
try {
- switch( m.operation() ){
- case mongo::opReply:{
- mongo::QueryResult* r = (mongo::QueryResult*)m.singleData();
- out() << "\treply" << " n:" << r->nReturned << " cursorId: " << r->cursorId << endl;
- if ( r->nReturned ){
- mongo::BSONObj o( r->data() , 0 );
- out() << "\t" << o << endl;
- }
- break;
- }
- case mongo::dbQuery:{
- mongo::QueryMessage q(d);
- out() << "\tquery: " << q.query << " ntoreturn: " << q.ntoreturn << " ntoskip: " << q.ntoskip << endl;
- break;
+ switch( m.operation() ) {
+ case mongo::opReply: {
+ mongo::QueryResult* r = (mongo::QueryResult*)m.singleData();
+ out() << "\treply" << " n:" << r->nReturned << " cursorId: " << r->cursorId << endl;
+ if ( r->nReturned ) {
+ mongo::BSONObj o( r->data() , 0 );
+ out() << "\t" << o << endl;
}
- case mongo::dbUpdate:{
- int flags = d.pullInt();
- BSONObj q = d.nextJsObj( "update" );
- BSONObj o = d.nextJsObj( "update" );
- out() << "\tupdate flags:" << flags << " q:" << q << " o:" << o << endl;
- break;
- }
- case mongo::dbInsert:{
- out() << "\tinsert: " << d.nextJsObj( "insert" ) << endl;
- while ( d.moreJSObjs() ) {
- out() << "\t\t" << d.nextJsObj( "insert" ) << endl;
- }
- break;
- }
- case mongo::dbGetMore:{
- int nToReturn = d.pullInt();
- long long cursorId = d.pullInt64();
- out() << "\tgetMore nToReturn: " << nToReturn << " cursorId: " << cursorId << endl;
- break;
- }
- case mongo::dbDelete:{
- int flags = d.pullInt();
- BSONObj q = d.nextJsObj( "delete" );
- out() << "\tdelete flags: " << flags << " q: " << q << endl;
- break;
- }
- case mongo::dbKillCursors:{
- int *x = (int *) m.singleData()->_data;
- x++; // reserved
- int n = *x;
- out() << "\tkillCursors n: " << n << endl;
- break;
+ break;
+ }
+ case mongo::dbQuery: {
+ mongo::QueryMessage q(d);
+ out() << "\tquery: " << q.query << " ntoreturn: " << q.ntoreturn << " ntoskip: " << q.ntoskip << endl;
+ break;
+ }
+ case mongo::dbUpdate: {
+ int flags = d.pullInt();
+ BSONObj q = d.nextJsObj( "update" );
+ BSONObj o = d.nextJsObj( "update" );
+ out() << "\tupdate flags:" << flags << " q:" << q << " o:" << o << endl;
+ break;
+ }
+ case mongo::dbInsert: {
+ out() << "\tinsert: " << d.nextJsObj( "insert" ) << endl;
+ while ( d.moreJSObjs() ) {
+ out() << "\t\t" << d.nextJsObj( "insert" ) << endl;
}
- default:
- cerr << "*** CANNOT HANDLE TYPE: " << m.operation() << endl;
+ break;
+ }
+ case mongo::dbGetMore: {
+ int nToReturn = d.pullInt();
+ long long cursorId = d.pullInt64();
+ out() << "\tgetMore nToReturn: " << nToReturn << " cursorId: " << cursorId << endl;
+ break;
+ }
+ case mongo::dbDelete: {
+ int flags = d.pullInt();
+ BSONObj q = d.nextJsObj( "delete" );
+ out() << "\tdelete flags: " << flags << " q: " << q << endl;
+ break;
}
- } catch ( ... ) {
+ case mongo::dbKillCursors: {
+ int *x = (int *) m.singleData()->_data;
+ x++; // reserved
+ int n = *x;
+ out() << "\tkillCursors n: " << n << endl;
+ break;
+ }
+ default:
+ cerr << "*** CANNOT HANDLE TYPE: " << m.operation() << endl;
+ }
+ }
+ catch ( ... ) {
cerr << "Error parsing message for operation: " << m.operation() << endl;
}
-
-
+
+
if ( !forwardAddress.empty() ) {
if ( m.operation() != mongo::opReply ) {
boost::shared_ptr<DBClientConnection> conn = forwarder[ c ];
@@ -353,10 +356,12 @@ void processMessage( Connection& c , Message& m ){
}
}
lastCursor[ c ] = 0;
- } else {
+ }
+ else {
conn->port().say( m );
}
- } else {
+ }
+ else {
Connection r = c.reverse();
long long myCursor = lastCursor[ r ];
QueryResult *qr = (QueryResult *) m.singleData();
@@ -375,7 +380,7 @@ void processMessage( Connection& c , Message& m ){
}
}
-void processDiagLog( const char * file ){
+void processDiagLog( const char * file ) {
Connection c;
MemoryMappedFile f;
long length;
@@ -385,45 +390,45 @@ void processDiagLog( const char * file ){
length = (long) L;
assert( root );
assert( length > 0 );
-
+
char * pos = root;
long read = 0;
- while ( read < length ){
+ while ( read < length ) {
Message m(pos,false);
int len = m.header()->len;
DbMessage d(m);
cout << len << " " << d.getns() << endl;
-
+
processMessage( c , m );
read += len;
pos += len;
}
-
+
f.close();
}
void usage() {
cout <<
- "Usage: mongosniff [--help] [--forward host:port] [--source (NET <interface> | (FILE | DIAGLOG) <filename>)] [<port0> <port1> ... ]\n"
- "--forward Forward all parsed request messages to mongod instance at \n"
- " specified host:port\n"
- "--source Source of traffic to sniff, either a network interface or a\n"
- " file containing previously captured packets in pcap format,\n"
- " or a file containing output from mongod's --diaglog option.\n"
- " If no source is specified, mongosniff will attempt to sniff\n"
- " from one of the machine's network interfaces.\n"
- "--objcheck Log hex representation of invalid BSON objects and nothing\n"
- " else. Spurious messages about invalid objects may result\n"
- " when there are dropped tcp packets.\n"
- "<port0>... These parameters are used to filter sniffing. By default, \n"
- " only port 27017 is sniffed.\n"
- "--help Print this help message.\n"
- << endl;
+ "Usage: mongosniff [--help] [--forward host:port] [--source (NET <interface> | (FILE | DIAGLOG) <filename>)] [<port0> <port1> ... ]\n"
+ "--forward Forward all parsed request messages to mongod instance at \n"
+ " specified host:port\n"
+ "--source Source of traffic to sniff, either a network interface or a\n"
+ " file containing previously captured packets in pcap format,\n"
+ " or a file containing output from mongod's --diaglog option.\n"
+ " If no source is specified, mongosniff will attempt to sniff\n"
+ " from one of the machine's network interfaces.\n"
+ "--objcheck Log hex representation of invalid BSON objects and nothing\n"
+ " else. Spurious messages about invalid objects may result\n"
+ " when there are dropped tcp packets.\n"
+ "<port0>... These parameters are used to filter sniffing. By default, \n"
+ " only port 27017 is sniffed.\n"
+ "--help Print this help message.\n"
+ << endl;
}
-int main(int argc, char **argv){
+int main(int argc, char **argv) {
stringstream nullStream;
nullStream.clear(ios::failbit);
@@ -435,7 +440,7 @@ int main(int argc, char **argv){
struct bpf_program fp;
bpf_u_int32 mask;
bpf_u_int32 net;
-
+
bool source = false;
bool replay = false;
bool diaglog = false;
@@ -451,10 +456,10 @@ int main(int argc, char **argv){
if ( arg == string( "--help" ) ) {
usage();
return 0;
- }
+ }
else if ( arg == string( "--forward" ) ) {
forwardAddress = args[ ++i ];
- }
+ }
else if ( arg == string( "--source" ) ) {
uassert( 10266 , "can't use --source twice" , source == false );
uassert( 10267 , "source needs more args" , args.size() > i + 2);
@@ -474,21 +479,22 @@ int main(int argc, char **argv){
serverPorts.insert( atoi( args[ i ] ) );
}
}
- } catch ( ... ) {
+ }
+ catch ( ... ) {
usage();
return -1;
}
if ( !serverPorts.size() )
serverPorts.insert( 27017 );
-
- if ( diaglog ){
+
+ if ( diaglog ) {
processDiagLog( file );
return 0;
}
- else if ( replay ){
+ else if ( replay ) {
handle = pcap_open_offline(file, errbuf);
- if ( ! handle ){
+ if ( ! handle ) {
cerr << "error opening capture file!" << endl;
return -1;
}
@@ -502,18 +508,18 @@ int main(int argc, char **argv){
}
cout << "found device: " << dev << endl;
}
- if (pcap_lookupnet(dev, &net, &mask, errbuf) == -1){
+ if (pcap_lookupnet(dev, &net, &mask, errbuf) == -1) {
cerr << "can't get netmask: " << errbuf << endl;
return -1;
}
handle = pcap_open_live(dev, SNAP_LEN, 1, 1000, errbuf);
- if ( ! handle ){
+ if ( ! handle ) {
cerr << "error opening device: " << errbuf << endl;
return -1;
}
- }
+ }
- switch ( pcap_datalink( handle ) ){
+ switch ( pcap_datalink( handle ) ) {
case DLT_EN10MB:
captureHeaderSize = 14;
break;
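got_packet() in the sniffer rejects any frame whose IP or TCP header length decodes to fewer than 20 bytes; both fields count 32-bit words, hence the *4. The bounds check in isolation, with plain parameters standing in for the IP_HL/TH_OFF macro extractions:

    #include <cstdint>

    // ihl_words: 4-bit header-length field from the IP header (units of 4 bytes)
    // tcp_off_words: 4-bit data-offset field from the TCP header (same units)
    bool headersSane(uint8_t ihl_words, uint8_t tcp_off_words) {
        int size_ip  = ihl_words * 4;
        int size_tcp = tcp_off_words * 4;
        return size_ip >= 20 && size_tcp >= 20;   // minimum legal header sizes
    }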
diff --git a/tools/stat.cpp b/tools/stat.cpp
index f0618a74bce..5d00e508ec3 100644
--- a/tools/stat.cpp
+++ b/tools/stat.cpp
@@ -32,37 +32,37 @@
namespace po = boost::program_options;
namespace mongo {
-
+
class Stat : public Tool {
public:
- Stat() : Tool( "stat" , REMOTE_SERVER , "admin" ){
+ Stat() : Tool( "stat" , REMOTE_SERVER , "admin" ) {
_sleep = 1;
_http = false;
_many = false;
-
+
add_hidden_options()
- ( "sleep" , po::value<int>() , "time to sleep between calls" )
- ;
+ ( "sleep" , po::value<int>() , "time to sleep between calls" )
+ ;
add_options()
- ("noheaders", "don't output column names")
- ("rowcount,n", po::value<int>()->default_value(0), "number of stats lines to print (0 for indefinite)")
- ("http", "use http instead of raw db connection")
- ("discover" , "discover nodes and display stats for all" )
- ("all" , "all optional fields" )
- ;
+ ("noheaders", "don't output column names")
+ ("rowcount,n", po::value<int>()->default_value(0), "number of stats lines to print (0 for indefinite)")
+ ("http", "use http instead of raw db connection")
+ ("discover" , "discover nodes and display stats for all" )
+ ("all" , "all optional fields" )
+ ;
addPositionArg( "sleep" , 1 );
_autoreconnect = true;
}
- virtual void printExtraHelp( ostream & out ){
+ virtual void printExtraHelp( ostream & out ) {
out << "usage: " << _name << " [options] [sleep time]" << endl;
out << "sleep time: time to wait (in seconds) between calls" << endl;
}
- virtual void printExtraHelpAfter( ostream & out ){
+ virtual void printExtraHelpAfter( ostream & out ) {
out << "\n";
out << " Fields\n";
out << " inserts \t- # of inserts per second\n";
@@ -85,12 +85,12 @@ namespace mongo {
out << " conn \t- number of open connections\n";
}
-
- BSONObj stats(){
- if ( _http ){
+
+ BSONObj stats() {
+ if ( _http ) {
HttpClient c;
HttpClient::Result r;
-
+
string url;
{
stringstream ss;
@@ -101,36 +101,36 @@ namespace mongo {
url = ss.str();
}
- if ( c.get( url , &r ) != 200 ){
+ if ( c.get( url , &r ) != 200 ) {
cout << "error (http): " << r.getEntireResponse() << endl;
return BSONObj();
}
-
+
BSONObj x = fromjson( r.getBody() );
BSONElement e = x["serverStatus"];
- if ( e.type() != Object ){
+ if ( e.type() != Object ) {
cout << "BROKEN: " << x << endl;
return BSONObj();
}
return e.embeddedObjectUserCheck();
}
BSONObj out;
- if ( ! conn().simpleCommand( _db , &out , "serverStatus" ) ){
+ if ( ! conn().simpleCommand( _db , &out , "serverStatus" ) ) {
cout << "error: " << out << endl;
return BSONObj();
}
return out.getOwned();
}
- double diff( const string& name , const BSONObj& a , const BSONObj& b ){
+ double diff( const string& name , const BSONObj& a , const BSONObj& b ) {
BSONElement x = a.getFieldDotted( name.c_str() );
BSONElement y = b.getFieldDotted( name.c_str() );
if ( ! x.isNumber() || ! y.isNumber() )
return -1;
return ( y.number() - x.number() ) / _sleep;
}
-
- double percent( const char * outof , const char * val , const BSONObj& a , const BSONObj& b ){
+
+ double percent( const char * outof , const char * val , const BSONObj& a , const BSONObj& b ) {
double x = ( b.getFieldDotted( val ).number() - a.getFieldDotted( val ).number() );
double y = ( b.getFieldDotted( outof ).number() - a.getFieldDotted( outof ).number() );
if ( y == 0 )
@@ -141,20 +141,20 @@ namespace mongo {
}
template<typename T>
- void _append( BSONObjBuilder& result , const string& name , unsigned width , const T& t ){
+ void _append( BSONObjBuilder& result , const string& name , unsigned width , const T& t ) {
if ( name.size() > width )
width = name.size();
result.append( name , BSON( "width" << (int)width << "data" << t ) );
}
-
- void _appendMem( BSONObjBuilder& result , const string& name , unsigned width , double sz ){
+
+ void _appendMem( BSONObjBuilder& result , const string& name , unsigned width , double sz ) {
string unit = "m";
- if ( sz > 1024 ){
+ if ( sz > 1024 ) {
unit = "g";
sz /= 1024;
}
- if ( sz > 1024 ){
+ if ( sz > 1024 ) {
string s = str::stream() << (int)sz << unit;
_append( result , name , width , s );
return;
@@ -164,24 +164,24 @@ namespace mongo {
ss << setprecision(3) << sz << unit;
_append( result , name , width , ss.str() );
}
-
- void _appendNet( BSONObjBuilder& result , const string& name , double diff ){
+
+ void _appendNet( BSONObjBuilder& result , const string& name , double diff ) {
// I think 1000 is correct for megabit, but I've seen conflicting things (ERH 11/2010)
const double div = 1000;
-
+
string unit = "b";
- if ( diff >= div ){
+ if ( diff >= div ) {
unit = "k";
diff /= div;
}
-
- if ( diff >= div ){
+
+ if ( diff >= div ) {
unit = "m";
diff /= div;
}
- if ( diff >= div ){
+ if ( diff >= div ) {
unit = "g";
diff /= div;
}
@@ -193,62 +193,62 @@ namespace mongo {
/**
* BSON( <field> -> BSON( width : ### , data : XXX ) )
*/
- BSONObj doRow( const BSONObj& a , const BSONObj& b ){
+ BSONObj doRow( const BSONObj& a , const BSONObj& b ) {
BSONObjBuilder result;
- if ( b["opcounters"].type() == Object ){
+ if ( b["opcounters"].type() == Object ) {
BSONObj ax = a["opcounters"].embeddedObject();
BSONObj bx = b["opcounters"].embeddedObject();
-
+
BSONObj ar = a["opcountersRepl"].isABSONObj() ? a["opcountersRepl"].embeddedObject() : BSONObj();
BSONObj br = b["opcountersRepl"].isABSONObj() ? b["opcountersRepl"].embeddedObject() : BSONObj();
-
+
BSONObjIterator i( bx );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
- if ( ar.isEmpty() || br.isEmpty() ){
+ if ( ar.isEmpty() || br.isEmpty() ) {
_append( result , e.fieldName() , 6 , (int)diff( e.fieldName() , ax , bx ) );
}
else {
string f = e.fieldName();
-
+
int m = (int)diff( f , ax , bx );
int r = (int)diff( f , ar , br );
-
+
string myout;
- if ( f == "command" ){
+ if ( f == "command" ) {
myout = str::stream() << m << "|" << r;
}
- else if ( f == "getmore" ){
+ else if ( f == "getmore" ) {
myout = str::stream() << m;
}
- else if ( m && r ){
+ else if ( m && r ) {
// this is weird...
myout = str::stream() << m << "|" << r;
}
- else if ( m ){
+ else if ( m ) {
myout = str::stream() << m;
}
- else if ( r ){
+ else if ( r ) {
myout = str::stream() << "*" << r;
}
else {
myout = "*0";
}
-
+
_append( result , f , 6 , myout );
}
}
}
-
- if ( b["backgroundFlushing"].type() == Object ){
+
+ if ( b["backgroundFlushing"].type() == Object ) {
BSONObj ax = a["backgroundFlushing"].embeddedObject();
BSONObj bx = b["backgroundFlushing"].embeddedObject();
_append( result , "flushes" , 6 , (int)diff( "flushes" , ax , bx ) );
}
- if ( b.getFieldDotted("mem.supported").trueValue() ){
+ if ( b.getFieldDotted("mem.supported").trueValue() ) {
BSONObj bx = b["mem"].embeddedObject();
BSONObjIterator i( bx );
_appendMem( result , "mapped" , 6 , bx["mapped"].numberInt() );
@@ -259,17 +259,17 @@ namespace mongo {
_appendMem( result , "non-mapped" , 6 , bx["virtual"].numberInt() - bx["mapped"].numberInt() );
}
- if ( b["extra_info"].type() == Object ){
+ if ( b["extra_info"].type() == Object ) {
BSONObj ax = a["extra_info"].embeddedObject();
BSONObj bx = b["extra_info"].embeddedObject();
if ( ax["page_faults"].type() || ax["page_faults"].type() )
_append( result , "faults" , 6 , (int)diff( "page_faults" , ax , bx ) );
}
-
+
_append( result , "locked %" , 8 , percent( "globalLock.totalTime" , "globalLock.lockTime" , a , b ) );
_append( result , "idx miss %" , 8 , percent( "indexCounters.btree.accesses" , "indexCounters.btree.misses" , a , b ) );
- if ( b.getFieldDotted( "globalLock.currentQueue" ).type() == Object ){
+ if ( b.getFieldDotted( "globalLock.currentQueue" ).type() == Object ) {
int r = b.getFieldDotted( "globalLock.currentQueue.readers" ).numberInt();
int w = b.getFieldDotted( "globalLock.currentQueue.writers" ).numberInt();
stringstream temp;
@@ -277,15 +277,15 @@ namespace mongo {
_append( result , "qr|qw" , 9 , temp.str() );
}
- if ( b.getFieldDotted( "globalLock.activeClients" ).type() == Object ){
+ if ( b.getFieldDotted( "globalLock.activeClients" ).type() == Object ) {
int r = b.getFieldDotted( "globalLock.activeClients.readers" ).numberInt();
int w = b.getFieldDotted( "globalLock.activeClients.writers" ).numberInt();
stringstream temp;
temp << r << "|" << w;
_append( result , "ar|aw" , 7 , temp.str() );
}
-
- if ( b["network"].isABSONObj() ){
+
+ if ( b["network"].isABSONObj() ) {
BSONObj ax = a["network"].embeddedObject();
BSONObj bx = b["network"].embeddedObject();
_appendNet( result , "netIn" , diff( "bytesIn" , ax , bx ) );
@@ -294,14 +294,14 @@ namespace mongo {
_append( result , "conn" , 5 , b.getFieldDotted( "connections.current" ).numberInt() );
- if ( b["repl"].type() == Object ){
+ if ( b["repl"].type() == Object ) {
BSONObj x = b["repl"].embeddedObject();
bool isReplSet = x["setName"].type() == String;
stringstream ss;
- if ( isReplSet ){
+ if ( isReplSet ) {
string setName = x["setName"].String();
_append( result , "set" , setName.size() , setName );
}
@@ -314,13 +314,13 @@ namespace mongo {
ss << "REC";
else if ( isReplSet )
ss << "UNK";
- else
+ else
ss << "SLV";
-
+
_append( result , "repl" , 4 , ss.str() );
-
+
}
- else if ( b["shardCursorType"].type() == Object ){
+ else if ( b["shardCursorType"].type() == Object ) {
// is a mongos
// TODO: should have a better check
_append( result , "repl" , 4 , "RTR" );
@@ -330,35 +330,35 @@ namespace mongo {
struct tm t;
time_t_to_Struct( time(0), &t , true );
stringstream temp;
- temp << setfill('0') << setw(2) << t.tm_hour
- << ":"
+ temp << setfill('0') << setw(2) << t.tm_hour
+ << ":"
<< setfill('0') << setw(2) << t.tm_min
- << ":"
+ << ":"
<< setfill('0') << setw(2) << t.tm_sec;
_append( result , "time" , 10 , temp.str() );
}
return result.obj();
}
-
- virtual void preSetup(){
- if ( hasParam( "http" ) ){
+
+ virtual void preSetup() {
+ if ( hasParam( "http" ) ) {
_http = true;
_noconnection = true;
}
- if ( hasParam( "host" ) &&
- getParam( "host" ).find( ',' ) != string::npos ){
+ if ( hasParam( "host" ) &&
+ getParam( "host" ).find( ',' ) != string::npos ) {
_noconnection = true;
_many = true;
}
- if ( hasParam( "discover" ) ){
+ if ( hasParam( "discover" ) ) {
_noconnection = true;
_many = true;
}
}
- int run(){
+ int run() {
_sleep = getParam( "sleep" , _sleep );
_all = hasParam( "all" );
if ( _many )
@@ -366,31 +366,31 @@ namespace mongo {
return runNormal();
}
- static void printHeaders( const BSONObj& o ){
+ static void printHeaders( const BSONObj& o ) {
BSONObjIterator i(o);
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
BSONObj x = e.Obj();
cout << setw( x["width"].numberInt() ) << e.fieldName() << ' ';
}
- cout << endl;
+ cout << endl;
}
- static void printData( const BSONObj& o , const BSONObj& headers ){
-
+ static void printData( const BSONObj& o , const BSONObj& headers ) {
+
BSONObjIterator i(headers);
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
BSONObj h = e.Obj();
int w = h["width"].numberInt();
-
+
BSONElement data;
{
BSONElement temp = o[e.fieldName()];
if ( temp.isABSONObj() )
data = temp.Obj()["data"];
}
-
+
if ( data.type() == String )
cout << setw(w) << data.String();
else if ( data.type() == NumberDouble )
@@ -399,15 +399,15 @@ namespace mongo {
cout << setw(w) << data.numberInt();
else if ( data.eoo() )
cout << setw(w) << "";
- else
+ else
cout << setw(w) << "???";
-
+
cout << ' ';
}
- cout << endl;
+ cout << endl;
}
- int runNormal(){
+ int runNormal() {
bool showHeaders = ! hasParam( "noheaders" );
int rowCount = getParam( "rowcount" , 0 );
int rowNum = 0;
@@ -416,50 +416,50 @@ namespace mongo {
if ( prev.isEmpty() )
return -1;
- while ( rowCount == 0 || rowNum < rowCount ){
+ while ( rowCount == 0 || rowNum < rowCount ) {
sleepsecs(_sleep);
BSONObj now;
try {
now = stats();
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
cout << "can't get data: " << e.what() << endl;
continue;
}
if ( now.isEmpty() )
return -2;
-
+
try {
BSONObj out = doRow( prev , now );
- if ( showHeaders && rowNum % 10 == 0 ){
+ if ( showHeaders && rowNum % 10 == 0 ) {
printHeaders( out );
}
-
+
printData( out , out );
}
- catch ( AssertionException& e ){
+ catch ( AssertionException& e ) {
cout << "\nerror: " << e.what() << "\n"
<< now
<< endl;
}
-
+
prev = now;
rowNum++;
}
return 0;
}
-
+
struct ServerState {
- ServerState() : lock( "Stat::ServerState" ){}
+ ServerState() : lock( "Stat::ServerState" ) {}
string host;
scoped_ptr<boost::thread> thr;
-
+
mongo::mutex lock;
-
+
BSONObj prev;
BSONObj now;
time_t lastUpdate;
@@ -468,21 +468,21 @@ namespace mongo {
string error;
bool mongos;
};
-
- static void serverThread( shared_ptr<ServerState> state ){
+
+ static void serverThread( shared_ptr<ServerState> state ) {
try {
DBClientConnection conn( true );
conn._logLevel = 1;
string errmsg;
if ( ! conn.connect( state->host , errmsg ) )
state->error = errmsg;
-
+
long long cycleNumber = 0;
- while ( ++cycleNumber ){
+ while ( ++cycleNumber ) {
try {
BSONObj out;
- if ( conn.simpleCommand( "admin" , &out , "serverStatus" ) ){
+ if ( conn.simpleCommand( "admin" , &out , "serverStatus" ) ) {
scoped_lock lk( state->lock );
state->error = "";
state->lastUpdate = time(0);
@@ -494,13 +494,13 @@ namespace mongo {
state->error = "serverStatus failed";
state->lastUpdate = time(0);
}
-
- if ( out["shardCursorType"].type() == Object ){
+
+ if ( out["shardCursorType"].type() == Object ) {
state->mongos = true;
- if ( cycleNumber % 10 == 1 ){
+ if ( cycleNumber % 10 == 1 ) {
auto_ptr<DBClientCursor> c = conn.query( "config.shards" , BSONObj() );
vector<BSONObj> shards;
- while ( c->more() ){
+ while ( c->more() ) {
shards.push_back( c->next().getOwned() );
}
scoped_lock lk( state->lock );
@@ -508,57 +508,57 @@ namespace mongo {
}
}
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
scoped_lock lk( state->lock );
state->error = e.what();
}
-
+
sleepsecs( 1 );
}
-
-
+
+
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
cout << "serverThread (" << state->host << ") fatal error : " << e.what() << endl;
}
- catch ( ... ){
+ catch ( ... ) {
cout << "serverThread (" << state->host << ") fatal error" << endl;
}
}
typedef map<string,shared_ptr<ServerState> > StateMap;
- bool _add( StateMap& threads , string host ){
+ bool _add( StateMap& threads , string host ) {
shared_ptr<ServerState>& state = threads[host];
if ( state )
return false;
-
+
state.reset( new ServerState() );
state->host = host;
state->thr.reset( new boost::thread( boost::bind( serverThread , state ) ) );
return true;
}
-
+
/**
* @param hosts [ "a.foo.com" , "b.foo.com" ]
*/
- bool _addAll( StateMap& threads , const BSONObj& hosts ){
+ bool _addAll( StateMap& threads , const BSONObj& hosts ) {
BSONObjIterator i( hosts );
bool added = false;
- while ( i.more() ){
+ while ( i.more() ) {
bool me = _add( threads , i.next().String() );
added = added || me;
}
return added;
}
- bool _discover( StateMap& threads , const string& host , const shared_ptr<ServerState>& ss ){
-
+ bool _discover( StateMap& threads , const string& host , const shared_ptr<ServerState>& ss ) {
+
BSONObj info = ss->now;
bool found = false;
-
- if ( info["repl"].isABSONObj() ){
+
+ if ( info["repl"].isABSONObj() ) {
BSONObj x = info["repl"].Obj();
if ( x["hosts"].isABSONObj() )
if ( _addAll( threads , x["hosts"].Obj() ) )
@@ -567,106 +567,108 @@ namespace mongo {
if ( _addAll( threads , x["passives"].Obj() ) )
found = true;
}
-
- if ( ss->mongos ){
- for ( unsigned i=0; i<ss->shards.size(); i++ ){
+
+ if ( ss->mongos ) {
+ for ( unsigned i=0; i<ss->shards.size(); i++ ) {
BSONObj x = ss->shards[i];
string errmsg;
ConnectionString cs = ConnectionString::parse( x["host"].String() , errmsg );
- if ( errmsg.size() ){
+ if ( errmsg.size() ) {
cerr << errmsg << endl;
continue;
}
-
+
vector<HostAndPort> v = cs.getServers();
- for ( unsigned i=0; i<v.size(); i++ ){
+ for ( unsigned i=0; i<v.size(); i++ ) {
if ( _add( threads , v[i].toString() ) )
found = true;
}
}
}
-
+
return found;
}
-
- int runMany(){
+
+ int runMany() {
StateMap threads;
-
+
{
string orig = getParam( "host" );
if ( orig == "" )
orig = "localhost:27017";
StringSplitter ss( orig.c_str() , "," );
- while ( ss.more() ){
+ while ( ss.more() ) {
string host = ss.next();
_add( threads , host );
}
}
-
+
sleepsecs(1);
-
+
int row = 0;
bool discover = hasParam( "discover" );
- while ( 1 ){
+ while ( 1 ) {
sleepsecs( _sleep );
-
+
// collect data
vector<Row> rows;
- for ( map<string,shared_ptr<ServerState> >::iterator i=threads.begin(); i!=threads.end(); ++i ){
+ for ( map<string,shared_ptr<ServerState> >::iterator i=threads.begin(); i!=threads.end(); ++i ) {
scoped_lock lk( i->second->lock );
-
- if ( i->second->error.size() ){
+
+ if ( i->second->error.size() ) {
rows.push_back( Row( i->first , i->second->error ) );
}
- else if ( i->second->prev.isEmpty() || i->second->now.isEmpty() ){
+ else if ( i->second->prev.isEmpty() || i->second->now.isEmpty() ) {
rows.push_back( Row( i->first ) );
}
else {
BSONObj out = doRow( i->second->prev , i->second->now );
rows.push_back( Row( i->first , out ) );
}
-
- if ( discover && ! i->second->now.isEmpty() ){
+
+ if ( discover && ! i->second->now.isEmpty() ) {
if ( _discover( threads , i->first , i->second ) )
break;
}
}
-
+
// compute some stats
unsigned longestHost = 0;
BSONObj biggest;
- for ( unsigned i=0; i<rows.size(); i++ ){
+ for ( unsigned i=0; i<rows.size(); i++ ) {
if ( rows[i].host.size() > longestHost )
longestHost = rows[i].host.size();
if ( rows[i].data.nFields() > biggest.nFields() )
biggest = rows[i].data;
}
-
- { // check for any headers not in biggest
- // TODO: we put any new headers at end,
+ {
+ // check for any headers not in biggest
+
+ // TODO: we put any new headers at end,
// ideally we would interleave
set<string> seen;
-
+
BSONObjBuilder b;
-
- { // iterate biggest
+
+ {
+ // iterate biggest
BSONObjIterator i( biggest );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
seen.insert( e.fieldName() );
b.append( e );
}
}
-
+
// now do the rest
- for ( unsigned j=0; j<rows.size(); j++ ){
+ for ( unsigned j=0; j<rows.size(); j++ ) {
BSONObjIterator i( rows[j].data );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
if ( seen.count( e.fieldName() ) )
continue;
@@ -677,30 +679,30 @@ namespace mongo {
}
biggest = b.obj();
-
+
}
-
+
// display data
-
+
cout << endl;
// header
- if ( row++ % 5 == 0 && ! biggest.isEmpty() ){
+ if ( row++ % 5 == 0 && ! biggest.isEmpty() ) {
cout << setw( longestHost ) << "" << "\t";
printHeaders( biggest );
}
-
+
// rows
- for ( unsigned i=0; i<rows.size(); i++ ){
+ for ( unsigned i=0; i<rows.size(); i++ ) {
cout << setw( longestHost ) << rows[i].host << "\t";
if ( rows[i].err.size() )
cout << rows[i].err << endl;
else if ( rows[i].data.isEmpty() )
cout << "no data" << endl;
- else
+ else
printData( rows[i].data , biggest );
}
-
+
}
return 0;
@@ -712,16 +714,16 @@ namespace mongo {
bool _all;
struct Row {
- Row( string h , string e ){
+ Row( string h , string e ) {
host = h;
err = e;
}
-
- Row( string h ){
+
+ Row( string h ) {
host = h;
}
- Row( string h , BSONObj d ){
+ Row( string h , BSONObj d ) {
host = h;
data = d;
}
diff --git a/tools/tool.cpp b/tools/tool.cpp
index e460f600cef..0491012b880 100644
--- a/tools/tool.cpp
+++ b/tools/tool.cpp
@@ -35,41 +35,41 @@ namespace mongo {
CmdLine cmdLine;
- Tool::Tool( string name , DBAccess access , string defaultDB ,
+ Tool::Tool( string name , DBAccess access , string defaultDB ,
string defaultCollection , bool usesstdout ) :
- _name( name ) , _db( defaultDB ) , _coll( defaultCollection ) ,
+ _name( name ) , _db( defaultDB ) , _coll( defaultCollection ) ,
_usesstdout(usesstdout), _noconnection(false), _autoreconnect(false), _conn(0), _slaveConn(0), _paired(false) {
-
+
_options = new po::options_description( "options" );
_options->add_options()
- ("help","produce help message")
- ("verbose,v", "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
- ;
+ ("help","produce help message")
+ ("verbose,v", "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
+ ;
if ( access & REMOTE_SERVER )
_options->add_options()
- ("host,h",po::value<string>(), "mongo host to connect to (\"left,right\" for pairs)" )
- ("port",po::value<string>(), "server port. Can also use --host hostname:port" )
- ("ipv6", "enable IPv6 support (disabled by default)")
-
- ("username,u",po::value<string>(), "username" )
- ("password,p", new PasswordValue( &_password ), "password" )
- ;
-
+ ("host,h",po::value<string>(), "mongo host to connect to (\"left,right\" for pairs)" )
+ ("port",po::value<string>(), "server port. Can also use --host hostname:port" )
+ ("ipv6", "enable IPv6 support (disabled by default)")
+
+ ("username,u",po::value<string>(), "username" )
+ ("password,p", new PasswordValue( &_password ), "password" )
+ ;
+
if ( access & LOCAL_SERVER )
_options->add_options()
- ("dbpath",po::value<string>(), "directly access mongod database "
- "files in the given path, instead of connecting to a mongod "
- "server - needs to lock the data directory, so cannot be "
- "used if a mongod is currently accessing the same path" )
- ("directoryperdb", "if dbpath specified, each db is in a separate directory" )
- ;
-
+ ("dbpath",po::value<string>(), "directly access mongod database "
+ "files in the given path, instead of connecting to a mongod "
+ "server - needs to lock the data directory, so cannot be "
+ "used if a mongod is currently accessing the same path" )
+ ("directoryperdb", "if dbpath specified, each db is in a separate directory" )
+ ;
+
if ( access & SPECIFY_DBCOL )
_options->add_options()
- ("db,d",po::value<string>(), "database to use" )
- ("collection,c",po::value<string>(), "collection to use (some commands)" )
- ;
+ ("db,d",po::value<string>(), "database to use" )
+ ("collection,c",po::value<string>(), "collection to use (some commands)" )
+ ;
_hidden_options = new po::options_description( name + " hidden options" );
@@ -79,7 +79,7 @@ namespace mongo {
}
}
- Tool::~Tool(){
+ Tool::~Tool() {
delete( _options );
delete( _hidden_options );
if ( _conn )
@@ -92,9 +92,9 @@ namespace mongo {
printExtraHelpAfter(out);
}
- int Tool::main( int argc , char ** argv ){
+ int Tool::main( int argc , char ** argv ) {
static StaticObserver staticObserver;
-
+
cmdLine.prealloc = false;
boost::filesystem::path::default_name_check( boost::filesystem::no_check );
@@ -116,23 +116,24 @@ namespace mongo {
style(command_line_style).run() , _params );
po::notify( _params );
- } catch (po::error &e) {
+ }
+ catch (po::error &e) {
cerr << "ERROR: " << e.what() << endl << endl;
printHelp(cerr);
return EXIT_BADOPTIONS;
}
// hide password from ps output
- for (int i=0; i < (argc-1); ++i){
- if (!strcmp(argv[i], "-p") || !strcmp(argv[i], "--password")){
+ for (int i=0; i < (argc-1); ++i) {
+ if (!strcmp(argv[i], "-p") || !strcmp(argv[i], "--password")) {
char* arg = argv[i+1];
- while (*arg){
+ while (*arg) {
*arg++ = 'x';
}
}
}
- if ( _params.count( "help" ) ){
+ if ( _params.count( "help" ) ) {
printHelp(cout);
return 0;
}
@@ -146,11 +147,11 @@ namespace mongo {
logLevel = s.length();
}
}
-
+
preSetup();
bool useDirectClient = hasParam( "dbpath" );
-
+
if ( ! useDirectClient ) {
_host = "127.0.0.1";
if ( _params.count( "host" ) )
@@ -158,21 +159,21 @@ namespace mongo {
if ( _params.count( "port" ) )
_host += ':' + _params["port"].as<string>();
-
- if ( _noconnection ){
+
+ if ( _noconnection ) {
// do nothing
}
else {
string errmsg;
ConnectionString cs = ConnectionString::parse( _host , errmsg );
- if ( ! cs.isValid() ){
+ if ( ! cs.isValid() ) {
cerr << "invalid hostname [" << _host << "] " << errmsg << endl;
return -1;
}
-
+
_conn = cs.connect( errmsg );
- if ( ! _conn ){
+ if ( ! _conn ) {
cerr << "couldn't connect to [" << _host << "] " << errmsg << endl;
return -1;
}
@@ -194,10 +195,10 @@ namespace mongo {
try {
acquirePathLock();
}
- catch ( DBException& ){
+ catch ( DBException& ) {
cerr << endl << "If you are running a mongod on the same "
- "path you should connect to that instead of direct data "
- "file access" << endl << endl;
+ "path you should connect to that instead of direct data "
+ "file access" << endl << endl;
dbexit( EXIT_CLEAN );
return -1;
}
@@ -215,7 +216,7 @@ namespace mongo {
_username = _params["username"].as<string>();
if ( _params.count( "password" )
- && ( _password.empty() ) ) {
+ && ( _password.empty() ) ) {
_password = askPassword();
}
@@ -226,11 +227,11 @@ namespace mongo {
try {
ret = run();
}
- catch ( DBException& e ){
+ catch ( DBException& e ) {
cerr << "assertion: " << e.toString() << endl;
ret = -1;
}
-
+
if ( currentClient.get() )
currentClient->shutdown();
@@ -239,8 +240,8 @@ namespace mongo {
return ret;
}
- DBClientBase& Tool::conn( bool slaveIfPaired ){
- if ( slaveIfPaired && _conn->type() == ConnectionString::SET ){
+ DBClientBase& Tool::conn( bool slaveIfPaired ) {
+ if ( slaveIfPaired && _conn->type() == ConnectionString::SET ) {
if (!_slaveConn)
_slaveConn = &((DBClientReplicaSet*)_conn)->slaveConn();
return *_slaveConn;
@@ -252,47 +253,47 @@ namespace mongo {
if ( hasParam("dbpath") ) {
return true;
}
-
+
BSONObj info;
bool isMaster;
bool ok = conn().isMaster(isMaster, &info);
-
+
if (ok && !isMaster) {
cerr << "ERROR: trying to write to non-master " << conn().toString() << endl;
cerr << "isMaster info: " << info << endl;
return false;
}
-
+
return true;
}
- void Tool::addFieldOptions(){
+ void Tool::addFieldOptions() {
add_options()
- ("fields,f" , po::value<string>() , "comma separated list of field names e.g. -f name,age" )
- ("fieldFile" , po::value<string>() , "file with fields names - 1 per line" )
- ;
+ ("fields,f" , po::value<string>() , "comma separated list of field names e.g. -f name,age" )
+                ("fieldFile" , po::value<string>() , "file with field names - 1 per line" )
+ ;
}
- void Tool::needFields(){
+ void Tool::needFields() {
- if ( hasParam( "fields" ) ){
+ if ( hasParam( "fields" ) ) {
BSONObjBuilder b;
-
+
string fields_arg = getParam("fields");
pcrecpp::StringPiece input(fields_arg);
-
+
string f;
pcrecpp::RE re("([#\\w\\.\\s\\-]+),?" );
- while ( re.Consume( &input, &f ) ){
+ while ( re.Consume( &input, &f ) ) {
_fields.push_back( f );
b.append( f , 1 );
}
-
+
_fieldsObj = b.obj();
return;
}
- if ( hasParam( "fieldFile" ) ){
+ if ( hasParam( "fieldFile" ) ) {
string fn = getParam( "fieldFile" );
if ( ! exists( fn ) )
throw UserException( 9999 , ((string)"file: " + fn ) + " doesn't exist" );
@@ -302,7 +303,7 @@ namespace mongo {
ifstream file( fn.c_str() );
BSONObjBuilder b;
- while ( file.rdstate() == ios_base::goodbit ){
+ while ( file.rdstate() == ios_base::goodbit ) {
file.getline( line , BUF_SIZE );
const char * cur = line;
while ( isspace( cur[0] ) ) cur++;
@@ -319,7 +320,7 @@ namespace mongo {
throw UserException( 9998 , "you need to specify fields" );
}
- void Tool::auth( string dbname ){
+ void Tool::auth( string dbname ) {
if ( ! dbname.size() )
dbname = _db;
@@ -338,28 +339,28 @@ namespace mongo {
throw UserException( 9997 , (string)"auth failed: " + errmsg );
}
- BSONTool::BSONTool( const char * name, DBAccess access , bool objcheck )
- : Tool( name , access , "" , "" ) , _objcheck( objcheck ){
-
+ BSONTool::BSONTool( const char * name, DBAccess access , bool objcheck )
+ : Tool( name , access , "" , "" ) , _objcheck( objcheck ) {
+
add_options()
- ("objcheck" , "validate object before inserting" )
- ("filter" , po::value<string>() , "filter to apply before inserting" )
- ;
+ ("objcheck" , "validate object before inserting" )
+ ("filter" , po::value<string>() , "filter to apply before inserting" )
+ ;
}
- int BSONTool::run(){
+ int BSONTool::run() {
_objcheck = hasParam( "objcheck" );
-
+
if ( hasParam( "filter" ) )
_matcher.reset( new Matcher( fromjson( getParam( "filter" ) ) ) );
-
+
return doRun();
}
- long long BSONTool::processFile( const path& root ){
+ long long BSONTool::processFile( const path& root ) {
_fileName = root.string();
-
+
unsigned long long fileLength = file_size( root );
if ( fileLength == 0 ) {
@@ -369,7 +370,7 @@ namespace mongo {
FILE* file = fopen( _fileName.c_str() , "rb" );
- if ( ! file ){
+ if ( ! file ) {
log() << "error opening file: " << _fileName << endl;
return 0;
}
@@ -393,7 +394,7 @@ namespace mongo {
while ( read < fileLength ) {
int readlen = fread(buf, 4, 1, file);
int size = ((int*)buf)[0];
- if ( size >= BUF_SIZE ){
+ if ( size >= BUF_SIZE ) {
cerr << "got an object of size: " << size << " terminating..." << endl;
}
uassert( 10264 , "invalid object size" , size < BUF_SIZE );
@@ -401,24 +402,24 @@ namespace mongo {
readlen = fread(buf+4, size-4, 1, file);
BSONObj o( buf );
- if ( _objcheck && ! o.valid() ){
+ if ( _objcheck && ! o.valid() ) {
cerr << "INVALID OBJECT - going try and pring out " << endl;
cerr << "size: " << size << endl;
BSONObjIterator i(o);
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
try {
e.validate();
}
- catch ( ... ){
+ catch ( ... ) {
cerr << "\t\t NEXT ONE IS INVALID" << endl;
}
cerr << "\t name : " << e.fieldName() << " " << e.type() << endl;
cerr << "\t " << e << endl;
}
}
-
- if ( _matcher.get() == 0 || _matcher->matches( o ) ){
+
+ if ( _matcher.get() == 0 || _matcher->matches( o ) ) {
gotObject( o );
processed++;
}
@@ -435,8 +436,8 @@ namespace mongo {
out() << "\t " << processed << " objects processed" << endl;
return processed;
}
-
- void setupSignals( bool inFork ){}
+
+ void setupSignals( bool inFork ) {}
}
diff --git a/tools/tool.h b/tools/tool.h
index 746f09f37d6..f6124b87b6c 100644
--- a/tools/tool.h
+++ b/tools/tool.h
@@ -36,45 +36,45 @@ namespace mongo {
class Tool {
public:
enum DBAccess {
- NONE = 0 ,
- REMOTE_SERVER = 1 << 1 ,
- LOCAL_SERVER = 1 << 2 ,
+ NONE = 0 ,
+ REMOTE_SERVER = 1 << 1 ,
+ LOCAL_SERVER = 1 << 2 ,
SPECIFY_DBCOL = 1 << 3 ,
ALL = REMOTE_SERVER | LOCAL_SERVER | SPECIFY_DBCOL
};
- Tool( string name , DBAccess access=ALL, string defaultDB="test" ,
+ Tool( string name , DBAccess access=ALL, string defaultDB="test" ,
string defaultCollection="", bool usesstdout=true);
virtual ~Tool();
int main( int argc , char ** argv );
- boost::program_options::options_description_easy_init add_options(){
+ boost::program_options::options_description_easy_init add_options() {
return _options->add_options();
}
- boost::program_options::options_description_easy_init add_hidden_options(){
+ boost::program_options::options_description_easy_init add_hidden_options() {
return _hidden_options->add_options();
}
- void addPositionArg( const char * name , int pos ){
+ void addPositionArg( const char * name , int pos ) {
_positonalOptions.add( name , pos );
}
- string getParam( string name , string def="" ){
+ string getParam( string name , string def="" ) {
if ( _params.count( name ) )
return _params[name.c_str()].as<string>();
return def;
}
- int getParam( string name , int def ){
+ int getParam( string name , int def ) {
if ( _params.count( name ) )
return _params[name.c_str()].as<int>();
return def;
}
- bool hasParam( string name ){
+ bool hasParam( string name ) {
return _params.count( name );
}
- string getNS(){
- if ( _coll.size() == 0 ){
+ string getNS() {
+ if ( _coll.size() == 0 ) {
cerr << "no collection specified!" << endl;
throw -1;
}
@@ -82,21 +82,21 @@ namespace mongo {
}
bool isMaster();
-
- virtual void preSetup(){}
+
+ virtual void preSetup() {}
virtual int run() = 0;
virtual void printHelp(ostream &out);
- virtual void printExtraHelp( ostream & out ){}
- virtual void printExtraHelpAfter( ostream & out ){}
+ virtual void printExtraHelp( ostream & out ) {}
+ virtual void printExtraHelpAfter( ostream & out ) {}
protected:
mongo::DBClientBase &conn( bool slaveIfPaired = false );
void auth( string db = "" );
-
+
string _name;
string _db;
@@ -105,18 +105,18 @@ namespace mongo {
string _username;
string _password;
-
+
bool _usesstdout;
bool _noconnection;
bool _autoreconnect;
void addFieldOptions();
void needFields();
-
+
vector<string> _fields;
BSONObj _fieldsObj;
-
+
string _host;
protected:
@@ -136,17 +136,17 @@ namespace mongo {
class BSONTool : public Tool {
bool _objcheck;
auto_ptr<Matcher> _matcher;
-
+
public:
BSONTool( const char * name , DBAccess access=ALL, bool objcheck = false );
-
+
virtual int doRun() = 0;
virtual void gotObject( const BSONObj& obj ) = 0;
-
+
virtual int run();
long long processFile( const path& file );
-
+
};
}
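
A hedged sketch of how a concrete tool plugs into the Tool interface above. HelloTool and its --greeting option are invented for illustration; the option wiring follows the add_options() pattern used in tool.cpp, and po is assumed to alias boost::program_options as it does there.

    // Illustration only -- HelloTool is not part of this commit.
    class HelloTool : public Tool {
    public:
        HelloTool() : Tool( "hello" ) {
            add_options()
                ( "greeting" , po::value<string>() , "text to print" )
                ;
        }
        virtual int run() {
            string g = getParam( "greeting" , "hello" );
            cout << g << " from " << getNS() << endl; // getNS() throws if no collection was given
            return 0;
        }
    };

    int main( int argc , char ** argv ) {
        HelloTool t;
        return t.main( argc , argv ); // parses options and connects, then calls run()
    }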
diff --git a/util/admin_access.h b/util/admin_access.h
index 2306bdd43c5..bb882b2b4c5 100644
--- a/util/admin_access.h
+++ b/util/admin_access.h
@@ -36,14 +36,14 @@ namespace mongo {
virtual bool haveAdminUsers() const = 0;
/** @return priviledged user with this name. This should not block
- * for long and throw if can't get a lock if needed
+ * for long and throw if can't get a lock if needed
*/
virtual BSONObj getAdminUser( const string& username ) const = 0;
};
class NoAdminAccess : public AdminAccess {
public:
- virtual ~NoAdminAccess() { }
+ virtual ~NoAdminAccess() { }
virtual bool haveAdminUsers() const { return false; }
virtual BSONObj getAdminUser( const string& username ) const { return BSONObj(); }
diff --git a/util/alignedbuilder.cpp b/util/alignedbuilder.cpp
index 5144c86fe85..8fa487d547f 100644
--- a/util/alignedbuilder.cpp
+++ b/util/alignedbuilder.cpp
@@ -53,7 +53,7 @@ namespace mongo {
_realloc(a, oldLen);
}
- void AlignedBuilder::_malloc(unsigned sz) {
+ void AlignedBuilder::_malloc(unsigned sz) {
_p._size = sz;
#if defined(_WIN32)
void *p = VirtualAlloc(0, sz, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
@@ -73,7 +73,7 @@ namespace mongo {
#endif
}
- void AlignedBuilder::_realloc(unsigned newSize, unsigned oldLen) {
+ void AlignedBuilder::_realloc(unsigned newSize, unsigned oldLen) {
// posix_memalign alignment is not maintained on reallocs, so we can't use realloc().
AllocationInfo old = _p;
_malloc(newSize);
diff --git a/util/alignedbuilder.h b/util/alignedbuilder.h
index 6e4ce3fbe92..452cec2fb37 100644
--- a/util/alignedbuilder.h
+++ b/util/alignedbuilder.h
@@ -20,7 +20,7 @@
#include "../bson/stringdata.h"
-namespace mongo {
+namespace mongo {
/** a page-aligned BufBuilder. */
class AlignedBuilder {
@@ -39,10 +39,10 @@ namespace mongo {
/** note this may be deallocated (realloced) if you keep writing or reset(). */
const char* buf() const { return _p._data; }
- /** leave room for some stuff later
+ /** leave room for some stuff later
@return offset in the buffer that was our current position
*/
- size_t skip(unsigned n) {
+ size_t skip(unsigned n) {
unsigned l = len();
grow(n);
return l;
@@ -50,10 +50,10 @@ namespace mongo {
char* atOfs(unsigned ofs) { return _p._data + ofs; }
- void appendChar(char j){
+ void appendChar(char j) {
*((char*)grow(sizeof(char))) = j;
}
- void appendNum(char j){
+ void appendNum(char j) {
*((char*)grow(sizeof(char))) = j;
}
void appendNum(short j) {
@@ -110,7 +110,7 @@ namespace mongo {
void mallocSelfAligned(unsigned sz);
void _malloc(unsigned sz);
void _realloc(unsigned newSize, unsigned oldLenInUse);
- void _free(void*);
+ void _free(void*);
struct AllocationInfo {
char *_data;
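
The skip()/atOfs() pair above supports a reserve-now, fill-in-later pattern, e.g. for backfilling a length field once the payload size is known. A rough sketch, with the record layout invented here:

    // Sketch only: reserve a 4-byte length slot, append payload, then backfill.
    void buildRecord( mongo::AlignedBuilder& b ) {
        size_t lenOfs = b.skip( 4 );          // remember where the length goes
        b.appendChar( 'x' );                  // payload byte 1
        b.appendNum( (char) 7 );              // payload byte 2
        unsigned payloadLen = 2;
        memcpy( b.atOfs( (unsigned) lenOfs ) , &payloadLen , 4 ); // backfill (memcpy from <cstring>)
    }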
diff --git a/util/allocator.h b/util/allocator.h
index 2c07973d90d..a642e7cab56 100644
--- a/util/allocator.h
+++ b/util/allocator.h
@@ -18,22 +18,22 @@
#pragma once
namespace mongo {
-
+
inline void * ourmalloc(size_t size) {
void *x = malloc(size);
if ( x == 0 ) dbexit( EXIT_OOM_MALLOC , "malloc fails");
return x;
}
-
+
inline void * ourrealloc(void *ptr, size_t size) {
void *x = realloc(ptr, size);
if ( x == 0 ) dbexit( EXIT_OOM_REALLOC , "realloc fails");
return x;
}
-
+
#define MONGO_malloc mongo::ourmalloc
#define malloc MONGO_malloc
#define MONGO_realloc mongo::ourrealloc
#define realloc MONGO_realloc
-
+
} // namespace mongo
diff --git a/util/array.h b/util/array.h
index 8da06fe99e6..bf705a4d988 100644
--- a/util/array.h
+++ b/util/array.h
@@ -22,50 +22,50 @@ namespace mongo {
class FastArray {
public:
FastArray( int capacity=10000 )
- : _capacity( capacity ) , _size(0) , _end(this,capacity){
+ : _capacity( capacity ) , _size(0) , _end(this,capacity) {
_data = new T[capacity];
}
- ~FastArray(){
+ ~FastArray() {
delete[] _data;
}
-
- void clear(){
+
+ void clear() {
_size = 0;
}
-
- T& operator[]( int x ){
+
+ T& operator[]( int x ) {
assert( x >= 0 && x < _capacity );
return _data[x];
}
-
- T& getNext(){
+
+ T& getNext() {
return _data[_size++];
}
-
- void push_back( const T& t ){
+
+ void push_back( const T& t ) {
_data[_size++] = t;
}
-
- void sort( int (*comp)(const void *, const void *) ){
+
+ void sort( int (*comp)(const void *, const void *) ) {
qsort( _data , _size , sizeof(T) , comp );
}
-
- int size(){
+
+ int size() {
return _size;
}
-
- bool hasSpace(){
+
+ bool hasSpace() {
return _size < _capacity;
}
class iterator {
public:
- iterator(){
+ iterator() {
_it = 0;
_pos = 0;
}
-
- iterator( FastArray * it , int pos=0 ){
+
+ iterator( FastArray * it , int pos=0 ) {
_it = it;
_pos = pos;
}
@@ -78,14 +78,14 @@ namespace mongo {
return _pos != other._pos;
}
- void operator++(){
+ void operator++() {
_pos++;
}
- T& operator*(){
+ T& operator*() {
return _it->_data[_pos];
}
-
+
string toString() const {
stringstream ss;
ss << _pos;
@@ -97,13 +97,13 @@ namespace mongo {
friend class FastArray;
};
-
- iterator begin(){
+
+ iterator begin() {
return iterator(this);
}
- iterator end(){
+ iterator end() {
_end._pos = _size;
return _end;
}
@@ -112,7 +112,7 @@ namespace mongo {
private:
int _capacity;
int _size;
-
+
iterator _end;
T * _data;
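
FastArray trades growth for speed: capacity is fixed at construction and push_back() never reallocates. A small usage sketch built only from the members shown above:

    void fastArrayDemo() {
        mongo::FastArray<int> a( 4 );          // capacity fixed up front
        while ( a.hasSpace() )
            a.push_back( a.size() );           // fills 0,1,2,3 with no reallocation
        for ( mongo::FastArray<int>::iterator i = a.begin(); i != a.end(); ++i )
            cout << *i << endl;
    }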
diff --git a/util/assert_util.cpp b/util/assert_util.cpp
index 56f68c1f6f1..47be5e95246 100644
--- a/util/assert_util.cpp
+++ b/util/assert_util.cpp
@@ -33,12 +33,12 @@ using namespace std;
namespace mongo {
AssertionCount assertionCount;
-
+
AssertionCount::AssertionCount()
- : regular(0),warning(0),msg(0),user(0),rollovers(0){
+ : regular(0),warning(0),msg(0),user(0),rollovers(0) {
}
- void AssertionCount::rollover(){
+ void AssertionCount::rollover() {
rollovers++;
regular = 0;
warning = 0;
@@ -46,7 +46,7 @@ namespace mongo {
user = 0;
}
- void AssertionCount::condrollover( int newvalue ){
+ void AssertionCount::condrollover( int newvalue ) {
static int max = (int)pow( 2.0 , 30 );
if ( newvalue >= max )
rollover();
@@ -57,15 +57,15 @@ namespace mongo {
b.append( m , "unknown assertion" );
else
b.append( m , msg );
-
+
if ( code )
b.append( c , code );
}
-
- string getDbContext();
-
- /* "warning" assert -- safe to continue, so we don't throw exception. */
+
+ string getDbContext();
+
+ /* "warning" assert -- safe to continue, so we don't throw exception. */
void wasserted(const char *msg, const char *file, unsigned line) {
problem() << "Assertion failure " << msg << ' ' << file << ' ' << dec << line << endl;
sayDbContext();
@@ -111,15 +111,15 @@ namespace mongo {
throw MsgAssertionException(msgid, msg);
}
- void streamNotGood( int code , string msg , std::ios& myios ){
+ void streamNotGood( int code , string msg , std::ios& myios ) {
stringstream ss;
// errno might not work on all systems for streams
// if it doesn't for a system should deal with here
ss << msg << " stream invalid: " << errnoWithDescription();
throw UserException( code , ss.str() );
}
-
- string errnoWithPrefix( const char * prefix ){
+
+ string errnoWithPrefix( const char * prefix ) {
stringstream ss;
if ( prefix )
ss << prefix << ": ";
@@ -127,16 +127,16 @@ namespace mongo {
return ss.str();
}
- string demangleName( const type_info& typeinfo ){
+ string demangleName( const type_info& typeinfo ) {
#ifdef _WIN32
return typeinfo.name();
#else
int status;
-
+
char * niceName = abi::__cxa_demangle(typeinfo.name(), 0, 0, &status);
if ( ! niceName )
return typeinfo.name();
-
+
string s = niceName;
free(niceName);
return s;
diff --git a/util/assert_util.h b/util/assert_util.h
index 6b8e533e687..151e9507270 100644
--- a/util/assert_util.h
+++ b/util/assert_util.h
@@ -39,80 +39,80 @@ namespace mongo {
int user;
int rollovers;
};
-
+
extern AssertionCount assertionCount;
-
+
struct ExceptionInfo {
- ExceptionInfo() : msg(""),code(-1){}
+ ExceptionInfo() : msg(""),code(-1) {}
ExceptionInfo( const char * m , int c )
- : msg( m ) , code( c ){
+ : msg( m ) , code( c ) {
}
ExceptionInfo( const string& m , int c )
- : msg( m ) , code( c ){
+ : msg( m ) , code( c ) {
}
- void append( BSONObjBuilder& b , const char * m = "$err" , const char * c = "code" ) const ;
+ void append( BSONObjBuilder& b , const char * m = "$err" , const char * c = "code" ) const ;
string toString() const { stringstream ss; ss << "exception: " << code << " " << msg; return ss.str(); }
bool empty() const { return msg.empty(); }
-
+
string msg;
int code;
};
class DBException : public std::exception {
public:
- DBException( const ExceptionInfo& ei ) : _ei(ei){}
- DBException( const char * msg , int code ) : _ei(msg,code){}
- DBException( const string& msg , int code ) : _ei(msg,code){}
+ DBException( const ExceptionInfo& ei ) : _ei(ei) {}
+ DBException( const char * msg , int code ) : _ei(msg,code) {}
+ DBException( const string& msg , int code ) : _ei(msg,code) {}
virtual ~DBException() throw() { }
-
- virtual const char* what() const throw(){ return _ei.msg.c_str(); }
+
+ virtual const char* what() const throw() { return _ei.msg.c_str(); }
virtual int getCode() const { return _ei.code; }
-
+
virtual void appendPrefix( stringstream& ss ) const { }
-
+
virtual string toString() const {
            stringstream ss;
            ss << getCode() << " " << what();
            return ss.str();
}
-
+
const ExceptionInfo& getInfo() const { return _ei; }
protected:
ExceptionInfo _ei;
};
-
+
class AssertionException : public DBException {
public:
- AssertionException( const ExceptionInfo& ei ) : DBException(ei){}
- AssertionException( const char * msg , int code ) : DBException(msg,code){}
- AssertionException( const string& msg , int code ) : DBException(msg,code){}
+ AssertionException( const ExceptionInfo& ei ) : DBException(ei) {}
+ AssertionException( const char * msg , int code ) : DBException(msg,code) {}
+ AssertionException( const string& msg , int code ) : DBException(msg,code) {}
virtual ~AssertionException() throw() { }
-
+
virtual bool severe() { return true; }
virtual bool isUserAssertion() { return false; }
/* true if an interrupted exception - see KillCurrentOp */
- bool interrupted() {
+ bool interrupted() {
return _ei.code == 11600 || _ei.code == 11601;
}
};
-
+
/* UserExceptions are valid errors that a user can cause, like out of disk space or duplicate key */
class UserException : public AssertionException {
public:
- UserException(int c , const string& m) : AssertionException( m , c ){}
+ UserException(int c , const string& m) : AssertionException( m , c ) {}
virtual bool severe() { return false; }
virtual bool isUserAssertion() { return true; }
virtual void appendPrefix( stringstream& ss ) const { ss << "userassert:"; }
};
-
+
class MsgAssertionException : public AssertionException {
public:
- MsgAssertionException( const ExceptionInfo& ei ) : AssertionException( ei ){}
- MsgAssertionException(int c, const string& m) : AssertionException( m , c ){}
+ MsgAssertionException( const ExceptionInfo& ei ) : AssertionException( ei ) {}
+ MsgAssertionException(int c, const string& m) : AssertionException( m , c ) {}
virtual bool severe() { return false; }
virtual void appendPrefix( stringstream& ss ) const { ss << "massert:"; }
};
@@ -121,14 +121,14 @@ namespace mongo {
void asserted(const char *msg, const char *file, unsigned line);
void wasserted(const char *msg, const char *file, unsigned line);
- /** a "user assertion". throws UserAssertion. logs. typically used for errors that a user
+ /** a "user assertion". throws UserAssertion. logs. typically used for errors that a user
       could cause, such as duplicate key, disk full, etc.
*/
void uasserted(int msgid, const char *msg);
inline void uasserted(int msgid , string msg) { uasserted(msgid, msg.c_str()); }
/** reported via lasterror, but don't throw exception */
- void uassert_nothrow(const char *msg);
+ void uassert_nothrow(const char *msg);
/** msgassert and massert are for errors that are internal but have a well defined error text string.
a stack trace is logged.
@@ -167,21 +167,21 @@ namespace mongo {
#if defined(_DEBUG)
# define MONGO_dassert assert
#else
-# define MONGO_dassert(x)
+# define MONGO_dassert(x)
#endif
#define dassert MONGO_dassert
// some special ids that we want to duplicate
-
+
// > 10000 asserts
// < 10000 UserException
-
+
enum { ASSERT_ID_DUPKEY = 11000 };
/* throws a uassertion with an appropriate msg */
void streamNotGood( int code , string msg , std::ios& myios );
- inline void assertStreamGood(unsigned msgid, string msg, std::ios& myios) {
+ inline void assertStreamGood(unsigned msgid, string msg, std::ios& myios) {
if( !myios.good() ) streamNotGood(msgid, msg, myios);
}
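
As a hedged aside, the assertion helpers declared above are used as in the sketch below; the error codes are placeholders, while the uassert/massert condition form matches their uses elsewhere in this diff (e.g. uassert( 10270 , ... ) in base64.cpp).

    // Sketch only; 12345 and 12346 are placeholder error codes.
    void insertChecked( const BSONObj& o ) {
        // user-correctable problem: throws UserException
        uassert( 12345 , "object failed validation" , o.valid() );
        // internal but well-defined error: throws MsgAssertionException, logs a stack trace
        massert( 12346 , "unexpected empty object" , ! o.isEmpty() );
    }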
@@ -191,15 +191,15 @@ namespace mongo {
#define BOOST_CHECK_EXCEPTION MONGO_BOOST_CHECK_EXCEPTION
#define MONGO_BOOST_CHECK_EXCEPTION( expression ) \
- try { \
- expression; \
- } catch ( const std::exception &e ) { \
+ try { \
+ expression; \
+ } catch ( const std::exception &e ) { \
stringstream ss; \
- ss << "caught boost exception: " << e.what(); \
+ ss << "caught boost exception: " << e.what(); \
msgasserted( 13294 , ss.str() ); \
- } catch ( ... ) { \
- massert( 10437 , "unknown boost failed" , false ); \
- }
+ } catch ( ... ) { \
+ massert( 10437 , "unknown boost failed" , false ); \
+ }
#define DESTRUCTOR_GUARD MONGO_DESTRUCTOR_GUARD
#define MONGO_DESTRUCTOR_GUARD( expression ) \
diff --git a/util/background.cpp b/util/background.cpp
index 36aeb428eb8..9ef22cbbbe0 100644
--- a/util/background.cpp
+++ b/util/background.cpp
@@ -25,9 +25,9 @@ namespace mongo {
// both the BackgroundJob and the internal thread point to JobStatus
struct BackgroundJob::JobStatus {
- JobStatus( bool delFlag )
+ JobStatus( bool delFlag )
: deleteSelf(delFlag), m("backgroundJob"), state(NotStarted) { }
-
+
const bool deleteSelf;
mongo::mutex m; // protects state below
@@ -40,7 +40,7 @@ namespace mongo {
}
// Background object can be only be destroyed after jobBody() ran
- void BackgroundJob::jobBody( boost::shared_ptr<JobStatus> status ){
+ void BackgroundJob::jobBody( boost::shared_ptr<JobStatus> status ) {
{
scoped_lock l( status->m );
assert( status->state == NotStarted );
@@ -54,20 +54,20 @@ namespace mongo {
try {
run();
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
log( LL_ERROR ) << "backgroundjob " << name() << "error: " << e.what() << endl;
}
catch(...) {
log( LL_ERROR ) << "uncaught exception in BackgroundJob " << name() << endl;
}
- {
+ {
scoped_lock l( status->m );
status->state = Done;
status->finished.notify_all();
}
- if( status->deleteSelf )
+ if( status->deleteSelf )
delete this;
}
@@ -87,7 +87,8 @@ namespace mongo {
unsigned long long ns = msTimeOut * 1000000ULL; // milli to nano
if ( xt.nsec + ns < 1000000000 ) {
xt.nsec = (xtime::xtime_nsec_t) (xt.nsec + ns);
- } else {
+ }
+ else {
xt.sec += 1 + ns / 1000000000;
xt.nsec = ( ns + xt.nsec ) % 1000000000;
}
@@ -95,21 +96,22 @@ namespace mongo {
if ( ! _status->finished.timed_wait( l.boost() , xt ) )
return false;
- } else {
+ }
+ else {
_status->finished.wait( l.boost() );
}
}
return true;
}
- BackgroundJob::State BackgroundJob::getState() const {
- scoped_lock l( _status->m);
- return _status->state;
+ BackgroundJob::State BackgroundJob::getState() const {
+ scoped_lock l( _status->m);
+ return _status->state;
}
- bool BackgroundJob::running() const {
+ bool BackgroundJob::running() const {
scoped_lock l( _status->m);
- return _status->state == Running;
+ return _status->state == Running;
}
} // namespace mongo
diff --git a/util/background.h b/util/background.h
index 5fdb286a9a0..4bbef8aac47 100644
--- a/util/background.h
+++ b/util/background.h
@@ -23,14 +23,14 @@ namespace mongo {
* Background thread dispatching.
* subclass and define run()
*
- * It is ok to call go(), that is, run the job, more than once -- if the
- * previous invocation has finished. Thus one pattern of use is to embed
- * a backgroundjob in your object and reuse it (or same thing with
+ * It is ok to call go(), that is, run the job, more than once -- if the
+ * previous invocation has finished. Thus one pattern of use is to embed
+ * a backgroundjob in your object and reuse it (or same thing with
* inheritance). Each go() call spawns a new thread.
*
* Thread safety:
* note when job destructs, the thread is not terminated if still running.
- * generally if the thread could still be running, allocate the job dynamically
+ * generally if the thread could still be running, allocate the job dynamically
* and set deleteSelf to true.
*
* go() and wait() are not thread safe
@@ -54,11 +54,11 @@ namespace mongo {
* define this to do your work.
* after this returns, state is set to done.
* after this returns, deleted if deleteSelf true.
- *
- * NOTE:
- * if run() throws, the exception will be caught within 'this' object and will ultimately lead to the
+ *
+ * NOTE:
+ * if run() throws, the exception will be caught within 'this' object and will ultimately lead to the
* BackgroundJob's thread being finished, as if run() returned.
- *
+ *
*/
virtual void run() = 0;
@@ -71,16 +71,16 @@ namespace mongo {
virtual ~BackgroundJob() { }
- /**
- * starts job.
- * returns immediatelly after dispatching.
+ /**
+ * starts job.
+         * returns immediately after dispatching.
*
         * @note the BackgroundJob object must live for as long as the thread is still running, i.e.
* until getState() returns Done.
*/
BackgroundJob& go();
- /**
+ /**
* wait for completion.
*
         * @param msTimeOut maximum amount of time to wait in milliseconds
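
A rough usage sketch for the BackgroundJob contract described above. Flusher is invented here; name() is assumed to be the hook jobBody() logs with, and go()/wait()/getState() are used exactly as declared.

    // Hypothetical subclass illustrating the lifecycle, not part of this commit.
    class Flusher : public BackgroundJob {
    public:
        virtual string name() { return "flusher"; }
        virtual void run() {
            // do the work; anything thrown here is caught and logged by jobBody()
        }
    };

    void flushOnce() {
        Flusher f;                 // stack allocation is safe because we wait below
        f.go();                    // dispatches the thread and returns immediately
        f.wait( 5000 );            // block up to 5000 ms for completion
        bool done = ( f.getState() == BackgroundJob::Done );
        // done is true iff run() finished within the timeout
    }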
diff --git a/util/base64.cpp b/util/base64.cpp
index 35a3abae717..aff06e26126 100644
--- a/util/base64.cpp
+++ b/util/base64.cpp
@@ -21,20 +21,20 @@
namespace mongo {
namespace base64 {
-
+
Alphabet alphabet;
- void encode( stringstream& ss , const char * data , int size ){
- for ( int i=0; i<size; i+=3 ){
+ void encode( stringstream& ss , const char * data , int size ) {
+ for ( int i=0; i<size; i+=3 ) {
int left = size - i;
const unsigned char * start = (const unsigned char*)data + i;
-
+
// byte 0
ss << alphabet.e(start[0]>>2);
-
+
// byte 1
unsigned char temp = ( start[0] << 4 );
- if ( left == 1 ){
+ if ( left == 1 ) {
ss << alphabet.e(temp);
break;
}
@@ -43,7 +43,7 @@ namespace mongo {
// byte 2
temp = ( start[1] & 0xF ) << 2;
- if ( left == 2 ){
+ if ( left == 2 ) {
ss << alphabet.e(temp);
break;
}
@@ -55,50 +55,50 @@ namespace mongo {
}
int mod = size % 3;
- if ( mod == 1 ){
+ if ( mod == 1 ) {
ss << "==";
}
- else if ( mod == 2 ){
+ else if ( mod == 2 ) {
ss << "=";
}
}
- string encode( const char * data , int size ){
+ string encode( const char * data , int size ) {
stringstream ss;
encode( ss , data ,size );
return ss.str();
}
-
- string encode( const string& s ){
+
+ string encode( const string& s ) {
return encode( s.c_str() , s.size() );
}
- void decode( stringstream& ss , const string& s ){
+ void decode( stringstream& ss , const string& s ) {
uassert( 10270 , "invalid base64" , s.size() % 4 == 0 );
const unsigned char * data = (const unsigned char*)s.c_str();
int size = s.size();
-
+
unsigned char buf[3];
- for ( int i=0; i<size; i+=4){
+ for ( int i=0; i<size; i+=4) {
const unsigned char * start = data + i;
buf[0] = ( ( alphabet.decode[start[0]] << 2 ) & 0xFC ) | ( ( alphabet.decode[start[1]] >> 4 ) & 0x3 );
buf[1] = ( ( alphabet.decode[start[1]] << 4 ) & 0xF0 ) | ( ( alphabet.decode[start[2]] >> 2 ) & 0xF );
buf[2] = ( ( alphabet.decode[start[2]] << 6 ) & 0xC0 ) | ( ( alphabet.decode[start[3]] & 0x3F ) );
-
+
int len = 3;
- if ( start[3] == '=' ){
+ if ( start[3] == '=' ) {
len = 2;
- if ( start[2] == '=' ){
+ if ( start[2] == '=' ) {
len = 1;
}
}
ss.write( (const char*)buf , len );
}
}
-
- string decode( const string& s ){
+
+ string decode( const string& s ) {
stringstream ss;
decode( ss , s );
return ss.str();
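
A quick round-trip sketch using the string overloads above; decode() uasserts on malformed input, as the size check shows.

    void base64Demo() {
        string enc = mongo::base64::encode( "hello" );  // "aGVsbG8="
        string dec = mongo::base64::decode( enc );      // throws via uassert if size % 4 != 0
        assert( dec == "hello" );                       // round-trip recovers the input
    }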
diff --git a/util/base64.h b/util/base64.h
index c113eedb447..505b5d78cca 100644
--- a/util/base64.h
+++ b/util/base64.h
@@ -24,45 +24,44 @@ namespace mongo {
public:
Alphabet()
: encode((unsigned char*)
- "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
- "abcdefghijklmnopqrstuvwxyz"
- "0123456789"
- "+/")
- , decode(new unsigned char[257])
- {
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz"
+ "0123456789"
+ "+/")
+ , decode(new unsigned char[257]) {
memset( decode.get() , 0 , 256 );
- for ( int i=0; i<64; i++ ){
+ for ( int i=0; i<64; i++ ) {
decode[ encode[i] ] = i;
}
test();
}
- void test(){
+ void test() {
assert( strlen( (char*)encode ) == 64 );
for ( int i=0; i<26; i++ )
assert( encode[i] == toupper( encode[i+26] ) );
}
- char e( int x ){
+ char e( int x ) {
return encode[x&0x3f];
}
-
+
private:
const unsigned char * encode;
public:
boost::scoped_array<unsigned char> decode;
};
-
+
extern Alphabet alphabet;
void encode( stringstream& ss , const char * data , int size );
string encode( const char * data , int size );
string encode( const string& s );
-
+
void decode( stringstream& ss , const string& s );
string decode( const string& s );
-
+
void testAlphabet();
}
diff --git a/util/bufreader.h b/util/bufreader.h
index d4e938de621..a0dcefa8d83 100644
--- a/util/bufreader.h
+++ b/util/bufreader.h
@@ -18,15 +18,15 @@
#pragma once
-namespace mongo {
+namespace mongo {
- /** helper to read and parse a block of memory
+ /** helper to read and parse a block of memory
methods throw the eof exception if the operation would pass the end of the
buffer with which we are working.
*/
- class BufReader : boost::noncopyable {
+ class BufReader : boost::noncopyable {
public:
- class eof : public std::exception {
+ class eof : public std::exception {
public:
virtual const char * what() { return "BufReader eof"; }
};
@@ -34,10 +34,10 @@ namespace mongo {
BufReader(const void *p, unsigned len) : _start(p), _pos(p), _end(((char *)_pos)+len) { }
bool atEof() const { return _pos == _end; }
-
+
/** read in the object specified, and advance buffer pointer */
template <typename T>
- void read(T &t) {
+ void read(T &t) {
T* cur = (T*) _pos;
T *next = cur + 1;
if( _end < next ) throw eof();
@@ -47,7 +47,7 @@ namespace mongo {
/** verify we can look at t, but do not advance */
template <typename T>
- void peek(T &t) {
+ void peek(T &t) {
T* cur = (T*) _pos;
T *next = cur + 1;
if( _end < next ) throw eof();
@@ -61,13 +61,13 @@ namespace mongo {
unsigned remaining() const { return (char*)_end -(char*)_pos; }
/** back up by nbytes */
- void rewind(unsigned nbytes) {
+ void rewind(unsigned nbytes) {
_pos = ((char *) _pos) - nbytes;
assert( _pos >= _start );
}
/** return current position pointer, and advance by len */
- const void* skip(unsigned len) {
+ const void* skip(unsigned len) {
const char *nxt = ((char *) _pos) + len;
if( _end < nxt ) throw eof();
const void *p = _pos;
@@ -77,7 +77,7 @@ namespace mongo {
void readStr(string& s) {
StringBuilder b;
- while( 1 ) {
+ while( 1 ) {
char ch;
read(ch);
if( ch == 0 )
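
A sketch of consuming a packed buffer with BufReader; the record layout (int, double, NUL-terminated string) is invented for illustration, the methods are those shown above.

    void parseRecord( const void * p , unsigned len ) {
        mongo::BufReader br( p , len );
        int n;
        br.read( n );                    // throws BufReader::eof rather than over-reading
        double d;
        br.peek( d );                    // inspect without advancing
        br.read( d );
        string s;
        br.readStr( s );                 // consumes through the terminating NUL
        unsigned left = br.remaining();  // bytes still unread
    }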
diff --git a/util/concurrency/list.h b/util/concurrency/list.h
index 58b38ac63bd..e5eaec63bec 100644
--- a/util/concurrency/list.h
+++ b/util/concurrency/list.h
@@ -18,64 +18,64 @@
#pragma once
-namespace mongo {
+namespace mongo {
-/* this class uses a mutex for writes, but not for reads.
- we can get fancier later...
+ /* this class uses a mutex for writes, but not for reads.
+ we can get fancier later...
- struct Member : public List1<Member>::Base {
- const char *host;
- int port;
- };
- List1<Member> _members;
- _members.head()->next();
+ struct Member : public List1<Member>::Base {
+ const char *host;
+ int port;
+ };
+ List1<Member> _members;
+ _members.head()->next();
-*/
-template<typename T>
-class List1 : boost::noncopyable {
-public:
- /* next() and head() return 0 at end of list */
+ */
+ template<typename T>
+ class List1 : boost::noncopyable {
+ public:
+ /* next() and head() return 0 at end of list */
- List1() : _head(0), _m("List1"), _orphans(0) { }
+ List1() : _head(0), _m("List1"), _orphans(0) { }
- class Base {
- friend class List1;
- T *_next;
- public:
- T* next() const { return _next; }
- };
+ class Base {
+ friend class List1;
+ T *_next;
+ public:
+ T* next() const { return _next; }
+ };
- T* head() const { return _head; }
+ T* head() const { return _head; }
- void push(T* t) {
- scoped_lock lk(_m);
- t->_next = _head;
- _head = t;
- }
+ void push(T* t) {
+ scoped_lock lk(_m);
+ t->_next = _head;
+ _head = t;
+ }
- // intentionally leak.
- void orphanAll() {
- _head = 0;
- }
+ // intentionally leak.
+ void orphanAll() {
+ _head = 0;
+ }
- /* t is not deleted, but is removed from the list. (orphaned) */
- void orphan(T* t) {
- scoped_lock lk(_m);
- T *&prev = _head;
- T *n = prev;
- while( n != t ) {
- prev = n->_next;
- n = prev;
+ /* t is not deleted, but is removed from the list. (orphaned) */
+ void orphan(T* t) {
+ scoped_lock lk(_m);
+ T *&prev = _head;
+ T *n = prev;
+ while( n != t ) {
+ prev = n->_next;
+ n = prev;
+ }
+ prev = t->_next;
+ if( ++_orphans > 500 )
+ log() << "warning orphans=" << _orphans << '\n';
}
- prev = t->_next;
- if( ++_orphans > 500 )
- log() << "warning orphans=" << _orphans << '\n';
- }
-private:
- T *_head;
- mongo::mutex _m;
- int _orphans;
-};
+ private:
+ T *_head;
+ mongo::mutex _m;
+ int _orphans;
+ };
};
diff --git a/util/concurrency/msg.h b/util/concurrency/msg.h
index aa657dc053e..f7c6788dadc 100644
--- a/util/concurrency/msg.h
+++ b/util/concurrency/msg.h
@@ -21,14 +21,14 @@
#include <deque>
#include "task.h"
-namespace mongo {
+namespace mongo {
- namespace task {
+ namespace task {
typedef boost::function<void()> lam;
/** typical usage is: task::fork( new Server("threadname") ); */
- class Server : public Task {
+ class Server : public Task {
public:
/** send a message to the port */
void send(lam);
diff --git a/util/concurrency/mutex.h b/util/concurrency/mutex.h
index 40205ddccd0..a8a84220e2b 100644
--- a/util/concurrency/mutex.h
+++ b/util/concurrency/mutex.h
@@ -22,11 +22,11 @@
#include "../heapcheck.h"
-namespace mongo {
+namespace mongo {
class mutex;
- inline boost::xtime incxtimemillis( long long s ){
+ inline boost::xtime incxtimemillis( long long s ) {
boost::xtime xt;
boost::xtime_get(&xt, boost::TIME_UTC);
xt.sec += (int)( s / 1000 );
@@ -34,7 +34,7 @@ namespace mongo {
if ( xt.nsec >= 1000000000 ) {
xt.nsec -= 1000000000;
xt.sec++;
- }
+ }
return xt;
}
@@ -42,7 +42,7 @@ namespace mongo {
        MutexDebugger checks that we always acquire locks for multiple mutexes in a consistent (acyclic) order.
If we were inconsistent we could deadlock.
*/
- class MutexDebugger {
+ class MutexDebugger {
typedef const char * mid; // mid = mutex ID
typedef map<mid,int> Preceeding;
map< mid, int > maxNest;
@@ -55,12 +55,12 @@ namespace mongo {
public:
// set these to create an assert that
// b must never be locked before a
- // so
+ // so
// a.lock(); b.lock(); is fine
// b.lock(); alone is fine too
// only checked on _DEBUG builds.
string a,b;
-
+
/** outputs some diagnostic info on mutexes (on _DEBUG builds) */
void programEnding();
@@ -75,7 +75,7 @@ namespace mongo {
us.reset( _preceeding = new Preceeding() );
Preceeding &preceeding = *_preceeding;
- if( a == m ) {
+ if( a == m ) {
aBreakPoint();
if( preceeding[b.c_str()] ) {
cout << "****** MutexDebugger error! warning " << b << " was locked before " << a << endl;
@@ -84,7 +84,7 @@ namespace mongo {
}
preceeding[m]++;
- if( preceeding[m] > 1 ) {
+ if( preceeding[m] > 1 ) {
// recursive re-locking.
if( preceeding[m] > maxNest[m] )
maxNest[m] = preceeding[m];
@@ -96,19 +96,19 @@ namespace mongo {
{
boost::mutex::scoped_lock lk(x);
followers[m];
- for( Preceeding::iterator i = preceeding.begin(); i != preceeding.end(); i++ ) {
+ for( Preceeding::iterator i = preceeding.begin(); i != preceeding.end(); i++ ) {
if( m != i->first && i->second > 0 ) {
followers[i->first].insert(m);
- if( followers[m].count(i->first) != 0 ){
+ if( followers[m].count(i->first) != 0 ) {
failed = true;
stringstream ss;
mid bad = i->first;
ss << "mutex problem" <<
- "\n when locking " << m <<
- "\n " << bad << " was already locked and should not be."
- "\n set a and b above to debug.\n";
+ "\n when locking " << m <<
+ "\n " << bad << " was already locked and should not be."
+ "\n set a and b above to debug.\n";
stringstream q;
- for( Preceeding::iterator i = preceeding.begin(); i != preceeding.end(); i++ ) {
+ for( Preceeding::iterator i = preceeding.begin(); i != preceeding.end(); i++ ) {
if( i->first != m && i->first != bad && i->second > 0 )
q << " " << i->first << '\n';
}
@@ -126,7 +126,7 @@ namespace mongo {
assert( 0 );
}
}
- void leaving(mid m) {
+ void leaving(mid m) {
if( this == 0 ) return; // still in startup pre-main()
Preceeding& preceeding = *us.get();
preceeding[m]--;
@@ -137,7 +137,7 @@ namespace mongo {
}
};
extern MutexDebugger &mutexDebugger;
-
+
// If you create a local static instance of this class, that instance will be destroyed
// before all global static objects are destroyed, so _destroyingStatics will be set
// to true before the global static variables are destroyed.
@@ -157,13 +157,13 @@ namespace mongo {
#endif
#if defined(_DEBUG)
- mutex(const char *name)
- : _name(name)
+ mutex(const char *name)
+ : _name(name)
#else
- mutex(const char *)
+ mutex(const char *)
#endif
- {
- _m = new boost::timed_mutex();
+ {
+ _m = new boost::timed_mutex();
IGNORE_OBJECT( _m ); // Turn-off heap checking on _m
}
~mutex() {
@@ -172,22 +172,22 @@ namespace mongo {
delete _m;
}
}
-
+
class try_lock : boost::noncopyable {
public:
- try_lock( mongo::mutex &m , int millis = 0 )
- : _l( m.boost() , incxtimemillis( millis ) ) ,
+ try_lock( mongo::mutex &m , int millis = 0 )
+ : _l( m.boost() , incxtimemillis( millis ) ) ,
#if BOOST_VERSION >= 103500
- ok( _l.owns_lock() )
+ ok( _l.owns_lock() )
#else
ok( _l.locked() )
#endif
{
}
- ~try_lock() {
+ ~try_lock() {
}
-
+
private:
boost::timed_mutex::scoped_timed_lock _l;
@@ -207,7 +207,7 @@ namespace mongo {
mutexDebugger.entering(mut->_name);
#endif
}
- ~scoped_lock() {
+ ~scoped_lock() {
#if defined(_DEBUG)
mutexDebugger.leaving(mut->_name);
#endif
@@ -223,7 +223,7 @@ namespace mongo {
boost::timed_mutex &boost() { return *_m; }
boost::timed_mutex *_m;
};
-
+
typedef mutex::scoped_lock scoped_lock;
typedef boost::recursive_mutex::scoped_lock recursive_scoped_lock;
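
A sketch of the mongo::mutex / scoped_lock idiom above, mirroring uses like scoped_lock lk( state->lock ) elsewhere in this diff; on _DEBUG builds each acquisition feeds MutexDebugger's ordering check.

    mongo::mutex aM( "aM" );
    mongo::mutex bM( "bM" );

    void orderedLocking() {
        mongo::scoped_lock la( aM );   // _DEBUG builds record entering("aM")
        mongo::scoped_lock lb( bM );   // establishes the order: aM before bM
        // taking bM before aM elsewhere would trip MutexDebugger's cycle check
    }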
diff --git a/util/concurrency/mvar.h b/util/concurrency/mvar.h
index 7d17051368e..9c7a505b6d5 100644
--- a/util/concurrency/mvar.h
+++ b/util/concurrency/mvar.h
@@ -31,18 +31,18 @@ namespace mongo {
// create an empty MVar
MVar()
- : _state(EMPTY)
+ : _state(EMPTY)
{}
// creates a full MVar
MVar(const T& val)
- : _state(FULL)
- , _value(val)
+ : _state(FULL)
+ , _value(val)
{}
// puts val into the MVar and returns true or returns false if full
// never blocks
- bool tryPut(const T& val){
+ bool tryPut(const T& val) {
// intentionally repeat test before and after lock
if (_state == FULL) return false;
Mutex::scoped_lock lock(_mutex);
@@ -59,17 +59,17 @@ namespace mongo {
// puts val into the MVar
// will block if the MVar is already full
- void put(const T& val){
+ void put(const T& val) {
Mutex::scoped_lock lock(_mutex);
- while (!tryPut(val)){
- // unlocks lock while waiting and relocks before returning
+ while (!tryPut(val)) {
+ // unlocks lock while waiting and relocks before returning
_condition.wait(lock);
- }
+ }
}
// takes val out of the MVar and returns true or returns false if empty
// never blocks
- bool tryTake(T& out){
+ bool tryTake(T& out) {
// intentionally repeat test before and after lock
if (_state == EMPTY) return false;
Mutex::scoped_lock lock(_mutex);
@@ -86,14 +86,14 @@ namespace mongo {
// takes val out of the MVar
// will block if the MVar is empty
- T take(){
+ T take() {
T ret = T();
Mutex::scoped_lock lock(_mutex);
- while (!tryTake(ret)){
- // unlocks lock while waiting and relocks before returning
+ while (!tryTake(ret)) {
+ // unlocks lock while waiting and relocks before returning
_condition.wait(lock);
- }
+ }
return ret;
}
@@ -102,7 +102,7 @@ namespace mongo {
// Note: this is fast because there is no locking, but state could
// change before you get a chance to act on it.
// Mainly useful for sanity checks / asserts.
- State getState(){ return _state; }
+ State getState() { return _state; }
private:
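
MVar is a single-slot blocking mailbox; a producer/consumer sketch (illustrative only) using the methods shown above:

    mongo::MVar<int> box;              // starts EMPTY

    void producerThread() {
        box.put( 42 );                 // blocks while the slot is FULL
    }

    void consumerThread() {
        int v = box.take();            // blocks until a value arrives
        cout << "got " << v << endl;
        int w;
        if ( box.tryTake( w ) ) {
            // non-blocking variant: true only if another put() already happened
        }
    }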
diff --git a/util/concurrency/rwlock.h b/util/concurrency/rwlock.h
index 2364b3a39b9..c9429c5b76d 100644
--- a/util/concurrency/rwlock.h
+++ b/util/concurrency/rwlock.h
@@ -22,14 +22,14 @@
#include "../time_support.h"
#if BOOST_VERSION >= 103500
- #define BOOST_RWLOCK
+#define BOOST_RWLOCK
#else
- #if defined(_WIN32)
- #error need boost >= 1.35 for windows
- #endif
-
- #include <pthread.h>
+#if defined(_WIN32)
+#error need boost >= 1.35 for windows
+#endif
+
+#include <pthread.h>
#endif
@@ -51,40 +51,40 @@ namespace mongo {
#else
RWLock(const char *) { }
#endif
- void lock(){
+ void lock() {
_m.lock();
#if defined(_DEBUG)
mutexDebugger.entering(_name);
#endif
}
- void unlock(){
+ void unlock() {
#if defined(_DEBUG)
mutexDebugger.leaving(_name);
#endif
_m.unlock();
}
-
- void lock_shared(){
+
+ void lock_shared() {
_m.lock_shared();
}
-
- void unlock_shared(){
+
+ void unlock_shared() {
_m.unlock_shared();
}
- bool lock_shared_try( int millis ){
+ bool lock_shared_try( int millis ) {
boost::system_time until = get_system_time();
until += boost::posix_time::milliseconds(millis);
- if( _m.timed_lock_shared( until ) ) {
+ if( _m.timed_lock_shared( until ) ) {
return true;
}
return false;
}
- bool lock_try( int millis = 0 ){
+ bool lock_try( int millis = 0 ) {
boost::system_time until = get_system_time();
until += boost::posix_time::milliseconds(millis);
- if( _m.timed_lock( until ) ) {
+ if( _m.timed_lock( until ) ) {
#if defined(_DEBUG)
mutexDebugger.entering(_name);
#endif
@@ -99,7 +99,7 @@ namespace mongo {
class RWLock {
pthread_rwlock_t _lock;
- inline void check( int x ){
+ inline void check( int x ) {
if( x == 0 )
return;
log() << "pthread rwlock failed: " << x << endl;
@@ -115,40 +115,40 @@ namespace mongo {
#endif
check( pthread_rwlock_init( &_lock , 0 ) );
}
-
- ~RWLock(){
- if ( ! StaticObserver::_destroyingStatics ){
+
+ ~RWLock() {
+ if ( ! StaticObserver::_destroyingStatics ) {
check( pthread_rwlock_destroy( &_lock ) );
}
}
- void lock(){
+ void lock() {
check( pthread_rwlock_wrlock( &_lock ) );
#if defined(_DEBUG)
mutexDebugger.entering(_name);
#endif
}
- void unlock(){
+ void unlock() {
#if defined(_DEBUG)
mutexDebugger.leaving(_name);
#endif
check( pthread_rwlock_unlock( &_lock ) );
}
-
- void lock_shared(){
+
+ void lock_shared() {
check( pthread_rwlock_rdlock( &_lock ) );
}
-
- void unlock_shared(){
+
+ void unlock_shared() {
check( pthread_rwlock_unlock( &_lock ) );
}
-
- bool lock_shared_try( int millis ){
+
+ bool lock_shared_try( int millis ) {
return _try( millis , false );
}
- bool lock_try( int millis = 0 ){
- if( _try( millis , true ) ) {
+ bool lock_try( int millis = 0 ) {
+ if( _try( millis , true ) ) {
#if defined(_DEBUG)
mutexDebugger.entering(_name);
#endif
@@ -157,31 +157,31 @@ namespace mongo {
return false;
}
- bool _try( int millis , bool write ){
+ bool _try( int millis , bool write ) {
while ( true ) {
- int x = write ?
- pthread_rwlock_trywrlock( &_lock ) :
- pthread_rwlock_tryrdlock( &_lock );
-
+ int x = write ?
+ pthread_rwlock_trywrlock( &_lock ) :
+ pthread_rwlock_tryrdlock( &_lock );
+
if ( x <= 0 ) {
return true;
}
-
+
if ( millis-- <= 0 )
return false;
-
- if ( x == EBUSY ){
+
+ if ( x == EBUSY ) {
sleepmillis(1);
continue;
}
check(x);
- }
-
+ }
+
return false;
}
};
-
+
#endif
@@ -190,7 +190,7 @@ namespace mongo {
public:
struct exception { };
rwlock_try_write(RWLock& l, int millis = 0) : _l(l) {
- if( !l.lock_try(millis) )
+ if( !l.lock_try(millis) )
throw exception();
}
~rwlock_try_write() { _l.unlock(); }
@@ -216,7 +216,7 @@ namespace mongo {
else
_lock.unlock_shared();
}
- private:
+ private:
RWLock& _lock;
const bool _write;
};
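
A readers/writer sketch for the RWLock interface above (illustrative); the rwlock_try_write guard shown earlier is the RAII alternative to the manual lock_try()/unlock() pairing here.

    mongo::RWLock indexLock( "indexLock" );

    void readPath() {
        indexLock.lock_shared();            // many readers may hold this at once
        // ... read ...
        indexLock.unlock_shared();
    }

    void writePath() {
        if ( indexLock.lock_try( 50 ) ) {   // exclusive; give up after ~50 ms
            // ... write ...
            indexLock.unlock();
        }
    }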
diff --git a/util/concurrency/spin_lock.cpp b/util/concurrency/spin_lock.cpp
index 2e56acb47b3..0f33609d645 100644
--- a/util/concurrency/spin_lock.cpp
+++ b/util/concurrency/spin_lock.cpp
@@ -22,7 +22,7 @@
namespace mongo {
- SpinLock::~SpinLock() {
+ SpinLock::~SpinLock() {
#if defined(_WIN32)
DeleteCriticalSection(&_cs);
#endif
@@ -30,14 +30,14 @@ namespace mongo {
SpinLock::SpinLock()
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
- : _locked( false ) { }
+ : _locked( false ) { }
#elif defined(_WIN32)
{ InitializeCriticalSectionAndSpinCount(&_cs, 4000); }
#else
: _mutex( "SpinLock" ) { }
#endif
- void SpinLock::lock(){
+ void SpinLock::lock() {
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
// fast path
if (!_locked && !__sync_lock_test_and_set(&_locked, true)) {
@@ -65,17 +65,17 @@ namespace mongo {
#endif
}
- void SpinLock::unlock(){
+ void SpinLock::unlock() {
#if defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
__sync_lock_release(&_locked);
-#elif defined(WIN32)
+#elif defined(WIN32)
LeaveCriticalSection(&_cs);
#else
-
+
_mutex.unlock();
#endif
diff --git a/util/concurrency/spin_lock.h b/util/concurrency/spin_lock.h
index 7324a6e4f32..d5360f7b3c6 100644
--- a/util/concurrency/spin_lock.h
+++ b/util/concurrency/spin_lock.h
@@ -48,7 +48,7 @@ namespace mongo {
// Non-copyable, non-assignable
SpinLock(SpinLock&);
SpinLock& operator=(SpinLock&);
- };
+ };
} // namespace mongo
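
SpinLock exposes only raw lock()/unlock(); a tiny RAII holder (invented here, not in the tree) keeps usage exception-safe:

    // Hypothetical wrapper -- SpinLock itself provides no scoped guard.
    class SpinLockHolder {
        mongo::SpinLock& _sl;
    public:
        SpinLockHolder( mongo::SpinLock& sl ) : _sl( sl ) { _sl.lock(); }
        ~SpinLockHolder() { _sl.unlock(); }
    };

    mongo::SpinLock counterLock;
    int counter = 0;

    void bump() {
        SpinLockHolder hold( counterLock );  // unlocked automatically on scope exit
        counter++;
    }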
diff --git a/util/concurrency/synchronization.cpp b/util/concurrency/synchronization.cpp
index 5639c888a32..12e2894ef1a 100644
--- a/util/concurrency/synchronization.cpp
+++ b/util/concurrency/synchronization.cpp
@@ -22,15 +22,15 @@ namespace mongo {
Notification::Notification() : _mutex ( "Notification" ) , _notified( false ) { }
- Notification::~Notification(){ }
+ Notification::~Notification() { }
- void Notification::waitToBeNotified(){
+ void Notification::waitToBeNotified() {
scoped_lock lock( _mutex );
while ( ! _notified )
_condition.wait( lock.boost() );
}
- void Notification::notifyOne(){
+ void Notification::notifyOne() {
scoped_lock lock( _mutex );
assert( !_notified );
_notified = true;
@@ -38,7 +38,7 @@ namespace mongo {
}
NotifyAll::NotifyAll() : _mutex("NotifyAll"), _counter(0) { }
-
+
void NotifyAll::wait() {
scoped_lock lock( _mutex );
unsigned long long old = _counter;
diff --git a/util/concurrency/synchronization.h b/util/concurrency/synchronization.h
index c2e70cabe19..ac2fcabcb86 100644
--- a/util/concurrency/synchronization.h
+++ b/util/concurrency/synchronization.h
@@ -52,12 +52,12 @@ namespace mongo {
        /** establishes a synchronization point between threads. N threads wait and one notifies.
threadsafe.
*/
- class NotifyAll : boost::noncopyable {
+ class NotifyAll : boost::noncopyable {
public:
NotifyAll();
- /** awaits the next notifyAll() call by another thread. notifications that precede this
- call are ignored -- we are looking for a fresh event.
+ /** awaits the next notifyAll() call by another thread. notifications that precede this
+ call are ignored -- we are looking for a fresh event.
*/
void wait();
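
A sketch of the one-notifier / N-waiters handshake; notifyAll() itself is not visible in this hunk and is assumed from the doc comment above.

    mongo::NotifyAll gate;

    void waiterThread() {
        gate.wait();          // blocks for the *next* notifyAll(); earlier ones are ignored
        // ... proceed once notified ...
    }

    void notifierThread() {
        gate.notifyAll();     // wakes every thread currently blocked in wait()
    }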
diff --git a/util/concurrency/task.cpp b/util/concurrency/task.cpp
index 20801f10217..d84cd71dceb 100644
--- a/util/concurrency/task.cpp
+++ b/util/concurrency/task.cpp
@@ -25,11 +25,11 @@
#include "../unittest.h"
#include "../time_support.h"
-namespace mongo {
+namespace mongo {
- namespace task {
+ namespace task {
- /*void foo() {
+ /*void foo() {
boost::mutex m;
boost::mutex::scoped_lock lk(m);
boost::condition cond;
@@ -37,21 +37,21 @@ namespace mongo {
cond.notify_one();
}*/
- Task::Task()
- : BackgroundJob( true /* deleteSelf */ ) {
+ Task::Task()
+ : BackgroundJob( true /* deleteSelf */ ) {
n = 0;
repeat = 0;
}
void Task::halt() { repeat = 0; }
- void Task::run() {
+ void Task::run() {
assert( n == 0 );
while( 1 ) {
n++;
- try {
+ try {
doWork();
- }
+ }
catch(...) { }
if( repeat == 0 )
break;
@@ -65,11 +65,11 @@ namespace mongo {
go();
}
- void fork(Task *t) {
+ void fork(Task *t) {
t->begin();
}
- void repeat(Task *t, unsigned millis) {
+ void repeat(Task *t, unsigned millis) {
t->repeat = millis;
t->begin();
}
@@ -110,7 +110,7 @@ namespace mongo {
}
}
- void Server::send( lam msg ) {
+ void Server::send( lam msg ) {
{
boost::mutex::scoped_lock lk(m);
d.push_back(msg);
@@ -118,9 +118,9 @@ namespace mongo {
c.notify_one();
}
- void Server::doWork() {
+ void Server::doWork() {
starting();
- while( 1 ) {
+ while( 1 ) {
lam f;
try {
boost::mutex::scoped_lock lk(m);
@@ -129,7 +129,7 @@ namespace mongo {
f = d.front();
d.pop_front();
}
- catch(...) {
+ catch(...) {
log() << "ERROR exception in Server:doWork?" << endl;
}
try {
@@ -141,27 +141,28 @@ namespace mongo {
d.push_back(f);
}
}
- } catch(std::exception& e) {
- log() << "Server::doWork task:" << name() << " exception:" << e.what() << endl;
- }
- catch(const char *p) {
- log() << "Server::doWork task:" << name() << " unknown c exception:" <<
- ((p&&strlen(p)<800)?p:"?") << endl;
- }
- catch(...) {
- log() << "Server::doWork unknown exception task:" << name() << endl;
+ }
+ catch(std::exception& e) {
+ log() << "Server::doWork task:" << name() << " exception:" << e.what() << endl;
+ }
+ catch(const char *p) {
+ log() << "Server::doWork task:" << name() << " unknown c exception:" <<
+ ((p&&strlen(p)<800)?p:"?") << endl;
+ }
+ catch(...) {
+ log() << "Server::doWork unknown exception task:" << name() << endl;
}
}
}
static Server *s;
- static void abc(int i) {
+ static void abc(int i) {
cout << "Hello " << i << endl;
s->requeue();
}
class TaskUnitTest : public mongo::UnitTest {
public:
- virtual void run() {
+ virtual void run() {
lam f = boost::bind(abc, 3);
//f();
diff --git a/util/concurrency/task.h b/util/concurrency/task.h
index 654ecd35fd2..d7b45eeef24 100644
--- a/util/concurrency/task.h
+++ b/util/concurrency/task.h
@@ -20,9 +20,9 @@
#include "../background.h"
-namespace mongo {
+namespace mongo {
- namespace task {
+ namespace task {
/** abstraction around threads. simpler than BackgroundJob which is used behind the scenes.
allocate the Task dynamically. when the thread terminates, the Task object will delete itself.
@@ -34,7 +34,7 @@ namespace mongo {
public:
Task();
- /** for a repeating task, stop after current invocation ends. can be called by other threads
+ /** for a repeating task, stop after current invocation ends. can be called by other threads
as long as the Task is still in scope.
*/
void halt();
@@ -54,8 +54,8 @@ namespace mongo {
void repeat(Task *t, unsigned millis);
/*** Example ***
- inline void sample() {
- class Sample : public Task {
+ inline void sample() {
+ class Sample : public Task {
public:
int result;
virtual void doWork() { result = 1234; }
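
The header's own example covers fork(); here is a companion sketch for the repeat() helper declared above, assuming the mongo tree on the include path. As in the header's Sample class, only doWork() is overridden, and the Task frees itself when its thread ends:

    #include <iostream>
    #include "util/concurrency/task.h"

    class Heartbeat : public mongo::task::Task {
    public:
        virtual void doWork() {
            std::cout << "heartbeat" << std::endl; // one invocation per interval
        }
    };

    void startHeartbeat() {
        // allocate dynamically: the object deletes itself on thread exit
        mongo::task::repeat( new Heartbeat(), 2000 /*millis*/ );
    }
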
diff --git a/util/concurrency/thread_pool.cpp b/util/concurrency/thread_pool.cpp
index 2caac1ff3f3..1c258847cb5 100644
--- a/util/concurrency/thread_pool.cpp
+++ b/util/concurrency/thread_pool.cpp
@@ -20,8 +20,8 @@
#include "thread_pool.h"
#include "mvar.h"
-namespace mongo{
- namespace threadpool{
+namespace mongo {
+ namespace threadpool {
// Worker thread
class Worker : boost::noncopyable {
@@ -34,12 +34,12 @@ namespace mongo{
// destructor will block until current operation is completed
// Acts as a "join" on this thread
- ~Worker(){
+ ~Worker() {
_task.put(Task());
_thread.join();
}
- void set_task(Task& func){
+ void set_task(Task& func) {
assert(!func.empty());
assert(_is_done);
_is_done = false;
@@ -47,13 +47,13 @@ namespace mongo{
_task.put(func);
}
- private:
+ private:
ThreadPool& _owner;
MVar<Task> _task;
bool _is_done; // only used for error detection
boost::thread _thread;
- void loop(){
+ void loop() {
while (true) {
Task task = _task.take();
if (task.empty())
@@ -61,9 +61,11 @@ namespace mongo{
try {
task();
- } catch (std::exception e){
+ }
+            catch (std::exception& e) {
log() << "Unhandled exception in worker thread: " << e.what() << endl;;
- } catch (...){
+ }
+ catch (...) {
log() << "Unhandled non-exception in worker thread" << endl;
}
_is_done = true;
@@ -74,16 +76,15 @@ namespace mongo{
ThreadPool::ThreadPool(int nThreads)
: _mutex("ThreadPool"), _tasksRemaining(0)
- , _nThreads(nThreads)
- {
+ , _nThreads(nThreads) {
scoped_lock lock(_mutex);
- while (nThreads-- > 0){
+ while (nThreads-- > 0) {
Worker* worker = new Worker(*this);
_freeWorkers.push_front(worker);
}
}
- ThreadPool::~ThreadPool(){
+ ThreadPool::~ThreadPool() {
join();
assert(_tasks.empty());
@@ -91,40 +92,42 @@ namespace mongo{
// O(n) but n should be small
assert(_freeWorkers.size() == (unsigned)_nThreads);
- while(!_freeWorkers.empty()){
+ while(!_freeWorkers.empty()) {
delete _freeWorkers.front();
_freeWorkers.pop_front();
}
}
- void ThreadPool::join(){
+ void ThreadPool::join() {
scoped_lock lock(_mutex);
- while(_tasksRemaining){
+ while(_tasksRemaining) {
_condition.wait(lock.boost());
}
}
- void ThreadPool::schedule(Task task){
+ void ThreadPool::schedule(Task task) {
scoped_lock lock(_mutex);
_tasksRemaining++;
- if (!_freeWorkers.empty()){
+ if (!_freeWorkers.empty()) {
_freeWorkers.front()->set_task(task);
_freeWorkers.pop_front();
- }else{
+ }
+ else {
_tasks.push_back(task);
}
}
// should only be called by a worker from the worker thread
- void ThreadPool::task_done(Worker* worker){
+ void ThreadPool::task_done(Worker* worker) {
scoped_lock lock(_mutex);
- if (!_tasks.empty()){
+ if (!_tasks.empty()) {
worker->set_task(_tasks.front());
_tasks.pop_front();
- }else{
+ }
+ else {
_freeWorkers.push_front(worker);
}
diff --git a/util/concurrency/thread_pool.h b/util/concurrency/thread_pool.h
index 31e06430088..b348ed1d01b 100644
--- a/util/concurrency/thread_pool.h
+++ b/util/concurrency/thread_pool.h
@@ -24,59 +24,59 @@
namespace mongo {
-namespace threadpool {
- class Worker;
-
- typedef boost::function<void(void)> Task; //nullary function or functor
-
- // exported to the mongo namespace
- class ThreadPool : boost::noncopyable{
- public:
- explicit ThreadPool(int nThreads=8);
-
- // blocks until all tasks are complete (tasks_remaining() == 0)
- // You should not call schedule while in the destructor
- ~ThreadPool();
-
- // blocks until all tasks are complete (tasks_remaining() == 0)
- // does not prevent new tasks from being scheduled so could wait forever.
- // Also, new tasks could be scheduled after this returns.
- void join();
-
- // task will be copied a few times so make sure it's relatively cheap
- void schedule(Task task);
-
- // Helpers that wrap schedule and boost::bind.
- // Functor and args will be copied a few times so make sure it's relatively cheap
- template<typename F, typename A>
- void schedule(F f, A a){ schedule(boost::bind(f,a)); }
- template<typename F, typename A, typename B>
- void schedule(F f, A a, B b){ schedule(boost::bind(f,a,b)); }
- template<typename F, typename A, typename B, typename C>
- void schedule(F f, A a, B b, C c){ schedule(boost::bind(f,a,b,c)); }
- template<typename F, typename A, typename B, typename C, typename D>
- void schedule(F f, A a, B b, C c, D d){ schedule(boost::bind(f,a,b,c,d)); }
- template<typename F, typename A, typename B, typename C, typename D, typename E>
- void schedule(F f, A a, B b, C c, D d, E e){ schedule(boost::bind(f,a,b,c,d,e)); }
-
- int tasks_remaining() { return _tasksRemaining; }
-
- private:
- mongo::mutex _mutex;
- boost::condition _condition;
-
- list<Worker*> _freeWorkers; //used as LIFO stack (always front)
- list<Task> _tasks; //used as FIFO queue (push_back, pop_front)
- int _tasksRemaining; // in queue + currently processing
- int _nThreads; // only used for sanity checking. could be removed in the future.
-
- // should only be called by a worker from the worker's thread
- void task_done(Worker* worker);
- friend class Worker;
- };
-
-} //namespace threadpool
-
-using threadpool::ThreadPool;
+ namespace threadpool {
+ class Worker;
+
+ typedef boost::function<void(void)> Task; //nullary function or functor
+
+ // exported to the mongo namespace
+ class ThreadPool : boost::noncopyable {
+ public:
+ explicit ThreadPool(int nThreads=8);
+
+ // blocks until all tasks are complete (tasks_remaining() == 0)
+ // You should not call schedule while in the destructor
+ ~ThreadPool();
+
+ // blocks until all tasks are complete (tasks_remaining() == 0)
+ // does not prevent new tasks from being scheduled so could wait forever.
+ // Also, new tasks could be scheduled after this returns.
+ void join();
+
+ // task will be copied a few times so make sure it's relatively cheap
+ void schedule(Task task);
+
+ // Helpers that wrap schedule and boost::bind.
+ // Functor and args will be copied a few times so make sure it's relatively cheap
+ template<typename F, typename A>
+ void schedule(F f, A a) { schedule(boost::bind(f,a)); }
+ template<typename F, typename A, typename B>
+ void schedule(F f, A a, B b) { schedule(boost::bind(f,a,b)); }
+ template<typename F, typename A, typename B, typename C>
+ void schedule(F f, A a, B b, C c) { schedule(boost::bind(f,a,b,c)); }
+ template<typename F, typename A, typename B, typename C, typename D>
+ void schedule(F f, A a, B b, C c, D d) { schedule(boost::bind(f,a,b,c,d)); }
+ template<typename F, typename A, typename B, typename C, typename D, typename E>
+ void schedule(F f, A a, B b, C c, D d, E e) { schedule(boost::bind(f,a,b,c,d,e)); }
+
+ int tasks_remaining() { return _tasksRemaining; }
+
+ private:
+ mongo::mutex _mutex;
+ boost::condition _condition;
+
+ list<Worker*> _freeWorkers; //used as LIFO stack (always front)
+ list<Task> _tasks; //used as FIFO queue (push_back, pop_front)
+ int _tasksRemaining; // in queue + currently processing
+ int _nThreads; // only used for sanity checking. could be removed in the future.
+
+ // should only be called by a worker from the worker's thread
+ void task_done(Worker* worker);
+ friend class Worker;
+ };
+
+ } //namespace threadpool
+
+ using threadpool::ThreadPool;
} //namespace mongo
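
A usage sketch for the ThreadPool interface above, assuming the mongo tree on the include path. Note that join() waits only for work already scheduled:

    #include "util/concurrency/thread_pool.h"

    static void addOne(int* slot) { *slot += 1; }

    void poolDemo() {
        mongo::ThreadPool pool(4);               // spawns 4 workers up front
        int counters[8] = {0};
        for (int i = 0; i < 8; i++)
            pool.schedule(addOne, &counters[i]); // boost::bind helper overload
        pool.join();                             // blocks until tasks_remaining() == 0
    }
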
diff --git a/util/concurrency/value.h b/util/concurrency/value.h
index dabeb956e43..08d53062bf6 100644
--- a/util/concurrency/value.h
+++ b/util/concurrency/value.h
@@ -20,11 +20,11 @@
#pragma once
-namespace mongo {
+namespace mongo {
extern mutex _atomicMutex;
- /** atomic wrapper for a value. enters a mutex on each access. must
+ /** atomic wrapper for a value. enters a mutex on each access. must
be copyable.
*/
template<typename T>
@@ -33,20 +33,22 @@ namespace mongo {
public:
Atomic<T>() { }
- void operator=(const T& a) {
+ void operator=(const T& a) {
scoped_lock lk(_atomicMutex);
- val = a; }
+ val = a;
+ }
- operator T() const {
+ operator T() const {
scoped_lock lk(_atomicMutex);
- return val; }
-
+ return val;
+ }
+
/** example:
Atomic<int> q;
...
{
Atomic<int>::tran t(q);
- if( q.ref() > 0 )
+ if( q.ref() > 0 )
q.ref()--;
}
*/
@@ -58,11 +60,11 @@ namespace mongo {
};
};
- /** this string COULD be mangled but with the double buffering, assuming writes
- are infrequent, it's unlikely. thus, this is reasonable for lockless setting of
+ /** this string COULD be mangled but with the double buffering, assuming writes
+ are infrequent, it's unlikely. thus, this is reasonable for lockless setting of
diagnostic strings, where their content isn't critical.
*/
- class DiagStr {
+ class DiagStr {
char buf1[256];
char buf2[256];
char *p;
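
Both classes above trade speed for simplicity: Atomic<T> takes one global mutex per access, and DiagStr tolerates rare mangling in exchange for lock-free writes. A sketch of plain Atomic<T> use, assuming the mongo tree on the include path (the tran/ref() read-modify-write form is shown in the header's own comment):

    #include "util/concurrency/value.h"

    mongo::Atomic<int> requests;

    void onRequest() {
        int seen = requests;  // operator T(): locks _atomicMutex, copies out
        requests = seen + 1;  // operator=: locks again -- NOT an atomic increment;
                              // use Atomic<int>::tran plus ref() for read-modify-write
    }
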
diff --git a/util/concurrency/vars.cpp b/util/concurrency/vars.cpp
index 0bf52ec048e..3d057a4801e 100644
--- a/util/concurrency/vars.cpp
+++ b/util/concurrency/vars.cpp
@@ -20,28 +20,28 @@
#include "value.h"
#include "mutex.h"
-namespace mongo {
+namespace mongo {
mongo::mutex _atomicMutex("_atomicMutex");
// intentional leak. otherwise destructor orders can be problematic at termination.
MutexDebugger &mutexDebugger = *(new MutexDebugger());
- MutexDebugger::MutexDebugger() :
- x( *(new boost::mutex()) ), magic(0x12345678) {
- // optional way to debug lock order
- /*
- a = "a_lock";
- b = "b_lock";
- */
+ MutexDebugger::MutexDebugger() :
+ x( *(new boost::mutex()) ), magic(0x12345678) {
+ // optional way to debug lock order
+ /*
+ a = "a_lock";
+ b = "b_lock";
+ */
}
- void MutexDebugger::programEnding() {
+ void MutexDebugger::programEnding() {
if( logLevel>=1 && followers.size() ) {
std::cout << followers.size() << " mutexes in program" << endl;
- for( map< mid, set<mid> >::iterator i = followers.begin(); i != followers.end(); i++ ) {
+ for( map< mid, set<mid> >::iterator i = followers.begin(); i != followers.end(); i++ ) {
cout << i->first;
- if( maxNest[i->first] > 1 )
+ if( maxNest[i->first] > 1 )
cout << " maxNest:" << maxNest[i->first];
cout << '\n';
for( set<mid>::iterator j = i->second.begin(); j != i->second.end(); j++ )
diff --git a/util/debug_util.cpp b/util/debug_util.cpp
index f0a916d0ded..8ba6534ef7c 100644
--- a/util/debug_util.cpp
+++ b/util/debug_util.cpp
@@ -29,7 +29,7 @@ namespace mongo {
* 2) You have run "handle SIGSTOP noprint" in gdb
* 3) cmdLine.port + 2000 is free
*/
- void launchGDB(int){
+ void launchGDB(int) {
// Don't come back here
signal(SIGTRAP, SIG_IGN);
@@ -38,18 +38,19 @@ namespace mongo {
string pidToDebug = BSONObjBuilder::numStr(getpid());
cout << "\n\n\t**** Launching gdbserver on " << newPortStr << " ****" << endl << endl;
- if (fork() == 0){
+ if (fork() == 0) {
//child
execlp("gdbserver", "gdbserver", "--attach", newPortStr.c_str(), pidToDebug.c_str(), NULL);
perror(NULL);
- }else{
+ }
+ else {
//parent
raise(SIGSTOP); // pause all threads until gdb connects and continues
raise(SIGTRAP); // break inside gdbserver
}
}
- void setupSIGTRAPforGDB(){
+ void setupSIGTRAPforGDB() {
assert( signal(SIGTRAP , launchGDB ) != SIG_ERR );
}
#else
diff --git a/util/debug_util.h b/util/debug_util.h
index 85d17362d86..abed8d94924 100644
--- a/util/debug_util.h
+++ b/util/debug_util.h
@@ -62,7 +62,7 @@ namespace mongo {
#define MONGO_RARELY SOMETIMES( rarely, 128 )
#define RARELY MONGO_RARELY
-#define MONGO_ONCE for( static bool undone = true; undone; undone = false )
+#define MONGO_ONCE for( static bool undone = true; undone; undone = false )
#define ONCE MONGO_ONCE
#if defined(_WIN32)
@@ -74,8 +74,8 @@ namespace mongo {
void setupSIGTRAPforGDB();
extern int tlogLevel;
-
- inline void breakpoint(){
+
+ inline void breakpoint() {
if ( tlogLevel < 0 )
return;
#ifdef _WIN32
@@ -87,20 +87,20 @@ namespace mongo {
//prevent SIGTRAP from crashing the program if default action is specified and we are not in gdb
struct sigaction current;
sigaction(SIGTRAP, NULL, &current);
- if (current.sa_handler == SIG_DFL){
+ if (current.sa_handler == SIG_DFL) {
signal(SIGTRAP, SIG_IGN);
}
}
-
+
raise(SIGTRAP);
#endif
}
-
+
// conditional breakpoint
- inline void breakif(bool test){
+ inline void breakif(bool test) {
if (test)
breakpoint();
}
-
+
} // namespace mongo
diff --git a/util/embedded_builder.h b/util/embedded_builder.h
index 71e85c7ee1b..abf518e2583 100644
--- a/util/embedded_builder.h
+++ b/util/embedded_builder.h
@@ -29,10 +29,10 @@ namespace mongo {
// parameter in lex ascending order.
void prepareContext( string &name ) {
int i = 1, n = _builders.size();
- while( i < n &&
- name.substr( 0, _builders[ i ].first.length() ) == _builders[ i ].first &&
- ( name[ _builders[i].first.length() ] == '.' || name[ _builders[i].first.length() ] == 0 )
- ){
+ while( i < n &&
+ name.substr( 0, _builders[ i ].first.length() ) == _builders[ i ].first &&
+ ( name[ _builders[i].first.length() ] == '.' || name[ _builders[i].first.length() ] == 0 )
+ ) {
name = name.substr( _builders[ i ].first.length() + 1 );
++i;
}
@@ -83,10 +83,10 @@ namespace mongo {
}
BSONObjBuilder *back() { return _builders.back().second; }
-
+
vector< pair< string, BSONObjBuilder * > > _builders;
vector< shared_ptr< BSONObjBuilder > > _builderStorage;
};
-
+
} //namespace mongo
diff --git a/util/file.h b/util/file.h
index 38eef8ab3b3..28ddfc720a8 100644
--- a/util/file.h
+++ b/util/file.h
@@ -29,138 +29,138 @@
#include "text.h"
-namespace mongo {
+namespace mongo {
#ifndef __sunos__
-typedef uint64_t fileofs;
+ typedef uint64_t fileofs;
#else
-typedef boost::uint64_t fileofs;
+ typedef boost::uint64_t fileofs;
#endif
-class FileInterface {
-public:
- void open(const char *fn) {}
- void write(fileofs o, const char *data, unsigned len) {}
- void read(fileofs o, char *data, unsigned len) {}
- bool bad() {return false;}
- bool is_open() {return false;}
- fileofs len() { return 0; }
- void fsync() { assert(false); }
-};
-
-#if defined(_WIN32)
+ class FileInterface {
+ public:
+ void open(const char *fn) {}
+ void write(fileofs o, const char *data, unsigned len) {}
+ void read(fileofs o, char *data, unsigned len) {}
+ bool bad() {return false;}
+ bool is_open() {return false;}
+ fileofs len() { return 0; }
+ void fsync() { assert(false); }
+ };
+
+#if defined(_WIN32)
#include <io.h>
-class File : public FileInterface {
- HANDLE fd;
- bool _bad;
- void err(BOOL b=false) { /* false = error happened */
- if( !b && !_bad ) {
+ class File : public FileInterface {
+ HANDLE fd;
+ bool _bad;
+ void err(BOOL b=false) { /* false = error happened */
+ if( !b && !_bad ) {
+ _bad = true;
+ log() << "File I/O error " << GetLastError() << '\n';
+ }
+ }
+ public:
+ File() {
+ fd = INVALID_HANDLE_VALUE;
_bad = true;
- log() << "File I/O error " << GetLastError() << '\n';
}
- }
-public:
- File() {
- fd = INVALID_HANDLE_VALUE;
- _bad = true;
- }
- ~File() {
- if( is_open() ) CloseHandle(fd);
- fd = INVALID_HANDLE_VALUE;
- }
- void open(const char *filename, bool readOnly=false ) {
- fd = CreateFile(
- toNativeString(filename).c_str(),
- ( readOnly ? 0 : GENERIC_WRITE ) | GENERIC_READ, FILE_SHARE_WRITE|FILE_SHARE_READ,
- NULL, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
- if( !is_open() ) {
- DWORD e = GetLastError();
- log() << "Create/Open File failed " << filename << ' ' << errnoWithDescription(e) << endl;
+ ~File() {
+ if( is_open() ) CloseHandle(fd);
+ fd = INVALID_HANDLE_VALUE;
}
- else
- _bad = false;
- }
- void write(fileofs o, const char *data, unsigned len) {
- LARGE_INTEGER li;
- li.QuadPart = o;
- SetFilePointerEx(fd, li, NULL, FILE_BEGIN);
- DWORD written;
- err( WriteFile(fd, data, len, &written, NULL) );
- }
- void read(fileofs o, char *data, unsigned len) {
- DWORD read;
- LARGE_INTEGER li;
- li.QuadPart = o;
- SetFilePointerEx(fd, li, NULL, FILE_BEGIN);
- int ok = ReadFile(fd, data, len, &read, 0);
- if( !ok )
- err(ok);
- else
- massert( 10438 , "ReadFile error - truncated file?", read == len);
- }
- bool bad() { return _bad; }
- bool is_open() { return fd != INVALID_HANDLE_VALUE; }
- fileofs len() {
- LARGE_INTEGER li;
- li.LowPart = GetFileSize(fd, (DWORD *) &li.HighPart);
- if( li.HighPart == 0 && li.LowPart == INVALID_FILE_SIZE ) {
- err( false );
- return 0;
+ void open(const char *filename, bool readOnly=false ) {
+ fd = CreateFile(
+ toNativeString(filename).c_str(),
+ ( readOnly ? 0 : GENERIC_WRITE ) | GENERIC_READ, FILE_SHARE_WRITE|FILE_SHARE_READ,
+ NULL, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
+ if( !is_open() ) {
+ DWORD e = GetLastError();
+ log() << "Create/Open File failed " << filename << ' ' << errnoWithDescription(e) << endl;
+ }
+ else
+ _bad = false;
+ }
+ void write(fileofs o, const char *data, unsigned len) {
+ LARGE_INTEGER li;
+ li.QuadPart = o;
+ SetFilePointerEx(fd, li, NULL, FILE_BEGIN);
+ DWORD written;
+ err( WriteFile(fd, data, len, &written, NULL) );
}
- return li.QuadPart;
- }
- void fsync() { FlushFileBuffers(fd); }
-};
+ void read(fileofs o, char *data, unsigned len) {
+ DWORD read;
+ LARGE_INTEGER li;
+ li.QuadPart = o;
+ SetFilePointerEx(fd, li, NULL, FILE_BEGIN);
+ int ok = ReadFile(fd, data, len, &read, 0);
+ if( !ok )
+ err(ok);
+ else
+ massert( 10438 , "ReadFile error - truncated file?", read == len);
+ }
+ bool bad() { return _bad; }
+ bool is_open() { return fd != INVALID_HANDLE_VALUE; }
+ fileofs len() {
+ LARGE_INTEGER li;
+ li.LowPart = GetFileSize(fd, (DWORD *) &li.HighPart);
+ if( li.HighPart == 0 && li.LowPart == INVALID_FILE_SIZE ) {
+ err( false );
+ return 0;
+ }
+ return li.QuadPart;
+ }
+ void fsync() { FlushFileBuffers(fd); }
+ };
#else
-class File : public FileInterface {
- int fd;
- bool _bad;
- void err(bool ok) {
- if( !ok && !_bad ) {
+ class File : public FileInterface {
+ int fd;
+ bool _bad;
+ void err(bool ok) {
+ if( !ok && !_bad ) {
+ _bad = true;
+ log() << "File I/O " << errnoWithDescription() << '\n';
+ }
+ }
+ public:
+ File() {
+ fd = -1;
_bad = true;
- log() << "File I/O " << errnoWithDescription() << '\n';
}
- }
-public:
- File() {
- fd = -1;
- _bad = true;
- }
- ~File() {
- if( is_open() ) ::close(fd);
- fd = -1;
- }
+ ~File() {
+ if( is_open() ) ::close(fd);
+ fd = -1;
+ }
#ifndef O_NOATIME
#define O_NOATIME 0
#endif
- void open(const char *filename, bool readOnly=false ) {
- fd = ::open(filename,
- O_CREAT | ( readOnly ? 0 : ( O_RDWR | O_NOATIME ) ) ,
- S_IRUSR | S_IWUSR);
- if ( fd <= 0 ) {
- out() << "couldn't open " << filename << ' ' << errnoWithDescription() << endl;
- return;
+ void open(const char *filename, bool readOnly=false ) {
+ fd = ::open(filename,
+ O_CREAT | ( readOnly ? 0 : ( O_RDWR | O_NOATIME ) ) ,
+ S_IRUSR | S_IWUSR);
+ if ( fd <= 0 ) {
+ out() << "couldn't open " << filename << ' ' << errnoWithDescription() << endl;
+ return;
+ }
+ _bad = false;
+ }
+ void write(fileofs o, const char *data, unsigned len) {
+ err( ::pwrite(fd, data, len, o) == (int) len );
+ }
+ void read(fileofs o, char *data, unsigned len) {
+ err( ::pread(fd, data, len, o) == (int) len );
+ }
+ bool bad() { return _bad; }
+ bool is_open() { return fd > 0; }
+ fileofs len() {
+ return lseek(fd, 0, SEEK_END);
}
- _bad = false;
- }
- void write(fileofs o, const char *data, unsigned len) {
- err( ::pwrite(fd, data, len, o) == (int) len );
- }
- void read(fileofs o, char *data, unsigned len) {
- err( ::pread(fd, data, len, o) == (int) len );
- }
- bool bad() { return _bad; }
- bool is_open() { return fd > 0; }
- fileofs len() {
- return lseek(fd, 0, SEEK_END);
- }
- void fsync() { ::fsync(fd); }
-};
+ void fsync() { ::fsync(fd); }
+ };
#endif
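
Both File implementations above share a positional-I/O interface: read and write take explicit offsets, so there is no seek state to manage. A sketch, assuming the mongo tree on the include path:

    #include "util/file.h"

    void copyHeader(const char* src, const char* dst) {
        mongo::File in, out;
        in.open(src, /*readOnly=*/true);
        out.open(dst);
        if (in.bad() || out.bad())
            return;
        char buf[4096];
        unsigned n = in.len() < 4096 ? (unsigned) in.len() : 4096;
        in.read(0, buf, n);    // read n bytes at offset 0
        out.write(0, buf, n);  // write them at offset 0
        out.fsync();           // flush through to disk
    }
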
diff --git a/util/file_allocator.cpp b/util/file_allocator.cpp
index ca4d7549aed..fe2b616ddee 100644
--- a/util/file_allocator.cpp
+++ b/util/file_allocator.cpp
@@ -36,20 +36,20 @@ using namespace mongoutils;
namespace mongo {
#if defined(_WIN32)
- FileAllocator::FileAllocator(){
+ FileAllocator::FileAllocator() {
}
-
- void FileAllocator::start(){
+
+ void FileAllocator::start() {
}
void FileAllocator::requestAllocation( const string &name, long &size ) {
- /* Some of the system calls in the file allocator don't work in win,
- so no win support - 32 or 64 bit. Plus we don't seem to need preallocation
+ /* Some of the system calls in the file allocator don't work in win,
+ so no win support - 32 or 64 bit. Plus we don't seem to need preallocation
on windows anyway as we don't have to pre-zero the file there.
*/
}
- void FileAllocator::allocateAsap( const string &name, unsigned long long &size ){
+ void FileAllocator::allocateAsap( const string &name, unsigned long long &size ) {
// no-op
}
@@ -63,16 +63,16 @@ namespace mongo {
}
#else
-
- FileAllocator::FileAllocator()
+
+ FileAllocator::FileAllocator()
: _pendingMutex("FileAllocator"), _failed() {
}
-
+
void FileAllocator::start() {
boost::thread t( boost::bind( &FileAllocator::run , this ) );
}
-
+
void FileAllocator::requestAllocation( const string &name, long &size ) {
scoped_lock lk( _pendingMutex );
if ( _failed )
@@ -86,7 +86,7 @@ namespace mongo {
_pendingSize[ name ] = size;
_pendingUpdated.notify_all();
}
-
+
void FileAllocator::allocateAsap( const string &name, unsigned long long &size ) {
scoped_lock lk( _pendingMutex );
long oldSize = prevSize( name );
@@ -110,9 +110,9 @@ namespace mongo {
checkFailure();
_pendingUpdated.wait( lk.boost() );
}
-
+
}
-
+
void FileAllocator::waitUntilFinished() const {
if ( _failed )
return;
@@ -120,16 +120,16 @@ namespace mongo {
while( _pending.size() != 0 )
_pendingUpdated.wait( lk.boost() );
}
-
+
void FileAllocator::ensureLength(int fd , long size) {
-#if defined(__linux__)
+#if defined(__linux__)
int ret = posix_fallocate(fd,0,size);
if ( ret == 0 )
return;
-
+
log() << "FileAllocator: posix_fallocate failed: " << errnoWithDescription( ret ) << " falling back" << endl;
#endif
-
+
off_t filelen = lseek(fd, 0, SEEK_END);
if ( filelen < size ) {
if (filelen != 0) {
@@ -138,13 +138,13 @@ namespace mongo {
uassert( 10440 , ss.str(), filelen == 0 );
}
// Check for end of disk.
-
+
uassert( 10441 , str::stream() << "Unable to allocate new file of size " << size << ' ' << errnoWithDescription(),
size - 1 == lseek(fd, size - 1, SEEK_SET) );
uassert( 10442 , str::stream() << "Unable to allocate new file of size " << size << ' ' << errnoWithDescription(),
1 == write(fd, "", 1) );
lseek(fd, 0, SEEK_SET);
-
+
const long z = 256 * 1024;
const boost::scoped_array<char> buf_holder (new char[z]);
char* buf = buf_holder.get();
@@ -154,21 +154,21 @@ namespace mongo {
long towrite = left;
if ( towrite > z )
towrite = z;
-
+
int written = write( fd , buf , towrite );
uassert( 10443 , errnoWithPrefix("FileAllocator: file write failed" ), written > 0 );
left -= written;
}
}
}
-
+
void FileAllocator::checkFailure() {
if (_failed) {
// we want to log the problem (diskfull.js expects it) but we do not want to dump a stack trace
msgassertedNoTrace( 12520, "new file allocation failure" );
}
}
-
+
long FileAllocator::prevSize( const string &name ) const {
if ( _pendingSize.count( name ) > 0 )
return _pendingSize[ name ];
@@ -176,7 +176,7 @@ namespace mongo {
return boost::filesystem::file_size( name );
return -1;
}
-
+
// caller must hold _pendingMutex lock.
bool FileAllocator::inProgress( const string &name ) const {
for( list< string >::const_iterator i = _pending.begin(); i != _pending.end(); ++i )
@@ -185,7 +185,7 @@ namespace mongo {
return false;
}
- void FileAllocator::run( FileAllocator * fa ){
+ void FileAllocator::run( FileAllocator * fa ) {
setThreadName( "FileAllocator" );
while( 1 ) {
{
@@ -213,29 +213,31 @@ namespace mongo {
}
#if defined(POSIX_FADV_DONTNEED)
- if( posix_fadvise(fd, 0, size, POSIX_FADV_DONTNEED) ) {
+ if( posix_fadvise(fd, 0, size, POSIX_FADV_DONTNEED) ) {
log() << "warning: posix_fadvise fails " << name << ' ' << errnoWithDescription() << endl;
}
#endif
-
+
Timer t;
-
+
/* make sure the file is the full desired length */
ensureLength( fd , size );
- log() << "done allocating datafile " << name << ", "
+ log() << "done allocating datafile " << name << ", "
<< "size: " << size/1024/1024 << "MB, "
- << " took " << ((double)t.millis())/1000.0 << " secs"
+ << " took " << ((double)t.millis())/1000.0 << " secs"
<< endl;
close( fd );
-
- } catch ( ... ) {
+
+ }
+ catch ( ... ) {
log() << "error failed to allocate new file: " << name
<< " size: " << size << ' ' << errnoWithDescription() << endl;
try {
BOOST_CHECK_EXCEPTION( boost::filesystem::remove( name ) );
- } catch ( ... ) {
+ }
+ catch ( ... ) {
}
scoped_lock lk( fa->_pendingMutex );
fa->_failed = true;
@@ -243,7 +245,7 @@ namespace mongo {
fa->_pendingUpdated.notify_all();
return; // no more allocation
}
-
+
{
scoped_lock lk( fa->_pendingMutex );
fa->_pendingSize.erase( name );
@@ -253,11 +255,11 @@ namespace mongo {
}
}
}
-
-#endif
-
+
+#endif
+
// The mutex contained in this object may be held on shutdown.
FileAllocator &theFileAllocator_ = *(new FileAllocator());
FileAllocator &theFileAllocator() { return theFileAllocator_; }
-
+
} // namespace mongo
diff --git a/util/file_allocator.h b/util/file_allocator.h
index 320294d7aaf..c2688dc8bb7 100644
--- a/util/file_allocator.h
+++ b/util/file_allocator.h
@@ -18,13 +18,13 @@
#include "../pch.h"
namespace mongo {
-
- /*
+
+ /*
* Handles allocation of contiguous files on disk. Allocation may be
* requested asynchronously or synchronously.
*/
class FileAllocator {
- /*
+ /*
* The public functions may not be called concurrently. The allocation
* functions may be called multiple times per file, but only the first
* size specified per file will be used.
@@ -33,7 +33,7 @@ namespace mongo {
FileAllocator();
void start();
-
+
/**
* May be called if file exists. If file exists, or its allocation has
* been requested, size is updated to match existing file size.
@@ -48,20 +48,20 @@ namespace mongo {
void allocateAsap( const string &name, unsigned long long &size );
void waitUntilFinished() const;
-
+
static void ensureLength(int fd , long size);
-
+
private:
#if !defined(_WIN32)
void checkFailure();
-
- // caller must hold pendingMutex_ lock. Returns size if allocated or
+
+ // caller must hold pendingMutex_ lock. Returns size if allocated or
// allocation requested, -1 otherwise.
long prevSize( const string &name ) const;
-
+
// caller must hold pendingMutex_ lock.
bool inProgress( const string &name ) const;
-
+
/** called from the worker thread */
static void run( FileAllocator * fa );
@@ -72,8 +72,8 @@ namespace mongo {
mutable map< string, long > _pendingSize;
bool _failed;
-#endif
+#endif
};
-
+
FileAllocator &theFileAllocator();
} // namespace mongo
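
A sketch of the calling convention documented above: sizes are passed by reference and may be updated, and only the first size requested per file is honored. Assumes the mongo tree on the include path and that start() was called at startup, as mongod does:

    #include "util/file_allocator.h"

    void preallocate() {
        unsigned long long size = 64 * 1024 * 1024;
        // blocks until the file exists at (at least) the requested size;
        // if allocation was already requested, size is updated to match
        mongo::theFileAllocator().allocateAsap("/data/db/test.0", size);

        long next = 128 * 1024 * 1024;
        // queues background allocation and returns immediately
        mongo::theFileAllocator().requestAllocation("/data/db/test.1", next);
        mongo::theFileAllocator().waitUntilFinished();
    }
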
diff --git a/util/goodies.h b/util/goodies.h
index 9e1f7d2d206..53a74c200bd 100644
--- a/util/goodies.h
+++ b/util/goodies.h
@@ -32,9 +32,9 @@ namespace mongo {
*/
unsigned setThreadName(const char * name);
string getThreadName();
-
+
template<class T>
- inline string ToString(const T& t) {
+ inline string ToString(const T& t) {
stringstream s;
s << t;
return s.str();
@@ -98,7 +98,8 @@ namespace mongo {
cout << endl;
len -= 16;
}
- } catch (...) {
+ }
+ catch (...) {
}
}
@@ -137,14 +138,14 @@ namespace mongo {
};
/*
-
+
class DebugMutex : boost::noncopyable {
- friend class lock;
- mongo::mutex m;
- int locked;
+ friend class lock;
+ mongo::mutex m;
+ int locked;
public:
- DebugMutex() : locked(0); { }
- bool isLocked() { return locked; }
+ DebugMutex() : locked(0); { }
+ bool isLocked() { return locked; }
};
*/
@@ -182,7 +183,7 @@ namespace mongo {
return swapEndian(x);
}
#endif
-
+
#if !defined(_WIN32)
typedef int HANDLE;
inline void strcpy_s(char *dst, unsigned len, const char *src) {
@@ -192,10 +193,10 @@ namespace mongo {
#else
typedef void *HANDLE;
#endif
-
+
/* thread local "value" rather than a pointer
good for things which have copy constructors (and the copy constructor is fast enough)
- e.g.
+ e.g.
ThreadLocalValue<int> myint;
*/
template<class T>
@@ -212,7 +213,7 @@ namespace mongo {
void set( const T& i ) {
T *v = _val.get();
- if( v ) {
+ if( v ) {
*v = i;
return;
}
@@ -227,15 +228,15 @@ namespace mongo {
class ProgressMeter : boost::noncopyable {
public:
- ProgressMeter( unsigned long long total , int secondsBetween = 3 , int checkInterval = 100 ){
+ ProgressMeter( unsigned long long total , int secondsBetween = 3 , int checkInterval = 100 ) {
reset( total , secondsBetween , checkInterval );
}
- ProgressMeter(){
+ ProgressMeter() {
_active = 0;
}
-
- void reset( unsigned long long total , int secondsBetween = 3 , int checkInterval = 100 ){
+
+ void reset( unsigned long long total , int secondsBetween = 3 , int checkInterval = 100 ) {
_total = total;
_secondsBetween = secondsBetween;
_checkInterval = checkInterval;
@@ -247,19 +248,19 @@ namespace mongo {
_active = 1;
}
- void finished(){
+ void finished() {
_active = 0;
}
- bool isActive(){
+ bool isActive() {
return _active;
}
-
+
/**
* @return if row was printed
*/
- bool hit( int n = 1 ){
- if ( ! _active ){
+ bool hit( int n = 1 ) {
+ if ( ! _active ) {
cout << "warning: hit on in-active ProgressMeter" << endl;
return false;
}
@@ -268,12 +269,12 @@ namespace mongo {
_hits++;
if ( _hits % _checkInterval )
return false;
-
+
int t = (int) time(0);
if ( t - _lastTime < _secondsBetween )
return false;
-
- if ( _total > 0 ){
+
+ if ( _total > 0 ) {
int per = (int)( ( (double)_done * 100.0 ) / (double)_total );
cout << "\t\t" << _done << "/" << _total << "\t" << per << "%" << endl;
}
@@ -281,11 +282,11 @@ namespace mongo {
return true;
}
- unsigned long long done(){
+ unsigned long long done() {
return _done;
}
-
- unsigned long long hits(){
+
+ unsigned long long hits() {
return _hits;
}
@@ -303,7 +304,7 @@ namespace mongo {
private:
bool _active;
-
+
unsigned long long _total;
int _secondsBetween;
int _checkInterval;
@@ -316,29 +317,29 @@ namespace mongo {
class ProgressMeterHolder : boost::noncopyable {
public:
ProgressMeterHolder( ProgressMeter& pm )
- : _pm( pm ){
+ : _pm( pm ) {
}
-
- ~ProgressMeterHolder(){
+
+ ~ProgressMeterHolder() {
_pm.finished();
}
- ProgressMeter* operator->(){
+ ProgressMeter* operator->() {
return &_pm;
}
- bool hit( int n = 1 ){
+ bool hit( int n = 1 ) {
return _pm.hit( n );
}
- void finished(){
+ void finished() {
_pm.finished();
}
-
- bool operator==( const ProgressMeter& other ){
+
+ bool operator==( const ProgressMeter& other ) {
return _pm == other;
}
-
+
private:
ProgressMeter& _pm;
};
@@ -349,11 +350,11 @@ namespace mongo {
_outof = num;
_num = num;
}
-
- bool tryAcquire(){
+
+ bool tryAcquire() {
scoped_lock lk( _mutex );
- if ( _num <= 0 ){
- if ( _num < 0 ){
+ if ( _num <= 0 ) {
+ if ( _num < 0 ) {
cerr << "DISASTER! in TicketHolder" << endl;
}
return false;
@@ -361,20 +362,20 @@ namespace mongo {
_num--;
return true;
}
-
- void release(){
+
+ void release() {
scoped_lock lk( _mutex );
_num++;
}
- void resize( int newSize ){
- scoped_lock lk( _mutex );
+ void resize( int newSize ) {
+ scoped_lock lk( _mutex );
int used = _outof - _num;
- if ( used > newSize ){
+ if ( used > newSize ) {
cout << "ERROR: can't resize since we're using (" << used << ") more than newSize(" << newSize << ")" << endl;
return;
}
-
+
_outof = newSize;
_num = _outof - used;
}
@@ -397,11 +398,11 @@ namespace mongo {
class TicketHolderReleaser {
public:
- TicketHolderReleaser( TicketHolder * holder ){
+ TicketHolderReleaser( TicketHolder * holder ) {
_holder = holder;
}
-
- ~TicketHolderReleaser(){
+
+ ~TicketHolderReleaser() {
_holder->release();
}
private:
@@ -416,26 +417,26 @@ namespace mongo {
class ThreadSafeString {
public:
ThreadSafeString( size_t size=256 )
- : _size( 256 ) , _buf( new char[256] ){
+ : _size( 256 ) , _buf( new char[256] ) {
memset( _buf , 0 , _size );
}
ThreadSafeString( const ThreadSafeString& other )
- : _size( other._size ) , _buf( new char[_size] ){
+ : _size( other._size ) , _buf( new char[_size] ) {
strncpy( _buf , other._buf , _size );
}
- ~ThreadSafeString(){
+ ~ThreadSafeString() {
delete[] _buf;
_buf = 0;
}
-
+
string toString() const {
string s = _buf;
return s;
}
- ThreadSafeString& operator=( const char * str ){
+ ThreadSafeString& operator=( const char * str ) {
size_t s = strlen(str);
if ( s >= _size - 2 )
s = _size - 2;
@@ -443,7 +444,7 @@ namespace mongo {
_buf[s] = 0;
return *this;
}
-
+
bool operator==( const ThreadSafeString& other ) const {
return strcmp( _buf , other._buf ) == 0;
}
@@ -462,7 +463,7 @@ namespace mongo {
private:
size_t _size;
- char * _buf;
+ char * _buf;
};
ostream& operator<<( ostream &s, const ThreadSafeString &o );
@@ -484,7 +485,7 @@ namespace mongo {
}
return x;
}
-
+
// for convenience, '{' is greater than anything and stops number parsing
inline int lexNumCmp( const char *s1, const char *s2 ) {
//cout << "START : " << s1 << "\t" << s2 << endl;
@@ -497,10 +498,10 @@ namespace mongo {
return 1;
if ( p2 && !p1 )
return -1;
-
+
bool n1 = isNumber( *s1 );
bool n2 = isNumber( *s2 );
-
+
if ( n1 && n2 ) {
// get rid of leading 0s
while ( *s1 == '0' ) s1++;
@@ -534,24 +535,24 @@ namespace mongo {
s1 = e1;
s2 = e2;
continue;
- }
-
- if ( n1 )
+ }
+
+ if ( n1 )
return 1;
-
- if ( n2 )
+
+ if ( n2 )
return -1;
-
+
if ( *s1 > *s2 )
return 1;
-
+
if ( *s2 > *s1 )
return -1;
-
+
s1++; s2++;
}
-
- if ( *s1 )
+
+ if ( *s1 )
return 1;
if ( *s2 )
return -1;
@@ -565,8 +566,8 @@ namespace mongo {
* ptr<const T> => T const * or const T*
*/
template <typename T>
- struct ptr{
-
+ struct ptr {
+
ptr() : _p(NULL) {}
// convert to ptr<T>
@@ -576,7 +577,7 @@ namespace mongo {
template<typename U> ptr(const boost::shared_ptr<U>& p) : _p(p.get()) {}
template<typename U> ptr(const boost::scoped_ptr<U>& p) : _p(p.get()) {}
//template<typename U> ptr(const auto_ptr<U>& p) : _p(p.get()) {}
-
+
// assign to ptr<T>
ptr& operator= (T* p) { _p = p; return *this; } // needed for NULL
template<typename U> ptr& operator= (U* p) { _p = p; return *this; }
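
Of the helpers in this file, lexNumCmp is the least obvious: digit runs compare numerically (leading zeros stripped, the longer run wins) while other characters compare bytewise, and '{' sorts above everything. Some illustrative expectations, assuming the mongo tree and its precompiled header on the include path:

    #include <cassert>
    #include "util/goodies.h"

    void lexNumCmpExamples() {
        using mongo::lexNumCmp;
        assert( lexNumCmp("a2",   "a11") < 0 );  // 2 < 11 numerically, not byte-wise
        assert( lexNumCmp("a002", "a2") == 0 );  // leading zeros ignored
        assert( lexNumCmp("a{",   "a9")  > 0 );  // '{' sorts greater than anything
        assert( lexNumCmp("a",    "a1")  < 0 );  // exhausted string compares less
    }
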
diff --git a/util/hashtab.h b/util/hashtab.h
index 4f25aa74318..6818befb24a 100644
--- a/util/hashtab.h
+++ b/util/hashtab.h
@@ -37,8 +37,8 @@ namespace mongo {
template <
class Key,
- class Type
- >
+ class Type
+ >
class HashTable : boost::noncopyable {
public:
const char *name;
@@ -57,9 +57,9 @@ namespace mongo {
int n;
int maxChain;
- Node& nodes(int i) {
+ Node& nodes(int i) {
Node *nodes = (Node *) _buf;
- return nodes[i];
+ return nodes[i];
}
int _find(const Key& k, bool& found) {
@@ -88,7 +88,7 @@ namespace mongo {
out() << "error: hashtable " << name << " is full n:" << n << endl;
return -1;
}
- if( chain >= maxChain ) {
+ if( chain >= maxChain ) {
if ( firstNonUsed >= 0 )
return firstNonUsed;
out() << "error: hashtable " << name << " max chain reached:" << maxChain << endl;
@@ -109,7 +109,7 @@ namespace mongo {
_buf = buf;
//nodes = (Node *) buf;
- if ( sizeof(Node) != 628 ){
+ if ( sizeof(Node) != 628 ) {
out() << "HashTable() " << _name << " sizeof(node):" << sizeof(Node) << " n:" << n << " sizeof(Key): " << sizeof(Key) << " sizeof(Type):" << sizeof(Type) << endl;
assert( sizeof(Node) == 628 );
}
@@ -152,10 +152,10 @@ namespace mongo {
n->value = value;
return true;
}
-
+
typedef void (*IteratorCallback)( const Key& k , Type& v );
- void iterAll( IteratorCallback callback ){
- for ( int i=0; i<n; i++ ){
+ void iterAll( IteratorCallback callback ) {
+ for ( int i=0; i<n; i++ ) {
if ( ! nodes(i).inUse() )
continue;
callback( nodes(i).k , nodes(i).value );
@@ -164,14 +164,14 @@ namespace mongo {
// TODO: should probably use boost::bind for this, but didn't want to look at it
typedef void (*IteratorCallback2)( const Key& k , Type& v , void * extra );
- void iterAll( IteratorCallback2 callback , void * extra ){
- for ( int i=0; i<n; i++ ){
+ void iterAll( IteratorCallback2 callback , void * extra ) {
+ for ( int i=0; i<n; i++ ) {
if ( ! nodes(i).inUse() )
continue;
callback( nodes(i).k , nodes(i).value , extra );
}
}
-
+
};
#pragma pack()
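
The _find() above is plain linear probing: start at the key's hash modulo n, walk forward remembering the first reclaimable slot, and stop on a match, a never-used slot, or the maxChain bound. A simplified standalone rendering of that probe (omitting the reclaimable-slot and maxChain details), not the mmap-backed class itself:

    #include <string>
    #include <vector>

    struct Slot {
        size_t hash;        // 0 means the slot is free
        std::string key;
    };

    // Returns the index holding k (found=true), or the slot where k could be
    // inserted (found=false), or -1 if the table is full.
    int probe(const std::vector<Slot>& slots, const std::string& k,
              size_t h, bool& found) {
        found = false;
        size_t n = slots.size();
        for (size_t chain = 0; chain < n; ++chain) {
            size_t i = (h + chain) % n;
            const Slot& s = slots[i];
            if (s.hash == 0)
                return (int) i;            // free slot: k is not present
            if (s.hash == h && s.key == k) {
                found = true;
                return (int) i;
            }
        }
        return -1;                         // table full
    }
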
diff --git a/util/heapcheck.h b/util/heapcheck.h
index e8c87bdc672..95da9538db5 100644
--- a/util/heapcheck.h
+++ b/util/heapcheck.h
@@ -25,9 +25,9 @@
#define IGNORE_OBJECT( a ) HeapLeakChecker::IgnoreObject( a )
#define UNIGNORE_OBJECT( a ) HeapLeakChecker::UnIgnoreObject( a )
-#else
+#else
#define IGNORE_OBJECT( a )
-#define UNIGNORE_OBJECT( a )
+#define UNIGNORE_OBJECT( a )
#endif
diff --git a/util/hex.h b/util/hex.h
index c1c551999f1..8cf30f2d9d3 100644
--- a/util/hex.h
+++ b/util/hex.h
@@ -33,12 +33,12 @@ namespace mongo {
return (char)(( fromHex( c[ 0 ] ) << 4 ) | fromHex( c[ 1 ] ));
}
- inline string toHex(const void* inRaw, int len){
+ inline string toHex(const void* inRaw, int len) {
static const char hexchars[] = "0123456789ABCDEF";
StringBuilder out;
const char* in = reinterpret_cast<const char*>(inRaw);
- for (int i=0; i<len; ++i){
+ for (int i=0; i<len; ++i) {
char c = in[i];
char hi = hexchars[(c & 0xF0) >> 4];
char lo = hexchars[(c & 0x0F)];
@@ -48,13 +48,13 @@ namespace mongo {
return out.str();
}
-
- inline string toHexLower(const void* inRaw, int len){
+
+ inline string toHexLower(const void* inRaw, int len) {
static const char hexchars[] = "0123456789abcdef";
StringBuilder out;
const char* in = reinterpret_cast<const char*>(inRaw);
- for (int i=0; i<len; ++i){
+ for (int i=0; i<len; ++i) {
char c = in[i];
char hi = hexchars[(c & 0xF0) >> 4];
char lo = hexchars[(c & 0x0F)];
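
A quick check of the hex helpers above, assuming the mongo tree on the include path:

    #include <string>
    #include "util/hex.h"

    void hexDemo() {
        unsigned char raw[] = { 0xDE, 0xAD, 0xBE, 0xEF };
        std::string upper = mongo::toHex(raw, 4);      // "DEADBEEF"
        std::string lower = mongo::toHexLower(raw, 4); // "deadbeef"
        char byte = mongo::fromHex("ff");              // (char) 0xFF
    }
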
diff --git a/util/histogram.cpp b/util/histogram.cpp
index 4541dfde16d..17a85059d58 100644
--- a/util/histogram.cpp
+++ b/util/histogram.cpp
@@ -28,30 +28,31 @@ namespace mongo {
using std::setfill;
using std::setw;
- Histogram::Histogram( const Options& opts )
+ Histogram::Histogram( const Options& opts )
: _initialValue( opts.initialValue )
, _numBuckets( opts.numBuckets )
, _boundaries( new uint32_t[_numBuckets] )
- , _buckets( new uint64_t[_numBuckets] ){
+ , _buckets( new uint64_t[_numBuckets] ) {
// TODO more sanity checks
// + not too few buckets
// + initialBucket and bucketSize fit within 32 bit ints
// _boundaries store the maximum value falling in that bucket.
- if ( opts.exponential ){
+ if ( opts.exponential ) {
uint32_t twoPow = 1; // 2^0
- for ( uint32_t i = 0; i < _numBuckets - 1; i++){
+ for ( uint32_t i = 0; i < _numBuckets - 1; i++) {
_boundaries[i] = _initialValue + opts.bucketSize * twoPow;
twoPow *= 2; // 2^(i+1)
}
- } else {
+ }
+ else {
_boundaries[0] = _initialValue + opts.bucketSize;
- for ( uint32_t i = 1; i < _numBuckets - 1; i++ ){
+ for ( uint32_t i = 1; i < _numBuckets - 1; i++ ) {
_boundaries[i] = _boundaries[ i-1 ] + opts.bucketSize;
}
}
- _boundaries[ _numBuckets-1 ] = std::numeric_limits<uint32_t>::max();
+ _boundaries[ _numBuckets-1 ] = std::numeric_limits<uint32_t>::max();
for ( uint32_t i = 0; i < _numBuckets; i++ ) {
_buckets[i] = 0;
@@ -63,16 +64,16 @@ namespace mongo {
delete [] _buckets;
}
- void Histogram::insert( uint32_t element ){
+ void Histogram::insert( uint32_t element ) {
if ( element < _initialValue) return;
_buckets[ _findBucket(element) ] += 1;
}
- string Histogram::toHTML() const{
+ string Histogram::toHTML() const {
uint64_t max = 0;
- for ( uint32_t i = 0; i < _numBuckets; i++ ){
- if ( _buckets[i] > max ){
+ for ( uint32_t i = 0; i < _numBuckets; i++ ) {
+ if ( _buckets[i] > max ) {
max = _buckets[i];
}
}
@@ -83,10 +84,10 @@ namespace mongo {
// normalize buckets to max
const int maxBar = 20;
ostringstream ss;
- for ( uint32_t i = 0; i < _numBuckets; i++ ){
+ for ( uint32_t i = 0; i < _numBuckets; i++ ) {
int barSize = _buckets[i] * maxBar / max;
- ss << string( barSize,'*' )
- << setfill(' ') << setw( maxBar-barSize + 12 )
+ ss << string( barSize,'*' )
+ << setfill(' ') << setw( maxBar-barSize + 12 )
<< _boundaries[i] << '\n';
}
@@ -109,21 +110,22 @@ namespace mongo {
return _numBuckets;
}
- uint32_t Histogram::_findBucket( uint32_t element ) const{
+ uint32_t Histogram::_findBucket( uint32_t element ) const {
// TODO assert not too small a value?
uint32_t low = 0;
uint32_t high = _numBuckets - 1;
- while ( low < high ){
+ while ( low < high ) {
// low + ( (high - low) / 2 );
uint32_t mid = ( low + high ) >> 1;
- if ( element > _boundaries[ mid ] ){
+ if ( element > _boundaries[ mid ] ) {
low = mid + 1;
- } else {
+ }
+ else {
high = mid;
}
}
return low;
- }
+ }
} // namespace mongo
diff --git a/util/histogram.h b/util/histogram.h
index d4a6fa78234..40ec5628dda 100644
--- a/util/histogram.h
+++ b/util/histogram.h
@@ -65,12 +65,12 @@ namespace mongo {
// use exponential buckets?
bool exponential;
-
- Options()
+
+ Options()
: numBuckets(0)
, bucketSize(0)
, initialValue(0)
- , exponential(false){}
+ , exponential(false) {}
};
explicit Histogram( const Options& opts );
~Histogram();
@@ -103,7 +103,7 @@ namespace mongo {
* Return the number of buckets in this histogram.
*/
boost::uint32_t getBucketsNum() const;
-
+
private:
/**
* Returns the bucket where 'element' should fall
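
A sketch of building and filling a Histogram, assuming the mongo tree on the include path. With exponential buckets, boundary i is initialValue + bucketSize * 2^i, and the final bucket is unbounded:

    #include <iostream>
    #include "util/histogram.h"

    void latencyHistogram() {
        mongo::Histogram::Options opts;
        opts.numBuckets   = 8;
        opts.bucketSize   = 1;     // first boundary = initialValue + 1
        opts.initialValue = 0;
        opts.exponential  = true;  // boundaries 1, 2, 4, 8, ... then unbounded

        mongo::Histogram h(opts);
        h.insert(3);               // falls in the bucket bounded by 4
        h.insert(1000);            // falls in the final catch-all bucket
        std::cout << h.toHTML();
    }
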
diff --git a/util/hostandport.h b/util/hostandport.h
index 714f44ef267..fd2729609cf 100644
--- a/util/hostandport.h
+++ b/util/hostandport.h
@@ -20,17 +20,17 @@
#include "sock.h"
#include "../db/cmdline.h"
#include "mongoutils/str.h"
-
-namespace mongo {
+
+namespace mongo {
using namespace mongoutils;
- /** helper for manipulating host:port connection endpoints.
+ /** helper for manipulating host:port connection endpoints.
*/
- struct HostAndPort {
+ struct HostAndPort {
HostAndPort() : _port(-1) { }
- /** From a string hostname[:portnumber]
+ /** From a string hostname[:portnumber]
Throws user assertion if bad config string or bad port #.
*/
HostAndPort(string s);
@@ -38,11 +38,11 @@ namespace mongo {
/** @param p port number. -1 is ok to use default. */
HostAndPort(string h, int p /*= -1*/) : _host(h), _port(p) { }
- HostAndPort(const SockAddr& sock )
- : _host( sock.getAddr() ) , _port( sock.getPort() ){
+ HostAndPort(const SockAddr& sock )
+ : _host( sock.getAddr() ) , _port( sock.getPort() ) {
}
- static HostAndPort me() {
+ static HostAndPort me() {
return HostAndPort("localhost", cmdLine.port);
}
@@ -50,7 +50,7 @@ namespace mongo {
static HostAndPort Me();
bool operator<(const HostAndPort& r) const {
- if( _host < r._host )
+ if( _host < r._host )
return true;
if( _host == r._host )
return port() < r.port();
@@ -71,7 +71,7 @@ namespace mongo {
bool isLocalHost() const;
// @returns host:port
- string toString() const;
+ string toString() const;
operator string() const { return toString(); }
@@ -96,7 +96,8 @@ namespace mongo {
if (prefixLen == a.size()) { // (a == b) or (a isPrefixOf b)
if ( b[prefixLen] == '.' || b[prefixLen] == '\0')
return true;
- } else if(prefixLen == b.size()) { // (b isPrefixOf a)
+ }
+ else if(prefixLen == b.size()) { // (b isPrefixOf a)
if ( a[prefixLen] == '.') // can't be '\0'
return true;
}
@@ -104,15 +105,16 @@ namespace mongo {
return false;
}
- inline HostAndPort HostAndPort::Me() {
+ inline HostAndPort HostAndPort::Me() {
const char* ips = cmdLine.bind_ip.c_str();
- while(*ips){
+ while(*ips) {
string ip;
const char * comma = strchr(ips, ',');
- if (comma){
+ if (comma) {
ip = string(ips, comma - ips);
ips = comma + 1;
- }else{
+ }
+ else {
ip = string(ips);
ips = "";
}
@@ -121,7 +123,7 @@ namespace mongo {
return h;
}
}
-
+
string h = getHostName();
assert( !h.empty() );
assert( h != "localhost" );
@@ -131,10 +133,10 @@ namespace mongo {
inline string HostAndPort::toString() const {
stringstream ss;
ss << _host;
- if ( _port != -1 ){
+ if ( _port != -1 ) {
ss << ':';
#if defined(_DEBUG)
- if( _port >= 44000 && _port < 44100 ) {
+ if( _port >= 44000 && _port < 44100 ) {
log() << "warning: special debug port 44xxx used" << endl;
ss << _port+1;
}
@@ -147,7 +149,7 @@ namespace mongo {
return ss.str();
}
- inline bool HostAndPort::isLocalHost() const {
+ inline bool HostAndPort::isLocalHost() const {
return _host == "localhost" || startsWith(_host.c_str(), "127.") || _host == "::1";
}
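
A sketch of the endpoint helper above, assuming the mongo tree on the include path. The one-string constructor parses host[:port] and asserts on a bad port; port -1 means the default:

    #include <string>
    #include "util/hostandport.h"

    void endpoints() {
        mongo::HostAndPort a("db1.example.com:27018"); // throws user assertion if malformed
        mongo::HostAndPort b("db1.example.com", -1);   // -1 falls back to the default port
        if (a.isLocalHost()) { /* "localhost", 127.x, or ::1 */ }
        std::string s = a.toString();                  // "db1.example.com:27018"
        bool lt = b < a;                               // orders by host, then port
    }
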
diff --git a/util/httpclient.cpp b/util/httpclient.cpp
index 4f78029f36e..61d5671fc31 100644
--- a/util/httpclient.cpp
+++ b/util/httpclient.cpp
@@ -27,20 +27,20 @@ namespace mongo {
#define HD(x)
- int HttpClient::get( string url , Result * result ){
+ int HttpClient::get( string url , Result * result ) {
return _go( "GET" , url , 0 , result );
}
- int HttpClient::post( string url , string data , Result * result ){
+ int HttpClient::post( string url , string data , Result * result ) {
return _go( "POST" , url , data.c_str() , result );
- }
+ }
- int HttpClient::_go( const char * command , string url , const char * body , Result * result ){
+ int HttpClient::_go( const char * command , string url , const char * body , Result * result ) {
uassert( 10271 , "invalid url" , url.find( "http://" ) == 0 );
url = url.substr( 7 );
-
+
string host , path;
- if ( url.find( "/" ) == string::npos ){
+ if ( url.find( "/" ) == string::npos ) {
host = url;
path = "/";
}
@@ -49,15 +49,15 @@ namespace mongo {
path = url.substr( url.find( "/" ) );
}
-
+
HD( "host [" << host << "]" );
HD( "path [" << path << "]" );
string server = host;
int port = 80;
-
+
string::size_type idx = host.find( ":" );
- if ( idx != string::npos ){
+ if ( idx != string::npos ) {
server = host.substr( 0 , idx );
string t = host.substr( idx + 1 );
port = atoi( t.c_str() );
@@ -65,7 +65,7 @@ namespace mongo {
HD( "server [" << server << "]" );
HD( "port [" << port << "]" );
-
+
string req;
{
stringstream ss;
@@ -83,20 +83,20 @@ namespace mongo {
req = ss.str();
}
-
+
SockAddr addr( server.c_str() , port );
HD( "addr: " << addr.toString() );
-
+
MessagingPort p;
if ( ! p.connect( addr ) )
return -1;
-
- {
+
+ {
const char * out = req.c_str();
int toSend = req.size();
p.send( out , toSend, "_go" );
}
-
+
char buf[4096];
int got = p.unsafe_recv( buf , 4096 );
buf[got] = 0;
@@ -105,46 +105,46 @@ namespace mongo {
char version[32];
assert( sscanf( buf , "%s %d" , version , &rc ) == 2 );
HD( "rc: " << rc );
-
+
StringBuilder sb;
if ( result )
sb << buf;
-
- while ( ( got = p.unsafe_recv( buf , 4096 ) ) > 0){
+
+ while ( ( got = p.unsafe_recv( buf , 4096 ) ) > 0) {
if ( result )
sb << buf;
}
- if ( result ){
+ if ( result ) {
result->_init( rc , sb.str() );
}
return rc;
}
- void HttpClient::Result::_init( int code , string entire ){
+ void HttpClient::Result::_init( int code , string entire ) {
_code = code;
_entireResponse = entire;
- while ( true ){
+ while ( true ) {
size_t i = entire.find( '\n' );
- if ( i == string::npos ){
+ if ( i == string::npos ) {
// invalid
break;
}
-
+
string h = entire.substr( 0 , i );
entire = entire.substr( i + 1 );
-
+
if ( h.size() && h[h.size()-1] == '\r' )
h = h.substr( 0 , h.size() - 1 );
if ( h.size() == 0 )
break;
}
-
+
_body = entire;
}
-
+
}
diff --git a/util/httpclient.h b/util/httpclient.h
index 8b9da9737c9..d66544e150c 100644
--- a/util/httpclient.h
+++ b/util/httpclient.h
@@ -20,28 +20,28 @@
#include "../pch.h"
namespace mongo {
-
+
class HttpClient {
public:
-
+
class Result {
public:
- Result(){}
-
+ Result() {}
+
const string& getEntireResponse() const {
return _entireResponse;
}
-
+
const map<string,string> getHeaders() const {
return _headers;
}
-
+
const string& getBody() const {
return _body;
}
-
+
private:
-
+
void _init( int code , string entire );
int _code;
@@ -49,10 +49,10 @@ namespace mongo {
map<string,string> _headers;
string _body;
-
+
friend class HttpClient;
};
-
+
/**
* @return response code
*/
@@ -65,7 +65,7 @@ namespace mongo {
private:
int _go( const char * command , string url , const char * body , Result * result );
-
+
};
}
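
A sketch of the client above, assuming the mongo tree on the include path. Only plain http:// URLs are accepted, and the return value is the HTTP status code, or -1 if the connect fails:

    #include <iostream>
    #include "util/httpclient.h"

    void fetchStatus() {
        mongo::HttpClient c;
        mongo::HttpClient::Result r;
        int rc = c.get("http://localhost:28017/_status", &r);
        if (rc == 200)
            std::cout << r.getBody() << std::endl;
    }
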
diff --git a/util/log.cpp b/util/log.cpp
index 104b97623aa..eb1cbae1efc 100644
--- a/util/log.cpp
+++ b/util/log.cpp
@@ -42,53 +42,54 @@ namespace mongo {
LoggingManager()
: _enabled(0) , _file(0) {
}
-
- void start( const string& lp , bool append ){
+
+ void start( const string& lp , bool append ) {
uassert( 10268 , "LoggingManager already started" , ! _enabled );
_append = append;
// test path
FILE * test = fopen( lp.c_str() , _append ? "a" : "w" );
- if ( ! test ){
- if (boost::filesystem::is_directory(lp)){
+ if ( ! test ) {
+ if (boost::filesystem::is_directory(lp)) {
cout << "logpath [" << lp << "] should be a file name not a directory" << endl;
- } else {
+ }
+ else {
cout << "can't open [" << lp << "] for log file: " << errnoWithDescription() << endl;
}
dbexit( EXIT_BADOPTIONS );
assert( 0 );
}
fclose( test );
-
+
_path = lp;
_enabled = 1;
rotate();
}
-
- void rotate(){
- if ( ! _enabled ){
+
+ void rotate() {
+ if ( ! _enabled ) {
cout << "LoggingManager not enabled" << endl;
return;
}
- if ( _file ){
+ if ( _file ) {
#ifdef _WIN32
cout << "log rotation doesn't work on windows" << endl;
return;
#else
struct tm t;
localtime_r( &_opened , &t );
-
+
stringstream ss;
ss << _path << "." << terseCurrentTime(false);
string s = ss.str();
rename( _path.c_str() , s.c_str() );
#endif
}
-
-
+
+
FILE* tmp = freopen(_path.c_str(), (_append ? "a" : "w"), stdout);
- if (!tmp){
+ if (!tmp) {
cerr << "can't open: " << _path.c_str() << " for log file" << endl;
dbexit( EXIT_BADOPTIONS );
assert(0);
@@ -99,24 +100,24 @@ namespace mongo {
_file = tmp;
_opened = time(0);
}
-
+
private:
-
+
bool _enabled;
string _path;
bool _append;
-
+
FILE * _file;
time_t _opened;
-
+
} loggingManager;
- void initLogging( const string& lp , bool append ){
+ void initLogging( const string& lp , bool append ) {
cout << "all output going to: " << lp << endl;
loggingManager.start( lp , append );
}
- void rotateLogs( int signal ){
+ void rotateLogs( int signal ) {
loggingManager.rotate();
}
diff --git a/util/log.h b/util/log.h
index 8e5cb2496e4..fd52b3dc9b6 100644
--- a/util/log.h
+++ b/util/log.h
@@ -28,24 +28,24 @@
namespace mongo {
enum LogLevel { LL_DEBUG , LL_INFO , LL_NOTICE , LL_WARNING , LL_ERROR , LL_SEVERE };
-
- inline const char * logLevelToString( LogLevel l ){
- switch ( l ){
+
+ inline const char * logLevelToString( LogLevel l ) {
+ switch ( l ) {
case LL_DEBUG:
- case LL_INFO:
+ case LL_INFO:
case LL_NOTICE:
return "";
- case LL_WARNING:
- return "warning" ;
- case LL_ERROR:
+ case LL_WARNING:
+ return "warning" ;
+ case LL_ERROR:
return "ERROR";
- case LL_SEVERE:
+ case LL_SEVERE:
return "SEVERE";
default:
return "UNKNOWN";
}
}
-
+
class LazyString {
public:
virtual ~LazyString() {}
@@ -62,15 +62,15 @@ namespace mongo {
const T& t_;
};
- class Tee {
+ class Tee {
public:
- virtual ~Tee(){}
+ virtual ~Tee() {}
virtual void write(LogLevel level , const string& str) = 0;
};
class Nullstream {
public:
- virtual Nullstream& operator<< (Tee* tee) {
+ virtual Nullstream& operator<< (Tee* tee) {
return *this;
}
virtual ~Nullstream() {}
@@ -128,13 +128,13 @@ namespace mongo {
template< class T >
Nullstream& operator<<(T *t) {
return operator<<( static_cast<void*>( t ) );
- }
+ }
template< class T >
Nullstream& operator<<(const T *t) {
return operator<<( static_cast<const void*>( t ) );
- }
+ }
template< class T >
- Nullstream& operator<<(const shared_ptr<T> p ){
+ Nullstream& operator<<(const shared_ptr<T> p ) {
return *this;
}
template< class T >
@@ -150,7 +150,7 @@ namespace mongo {
virtual void flush(Tee *t = 0) {}
};
extern Nullstream nullstream;
-
+
class Logstream : public Nullstream {
static mongo::mutex mutex;
static int doneSetup;
@@ -161,13 +161,13 @@ namespace mongo {
static vector<Tee*> * globalTees;
public:
inline static void logLockless( const StringData& s );
-
- static void setLogFile(FILE* f){
+
+ static void setLogFile(FILE* f) {
scoped_lock lk(mutex);
logfile = f;
}
- static int magicNumber(){
+ static int magicNumber() {
return 1717;
}
@@ -184,8 +184,8 @@ namespace mongo {
}
inline void flush(Tee *t = 0);
-
- inline Nullstream& setLogLevel(LogLevel l){
+
+ inline Nullstream& setLogLevel(LogLevel l) {
logLevel = l;
return *this;
}
@@ -212,7 +212,7 @@ namespace mongo {
ss << x.val();
return *this;
}
- Nullstream& operator<< (Tee* tee) {
+ Nullstream& operator<< (Tee* tee) {
ss << '\n';
flush(tee);
return *this;
@@ -228,11 +228,11 @@ namespace mongo {
}
template< class T >
- Nullstream& operator<<(const shared_ptr<T> p ){
+ Nullstream& operator<<(const shared_ptr<T> p ) {
T * t = p.get();
if ( ! t )
*this << "null";
- else
+ else
*this << *t;
return *this;
}
@@ -240,8 +240,8 @@ namespace mongo {
Logstream& prolog() {
return *this;
}
-
- void addGlobalTee( Tee * t ){
+
+ void addGlobalTee( Tee * t ) {
if ( ! globalTees )
globalTees = new vector<Tee*>();
globalTees->push_back( t );
@@ -249,10 +249,10 @@ namespace mongo {
private:
static thread_specific_ptr<Logstream> tsp;
- Logstream(){
+ Logstream() {
_init();
}
- void _init(){
+ void _init() {
ss.str("");
logLevel = LL_INFO;
}
@@ -273,16 +273,16 @@ namespace mongo {
return nullstream;
return Logstream::get();
}
-
- /* flush the log stream if the log level is
+
+ /* flush the log stream if the log level is
at the specified level or higher. */
- inline void logflush(int level = 0) {
+ inline void logflush(int level = 0) {
if( level > logLevel )
Logstream::get().flush(0);
}
/* without prolog */
- inline Nullstream& _log( int level = 0 ){
+ inline Nullstream& _log( int level = 0 ) {
if ( level > logLevel )
return nullstream;
return Logstream::get();
@@ -313,7 +313,7 @@ namespace mongo {
inline Nullstream& log() {
return Logstream::get().prolog();
}
-
+
inline Nullstream& error() {
return log( LL_ERROR );
}
@@ -335,7 +335,7 @@ namespace mongo {
/**
log to a file rather than stdout
- defined in assert_util.cpp
+ defined in assert_util.cpp
*/
void initLogging( const string& logpath , bool append );
void rotateLogs( int signal = 0 );
@@ -351,7 +351,7 @@ namespace mongo {
FormatMessage(
FORMAT_MESSAGE_FROM_SYSTEM
|FORMAT_MESSAGE_ALLOCATE_BUFFER
- |FORMAT_MESSAGE_IGNORE_INSERTS,
+ |FORMAT_MESSAGE_IGNORE_INSERTS,
NULL,
x, 0,
(LPTSTR) &errorText, // output
@@ -360,7 +360,7 @@ namespace mongo {
if( errorText ) {
string x = toUtf8String(errorText);
for( string::iterator i = x.begin(); i != x.end(); i++ ) {
- if( *i == '\n' || *i == '\r' )
+ if( *i == '\n' || *i == '\r' )
break;
s << *i;
}
@@ -369,11 +369,11 @@ namespace mongo {
else
s << strerror(x);
/*
- DWORD n = FormatMessage(
- FORMAT_MESSAGE_ALLOCATE_BUFFER |
- FORMAT_MESSAGE_FROM_SYSTEM |
+ DWORD n = FormatMessage(
+ FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS,
- NULL, x,
+ NULL, x,
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
(LPTSTR) &lpMsgBuf, 0, NULL);
*/
@@ -383,21 +383,21 @@ namespace mongo {
return s.str();
}
- /** output the error # and error message with prefix.
+ /** output the error # and error message with prefix.
handy for use as parm in uassert/massert.
*/
string errnoWithPrefix( const char * prefix );
- void Logstream::logLockless( const StringData& s ){
+ void Logstream::logLockless( const StringData& s ) {
if ( s.size() == 0 )
return;
- if ( doneSetup == 1717 ){
- if (fwrite(s.data(), s.size(), 1, logfile)){
+ if ( doneSetup == 1717 ) {
+ if (fwrite(s.data(), s.size(), 1, logfile)) {
fflush(logfile);
}
- else{
+ else {
int x = errno;
cout << "Failed to write to logfile: " << errnoWithDescription(x) << endl;
}
@@ -422,13 +422,13 @@ namespace mongo {
BufBuilder b(bufSize);
time_t_to_String( time(0) , b.grow(20) );
- if (!threadName.empty()){
+ if (!threadName.empty()) {
b.appendChar( '[' );
b.appendStr( threadName , false );
b.appendChar( ']' );
b.appendChar( ' ' );
}
- if ( type[0] ){
+ if ( type[0] ) {
b.appendStr( type , false );
b.appendStr( ": " , false );
}
@@ -439,7 +439,7 @@ namespace mongo {
scoped_lock lk(mutex);
if( t ) t->write(logLevel,out);
- if ( globalTees ){
+ if ( globalTees ) {
for ( unsigned i=0; i<globalTees->size(); i++ )
(*globalTees)[i]->write(logLevel,out);
}
@@ -447,9 +447,10 @@ namespace mongo {
#ifndef _WIN32
//syslog( LOG_INFO , "%s" , cc );
#endif
- if(fwrite(out.data(), out.size(), 1, logfile)){
+ if(fwrite(out.data(), out.size(), 1, logfile)) {
fflush(logfile);
- }else{
+ }
+ else {
int x = errno;
cout << "Failed to write to logfile: " << errnoWithDescription(x) << ": " << out << endl;
}
diff --git a/util/logfile.cpp b/util/logfile.cpp
index 86d13370b2b..44b0879c7c0 100644
--- a/util/logfile.cpp
+++ b/util/logfile.cpp
@@ -25,11 +25,11 @@
using namespace mongoutils;
namespace mongo {
- struct LogfileTest : public UnitTest {
- LogfileTest() { }
- void run() {
+ struct LogfileTest : public UnitTest {
+ LogfileTest() { }
+ void run() {
if( 0 && debug ) {
- try {
+ try {
LogFile f("logfile_test");
void *p = malloc(16384);
char *buf = (char*) p;
@@ -43,7 +43,7 @@ namespace mongo {
f.synchronousAppend(buf, 8192);
free(p);
}
- catch(DBException& e ) {
+ catch(DBException& e ) {
log() << "logfile.cpp test failed : " << e.what() << endl;
throw;
}
@@ -54,17 +54,17 @@ namespace mongo {
#if defined(_WIN32)
-namespace mongo {
+namespace mongo {
LogFile::LogFile(string name) : _name(name) {
_fd = CreateFile(
- toNativeString(name.c_str()).c_str(),
- GENERIC_WRITE,
- FILE_SHARE_READ,
- NULL,
- CREATE_NEW, //OPEN_ALWAYS,
- FILE_FLAG_NO_BUFFERING | FILE_FLAG_WRITE_THROUGH,
- NULL);
+ toNativeString(name.c_str()).c_str(),
+ GENERIC_WRITE,
+ FILE_SHARE_READ,
+ NULL,
+ CREATE_NEW, //OPEN_ALWAYS,
+ FILE_FLAG_NO_BUFFERING | FILE_FLAG_WRITE_THROUGH,
+ NULL);
if( _fd == INVALID_HANDLE_VALUE ) {
DWORD e = GetLastError();
uasserted(13518, str::stream() << "couldn't open file " << name << " for writing " << errnoWithDescription(e));
@@ -72,22 +72,22 @@ namespace mongo {
SetFilePointer(_fd, 0, 0, FILE_END);
}
- LogFile::~LogFile() {
+ LogFile::~LogFile() {
if( _fd != INVALID_HANDLE_VALUE )
CloseHandle(_fd);
}
- void LogFile::synchronousAppend(const void *buf, size_t len) {
+ void LogFile::synchronousAppend(const void *buf, size_t len) {
assert(_fd);
DWORD written;
- if( !WriteFile(_fd, buf, len, &written, NULL) ) {
+ if( !WriteFile(_fd, buf, len, &written, NULL) ) {
DWORD e = GetLastError();
if( e == 87 )
massert(13519, "error appending to file - misaligned direct write?", false);
else
uasserted(13517, str::stream() << "error appending to file " << errnoWithDescription(e));
}
- else {
+ else {
dassert( written == len );
}
}
@@ -100,18 +100,18 @@ namespace mongo {
#include <sys/stat.h>
#include <fcntl.h>
-namespace mongo {
+namespace mongo {
LogFile::LogFile(string name) : _name(name) {
- _fd = open(name.c_str(),
- O_APPEND
+ _fd = open(name.c_str(),
+ O_APPEND
| O_CREAT | O_EXCL
| O_RDWR
#if defined(O_DIRECT)
| O_DIRECT
#endif
#if defined(O_NOATIME)
- | O_NOATIME
+ | O_NOATIME
#endif
#if defined(O_SYNC)
| O_SYNC
@@ -123,7 +123,7 @@ namespace mongo {
}
}
- LogFile::~LogFile() {
+ LogFile::~LogFile() {
if( _fd >= 0 )
close(_fd);
_fd = -1;
@@ -133,17 +133,17 @@ namespace mongo {
const char *buf = (char *) b;
assert(_fd);
assert(((size_t)buf)%4096==0); // aligned
- if( len % 4096 != 0 ) {
+ if( len % 4096 != 0 ) {
log() << len << ' ' << len % 4096 << endl;
assert(false);
}
ssize_t written = write(_fd, buf, len);
- if( written != (ssize_t) len ) {
+ if( written != (ssize_t) len ) {
log() << "write fails written:" << written << " len:" << len << " errno:" << errno << endl;
uasserted(13515, str::stream() << "error appending to file " << _fd << errnoWithDescription());
}
#if !defined(O_SYNC)
- if( fdatasync(_fd) < 0 ) {
+ if( fdatasync(_fd) < 0 ) {
uasserted(13514, str::stream() << "error appending to file on fsync " << errnoWithDescription());
}
#endif
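Both LogFile implementations above write unbuffered: the POSIX path asserts a 4096-byte-aligned buffer and a length that is a multiple of 4096, and the Windows path fails with error 87 on misaligned direct writes. A minimal caller sketch, assuming a POSIX host with posix_memalign:

    #include <cstdlib>
    #include <cstring>
    void logFileSketch() {
        void* p = 0;
        if (posix_memalign(&p, 4096, 8192) == 0) { // 4096-aligned, multiple of 4096
            memset(p, 'x', 8192);
            mongo::LogFile f("logfile_example");   // file must not already exist
            f.synchronousAppend(p, 8192);          // does not return until synced
            free(p);
        }
    }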
diff --git a/util/logfile.h b/util/logfile.h
index 11e7a4bddad..9085161aee8 100644
--- a/util/logfile.h
+++ b/util/logfile.h
@@ -18,17 +18,17 @@
#pragma once
-namespace mongo {
+namespace mongo {
- class LogFile {
+ class LogFile {
public:
- /** create the file and open. must not already exist.
+ /** create the file and open. must not already exist.
throws UserAssertion on i/o error
*/
LogFile(string name);
/** closes */
- ~LogFile();
+ ~LogFile();
/** append to file. does not return until sync'd. uses direct i/o when possible.
throws UserAssertion on an i/o error
diff --git a/util/lruishmap.h b/util/lruishmap.h
index fe8b1dc4873..ba91bf6f0f6 100644
--- a/util/lruishmap.h
+++ b/util/lruishmap.h
@@ -23,8 +23,8 @@
namespace mongo {
/* Your K object must define:
- int hash() - must always return > 0.
- operator==
+ int hash() - must always return > 0.
+ operator==
*/
template <class K, class V, int MaxChain>
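A sketch of a key type satisfying the contract in the comment above (a hypothetical example type, not taken from the source):

    struct IntKey {
        int v;
        int hash() const { return (v & 0x7fffffff) | 1; } // always returns > 0
        bool operator==(const IntKey& rhs) const { return v == rhs.v; }
    };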
diff --git a/util/md5.h b/util/md5.h
index d00123414ae..a3f3b6db0e2 100644
--- a/util/md5.h
+++ b/util/md5.h
@@ -27,7 +27,7 @@
This code implements the MD5 Algorithm defined in RFC 1321, whose
text is available at
- http://www.ietf.org/rfc/rfc1321.txt
+ http://www.ietf.org/rfc/rfc1321.txt
The code is derived from the text of the RFC, including the test suite
(section A.5) but excluding the rest of Appendix A. It does not include
any code or documentation that is identified in the RFC as being
@@ -38,12 +38,12 @@
that follows (in reverse chronological order):
2002-04-13 lpd Removed support for non-ANSI compilers; removed
- references to Ghostscript; clarified derivation from RFC 1321;
- now handles byte order either statically or dynamically.
+ references to Ghostscript; clarified derivation from RFC 1321;
+ now handles byte order either statically or dynamically.
1999-11-04 lpd Edited comments slightly for automatic TOC extraction.
1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5);
- added conditionalization for C++ compilation from Martin
- Purschke <purschke@bnl.gov>.
+ added conditionalization for C++ compilation from Martin
+ Purschke <purschke@bnl.gov>.
1999-05-03 lpd Original version.
*/
@@ -65,9 +65,9 @@ typedef unsigned int md5_word_t; /* 32-bit word */
/* Define the state of the MD5 Algorithm. */
typedef struct md5_state_s {
- md5_word_t count[2]; /* message length in bits, lsw first */
- md5_word_t abcd[4]; /* digest buffer */
- md5_byte_t buf[64]; /* accumulate block */
+ md5_word_t count[2]; /* message length in bits, lsw first */
+ md5_word_t abcd[4]; /* digest buffer */
+ md5_byte_t buf[64]; /* accumulate block */
} md5_state_t;
#ifdef __cplusplus
diff --git a/util/md5main.cpp b/util/md5main.cpp
index 9c56f91b43c..9995fee8fa7 100644
--- a/util/md5main.cpp
+++ b/util/md5main.cpp
@@ -27,7 +27,7 @@
This code implements the MD5 Algorithm defined in RFC 1321, whose
text is available at
- http://www.ietf.org/rfc/rfc1321.txt
+ http://www.ietf.org/rfc/rfc1321.txt
The code is derived from the text of the RFC, including the test suite
(section A.5) but excluding the rest of Appendix A. It does not include
any code or documentation that is identified in the RFC as being
@@ -49,7 +49,7 @@
/*
* This file builds an executable that performs various functions related
* to the MD5 library. Typical compilation:
- * gcc -o md5main -lm md5main.c md5.c
+ * gcc -o md5main -lm md5main.c md5.c
*/
static const char *const usage = "\
Usage:\n\
@@ -63,62 +63,61 @@ static const char *const version = "2002-04-13";
/* Run the self-test. */
/*static*/ int
//do_test(void)
-do_md5_test(void)
-{
+do_md5_test(void) {
static const char *const test[7*2] = {
- "", "d41d8cd98f00b204e9800998ecf8427e",
- "a", "0cc175b9c0f1b6a831c399e269772661",
- "abc", "900150983cd24fb0d6963f7d28e17f72",
- "message digest", "f96b697d7cb7938d525a2f31aaf161d0",
- "abcdefghijklmnopqrstuvwxyz", "c3fcd3d76192e4007dfb496cca67e13b",
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
- "d174ab98d277d9f5a5611c2c9f419d9f",
- "12345678901234567890123456789012345678901234567890123456789012345678901234567890", "57edf4a22be3c955ac49da2e2107b67a"
+ "", "d41d8cd98f00b204e9800998ecf8427e",
+ "a", "0cc175b9c0f1b6a831c399e269772661",
+ "abc", "900150983cd24fb0d6963f7d28e17f72",
+ "message digest", "f96b697d7cb7938d525a2f31aaf161d0",
+ "abcdefghijklmnopqrstuvwxyz", "c3fcd3d76192e4007dfb496cca67e13b",
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
+ "d174ab98d277d9f5a5611c2c9f419d9f",
+ "12345678901234567890123456789012345678901234567890123456789012345678901234567890", "57edf4a22be3c955ac49da2e2107b67a"
};
int i;
int status = 0;
for (i = 0; i < 7*2; i += 2) {
- md5_state_t state;
- md5_byte_t digest[16];
- char hex_output[16*2 + 1];
- int di;
-
- md5_init(&state);
- md5_append(&state, (const md5_byte_t *)test[i], strlen(test[i]));
- md5_finish(&state, digest);
- for (di = 0; di < 16; ++di)
- sprintf(hex_output + di * 2, "%02x", digest[di]);
- if (strcmp(hex_output, test[i + 1])) {
- printf("MD5 (\"%s\") = ", test[i]);
- puts(hex_output);
- printf("**** ERROR, should be: %s\n", test[i + 1]);
- status = 1;
- }
+ md5_state_t state;
+ md5_byte_t digest[16];
+ char hex_output[16*2 + 1];
+ int di;
+
+ md5_init(&state);
+ md5_append(&state, (const md5_byte_t *)test[i], strlen(test[i]));
+ md5_finish(&state, digest);
+ for (di = 0; di < 16; ++di)
+ sprintf(hex_output + di * 2, "%02x", digest[di]);
+ if (strcmp(hex_output, test[i + 1])) {
+ printf("MD5 (\"%s\") = ", test[i]);
+ puts(hex_output);
+ printf("**** ERROR, should be: %s\n", test[i + 1]);
+ status = 1;
+ }
}
// if (status == 0)
-/*modified commented out: puts("md5 self-test completed successfully."); */
+ /*modified commented out: puts("md5 self-test completed successfully."); */
return status;
}
/* Print the T values. */
static int
-do_t_values(void)
-{
+do_t_values(void) {
int i;
for (i = 1; i <= 64; ++i) {
- unsigned long v = (unsigned long)(4294967296.0 * fabs(sin((double)i)));
-
- /*
- * The following nonsense is only to avoid compiler warnings about
- * "integer constant is unsigned in ANSI C, signed with -traditional".
- */
- if (v >> 31) {
- printf("#define T%d /* 0x%08lx */ (T_MASK ^ 0x%08lx)\n", i,
- v, (unsigned long)(unsigned int)(~v));
- } else {
- printf("#define T%d 0x%08lx\n", i, v);
- }
+ unsigned long v = (unsigned long)(4294967296.0 * fabs(sin((double)i)));
+
+ /*
+ * The following nonsense is only to avoid compiler warnings about
+ * "integer constant is unsigned in ANSI C, signed with -traditional".
+ */
+ if (v >> 31) {
+ printf("#define T%d /* 0x%08lx */ (T_MASK ^ 0x%08lx)\n", i,
+ v, (unsigned long)(unsigned int)(~v));
+ }
+ else {
+ printf("#define T%d 0x%08lx\n", i, v);
+ }
}
return 0;
}
@@ -126,17 +125,16 @@ do_t_values(void)
/* modified from original code changed function name main->md5main */
/* Main program */
int
-md5main(int argc, char *argv[])
-{
+md5main(int argc, char *argv[]) {
if (argc == 2) {
- if (!strcmp(argv[1], "--test"))
- return do_md5_test();
- if (!strcmp(argv[1], "--t-values"))
- return do_t_values();
- if (!strcmp(argv[1], "--version")) {
- puts(version);
- return 0;
- }
+ if (!strcmp(argv[1], "--test"))
+ return do_md5_test();
+ if (!strcmp(argv[1], "--t-values"))
+ return do_t_values();
+ if (!strcmp(argv[1], "--version")) {
+ puts(version);
+ return 0;
+ }
}
puts(usage);
return 0;
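The self-test above doubles as documentation for the underlying API; a standalone digest computation uses the same three calls (a sketch mirroring do_md5_test, assuming util/md5.h is included):

    #include <cstdio>
    #include <cstring>
    void md5Sketch(const char* msg) {
        md5_state_t state;
        md5_byte_t digest[16];
        md5_init(&state);
        md5_append(&state, (const md5_byte_t*)msg, strlen(msg));
        md5_finish(&state, digest);
        for (int di = 0; di < 16; ++di)
            printf("%02x", digest[di]);  // "abc" -> 900150983cd24fb0d6963f7d28e17f72
        printf("\n");
    }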
diff --git a/util/message.cpp b/util/message.cpp
index 48134feda4d..3f8f5035d98 100644
--- a/util/message.cpp
+++ b/util/message.cpp
@@ -50,7 +50,7 @@ namespace mongo {
bool objcheck = false;
void checkTicketNumbers();
-
+
// if you want trace output:
#define mmm(x)
@@ -64,9 +64,9 @@ namespace mongo {
const Listener* Listener::_timeTracker;
- vector<SockAddr> ipToAddrs(const char* ips, int port){
+ vector<SockAddr> ipToAddrs(const char* ips, int port) {
vector<SockAddr> out;
- if (*ips == '\0'){
+ if (*ips == '\0') {
out.push_back(SockAddr("0.0.0.0", port)); // IPv4 all
if (IPv6Enabled())
@@ -78,13 +78,14 @@ namespace mongo {
return out;
}
- while(*ips){
+ while(*ips) {
string ip;
const char * comma = strchr(ips, ',');
- if (comma){
+ if (comma) {
ip = string(ips, comma - ips);
ips = comma + 1;
- }else{
+ }
+ else {
ip = string(ips);
ips = "";
}
@@ -109,7 +110,7 @@ namespace mongo {
vector<int> socks;
SOCKET maxfd = 0; // needed for select()
- for (vector<SockAddr>::iterator it=mine.begin(), end=mine.end(); it != end; ++it){
+ for (vector<SockAddr>::iterator it=mine.begin(), end=mine.end(); it != end; ++it) {
SockAddr& me = *it;
SOCKET sock = ::socket(me.getType(), SOCK_STREAM, 0);
@@ -117,17 +118,18 @@ namespace mongo {
log() << "ERROR: listen(): invalid socket? " << errnoWithDescription() << endl;
}
- if (me.getType() == AF_UNIX){
+ if (me.getType() == AF_UNIX) {
#if !defined(_WIN32)
- if (unlink(me.getAddr().c_str()) == -1){
+ if (unlink(me.getAddr().c_str()) == -1) {
int x = errno;
- if (x != ENOENT){
+ if (x != ENOENT) {
log() << "couldn't unlink socket file " << me << errnoWithDescription(x) << " skipping" << endl;
continue;
}
}
#endif
- } else if (me.getType() == AF_INET6) {
+ }
+ else if (me.getType() == AF_INET6) {
// IPv6 can also accept IPv4 connections as mapped addresses (::ffff:127.0.0.1)
// That causes a conflict if we don't set it to IPV6_ONLY
const int one = 1;
@@ -135,7 +137,7 @@ namespace mongo {
}
prebindOptions( sock );
-
+
if ( ::bind(sock, me.raw(), me.addressSize) != 0 ) {
int x = errno;
log() << "listen(): bind() failed " << errnoWithDescription(x) << " for socket: " << me.toString() << endl;
@@ -146,8 +148,8 @@ namespace mongo {
}
#if !defined(_WIN32)
- if (me.getType() == AF_UNIX){
- if (chmod(me.getAddr().c_str(), 0777) == -1){
+ if (me.getType() == AF_UNIX) {
+ if (chmod(me.getAddr().c_str(), 0777) == -1) {
log() << "couldn't chmod socket file " << me << errnoWithDescription() << endl;
}
@@ -174,15 +176,15 @@ namespace mongo {
fd_set fds[1];
FD_ZERO(fds);
- for (vector<int>::iterator it=socks.begin(), end=socks.end(); it != end; ++it){
+ for (vector<int>::iterator it=socks.begin(), end=socks.end(); it != end; ++it) {
FD_SET(*it, fds);
}
maxSelectTime.tv_sec = 0;
maxSelectTime.tv_usec = 10000;
const int ret = select(maxfd+1, fds, NULL, NULL, &maxSelectTime);
-
- if (ret == 0){
+
+ if (ret == 0) {
#if defined(__linux__)
_elapsedTime += ( 10000 - maxSelectTime.tv_usec ) / 1000;
#else
@@ -191,11 +193,11 @@ namespace mongo {
continue;
}
_elapsedTime += ret; // assume 1ms to grab connection. very rough
-
- if (ret < 0){
+
+ if (ret < 0) {
int x = errno;
#ifdef EINTR
- if ( x == EINTR ){
+ if ( x == EINTR ) {
log() << "select() signal caught, continuing" << endl;
continue;
}
@@ -205,7 +207,7 @@ namespace mongo {
return;
}
- for (vector<int>::iterator it=socks.begin(), end=socks.end(); it != end; ++it){
+ for (vector<int>::iterator it=socks.begin(), end=socks.end(); it != end; ++it) {
if (! (FD_ISSET(*it, fds)))
continue;
@@ -216,24 +218,24 @@ namespace mongo {
if ( x == ECONNABORTED || x == EBADF ) {
log() << "Listener on port " << _port << " aborted" << endl;
return;
- }
+ }
if ( x == 0 && inShutdown() ) {
return; // socket closed
}
if( !inShutdown() )
log() << "Listener: accept() returns " << s << " " << errnoWithDescription(x) << endl;
continue;
- }
+ }
if (from.getType() != AF_UNIX)
disableNagle(s);
- if ( _logConnect && ! cmdLine.quiet )
+ if ( _logConnect && ! cmdLine.quiet )
log() << "connection accepted from " << from.toString() << " #" << ++connNumber << endl;
accepted(s, from);
}
}
}
- void Listener::accepted(int sock, const SockAddr& from){
+ void Listener::accepted(int sock, const SockAddr& from) {
accepted( new MessagingPort(sock, from) );
}
@@ -280,7 +282,7 @@ namespace mongo {
char * _cur;
};
- class Ports {
+ class Ports {
set<MessagingPort*> ports;
mongo::mutex m;
public:
@@ -293,11 +295,11 @@ namespace mongo {
(*i)->shutdown();
}
}
- void insert(MessagingPort* p) {
+ void insert(MessagingPort* p) {
scoped_lock bl(m);
ports.insert(p);
}
- void erase(MessagingPort* p) {
+ void erase(MessagingPort* p) {
scoped_lock bl(m);
ports.erase(p);
}
@@ -345,15 +347,14 @@ namespace mongo {
void run() { _res = ::connect(_sock, _farEnd.raw(), _farEnd.addressSize); }
string name() const { return ""; /* too short lived to need to name */ }
int inError() const { return _res; }
-
+
private:
int _sock;
int _res;
SockAddr _farEnd;
};
- bool MessagingPort::connect(SockAddr& _far)
- {
+ bool MessagingPort::connect(SockAddr& _far) {
farEnd = _far;
sock = socket(farEnd.getType(), SOCK_STREAM, 0);
@@ -365,7 +366,7 @@ namespace mongo {
if ( _timeout > 0 ) {
setSockTimeouts( sock, _timeout );
}
-
+
ConnectBG bg(sock, farEnd);
bg.go();
if ( bg.wait(5000) ) {
@@ -394,26 +395,26 @@ namespace mongo {
/*
// SO_LINGER is bad
-#ifdef SO_LINGER
+ #ifdef SO_LINGER
struct linger ling;
ling.l_onoff = 1;
ling.l_linger = 0;
setsockopt(sock, SOL_SOCKET, SO_LINGER, (char *) &ling, sizeof(ling));
-#endif
+ #endif
*/
return true;
}
bool MessagingPort::recv(Message& m) {
try {
- again:
+again:
mmm( log() << "* recv() sock:" << this->sock << endl; )
int len = -1;
-
+
char *lenbuf = (char *) &len;
int lft = 4;
recv( lenbuf, lft );
-
+
if ( len < 16 || len > 48000000 ) { // messages must be large enough for headers
if ( len == -1 ) {
// Endian check from the client, after connecting, to see what mode the server is running in.
@@ -421,8 +422,8 @@ namespace mongo {
send( (char *) &foo, 4, "endian" );
goto again;
}
-
- if ( len == 542393671 ){
+
+ if ( len == 542393671 ) {
// an http GET
log(_logLevel) << "looks like you're trying to access db over http on native driver port. please add 1000 for webserver" << endl;
string msg = "You are trying to access MongoDB on the native driver port. For http diagnostic access, add 1000 to the port number\n";
@@ -435,34 +436,36 @@ namespace mongo {
log(0) << "recv(): message len " << len << " is too large" << len << endl;
return false;
}
-
+
int z = (len+1023)&0xfffffc00;
assert(z>=len);
MsgData *md = (MsgData *) malloc(z);
assert(md);
md->len = len;
-
+
char *p = (char *) &md->id;
int left = len -4;
try {
recv( p, left );
- } catch (...) {
+ }
+ catch (...) {
free(md);
throw;
}
-
+
_bytesIn += len;
m.setData(md, true);
return true;
-
- } catch ( const SocketException & e ) {
+
+ }
+ catch ( const SocketException & e ) {
log(_logLevel + (e.shouldPrint() ? 0 : 1) ) << "SocketException: remote: " << remote() << " error: " << e << endl;
m.reset();
return false;
}
}
-
+
void MessagingPort::reply(Message& received, Message& response) {
say(/*received.from, */response, received.header()->id);
}
@@ -476,7 +479,7 @@ namespace mongo {
say(toSend);
return recv( toSend , response );
}
-
+
bool MessagingPort::recv( const Message& toSend , Message& response ) {
while ( 1 ) {
bool ok = recv(response);
@@ -486,7 +489,7 @@ namespace mongo {
if ( response.header()->responseTo == toSend.header()->id )
break;
error() << "MessagingPort::call() wrong id got:" << hex << (unsigned)response.header()->responseTo << " expect:" << (unsigned)toSend.header()->id << '\n'
- << dec
+ << dec
<< " toSend op: " << (unsigned)toSend.operation() << '\n'
<< " response msgid:" << (unsigned)response.header()->id << '\n'
<< " response len: " << (unsigned)response.header()->len << '\n'
@@ -521,7 +524,7 @@ namespace mongo {
toSend.send( *this, "say" );
}
- // sends all data or throws an exception
+ // sends all data or throws an exception
void MessagingPort::send( const char * data , int len, const char *context ) {
_bytesOut += len;
while( len > 0 ) {
@@ -533,23 +536,25 @@ namespace mongo {
if( e == WSAETIMEDOUT ) t = SocketException::SEND_TIMEOUT;
#endif
log(_logLevel) << "MessagingPort " << context << " send() " << errnoWithDescription() << ' ' << farEnd.toString() << endl;
- throw SocketException( t );
- } else {
+ throw SocketException( t );
+ }
+ else {
if ( !serverAlive( farEnd.toString() ) ) {
log(_logLevel) << "MessagingPort " << context << " send() remote dead " << farEnd.toString() << endl;
- throw SocketException( SocketException::SEND_ERROR );
+ throw SocketException( SocketException::SEND_ERROR );
}
}
- } else {
+ }
+ else {
assert( ret <= len );
len -= ret;
data += ret;
}
- }
+ }
}
-
+
// sends all data or throws an exception
- void MessagingPort::send( const vector< pair< char *, int > > &data, const char *context ){
+ void MessagingPort::send( const vector< pair< char *, int > > &data, const char *context ) {
#if defined(_WIN32)
// TODO use scatter/gather api
for( vector< pair< char *, int > >::const_iterator i = data.begin(); i != data.end(); ++i ) {
@@ -571,27 +576,30 @@ namespace mongo {
memset( &meta, 0, sizeof( meta ) );
meta.msg_iov = &d[ 0 ];
meta.msg_iovlen = d.size();
-
+
while( meta.msg_iovlen > 0 ) {
int ret = ::sendmsg( sock , &meta , portSendFlags );
if ( ret == -1 ) {
if ( errno != EAGAIN || _timeout == 0 ) {
log(_logLevel) << "MessagingPort " << context << " send() " << errnoWithDescription() << ' ' << farEnd.toString() << endl;
- throw SocketException( SocketException::SEND_ERROR );
- } else {
+ throw SocketException( SocketException::SEND_ERROR );
+ }
+ else {
if ( !serverAlive( farEnd.toString() ) ) {
log(_logLevel) << "MessagingPort " << context << " send() remote dead " << farEnd.toString() << endl;
- throw SocketException( SocketException::SEND_ERROR );
+ throw SocketException( SocketException::SEND_ERROR );
}
}
- } else {
+ }
+ else {
struct iovec *& i = meta.msg_iov;
while( ret > 0 ) {
if ( i->iov_len > unsigned( ret ) ) {
i->iov_len -= ret;
i->iov_base = (char*)(i->iov_base) + ret;
ret = 0;
- } else {
+ }
+ else {
ret -= i->iov_len;
++i;
--(meta.msg_iovlen);
@@ -602,7 +610,7 @@ namespace mongo {
#endif
}
- void MessagingPort::recv( char * buf , int len ){
+ void MessagingPort::recv( char * buf , int len ) {
unsigned retries = 0;
while( len > 0 ) {
int ret = ::recv( sock , buf , len , portRecvFlags );
@@ -629,13 +637,15 @@ namespace mongo {
#endif
log(_logLevel) << "MessagingPort recv() " << errnoWithDescription(e) << " " << farEnd.toString() <<endl;
throw SocketException(t);
- } else {
+ }
+ else {
if ( !serverAlive( farEnd.toString() ) ) {
log(_logLevel) << "MessagingPort recv() remote dead " << farEnd.toString() << endl;
- throw SocketException( SocketException::RECV_ERROR );
+ throw SocketException( SocketException::RECV_ERROR );
}
}
- } else {
+ }
+ else {
if ( len <= 4 && ret != len )
log(_logLevel) << "MessagingPort recv() got " << ret << " bytes wanted len=" << len << endl;
assert( ret <= len );
@@ -646,9 +656,9 @@ namespace mongo {
}
int MessagingPort::unsafe_recv( char *buf, int max ) {
- return ::recv( sock , buf , max , portRecvFlags );
+ return ::recv( sock , buf , max , portRecvFlags );
}
-
+
void MessagingPort::piggyBack( Message& toSend , int responseTo ) {
if ( toSend.header()->len > 1300 ) {
@@ -685,28 +695,28 @@ namespace mongo {
assert(MsgDataHeaderSize == 16);
}
} msgstart;
-
- MSGID nextMessageId(){
+
+ MSGID nextMessageId() {
MSGID msgid = NextMsgId++;
return msgid;
}
- bool doesOpGetAResponse( int op ){
+ bool doesOpGetAResponse( int op ) {
return op == dbQuery || op == dbGetMore;
}
-
- void setClientId( int id ){
+
+ void setClientId( int id ) {
clientId.set( id );
}
-
- int getClientId(){
+
+ int getClientId() {
return clientId.get();
}
const int DEFAULT_MAX_CONN = 20000;
const int MAX_MAX_CONN = 20000;
-
- int getMaxConnections(){
+
+ int getMaxConnections() {
#ifdef _WIN32
return DEFAULT_MAX_CONN;
#else
@@ -715,12 +725,12 @@ namespace mongo {
int max = (int)(limit.rlim_cur * .8);
- log(1) << "fd limit"
- << " hard:" << limit.rlim_max
- << " soft:" << limit.rlim_cur
+ log(1) << "fd limit"
+ << " hard:" << limit.rlim_max
+ << " soft:" << limit.rlim_cur
<< " max conn: " << max
<< endl;
-
+
if ( max > MAX_MAX_CONN )
max = MAX_MAX_CONN;
@@ -728,10 +738,10 @@ namespace mongo {
#endif
}
- void checkTicketNumbers(){
+ void checkTicketNumbers() {
int want = getMaxConnections();
int current = connTicketHolder.outof();
- if ( current != DEFAULT_MAX_CONN ){
+ if ( current != DEFAULT_MAX_CONN ) {
if ( current < want ) {
// they want fewer than they can handle
// which is fine
diff --git a/util/message.h b/util/message.h
index d21c2b40209..8628eb7cf69 100644
--- a/util/message.h
+++ b/util/message.h
@@ -32,7 +32,7 @@ namespace mongo {
class Listener : boost::noncopyable {
public:
- Listener(const string &ip, int p, bool logConnect=true ) : _port(p), _ip(ip), _logConnect(logConnect), _elapsedTime(0){ }
+ Listener(const string &ip, int p, bool logConnect=true ) : _port(p), _ip(ip), _logConnect(logConnect), _elapsedTime(0) { }
virtual ~Listener() {
if ( _timeTracker == this )
_timeTracker = 0;
@@ -41,26 +41,26 @@ namespace mongo {
/* spawn a thread, etc., then return */
virtual void accepted(int sock, const SockAddr& from);
- virtual void accepted(MessagingPort *mp){
+ virtual void accepted(MessagingPort *mp) {
assert(!"You must overwrite one of the accepted methods");
}
const int _port;
-
+
/**
* @return a rough estimate of elapsed time since the server started
*/
long long getMyElapsedTimeMillis() const { return _elapsedTime; }
- void setAsTimeTracker(){
+ void setAsTimeTracker() {
_timeTracker = this;
}
- static const Listener* getTimeTracker(){
+ static const Listener* getTimeTracker() {
return _timeTracker;
}
-
- static long long getElapsedTimeMillis() {
+
+ static long long getElapsedTimeMillis() {
if ( _timeTracker )
return _timeTracker->getMyElapsedTimeMillis();
@@ -81,11 +81,11 @@ namespace mongo {
virtual ~AbstractMessagingPort() { }
virtual void reply(Message& received, Message& response, MSGID responseTo) = 0; // like the reply below, but doesn't rely on received.data still being available
virtual void reply(Message& received, Message& response) = 0;
-
+
virtual HostAndPort remote() const = 0;
virtual unsigned remotePort() const = 0;
- virtual int getClientId(){
+ virtual int getClientId() {
int x = remotePort();
x = x << 16;
x |= ( ( 0xFF0 & (long long)this ) >> 8 ); // lowest byte in pointer often meaningless
@@ -105,7 +105,7 @@ namespace mongo {
virtual ~MessagingPort();
void shutdown();
-
+
bool connect(SockAddr& farEnd);
/* it's assumed if you reuse a message object, that it doesn't cross MessagingPort's.
@@ -115,7 +115,7 @@ namespace mongo {
void reply(Message& received, Message& response, MSGID responseTo);
void reply(Message& received, Message& response);
bool call(Message& toSend, Message& response);
-
+
void say(Message& toSend, int responseTo = -1);
/**
@@ -127,7 +127,7 @@ namespace mongo {
* Note: if you fail to call recv and someone else uses this port,
* horrible things will happen
*/
- bool recv( const Message& sent , Message& response );
+ bool recv( const Message& sent , Message& response );
void piggyBack( Message& toSend , int responseTo = -1 );
@@ -140,7 +140,7 @@ namespace mongo {
// recv len or throw SocketException
void recv( char * data , int len );
-
+
int unsafe_recv( char *buf, int max );
void clearCounters() { _bytesIn = 0; _bytesOut = 0; }
@@ -180,8 +180,8 @@ namespace mongo {
bool doesOpGetAResponse( int op );
- inline const char * opToString( int op ){
- switch ( op ){
+ inline const char * opToString( int op ) {
+ switch ( op ) {
case 0: return "none";
case opReply: return "reply";
case dbMsg: return "msg";
@@ -191,54 +191,54 @@ namespace mongo {
case dbGetMore: return "getmore";
case dbDelete: return "remove";
case dbKillCursors: return "killcursors";
- default:
+ default:
PRINT(op);
- assert(0);
+ assert(0);
return "";
}
}
-
- inline bool opIsWrite( int op ){
- switch ( op ){
-
- case 0:
- case opReply:
- case dbMsg:
- case dbQuery:
- case dbGetMore:
- case dbKillCursors:
+
+ inline bool opIsWrite( int op ) {
+ switch ( op ) {
+
+ case 0:
+ case opReply:
+ case dbMsg:
+ case dbQuery:
+ case dbGetMore:
+ case dbKillCursors:
return false;
-
- case dbUpdate:
- case dbInsert:
- case dbDelete:
+
+ case dbUpdate:
+ case dbInsert:
+ case dbDelete:
return true;
- default:
+ default:
PRINT(op);
- assert(0);
+ assert(0);
return "";
}
-
+
}
#pragma pack(1)
-/* see http://www.mongodb.org/display/DOCS/Mongo+Wire+Protocol
-*/
-struct MSGHEADER {
- int messageLength; // total message size, including this
- int requestID; // identifier for this message
- int responseTo; // requestID from the original request
- // (used in responses from db)
- int opCode;
-};
-struct OP_GETMORE : public MSGHEADER {
- MSGHEADER header; // standard message header
- int ZERO_or_flags; // 0 - reserved for future use
- //cstring fullCollectionName; // "dbname.collectionname"
- //int32 numberToReturn; // number of documents to return
- //int64 cursorID; // cursorID from the OP_REPLY
-};
+ /* see http://www.mongodb.org/display/DOCS/Mongo+Wire+Protocol
+ */
+ struct MSGHEADER {
+ int messageLength; // total message size, including this
+ int requestID; // identifier for this message
+ int responseTo; // requestID from the original request
+ // (used in responses from db)
+ int opCode;
+ };
+ struct OP_GETMORE : public MSGHEADER {
+ MSGHEADER header; // standard message header
+ int ZERO_or_flags; // 0 - reserved for future use
+ //cstring fullCollectionName; // "dbname.collectionname"
+ //int32 numberToReturn; // number of documents to return
+ //int64 cursorID; // cursorID from the OP_REPLY
+ };
#pragma pack()
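messageLength is the first field on the wire, which is why recv() earlier peeks four bytes before trusting a message: an HTTP request arriving on the native port starts with the bytes 'G','E','T',' ', which read as a little-endian int32 give exactly the 542393671 checked there (a sketch, assuming a little-endian host):

    #include <cassert>
    void getConstantSketch() {
        int len = 'G' | ('E' << 8) | ('T' << 16) | (' ' << 24);
        assert(len == 542393671);  // 0x20544547
    }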
#pragma pack(1)
@@ -263,8 +263,8 @@ struct OP_GETMORE : public MSGHEADER {
int& dataAsInt() {
return *((int *) _data);
}
-
- bool valid(){
+
+ bool valid() {
if ( len <= 0 || len > ( 4 * BSONObjMaxInternalSize ) )
return false;
if ( _operation < 0 || _operation > 30000 )
@@ -272,7 +272,7 @@ struct OP_GETMORE : public MSGHEADER {
return true;
}
- long long getCursor(){
+ long long getCursor() {
assert( responseTo > 0 );
assert( _operation == opReply );
long long * l = (long long *)(_data + 4);
@@ -295,13 +295,13 @@ struct OP_GETMORE : public MSGHEADER {
_buf( 0 ), _data( 0 ), _freeIt( false ) {
_setData( reinterpret_cast< MsgData* >( data ), freeIt );
};
- Message(Message& r) : _buf( 0 ), _data( 0 ), _freeIt( false ) {
+ Message(Message& r) : _buf( 0 ), _data( 0 ), _freeIt( false ) {
*this = r;
}
~Message() {
reset();
}
-
+
SockAddr _from;
MsgData *header() const {
@@ -309,7 +309,7 @@ struct OP_GETMORE : public MSGHEADER {
return _buf ? _buf : reinterpret_cast< MsgData* > ( _data[ 0 ].first );
}
int operation() const { return header()->operation(); }
-
+
MsgData *singleData() const {
massert( 13273, "single data buffer expected", _buf );
return header();
@@ -317,18 +317,19 @@ struct OP_GETMORE : public MSGHEADER {
bool empty() const { return !_buf && _data.empty(); }
- int size() const{
+ int size() const {
int res = 0;
- if ( _buf ){
+ if ( _buf ) {
res = _buf->len;
- } else {
- for (MsgVec::const_iterator it = _data.begin(); it != _data.end(); ++it){
+ }
+ else {
+ for (MsgVec::const_iterator it = _data.begin(); it != _data.end(); ++it) {
res += it->second;
}
}
return res;
}
-
+
int dataSize() const { return size() - sizeof(MSGHEADER); }
// concat multiple buffers - noop if <2 buffers already, otherwise can be expensive copy
@@ -337,7 +338,7 @@ struct OP_GETMORE : public MSGHEADER {
if ( _buf || empty() ) {
return;
}
-
+
assert( _freeIt );
int totalSize = 0;
for( vector< pair< char *, int > >::const_iterator i = _data.begin(); i != _data.end(); ++i ) {
@@ -352,7 +353,7 @@ struct OP_GETMORE : public MSGHEADER {
reset();
_setData( (MsgData*)buf, true );
}
-
+
// vector swap() so this is fast
Message& operator=(Message& r) {
assert( empty() );
@@ -401,7 +402,7 @@ struct OP_GETMORE : public MSGHEADER {
_data.push_back( make_pair( d, size ) );
header()->len += size;
}
-
+
// use to set first buffer if empty
void setData(MsgData *d, bool freeIt) {
assert( empty() );
@@ -430,7 +431,8 @@ struct OP_GETMORE : public MSGHEADER {
}
if ( _buf != 0 ) {
p.send( (char*)_buf, _buf->len, context );
- } else {
+ }
+ else {
p.send( _data, context );
}
}
@@ -451,13 +453,13 @@ struct OP_GETMORE : public MSGHEADER {
class SocketException : public DBException {
public:
const enum Type { CLOSED , RECV_ERROR , SEND_ERROR, RECV_TIMEOUT, SEND_TIMEOUT, FAILED_STATE, CONNECT_ERROR } _type;
- SocketException( Type t ) : DBException( "socket exception" , 9001 ) , _type(t) { }
- bool shouldPrint() const { return _type != CLOSED; }
+ SocketException( Type t ) : DBException( "socket exception" , 9001 ) , _type(t) { }
+ bool shouldPrint() const { return _type != CLOSED; }
virtual string toString() const {
- stringstream ss;
+ stringstream ss;
ss << "9001 socket exception " << _type;
return ss.str();
- }
+ }
};
MSGID nextMessageId();
@@ -470,7 +472,7 @@ struct OP_GETMORE : public MSGHEADER {
class ElapsedTracker {
public:
ElapsedTracker( int hitsBetweenMarks , int msBetweenMarks )
- : _h( hitsBetweenMarks ) , _ms( msBetweenMarks ) , _pings(0){
+ : _h( hitsBetweenMarks ) , _ms( msBetweenMarks ) , _pings(0) {
_last = Listener::getElapsedTimeMillis();
}
@@ -478,18 +480,18 @@ struct OP_GETMORE : public MSGHEADER {
* call this for every iteration
* returns true if one of the triggers has gone off
*/
- bool ping(){
- if ( ( ++_pings % _h ) == 0 ){
+ bool ping() {
+ if ( ( ++_pings % _h ) == 0 ) {
_last = Listener::getElapsedTimeMillis();
return true;
}
-
+
long long now = Listener::getElapsedTimeMillis();
- if ( now - _last > _ms ){
+ if ( now - _last > _ms ) {
_last = now;
return true;
}
-
+
return false;
}
@@ -500,7 +502,7 @@ struct OP_GETMORE : public MSGHEADER {
unsigned long long _pings;
long long _last;
-
+
};
-
+
} // namespace mongo
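A usage sketch for ElapsedTracker above: ping() returns true on every 128th call, or sooner once more than 1000ms of Listener time has passed since the last mark (a hypothetical loop, not from the source):

    void trackerSketch() {
        mongo::ElapsedTracker t(128, 1000);  // hitsBetweenMarks, msBetweenMarks
        for (long i = 0; i < 1000000; ++i) {
            if (t.ping()) {
                // periodic work here, e.g. emit a progress line
            }
        }
    }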
diff --git a/util/message_server.h b/util/message_server.h
index 9d6a8f25d7c..39375c8b2ba 100644
--- a/util/message_server.h
+++ b/util/message_server.h
@@ -25,10 +25,10 @@
#include "../pch.h"
namespace mongo {
-
+
class MessageHandler {
public:
- virtual ~MessageHandler(){}
+ virtual ~MessageHandler() {}
virtual void process( Message& m , AbstractMessagingPort* p ) = 0;
virtual void disconnected( AbstractMessagingPort* p ) = 0;
};
@@ -39,14 +39,14 @@ namespace mongo {
int port; // port to bind to
string ipList; // addresses to bind to
- Options() : port(0), ipList(""){}
+ Options() : port(0), ipList("") {}
};
- virtual ~MessageServer(){}
+ virtual ~MessageServer() {}
virtual void run() = 0;
virtual void setAsTimeTracker() = 0;
};
- // TODO use a factory here to decide between port and asio variations
+ // TODO use a factory here to decide between port and asio variations
MessageServer * createServer( const MessageServer::Options& opts , MessageHandler * handler );
}
diff --git a/util/message_server_asio.cpp b/util/message_server_asio.cpp
index 0c9479c48b6..0c6a7d925da 100644
--- a/util/message_server_asio.cpp
+++ b/util/message_server_asio.cpp
@@ -37,29 +37,29 @@ namespace mongo {
class MessageServerSession;
namespace {
- class StickyThread{
+ class StickyThread {
public:
StickyThread()
: _thread(boost::ref(*this))
{}
- ~StickyThread(){
+ ~StickyThread() {
_mss.put(boost::shared_ptr<MessageServerSession>());
_thread.join();
}
- void ready(boost::shared_ptr<MessageServerSession> mss){
+ void ready(boost::shared_ptr<MessageServerSession> mss) {
_mss.put(mss);
}
- void operator() (){
+ void operator() () {
boost::shared_ptr<MessageServerSession> mss;
- while((mss = _mss.take())){ // intentionally not using ==
+ while((mss = _mss.take())) { // intentionally not using ==
task(mss.get());
mss.reset();
}
}
-
+
private:
boost::thread _thread;
inline void task(MessageServerSession* mss); // must be defined after MessageServerSession
@@ -79,34 +79,34 @@ namespace mongo {
, _portCache(0)
{ }
- ~MessageServerSession(){
+ ~MessageServerSession() {
cout << "disconnect from: " << _socket.remote_endpoint() << endl;
}
- tcp::socket& socket(){
+ tcp::socket& socket() {
return _socket;
}
- void start(){
+ void start() {
cout << "MessageServerSession start from:" << _socket.remote_endpoint() << endl;
_startHeaderRead();
}
-
- void handleReadHeader( const boost::system::error_code& error ){
+
+ void handleReadHeader( const boost::system::error_code& error ) {
if ( _inHeader.len == 0 )
return;
- if ( ! _inHeader.valid() ){
+ if ( ! _inHeader.valid() ) {
cout << " got invalid header from: " << _socket.remote_endpoint() << " closing connected" << endl;
return;
}
-
+
char * raw = (char*)malloc( _inHeader.len );
-
+
MsgData * data = (MsgData*)raw;
memcpy( data , &_inHeader , sizeof( _inHeader ) );
assert( data->len == _inHeader.len );
-
+
uassert( 10273 , "_cur not empty! pipelining requests not supported" , ! _cur.data );
_cur.setData( data , true );
@@ -114,11 +114,11 @@ namespace mongo {
buffer( raw + sizeof( _inHeader ) , _inHeader.len - sizeof( _inHeader ) ) ,
boost::bind( &MessageServerSession::handleReadBody , shared_from_this() , boost::asio::placeholders::error ) );
}
-
- void handleReadBody( const boost::system::error_code& error ){
- if (!_myThread){
+
+ void handleReadBody( const boost::system::error_code& error ) {
+ if (!_myThread) {
mongo::mutex::scoped_lock lk(tp_mutex);
- if (!thread_pool.empty()){
+ if (!thread_pool.empty()) {
_myThread = thread_pool.back();
thread_pool.pop_back();
}
@@ -132,20 +132,21 @@ namespace mongo {
_myThread->ready(shared_from_this());
}
- void process(){
+ void process() {
_handler->process( _cur , this );
- if (_reply.data){
+ if (_reply.data) {
async_write( _socket ,
buffer( (char*)_reply.data , _reply.data->len ) ,
boost::bind( &MessageServerSession::handleWriteDone , shared_from_this() , boost::asio::placeholders::error ) );
- } else {
+ }
+ else {
_cur.reset();
_startHeaderRead();
}
}
-
- void handleWriteDone( const boost::system::error_code& error ){
+
+ void handleWriteDone( const boost::system::error_code& error ) {
{
// return thread to pool after we have sent data to the client
mongo::mutex::scoped_lock lk(tp_mutex);
@@ -157,12 +158,12 @@ namespace mongo {
_reply.reset();
_startHeaderRead();
}
-
- virtual void reply( Message& received, Message& response ){
+
+ virtual void reply( Message& received, Message& response ) {
reply( received , response , received.data->id );
}
-
- virtual void reply( Message& query , Message& toSend, MSGID responseTo ){
+
+ virtual void reply( Message& query , Message& toSend, MSGID responseTo ) {
_reply = toSend;
_reply.data->id = nextMessageId();
@@ -170,22 +171,22 @@ namespace mongo {
uassert( 10274 , "pipelining requests doesn't work yet" , query.data->id == _cur.data->id );
}
-
- virtual unsigned remotePort(){
+
+ virtual unsigned remotePort() {
if (!_portCache)
_portCache = _socket.remote_endpoint().port(); //this is expensive
return _portCache;
}
-
- private:
-
- void _startHeaderRead(){
+
+ private:
+
+ void _startHeaderRead() {
_inHeader.len = 0;
- async_read( _socket ,
+ async_read( _socket ,
buffer( &_inHeader , sizeof( _inHeader ) ) ,
boost::bind( &MessageServerSession::handleReadHeader , shared_from_this() , boost::asio::placeholders::error ) );
}
-
+
MessageHandler * _handler;
tcp::socket _socket;
MsgData _inHeader;
@@ -197,10 +198,10 @@ namespace mongo {
boost::shared_ptr<StickyThread> _myThread;
};
- void StickyThread::task(MessageServerSession* mss){
+ void StickyThread::task(MessageServerSession* mss) {
mss->process();
}
-
+
class AsyncMessageServer : public MessageServer {
public:
@@ -209,39 +210,38 @@ namespace mongo {
: _port( opts.port )
, _handler(handler)
, _endpoint( tcp::v4() , opts.port )
- , _acceptor( _ioservice , _endpoint )
- {
+ , _acceptor( _ioservice , _endpoint ) {
_accept();
}
- virtual ~AsyncMessageServer(){
-
+ virtual ~AsyncMessageServer() {
+
}
- void run(){
+ void run() {
cout << "AsyncMessageServer starting to listen on: " << _port << endl;
boost::thread other(boost::bind(&io_service::run, &_ioservice));
_ioservice.run();
cout << "AsyncMessageServer done listening on: " << _port << endl;
}
-
- void handleAccept( shared_ptr<MessageServerSession> session ,
- const boost::system::error_code& error ){
- if ( error ){
+
+ void handleAccept( shared_ptr<MessageServerSession> session ,
+ const boost::system::error_code& error ) {
+ if ( error ) {
cout << "handleAccept error!" << endl;
return;
}
session->start();
_accept();
}
-
- void _accept( ){
+
+ void _accept( ) {
shared_ptr<MessageServerSession> session( new MessageServerSession( _handler , _ioservice ) );
_acceptor.async_accept( session->socket() ,
- boost::bind( &AsyncMessageServer::handleAccept,
- this,
- session,
- boost::asio::placeholders::error )
- );
+ boost::bind( &AsyncMessageServer::handleAccept,
+ this,
+ session,
+ boost::asio::placeholders::error )
+ );
}
private:
@@ -252,9 +252,9 @@ namespace mongo {
tcp::acceptor _acceptor;
};
- MessageServer * createServer( const MessageServer::Options& opts , MessageHandler * handler ){
+ MessageServer * createServer( const MessageServer::Options& opts , MessageHandler * handler ) {
return new AsyncMessageServer( opts , handler );
- }
+ }
}
diff --git a/util/message_server_port.cpp b/util/message_server_port.cpp
index cb789366a25..698359749e4 100644
--- a/util/message_server_port.cpp
+++ b/util/message_server_port.cpp
@@ -30,22 +30,22 @@ namespace mongo {
namespace pms {
MessageHandler * handler;
-
- void threadRun( MessagingPort * inPort){
+
+ void threadRun( MessagingPort * inPort) {
assert( inPort );
-
+
setThreadName( "conn" );
TicketHolderReleaser connTicketReleaser( &connTicketHolder );
auto_ptr<MessagingPort> p( inPort );
-
+
string otherSide;
-
+
Message m;
try {
otherSide = p->farEnd.toString();
- while ( 1 ){
+ while ( 1 ) {
m.reset();
p->clearCounters();
@@ -55,21 +55,21 @@ namespace mongo {
p->shutdown();
break;
}
-
+
handler->process( m , p.get() );
networkCounter.hit( p->getBytesIn() , p->getBytesOut() );
}
}
- catch ( const SocketException& ){
+ catch ( const SocketException& ) {
log() << "unclean socket shutdown from: " << otherSide << endl;
}
- catch ( const std::exception& e ){
+ catch ( const std::exception& e ) {
problem() << "uncaught exception (" << e.what() << ")(" << demangleName( typeid(e) ) <<") in PortMessageServer::threadRun, closing connection" << endl;
}
- catch ( ... ){
+ catch ( ... ) {
problem() << "uncaught exception in PortMessageServer::threadRun, closing connection" << endl;
- }
-
+ }
+
handler->disconnected( p.get() );
}
@@ -78,15 +78,15 @@ namespace mongo {
class PortMessageServer : public MessageServer , public Listener {
public:
PortMessageServer( const MessageServer::Options& opts, MessageHandler * handler ) :
- Listener( opts.ipList, opts.port ){
-
+ Listener( opts.ipList, opts.port ) {
+
uassert( 10275 , "multiple PortMessageServer not supported" , ! pms::handler );
pms::handler = handler;
}
-
+
virtual void accepted(MessagingPort * p) {
-
- if ( ! connTicketHolder.tryAcquire() ){
+
+ if ( ! connTicketHolder.tryAcquire() ) {
log() << "connection refused because too many open connections: " << connTicketHolder.used() << endl;
// TODO: would be nice if we notified them...
@@ -100,7 +100,7 @@ namespace mongo {
try {
boost::thread thr( boost::bind( &pms::threadRun , p ) );
}
- catch ( boost::thread_resource_error& ){
+ catch ( boost::thread_resource_error& ) {
log() << "can't create new thread, closing connection" << endl;
p->shutdown();
@@ -109,21 +109,21 @@ namespace mongo {
sleepmillis(2);
}
}
-
- virtual void setAsTimeTracker(){
+
+ virtual void setAsTimeTracker() {
Listener::setAsTimeTracker();
}
- void run(){
+ void run() {
initAndListen();
}
};
- MessageServer * createServer( const MessageServer::Options& opts , MessageHandler * handler ){
+ MessageServer * createServer( const MessageServer::Options& opts , MessageHandler * handler ) {
return new PortMessageServer( opts , handler );
- }
+ }
}
diff --git a/util/miniwebserver.cpp b/util/miniwebserver.cpp
index 0193c5d8ef0..e700112d697 100644
--- a/util/miniwebserver.cpp
+++ b/util/miniwebserver.cpp
@@ -55,10 +55,10 @@ namespace mongo {
void MiniWebServer::parseParams( BSONObj & params , string query ) {
if ( query.size() == 0 )
return;
-
+
BSONObjBuilder b;
while ( query.size() ) {
-
+
string::size_type amp = query.find( "&" );
string cur;
@@ -77,7 +77,7 @@ namespace mongo {
b.append( urlDecode(cur.substr(0,eq)) , urlDecode(cur.substr(eq+1) ) );
}
-
+
params = b.obj();
}
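A sketch of what parseParams above produces for a hypothetical query string; keys and values pass through urlDecode, and values stay strings:

    void parseParamsSketch() {
        mongo::BSONObj params;
        mongo::MiniWebServer::parseParams(params, "name=foo&n=5");
        // params now holds { name: "foo", n: "5" }
    }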
@@ -132,16 +132,16 @@ namespace mongo {
string responseMsg;
int responseCode = 599;
vector<string> headers;
-
+
try {
doRequest(buf, parseURL( buf ), responseMsg, responseCode, headers, from);
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
responseCode = 500;
responseMsg = "error loading page: ";
responseMsg += e.what();
}
- catch ( ... ){
+ catch ( ... ) {
responseCode = 500;
responseMsg = "unknown error loading page";
}
@@ -168,32 +168,34 @@ namespace mongo {
::send(s, response.c_str(), response.size(), 0);
closesocket(s);
}
-
- string MiniWebServer::getHeader( const char * req , string wanted ){
+
+ string MiniWebServer::getHeader( const char * req , string wanted ) {
const char * headers = strchr( req , '\n' );
if ( ! headers )
return "";
pcrecpp::StringPiece input( headers + 1 );
-
+
string name;
string val;
pcrecpp::RE re("([\\w\\-]+): (.*?)\r?\n");
- while ( re.Consume( &input, &name, &val) ){
+ while ( re.Consume( &input, &name, &val) ) {
if ( name == wanted )
return val;
}
return "";
}
- string MiniWebServer::urlDecode(const char* s){
+ string MiniWebServer::urlDecode(const char* s) {
stringstream out;
- while(*s){
- if (*s == '+'){
+ while(*s) {
+ if (*s == '+') {
out << ' ';
- }else if (*s == '%'){
+ }
+ else if (*s == '%') {
out << fromHex(s+1);
s+=2;
- }else{
+ }
+ else {
out << *s;
}
s++;
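The decode rules above in one example (a sketch; assumes urlDecode is static like the neighbouring helpers, and that fromHex turns two hex digits into a char, as the call implies):

    void urlDecodeSketch() {
        std::string s = mongo::MiniWebServer::urlDecode("a+b%20c");
        // s == "a b c": '+' becomes a space, "%20" is hex-decoded
    }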
diff --git a/util/miniwebserver.h b/util/miniwebserver.h
index bbd1ba2422e..b385afc11c1 100644
--- a/util/miniwebserver.h
+++ b/util/miniwebserver.h
@@ -41,7 +41,7 @@ namespace mongo {
// --- static helpers ----
static void parseParams( BSONObj & params , string query );
-
+
static string parseURL( const char * buf );
static string parseMethod( const char * headers );
static string getHeader( const char * headers , string name );
diff --git a/util/mmap.cpp b/util/mmap.cpp
index 3125995cda2..7b69e33cd58 100644
--- a/util/mmap.cpp
+++ b/util/mmap.cpp
@@ -25,7 +25,7 @@ namespace mongo {
set<MongoFile*> MongoFile::mmfiles;
map<string,MongoFile*> MongoFile::pathToFile;
- /* Create. Must not exist.
+ /* Create. Must not exist.
@param zero fill file with zeros when true
*/
void* MemoryMappedFile::create(string filename, unsigned long long len, bool zero) {
@@ -56,21 +56,21 @@ namespace mongo {
return map( filename , l, options );
}
- void printMemInfo( const char * where ){
+ void printMemInfo( const char * where ) {
cout << "mem info: ";
- if ( where )
- cout << where << " ";
+ if ( where )
+ cout << where << " ";
ProcessInfo pi;
- if ( ! pi.supported() ){
+ if ( ! pi.supported() ) {
cout << " not supported" << endl;
return;
}
-
+
cout << "vsize: " << pi.getVirtualMemorySize() << " resident: " << pi.getResidentSize() << " mapped: " << ( MemoryMappedFile::totalMappedLength() / ( 1024 * 1024 ) ) << endl;
}
/* --- MongoFile -------------------------------------------------
- this is the administrative stuff
+ this is the administrative stuff
*/
RWLock MongoFile::mmmutex("rw:mmmutex");
@@ -93,7 +93,7 @@ namespace mongo {
rwlock lk( mmmutex , true );
ProgressMeter pm( mmfiles.size() , 2 , 1 );
- for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ){
+ for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ) {
(*i)->close();
pm.hit();
}
@@ -101,9 +101,9 @@ namespace mongo {
--closingAllFiles;
}
- /*static*/ long long MongoFile::totalMappedLength(){
+ /*static*/ long long MongoFile::totalMappedLength() {
unsigned long long total = 0;
-
+
rwlock lk( mmmutex , false );
for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ )
total += (*i)->length();
@@ -117,35 +117,35 @@ namespace mongo {
void (*MongoFile::notifyPreFlush)() = nullFunc;
void (*MongoFile::notifyPostFlush)() = nullFunc;
- /*static*/ int MongoFile::flushAll( bool sync ){
+ /*static*/ int MongoFile::flushAll( bool sync ) {
notifyPreFlush();
int x = _flushAll(sync);
notifyPostFlush();
return x;
}
- /*static*/ int MongoFile::_flushAll( bool sync ){
- if ( ! sync ){
+ /*static*/ int MongoFile::_flushAll( bool sync ) {
+ if ( ! sync ) {
int num = 0;
rwlock lk( mmmutex , false );
- for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ){
+ for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ) {
num++;
MongoFile * mmf = *i;
if ( ! mmf )
continue;
-
+
mmf->flush( sync );
}
return num;
}
-
+
// want to do it sync
set<MongoFile*> seen;
- while ( true ){
+ while ( true ) {
auto_ptr<Flushable> f;
{
rwlock lk( mmmutex , false );
- for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ){
+ for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ) {
MongoFile * mmf = *i;
if ( ! mmf )
continue;
@@ -158,18 +158,18 @@ namespace mongo {
}
if ( ! f.get() )
break;
-
+
f->flush();
}
return seen.size();
}
- void MongoFile::created(){
+ void MongoFile::created() {
rwlock lk( mmmutex , true );
mmfiles.insert(this);
}
-
- void MongoFile::setFilename(string fn) {
+
+ void MongoFile::setFilename(string fn) {
rwlock( mmmutex, true );
assert( _filename.empty() );
_filename = fn;
@@ -182,7 +182,7 @@ namespace mongo {
void MongoFile::markAllWritable() {
rwlock lk( mmmutex , false );
- for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ){
+ for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ) {
MongoFile * mmf = *i;
if (mmf) mmf->_lock();
}
@@ -190,7 +190,7 @@ namespace mongo {
void MongoFile::unmarkAllWritable() {
rwlock lk( mmmutex , false );
- for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ){
+ for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ) {
MongoFile * mmf = *i;
if (mmf) mmf->_unlock();
}
diff --git a/util/mmap.h b/util/mmap.h
index 0f4fd75bfee..8b3c7f22723 100644
--- a/util/mmap.h
+++ b/util/mmap.h
@@ -20,9 +20,9 @@
#include "concurrency/rwlock.h"
namespace mongo {
-
+
/* the administrative-ish stuff here */
- class MongoFile : boost::noncopyable {
+ class MongoFile : boost::noncopyable {
public:
/** Flushable has to fail nicely if the underlying object gets killed */
class Flushable {
@@ -30,7 +30,7 @@ namespace mongo {
virtual ~Flushable() {}
virtual void flush() = 0;
};
-
+
virtual ~MongoFile() {}
enum Options {
@@ -38,8 +38,8 @@ namespace mongo {
READONLY = 2 // not contractually guaranteed, but if specified the impl has option to fault writes
};
- /** @param fun is called for each MongoFile.
- called from within a mutex that MongoFile uses. so be careful not to deadlock.
+ /** @param fun is called for each MongoFile.
+ called from within a mutex that MongoFile uses. so be careful not to deadlock.
*/
template < class F >
static void forEach( F fun );
@@ -77,7 +77,7 @@ namespace mongo {
* Flushable has to fail nicely if the underlying object gets killed
*/
virtual Flushable * prepareFlush() = 0;
-
+
void created(); /* subclass must call after create */
void destroyed(); /* subclass must call in destructor */
@@ -99,7 +99,7 @@ namespace mongo {
inline void MongoFile::unmarkAllWritable() {}
#endif
- /** look up a MMF by filename. scoped mutex locking convention.
+ /** look up a MMF by filename. scoped mutex locking convention.
example:
MMFFinderByName finder;
MongoMMF *a = finder.find("file_name_a");
@@ -109,10 +109,10 @@ namespace mongo {
public:
MongoFileFinder() : _lk(MongoFile::mmmutex,false) { }
- /** @return The MongoFile object associated with the specified file name. If no file is open
+ /** @return The MongoFile object associated with the specified file name. If no file is open
with the specified name, returns null.
*/
- MongoFile* findByPath(string path) {
+ MongoFile* findByPath(string path) {
map<string,MongoFile*>::iterator i = MongoFile::pathToFile.find(path);
return i == MongoFile::pathToFile.end() ? 0 : i->second;
}
@@ -122,10 +122,10 @@ namespace mongo {
};
struct MongoFileAllowWrites {
- MongoFileAllowWrites(){
+ MongoFileAllowWrites() {
MongoFile::markAllWritable();
}
- ~MongoFileAllowWrites(){
+ ~MongoFileAllowWrites() {
MongoFile::unmarkAllWritable();
}
};
@@ -151,7 +151,7 @@ namespace mongo {
*/
void* map(const char *filename, unsigned long long &length, int options = 0 );
- /* Create. Must not exist.
+ /* Create. Must not exist.
@param zero fill file with zeros when true
*/
void* create(string filename, unsigned long long len, bool zero);
@@ -162,15 +162,15 @@ namespace mongo {
long shortLength() const { return (long) len; }
unsigned long long length() const { return len; }
- /** create a new view with the specified properties.
- automatically cleaned up upon close/destruction of the MemoryMappedFile object.
+ /** create a new view with the specified properties.
+ automatically cleaned up upon close/destruction of the MemoryMappedFile object.
*/
void* createReadOnlyMap();
void* createPrivateMap();
private:
static void updateLength( const char *filename, unsigned long long &length );
-
+
HANDLE fd;
HANDLE maphandle;
vector<void *> views;
@@ -189,13 +189,13 @@ namespace mongo {
void* remapPrivateView(void *oldPrivateAddr);
};
- void printMemInfo( const char * where );
+ void printMemInfo( const char * where );
typedef MemoryMappedFile MMF;
/** p is called from within a mutex that MongoFile uses. so be careful not to deadlock. */
template < class F >
- inline void MongoFile::forEach( F p ) {
+ inline void MongoFile::forEach( F p ) {
rwlock lk( mmmutex , false );
for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ )
p(*i);
diff --git a/util/mmap_mm.cpp b/util/mmap_mm.cpp
index 3cbb0d21a61..ec2400e02d3 100644
--- a/util/mmap_mm.cpp
+++ b/util/mmap_mm.cpp
@@ -44,9 +44,9 @@ namespace mongo {
void MemoryMappedFile::flush(bool sync) {
}
-
+
void MemoryMappedFile::_lock() {}
void MemoryMappedFile::_unlock() {}
-}
+}
diff --git a/util/mmap_posix.cpp b/util/mmap_posix.cpp
index 3ce84779108..067c59de805 100644
--- a/util/mmap_posix.cpp
+++ b/util/mmap_posix.cpp
@@ -37,7 +37,7 @@ namespace mongo {
len = 0;
created();
}
-
+
void MemoryMappedFile::close() {
for( vector<void*>::iterator i = views.begin(); i != views.end(); i++ ) {
munmap(*i,len);
@@ -59,7 +59,7 @@ namespace mongo {
len = length;
massert( 10446 , str::stream() << "mmap: can't map area of size 0 file: " << filename, length > 0 );
-
+
fd = open(filename, O_RDWR | O_NOATIME);
if ( fd <= 0 ) {
log() << "couldn't open " << filename << ' ' << errnoWithDescription() << endl;
@@ -67,17 +67,17 @@ namespace mongo {
}
unsigned long long filelen = lseek(fd, 0, SEEK_END);
- uassert(10447, str::stream() << "map file alloc failed, wanted: " << length << " filelen: " << filelen << ' ' << sizeof(size_t), filelen == length );
+ uassert(10447, str::stream() << "map file alloc failed, wanted: " << length << " filelen: " << filelen << ' ' << sizeof(size_t), filelen == length );
lseek( fd, 0, SEEK_SET );
-
+
void * view = mmap(NULL, length, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
if ( view == MAP_FAILED ) {
error() << " mmap() failed for " << filename << " len:" << length << " " << errnoWithDescription() << endl;
if ( errno == ENOMEM ) {
- if( sizeof(void*) == 4 )
- error() << "mmap failed with out of memory. You are using a 32-bit build and probably need to upgrade to 64" << endl;
- else
- error() << "mmap failed with out of memory. (64 bit build)" << endl;
+ if( sizeof(void*) == 4 )
+ error() << "mmap failed with out of memory. You are using a 32-bit build and probably need to upgrade to 64" << endl;
+ else
+ error() << "mmap failed with out of memory. (64 bit build)" << endl;
}
return 0;
}
@@ -86,14 +86,14 @@ namespace mongo {
#if defined(__sunos__)
#warning madvise not supported on solaris yet
#else
- if ( options & SEQUENTIAL ){
- if ( madvise( view , length , MADV_SEQUENTIAL ) ){
+ if ( options & SEQUENTIAL ) {
+ if ( madvise( view , length , MADV_SEQUENTIAL ) ) {
warning() << "map: madvise failed for " << filename << ' ' << errnoWithDescription() << endl;
}
}
#endif
- DEV if (! dbMutex.info().isLocked()){
+ DEV if (! dbMutex.info().isLocked()) {
_unlock();
}
@@ -103,32 +103,33 @@ namespace mongo {
}
void* MemoryMappedFile::createReadOnlyMap() {
- void * x = mmap( /*start*/0 , len , PROT_READ , MAP_SHARED , fd , 0 );
- if( x == MAP_FAILED ) {
- if ( errno == ENOMEM ) {
- if( sizeof(void*) == 4 )
- error() << "mmap ro failed with out of memory. You are using a 32-bit build and probably need to upgrade to 64" << endl;
- else
- error() << "mmap ro failed with out of memory. (64 bit build)" << endl;
- }
- return 0;
- }
- return x;
+ void * x = mmap( /*start*/0 , len , PROT_READ , MAP_SHARED , fd , 0 );
+ if( x == MAP_FAILED ) {
+ if ( errno == ENOMEM ) {
+ if( sizeof(void*) == 4 )
+ error() << "mmap ro failed with out of memory. You are using a 32-bit build and probably need to upgrade to 64" << endl;
+ else
+ error() << "mmap ro failed with out of memory. (64 bit build)" << endl;
+ }
+ return 0;
+ }
+ return x;
}
-
+
void* MemoryMappedFile::createPrivateMap() {
- void * x = mmap( /*start*/0 , len , PROT_READ|PROT_WRITE , MAP_PRIVATE , fd , 0 );
+ void * x = mmap( /*start*/0 , len , PROT_READ|PROT_WRITE , MAP_PRIVATE , fd , 0 );
if( x == MAP_FAILED ) {
if ( errno == ENOMEM ) {
if( sizeof(void*) == 4 ) {
error() << "mmap private failed with out of memory. You are using a 32-bit build and probably need to upgrade to 64" << endl;
- } else {
+ }
+ else {
error() << "mmap private failed with out of memory. (64 bit build)" << endl;
}
}
return 0;
}
- else {
+ else {
views.push_back(x);
}
return x;
@@ -136,7 +137,7 @@ namespace mongo {
void* MemoryMappedFile::remapPrivateView(void *oldPrivateAddr) {
assert( munmap(oldPrivateAddr,len) == 0 );
- void * x = mmap( oldPrivateAddr, len , PROT_READ|PROT_WRITE , MAP_PRIVATE|MAP_FIXED , fd , 0 );
+ void * x = mmap( oldPrivateAddr, len , PROT_READ|PROT_WRITE , MAP_PRIVATE|MAP_FIXED , fd , 0 );
if( x == MAP_FAILED ) {
int err = errno;
remove(views.begin(), views.end(), oldPrivateAddr);
@@ -145,25 +146,25 @@ namespace mongo {
assert( x == oldPrivateAddr );
return x;
}
-
+
void MemoryMappedFile::flush(bool sync) {
if ( views.empty() || fd == 0 )
return;
if ( msync(views[0], len, sync ? MS_SYNC : MS_ASYNC) )
problem() << "msync " << errnoWithDescription() << endl;
}
-
+
class PosixFlushable : public MemoryMappedFile::Flushable {
public:
PosixFlushable( void * view , HANDLE fd , long len )
- : _view( view ) , _fd( fd ) , _len(len){
+ : _view( view ) , _fd( fd ) , _len(len) {
}
- void flush(){
+ void flush() {
if ( _view && _fd )
if ( msync(_view, _len, MS_SYNC ) )
problem() << "msync " << errnoWithDescription() << endl;
-
+
}
void * _view;
@@ -171,7 +172,7 @@ namespace mongo {
long _len;
};
- MemoryMappedFile::Flushable * MemoryMappedFile::prepareFlush(){
+ MemoryMappedFile::Flushable * MemoryMappedFile::prepareFlush() {
return new PosixFlushable( views.empty() ? 0 : views[0] , fd , len );
}
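
For reference, the POSIX pattern util/mmap_posix.cpp wraps is small enough to show standalone: open the file, mmap it MAP_SHARED, write through the view, then msync -- MS_SYNC versus MS_ASYNC being the same split MemoryMappedFile::flush(bool sync) exposes. A minimal sketch, not MongoDB code; the file name and length are illustrative and error handling is trimmed:

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstddef>
    #include <cstring>
    #include <cstdio>

    int main() {
        const char* filename = "demo.dat";   // illustrative file name
        const size_t len = 4096;

        int fd = open(filename, O_RDWR | O_CREAT, 0644);
        if (fd < 0) { perror("open"); return 1; }
        if (ftruncate(fd, len) != 0) { perror("ftruncate"); return 1; }

        // MAP_SHARED: writes through the view become visible to the file
        void* view = mmap(0, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (view == MAP_FAILED) { perror("mmap"); return 1; }

        memcpy(view, "hello", 5);            // write through the mapping

        // MS_SYNC blocks until the dirty pages reach disk; MS_ASYNC only
        // schedules the writeback -- the flush(bool sync) split above
        if (msync(view, len, MS_SYNC) != 0)
            perror("msync");

        munmap(view, len);
        close(fd);
        return 0;
    }

Swapping MAP_SHARED for MAP_PRIVATE gives the copy-on-write view that createPrivateMap returns; writes through a private view never reach the file.
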
diff --git a/util/mmap_win.cpp b/util/mmap_win.cpp
index 6b75a46ffce..4a9fd48061d 100644
--- a/util/mmap_win.cpp
+++ b/util/mmap_win.cpp
@@ -23,8 +23,7 @@
namespace mongo {
MemoryMappedFile::MemoryMappedFile()
- : _flushMutex(new mutex("flushMutex"))
- {
+ : _flushMutex(new mutex("flushMutex")) {
fd = 0;
maphandle = 0;
len = 0;
@@ -43,7 +42,7 @@ namespace mongo {
CloseHandle(fd);
fd = 0;
}
-
+
unsigned long long mapped = 0;
void* MemoryMappedFile::remapPrivateView(void *oldPrivateAddr) {
@@ -51,7 +50,7 @@ namespace mongo {
assert(ok);
// we want the new address to be the same as the old address in case things keep pointers around (as namespaceindex does).
- void *p = MapViewOfFileEx(maphandle, FILE_MAP_COPY, 0, 0,
+ void *p = MapViewOfFileEx(maphandle, FILE_MAP_COPY, 0, 0,
/*dwNumberOfBytesToMap 0 means to eof*/0 /*len*/,
oldPrivateAddr);
assert(p);
@@ -59,14 +58,14 @@ namespace mongo {
return p;
}
- void* MemoryMappedFile::createPrivateMap() {
+ void* MemoryMappedFile::createPrivateMap() {
assert( maphandle );
void *p = MapViewOfFile(maphandle, FILE_MAP_COPY, /*f ofs hi*/0, /*f ofs lo*/ 0, /*dwNumberOfBytesToMap 0 means to eof*/0);
if ( p == 0 ) {
DWORD e = GetLastError();
log() << "FILE_MAP_COPY MapViewOfFile failed " << filename() << " " << errnoWithDescription(e) << endl;
}
- else {
+ else {
views.push_back(p);
}
return p;
@@ -79,7 +78,7 @@ namespace mongo {
DWORD e = GetLastError();
log() << "FILE_MAP_READ MapViewOfFile failed " << filename() << " " << errnoWithDescription(e) << endl;
}
- else {
+ else {
views.push_back(p);
}
return p;
@@ -91,13 +90,13 @@ namespace mongo {
char filename[256];
strncpy(filename, filenameIn, 255);
filename[255] = 0;
- {
+ {
size_t len = strlen( filename );
- for ( size_t i=len-1; i>=0; i-- ){
+ for ( size_t i=len-1; i>=0; i-- ) {
if ( filename[i] == '/' ||
- filename[i] == '\\' )
+ filename[i] == '\\' )
break;
-
+
if ( filename[i] == ':' )
filename[i] = '_';
}
@@ -129,10 +128,10 @@ namespace mongo {
{
DWORD flProtect = PAGE_READWRITE; //(options & READONLY)?PAGE_READONLY:PAGE_READWRITE;
- maphandle = CreateFileMapping(fd, NULL, flProtect,
- length >> 32 /*maxsizehigh*/,
- (unsigned) length /*maxsizelow*/,
- NULL/*lpName*/);
+ maphandle = CreateFileMapping(fd, NULL, flProtect,
+ length >> 32 /*maxsizehigh*/,
+ (unsigned) length /*maxsizelow*/,
+ NULL/*lpName*/);
if ( maphandle == NULL ) {
DWORD e = GetLastError(); // log() call was killing lasterror before we get to that point in the stream
log() << "CreateFileMapping failed " << filename << ' ' << errnoWithDescription(e) << endl;
@@ -149,14 +148,14 @@ namespace mongo {
DWORD e = GetLastError();
log() << "MapViewOfFile failed " << filename << " " << errnoWithDescription(e) << endl;
}
- else {
+ else {
views.push_back(view);
}
len = length;
#if 0
{
- if( !( options & READONLY ) ) {
+ if( !( options & READONLY ) ) {
log() << "dur: not readonly view which is wrong : " << filename << endl;
}
void *p = MapViewOfFile(maphandle, FILE_MAP_ALL_ACCESS, /*f ofs hi*/0, /*f ofs lo*/ 0, /*dwNumberOfBytesToMap 0 means to eof*/0);
@@ -180,32 +179,32 @@ namespace mongo {
WindowsFlushable( void * view , HANDLE fd , string filename , boost::shared_ptr<mutex> flushMutex )
: _view(view) , _fd(fd) , _filename(filename) , _flushMutex(flushMutex)
{}
-
- void flush(){
- if (!_view || !_fd)
+
+ void flush() {
+ if (!_view || !_fd)
return;
scoped_lock lk(*_flushMutex);
bool success = FlushViewOfFile(_view, 0); // 0 means whole mapping
- if (!success){
+ if (!success) {
int err = GetLastError();
out() << "FlushViewOfFile failed " << err << " file: " << _filename << endl;
}
-
+
success = FlushFileBuffers(_fd);
- if (!success){
+ if (!success) {
int err = GetLastError();
out() << "FlushFileBuffers failed " << err << " file: " << _filename << endl;
}
}
-
+
void * _view;
HANDLE _fd;
string _filename;
boost::shared_ptr<mutex> _flushMutex;
};
-
+
void MemoryMappedFile::flush(bool sync) {
uassert(13056, "Async flushing not supported on windows", sync);
if( !views.empty() ) {
@@ -220,4 +219,4 @@ namespace mongo {
void MemoryMappedFile::_lock() {}
void MemoryMappedFile::_unlock() {}
-}
+}
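
The Windows side expresses the same idea through the CreateFileMapping/MapViewOfFile pair, with FILE_MAP_COPY supplying the copy-on-write private view and the view/handle flush pair standing in for msync. A minimal sketch, not MongoDB code; the file name is illustrative and error handling is trimmed:

    #include <windows.h>

    int main() {
        HANDLE fd = CreateFileA("demo.dat", GENERIC_READ | GENERIC_WRITE,
                                FILE_SHARE_READ, NULL, OPEN_EXISTING,
                                FILE_ATTRIBUTE_NORMAL, NULL);
        if (fd == INVALID_HANDLE_VALUE) return 1;

        // one mapping object per file; 0/0 for max size means "file size"
        HANDLE maphandle = CreateFileMappingA(fd, NULL, PAGE_READWRITE, 0, 0, NULL);
        if (maphandle == NULL) { CloseHandle(fd); return 1; }

        // dwNumberOfBytesToMap of 0 maps to end of file, as the comments note
        void* shared = MapViewOfFile(maphandle, FILE_MAP_ALL_ACCESS, 0, 0, 0);
        void* priv   = MapViewOfFile(maphandle, FILE_MAP_COPY, 0, 0, 0); // copy-on-write

        if (shared) {
            // durability is two steps on Windows: flush the view, then the handle
            FlushViewOfFile(shared, 0);   // 0 length = whole mapping
            FlushFileBuffers(fd);
        }

        if (priv)   UnmapViewOfFile(priv);
        if (shared) UnmapViewOfFile(shared);
        CloseHandle(maphandle);
        CloseHandle(fd);
        return 0;
    }
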
diff --git a/util/mongoutils/checksum.h b/util/mongoutils/checksum.h
index 200837bab5f..ea3d05131ce 100644
--- a/util/mongoutils/checksum.h
+++ b/util/mongoutils/checksum.h
@@ -22,7 +22,7 @@ namespace mongoutils {
/**
* this is a silly temporary implementation
*/
- inline int checksum( const char* x , int size ){
+ inline int checksum( const char* x , int size ) {
int ck = 0;
for ( int i=0; i<size; i++ )
ck += ( (int)x[i] * ( i + 1 ) );
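
The (i + 1) weight is what makes even this silly checksum order-sensitive, where a plain byte sum would collide on permutations of the same bytes. A toy check, assuming checksum.h is on the include path:

    #include <cassert>
    #include "checksum.h"   // include path is an assumption

    int main() {
        // 'a'*1 + 'b'*2 = 293, but 'b'*1 + 'a'*2 = 292
        assert( mongoutils::checksum("ab", 2) != mongoutils::checksum("ba", 2) );
        return 0;
    }
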
diff --git a/util/mongoutils/hash.h b/util/mongoutils/hash.h
index 0a87a2a52ba..49f30b3242a 100644
--- a/util/mongoutils/hash.h
+++ b/util/mongoutils/hash.h
@@ -19,7 +19,7 @@
namespace mongoutils {
- /** @return hash of a pointer to an unsigned. so you get a 32 bit hash out, regardless of whether
+ /** @return hash of a pointer to an unsigned. so you get a 32 bit hash out, regardless of whether
pointers are 32 or 64 bit on the particular platform.
is there a faster way to impl this that hashes just as well?
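
This hunk carries only the contract -- pointer in, 32-bit hash out, on either pointer width -- not the body. Purely as an illustration of one way to satisfy that contract (this is not the function's actual implementation):

    #include <cstdint>

    // illustrative only -- the real body is not shown in this hunk
    inline unsigned pointerHash(void* p) {
        unsigned long long v = (unsigned long long)(uintptr_t)p;
        return (unsigned)(v ^ (v >> 32));   // fold the high word into the low
    }
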
diff --git a/util/mongoutils/html.h b/util/mongoutils/html.h
index e8502ecdd66..f79e6ca514f 100644
--- a/util/mongoutils/html.h
+++ b/util/mongoutils/html.h
@@ -2,7 +2,7 @@
#pragma once
-/* Things in the mongoutils namespace
+/* Things in the mongoutils namespace
(1) are not database specific, rather, true utilities
(2) are cross platform
(3) may require boost headers, but not libs
@@ -37,41 +37,41 @@ namespace mongoutils {
inline string _tr() { return "</tr>\n"; }
inline string tr() { return "<tr>"; }
- inline string tr(string a, string b) {
+ inline string tr(string a, string b) {
stringstream ss;
ss << "<tr><td>" << a << "</td><td>" << b << "</td></tr>\n";
return ss.str();
}
template <class T>
- inline string td(T x) {
+ inline string td(T x) {
stringstream ss;
ss << "<td>" << x << "</td>";
return ss.str();
}
- inline string td(string x) {
+ inline string td(string x) {
return "<td>" + x + "</td>";
}
- inline string th(string x) {
+ inline string th(string x) {
return "<th>" + x + "</th>";
}
- inline void tablecell( stringstream& ss , bool b ){
+ inline void tablecell( stringstream& ss , bool b ) {
ss << "<td>" << (b ? "<b>X</b>" : "") << "</td>";
}
- template< typename T>
- inline void tablecell( stringstream& ss , const T& t ){
+ template< typename T>
+ inline void tablecell( stringstream& ss , const T& t ) {
ss << "<td>" << t << "</td>";
}
-
- inline string table(const char *headers[] = 0, bool border = true) {
+
+ inline string table(const char *headers[] = 0, bool border = true) {
stringstream ss;
- ss << "\n<table "
- << (border?"border=1 ":"")
- << "cellpadding=2 cellspacing=0>\n";
- if( headers ) {
+ ss << "\n<table "
+ << (border?"border=1 ":"")
+ << "cellpadding=2 cellspacing=0>\n";
+ if( headers ) {
ss << "<tr>";
- while( *headers ) {
+ while( *headers ) {
ss << "<th>" << *headers << "</th>";
headers++;
}
@@ -80,18 +80,18 @@ namespace mongoutils {
return ss.str();
}
- inline string start(string title) {
+ inline string start(string title) {
stringstream ss;
ss << "<html><head>\n<title>";
ss << title;
ss << "</title>\n";
ss << "<style type=\"text/css\" media=\"screen\">"
- "body { font-family: helvetica, arial, san-serif }\n"
- "table { border-collapse:collapse; border-color:#999; margin-top:.5em }\n"
- "th { background-color:#bbb; color:#000 }\n"
- "td,th { padding:.25em }\n"
- "</style>\n";
+ "body { font-family: helvetica, arial, san-serif }\n"
+ "table { border-collapse:collapse; border-color:#999; margin-top:.5em }\n"
+ "th { background-color:#bbb; color:#000 }\n"
+ "td,th { padding:.25em }\n"
+ "</style>\n";
ss << "</head>\n<body>\n";
return ss.str();
@@ -141,7 +141,7 @@ namespace mongoutils {
}
/* does NOT escape the strings. */
- inline string a(string href, string title="", string contentHtml = "") {
+ inline string a(string href, string title="", string contentHtml = "") {
stringstream ss;
ss << "<a";
if( !href.empty() ) ss << " href=\"" << href << '"';
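
Typical use of these builders, assuming html.h is on the include path. Note that table() expects a null-terminated header array, per the while( *headers ) loop above, and that no closing helper for the table appears in this hunk, so the sketch writes the closing tag by hand:

    #include <iostream>
    #include <sstream>
    #include "html.h"   // include path is an assumption

    int main() {
        using namespace mongoutils;
        const char* headers[] = { "name", "value", 0 };   // 0-terminated
        std::stringstream ss;
        ss << html::table(headers);
        ss << html::tr("uptime", "42s");
        ss << "</table>\n";   // closed by hand; no closing helper in this hunk
        std::cout << ss.str();
        return 0;
    }
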
diff --git a/util/mongoutils/str.h b/util/mongoutils/str.h
index ea0ce175b9c..ea8f9384d62 100644
--- a/util/mongoutils/str.h
+++ b/util/mongoutils/str.h
@@ -17,14 +17,14 @@
#pragma once
-/* Things in the mongoutils namespace
+/* Things in the mongoutils namespace
(1) are not database specific, rather, true utilities
(2) are cross platform
(3) may require boost headers, but not libs
(4) are clean and easy to use in any c++ project without pulling in lots of other stuff
- Note: within this module, we use int for all offsets -- there are no unsigned offsets
- and no size_t's. If you need 3 gigabyte long strings, don't use this module.
+ Note: within this module, we use int for all offsets -- there are no unsigned offsets
+ and no size_t's. If you need 3 gigabyte long strings, don't use this module.
*/
#include <string>
@@ -37,12 +37,12 @@ namespace mongoutils {
typedef std::string string;
- /** the idea here is to make one liners easy. e.g.:
+ /** the idea here is to make one liners easy. e.g.:
return str::stream() << 1 << ' ' << 2;
since the following doesn't work:
-
+
(stringstream() << 1).str();
*/
class stream {
@@ -61,7 +61,7 @@ namespace mongoutils {
inline bool startsWith(const char *str, const char *prefix) {
const char *s = str;
const char *p = prefix;
- while( *p ) {
+ while( *p ) {
if( *p != *s ) return false;
p++; s++;
}
@@ -69,7 +69,7 @@ namespace mongoutils {
}
inline bool startsWith(string s, string p) { return startsWith(s.c_str(), p.c_str()); }
- inline bool endsWith(string s, string p) {
+ inline bool endsWith(string s, string p) {
int l = p.size();
int x = s.size();
if( x < l ) return false;
@@ -81,36 +81,44 @@ namespace mongoutils {
/** find char x, and return rest of string thereafter, or "" if not found */
inline const char * after(const char *s, char x) {
const char *p = strchr(s, x);
- return (p != 0) ? p+1 : ""; }
+ return (p != 0) ? p+1 : "";
+ }
inline string after(const string& s, char x) {
const char *p = strchr(s.c_str(), x);
- return (p != 0) ? string(p+1) : ""; }
+ return (p != 0) ? string(p+1) : "";
+ }
/** find string x, and return rest of string thereafter, or "" if not found */
inline const char * after(const char *s, const char *x) {
const char *p = strstr(s, x);
- return (p != 0) ? p+strlen(x) : ""; }
+ return (p != 0) ? p+strlen(x) : "";
+ }
inline string after(string s, string x) {
const char *p = strstr(s.c_str(), x.c_str());
- return (p != 0) ? string(p+x.size()) : ""; }
+ return (p != 0) ? string(p+x.size()) : "";
+ }
/** @return true if s contains x */
- inline bool contains(string s, string x) {
- return strstr(s.c_str(), x.c_str()) != 0; }
- inline bool contains(string s, char x) {
- return strchr(s.c_str(), x) != 0; }
+ inline bool contains(string s, string x) {
+ return strstr(s.c_str(), x.c_str()) != 0;
+ }
+ inline bool contains(string s, char x) {
+ return strchr(s.c_str(), x) != 0;
+ }
    /** @return everything before the character x, else entire string */
inline string before(const string& s, char x) {
const char *p = strchr(s.c_str(), x);
- return (p != 0) ? s.substr(0, p-s.c_str()) : s; }
+ return (p != 0) ? s.substr(0, p-s.c_str()) : s;
+ }
    /** @return everything before the string x, else entire string */
inline string before(const string& s, const string& x) {
const char *p = strstr(s.c_str(), x.c_str());
- return (p != 0) ? s.substr(0, p-s.c_str()) : s; }
+ return (p != 0) ? s.substr(0, p-s.c_str()) : s;
+ }
- /** check if if strings share a common starting prefix
+ /** check if strings share a common starting prefix
@return offset of divergence (or length if equal). 0=nothing in common. */
inline int shareCommonPrefix(const char *p, const char *q) {
int ofs = 0;
@@ -121,7 +129,8 @@ namespace mongoutils {
break;
p++; q++; ofs++;
}
- return ofs; }
+ return ofs;
+ }
inline int shareCommonPrefix(const string &a, const string &b)
{ return shareCommonPrefix(a.c_str(), b.c_str()); }
@@ -129,7 +138,7 @@ namespace mongoutils {
inline unsigned toUnsigned(const string& a) {
unsigned x = 0;
const char *p = a.c_str();
- while( 1 ) {
+ while( 1 ) {
if( !isdigit(*p) )
break;
x = x * 10 + (*p - '0');
@@ -138,7 +147,7 @@ namespace mongoutils {
return x;
}
- /** split a string on a specific char. We don't split N times, just once
+ /** split a string on a specific char. We don't split N times, just once
on the first occurrence. If char not present entire string is in L
and R is empty.
@return true if char found
@@ -146,7 +155,7 @@ namespace mongoutils {
inline bool splitOn(const string &s, char c, string& L, string& R) {
const char *start = s.c_str();
const char *p = strchr(start, c);
- if( p == 0 ) {
+ if( p == 0 ) {
L = s; R.clear();
return false;
}
@@ -158,7 +167,7 @@ namespace mongoutils {
inline bool rSplitOn(const string &s, char c, string& L, string& R) {
const char *start = s.c_str();
const char *p = strrchr(start, c);
- if( p == 0 ) {
+ if( p == 0 ) {
L = s; R.clear();
return false;
}
@@ -168,7 +177,7 @@ namespace mongoutils {
}
/** @return number of occurrences of c in s */
- inline unsigned count( const string& s , char c ){
+ inline unsigned count( const string& s , char c ) {
unsigned n=0;
for ( unsigned i=0; i<s.size(); i++ )
if ( s[i] == c )
@@ -177,18 +186,18 @@ namespace mongoutils {
}
/** trim leading spaces. spaces only, not tabs etc. */
- inline string ltrim(const string& s) {
- const char *p = s.c_str();
- while( *p == ' ' ) p++;
- return p;
- }
-
+ inline string ltrim(const string& s) {
+ const char *p = s.c_str();
+ while( *p == ' ' ) p++;
+ return p;
+ }
+
/** remove trailing chars in place */
- inline void stripTrailing(string& s, const char *chars) {
+ inline void stripTrailing(string& s, const char *chars) {
string::iterator i = s.end();
- while( s.begin() != i ) {
+ while( s.begin() != i ) {
i--;
- if( contains(chars, *i) ) {
+ if( contains(chars, *i) ) {
s.erase(i);
}
}
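
The unit test below exercises stripTrailing and after; the other helpers work like this, assuming str.h is on the include path:

    #include <cassert>
    #include "str.h"   // include path is an assumption

    int main() {
        using namespace mongoutils;
        std::string L, R;
        assert( str::splitOn("key=value", '=', L, R) );   // splits on the first '='
        assert( L == "key" && R == "value" );
        assert( str::before("a.b.c", '.') == "a" );
        assert( str::after("a.b.c", '.') == "b.c" );
        assert( str::contains("hello", "ell") );
        assert( str::shareCommonPrefix("abcX", "abcY") == 3 );
        return 0;
    }
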
diff --git a/util/mongoutils/test.cpp b/util/mongoutils/test.cpp
index 442691a5dce..d8ee46173b2 100755..100644
--- a/util/mongoutils/test.cpp
+++ b/util/mongoutils/test.cpp
@@ -1,45 +1,45 @@
-/* @file test.cpp
- utils/mongoutils/test.cpp
- unit tests for mongoutils
-*/
-
-/*
- * Copyright 2010 10gen Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "str.h"
-#include "html.h"
-#include <assert.h>
-
-using namespace std;
-using namespace mongoutils;
-
-int main() {
- {
- string s = "abcde";
- str::stripTrailing(s, "ef");
- assert( s == "abcd" );
- str::stripTrailing(s, "abcd");
- assert( s.empty() );
- s = "abcddd";
- str::stripTrailing(s, "d");
- assert( s == "abc" );
- }
-
- string x = str::after("abcde", 'c');
- assert( x == "de" );
- assert( str::after("abcde", 'x') == "" );
- return 0;
-}
+/* @file test.cpp
+   util/mongoutils/test.cpp
+ unit tests for mongoutils
+*/
+
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "str.h"
+#include "html.h"
+#include <assert.h>
+
+using namespace std;
+using namespace mongoutils;
+
+int main() {
+ {
+ string s = "abcde";
+ str::stripTrailing(s, "ef");
+ assert( s == "abcd" );
+ str::stripTrailing(s, "abcd");
+ assert( s.empty() );
+ s = "abcddd";
+ str::stripTrailing(s, "d");
+ assert( s == "abc" );
+ }
+
+ string x = str::after("abcde", 'c');
+ assert( x == "de" );
+ assert( str::after("abcde", 'x') == "" );
+ return 0;
+}
diff --git a/util/moveablebuffer.h b/util/moveablebuffer.h
index 375e26b9f22..e01f2d8d9a4 100644
--- a/util/moveablebuffer.h
+++ b/util/moveablebuffer.h
@@ -21,9 +21,9 @@
namespace mongo {
/** this is a sort of smart pointer class where we can move where something is and all the pointers will adjust.
- not threadsafe.
+ not threadsafe.
*/
- struct MoveableBuffer {
+ struct MoveableBuffer {
MoveableBuffer();
MoveableBuffer(void *);
MoveableBuffer& operator=(const MoveableBuffer&);
@@ -40,12 +40,12 @@ namespace mongo {
inline MoveableBuffer::MoveableBuffer(void *_p) : p(_p) { }
- inline MoveableBuffer& MoveableBuffer::operator=(const MoveableBuffer& r) {
+ inline MoveableBuffer& MoveableBuffer::operator=(const MoveableBuffer& r) {
p = r.p;
return *this;
}
- inline MoveableBuffer::~MoveableBuffer() {
+ inline MoveableBuffer::~MoveableBuffer() {
}
}
diff --git a/util/ntservice.cpp b/util/ntservice.cpp
index eb2991099e3..ccf2981b071 100644
--- a/util/ntservice.cpp
+++ b/util/ntservice.cpp
@@ -25,13 +25,13 @@
namespace mongo {
- void shutdownServer();
+ void shutdownServer();
- SERVICE_STATUS_HANDLE ServiceController::_statusHandle = NULL;
- std::wstring ServiceController::_serviceName;
- ServiceCallback ServiceController::_serviceCallback = NULL;
+ SERVICE_STATUS_HANDLE ServiceController::_statusHandle = NULL;
+ std::wstring ServiceController::_serviceName;
+ ServiceCallback ServiceController::_serviceCallback = NULL;
- ServiceController::ServiceController() {}
+ ServiceController::ServiceController() {}
bool initService();
@@ -49,14 +49,14 @@ namespace mongo {
std::wstring windowsServicePassword = L"";
if (params.count("install")) {
- if ( ! params.count( "logpath" ) ){
+ if ( ! params.count( "logpath" ) ) {
cerr << "--install has to be used with --logpath" << endl;
::exit(-1);
}
installService = true;
}
if (params.count("reinstall")) {
- if ( ! params.count( "logpath" ) ){
+ if ( ! params.count( "logpath" ) ) {
cerr << "--reinstall has to be used with --logpath" << endl;
::exit(-1);
}
@@ -69,46 +69,46 @@ namespace mongo {
startService = true;
}
- if (params.count("serviceName")){
+ if (params.count("serviceName")) {
string x = params["serviceName"].as<string>();
windowsServiceName = wstring(x.size(),L' ');
for ( size_t i=0; i<x.size(); i++) {
windowsServiceName[i] = x[i];
- }
+ }
}
- if (params.count("serviceDisplayName")){
+ if (params.count("serviceDisplayName")) {
string x = params["serviceDisplayName"].as<string>();
windowsServiceDisplayName = wstring(x.size(),L' ');
for ( size_t i=0; i<x.size(); i++) {
windowsServiceDisplayName[i] = x[i];
- }
+ }
}
- if (params.count("serviceDescription")){
+ if (params.count("serviceDescription")) {
string x = params["serviceDescription"].as<string>();
windowsServiceDescription = wstring(x.size(),L' ');
for ( size_t i=0; i<x.size(); i++) {
windowsServiceDescription[i] = x[i];
- }
+ }
}
- if (params.count("serviceUser")){
+ if (params.count("serviceUser")) {
string x = params["serviceUser"].as<string>();
windowsServiceUser = wstring(x.size(),L' ');
for ( size_t i=0; i<x.size(); i++) {
windowsServiceUser[i] = x[i];
- }
+ }
}
- if (params.count("servicePassword")){
+ if (params.count("servicePassword")) {
string x = params["servicePassword"].as<string>();
windowsServicePassword = wstring(x.size(),L' ');
for ( size_t i=0; i<x.size(); i++) {
windowsServicePassword[i] = x[i];
- }
+ }
}
if ( reinstallService ) {
ServiceController::removeService( windowsServiceName );
- }
- if ( installService || reinstallService ) {
+ }
+ if ( installService || reinstallService ) {
if ( !ServiceController::installService( windowsServiceName , windowsServiceDisplayName, windowsServiceDescription, windowsServiceUser, windowsServicePassword, dbpath, argc, argv ) )
dbexit( EXIT_NTSERVICE_ERROR );
dbexit( EXIT_CLEAN );
@@ -121,11 +121,11 @@ namespace mongo {
else if ( startService ) {
if ( !ServiceController::startService( windowsServiceName , mongo::initService ) )
dbexit( EXIT_NTSERVICE_ERROR );
- return true;
+ return true;
}
- return false;
+ return false;
}
-
+
bool ServiceController::installService( const std::wstring& serviceName, const std::wstring& displayName, const std::wstring& serviceDesc, const std::wstring& serviceUser, const std::wstring& servicePassword, const std::string dbpath, int argc, char* argv[] ) {
assert(argc >= 1);
@@ -133,26 +133,30 @@ namespace mongo {
if ( strchr(argv[0], ':') ) { // a crude test for fully qualified path
commandLine << '"' << argv[0] << "\" ";
- } else {
+ }
+ else {
char buffer[256];
assert( _getcwd(buffer, 256) );
commandLine << '"' << buffer << '\\' << argv[0] << "\" ";
}
-
+
for ( int i = 1; i < argc; i++ ) {
std::string arg( argv[ i ] );
// replace install command to indicate process is being started as a service
if ( arg == "--install" || arg == "--reinstall" ) {
arg = "--service";
- } else if ( arg == "--dbpath" && i + 1 < argc ) {
+ }
+ else if ( arg == "--dbpath" && i + 1 < argc ) {
commandLine << arg << " \"" << dbpath << "\" ";
i++;
continue;
- } else if ( arg == "--logpath" && i + 1 < argc ) {
+ }
+ else if ( arg == "--logpath" && i + 1 < argc ) {
commandLine << arg << " \"" << argv[i+1] << "\" ";
i++;
continue;
- } else if ( arg.length() > 9 && arg.substr(0, 9) == "--service" ) {
+ }
+ else if ( arg.length() > 9 && arg.substr(0, 9) == "--service" ) {
// Strip off --service(Name|User|Password) arguments
i++;
continue;
@@ -167,25 +171,25 @@ namespace mongo {
return false;
}
- // Make sure servise doesn't already exist.
- // TODO: Check to see if service is in "Deleting" status, suggest the user close down Services MMC snap-ins.
- SC_HANDLE schService = ::OpenService( schSCManager, serviceName.c_str(), SERVICE_ALL_ACCESS );
- if ( schService != NULL ) {
- cerr << "There is already a service named " << toUtf8String(serviceName) << ". Aborting" << endl;
- ::CloseServiceHandle( schService );
- ::CloseServiceHandle( schSCManager );
- return false;
- }
- std::basic_ostringstream< TCHAR > commandLineWide;
- commandLineWide << commandLine.str().c_str();
-
- cerr << "Creating service " << toUtf8String(serviceName) << "." << endl;
-
- // create new service
- schService = ::CreateService( schSCManager, serviceName.c_str(), displayName.c_str(),
- SERVICE_ALL_ACCESS, SERVICE_WIN32_OWN_PROCESS,
- SERVICE_AUTO_START, SERVICE_ERROR_NORMAL,
- commandLineWide.str().c_str(), NULL, NULL, L"\0\0", NULL, NULL );
+ // Make sure service doesn't already exist.
+ // TODO: Check to see if service is in "Deleting" status, suggest the user close down Services MMC snap-ins.
+ SC_HANDLE schService = ::OpenService( schSCManager, serviceName.c_str(), SERVICE_ALL_ACCESS );
+ if ( schService != NULL ) {
+ cerr << "There is already a service named " << toUtf8String(serviceName) << ". Aborting" << endl;
+ ::CloseServiceHandle( schService );
+ ::CloseServiceHandle( schSCManager );
+ return false;
+ }
+ std::basic_ostringstream< TCHAR > commandLineWide;
+ commandLineWide << commandLine.str().c_str();
+
+ cerr << "Creating service " << toUtf8String(serviceName) << "." << endl;
+
+ // create new service
+ schService = ::CreateService( schSCManager, serviceName.c_str(), displayName.c_str(),
+ SERVICE_ALL_ACCESS, SERVICE_WIN32_OWN_PROCESS,
+ SERVICE_AUTO_START, SERVICE_ERROR_NORMAL,
+ commandLineWide.str().c_str(), NULL, NULL, L"\0\0", NULL, NULL );
if ( schService == NULL ) {
DWORD err = ::GetLastError();
cerr << "Error creating service: " << GetWinErrMsg(err) << endl;
@@ -193,56 +197,56 @@ namespace mongo {
return false;
}
- cerr << "Service creation successful." << endl;
- cerr << "Service can be started from the command line via 'net start \"" << toUtf8String(serviceName) << "\"'." << endl;
-
- bool serviceInstalled;
-
- // TODO: If neccessary grant user "Login as a Service" permission.
- if ( !serviceUser.empty() ) {
- std::wstring actualServiceUser;
- if ( serviceUser.find(L"\\") == string::npos ) {
- actualServiceUser = L".\\" + serviceUser;
- }
- else {
- actualServiceUser = serviceUser;
- }
-
- cerr << "Setting service login credentials. User: " << toUtf8String(actualServiceUser) << endl;
- serviceInstalled = ::ChangeServiceConfig( schService, SERVICE_NO_CHANGE, SERVICE_NO_CHANGE, SERVICE_NO_CHANGE, NULL, NULL, NULL, NULL, actualServiceUser.c_str(), servicePassword.c_str(), NULL );
- if ( !serviceInstalled ) {
- cerr << "Setting service login failed. Service has 'LocalService' permissions." << endl;
- }
- }
-
- // set the service description
- SERVICE_DESCRIPTION serviceDescription;
- serviceDescription.lpDescription = (LPTSTR)serviceDesc.c_str();
- serviceInstalled = ::ChangeServiceConfig2( schService, SERVICE_CONFIG_DESCRIPTION, &serviceDescription );
-
-
- if ( serviceInstalled ) {
- SC_ACTION aActions[ 3 ] = { { SC_ACTION_RESTART, 0 }, { SC_ACTION_RESTART, 0 }, { SC_ACTION_RESTART, 0 } };
-
- SERVICE_FAILURE_ACTIONS serviceFailure;
- ZeroMemory( &serviceFailure, sizeof( SERVICE_FAILURE_ACTIONS ) );
- serviceFailure.cActions = 3;
- serviceFailure.lpsaActions = aActions;
-
- // set service recovery options
- serviceInstalled = ::ChangeServiceConfig2( schService, SERVICE_CONFIG_FAILURE_ACTIONS, &serviceFailure );
-
- }
- else {
- cerr << "Could not set service description. Check the event log for more details." << endl;
- }
-
- ::CloseServiceHandle( schService );
- ::CloseServiceHandle( schSCManager );
-
- return serviceInstalled;
+ cerr << "Service creation successful." << endl;
+ cerr << "Service can be started from the command line via 'net start \"" << toUtf8String(serviceName) << "\"'." << endl;
+
+ bool serviceInstalled;
+
+ // TODO: If necessary grant user "Login as a Service" permission.
+ if ( !serviceUser.empty() ) {
+ std::wstring actualServiceUser;
+ if ( serviceUser.find(L"\\") == string::npos ) {
+ actualServiceUser = L".\\" + serviceUser;
+ }
+ else {
+ actualServiceUser = serviceUser;
+ }
+
+ cerr << "Setting service login credentials. User: " << toUtf8String(actualServiceUser) << endl;
+ serviceInstalled = ::ChangeServiceConfig( schService, SERVICE_NO_CHANGE, SERVICE_NO_CHANGE, SERVICE_NO_CHANGE, NULL, NULL, NULL, NULL, actualServiceUser.c_str(), servicePassword.c_str(), NULL );
+ if ( !serviceInstalled ) {
+ cerr << "Setting service login failed. Service has 'LocalService' permissions." << endl;
+ }
+ }
+
+ // set the service description
+ SERVICE_DESCRIPTION serviceDescription;
+ serviceDescription.lpDescription = (LPTSTR)serviceDesc.c_str();
+ serviceInstalled = ::ChangeServiceConfig2( schService, SERVICE_CONFIG_DESCRIPTION, &serviceDescription );
+
+
+ if ( serviceInstalled ) {
+ SC_ACTION aActions[ 3 ] = { { SC_ACTION_RESTART, 0 }, { SC_ACTION_RESTART, 0 }, { SC_ACTION_RESTART, 0 } };
+
+ SERVICE_FAILURE_ACTIONS serviceFailure;
+ ZeroMemory( &serviceFailure, sizeof( SERVICE_FAILURE_ACTIONS ) );
+ serviceFailure.cActions = 3;
+ serviceFailure.lpsaActions = aActions;
+
+ // set service recovery options
+ serviceInstalled = ::ChangeServiceConfig2( schService, SERVICE_CONFIG_FAILURE_ACTIONS, &serviceFailure );
+
+ }
+ else {
+ cerr << "Could not set service description. Check the event log for more details." << endl;
+ }
+
+ ::CloseServiceHandle( schService );
+ ::CloseServiceHandle( schSCManager );
+
+ return serviceInstalled;
}
-
+
bool ServiceController::removeService( const std::wstring& serviceName ) {
SC_HANDLE schSCManager = ::OpenSCManager( NULL, NULL, SC_MANAGER_ALL_ACCESS );
if ( schSCManager == NULL ) {
@@ -251,97 +255,96 @@ namespace mongo {
return false;
}
- SC_HANDLE schService = ::OpenService( schSCManager, serviceName.c_str(), SERVICE_ALL_ACCESS );
- if ( schService == NULL ) {
- cerr << "Could not find a service named " << toUtf8String(serviceName) << " to uninstall." << endl;
- ::CloseServiceHandle( schSCManager );
- return false;
- }
-
- SERVICE_STATUS serviceStatus;
-
- // stop service if its running
- if ( ::ControlService( schService, SERVICE_CONTROL_STOP, &serviceStatus ) ) {
- cerr << "Service " << toUtf8String(serviceName) << " is currently running. Stopping service." << endl;
- while ( ::QueryServiceStatus( schService, &serviceStatus ) ) {
- if ( serviceStatus.dwCurrentState == SERVICE_STOP_PENDING )
- {
- Sleep( 1000 );
- }
- else { break; }
- }
- cerr << "Service stopped." << endl;
- }
-
- cerr << "Deleting service " << toUtf8String(serviceName) << "." << endl;
- bool serviceRemoved = ::DeleteService( schService );
-
- ::CloseServiceHandle( schService );
- ::CloseServiceHandle( schSCManager );
-
- if (serviceRemoved) {
- cerr << "Service deleted successfully." << endl;
- }
- else {
- cerr << "Failed to delete service." << endl;
- }
-
- return serviceRemoved;
+ SC_HANDLE schService = ::OpenService( schSCManager, serviceName.c_str(), SERVICE_ALL_ACCESS );
+ if ( schService == NULL ) {
+ cerr << "Could not find a service named " << toUtf8String(serviceName) << " to uninstall." << endl;
+ ::CloseServiceHandle( schSCManager );
+ return false;
+ }
+
+ SERVICE_STATUS serviceStatus;
+
+ // stop the service if it's running
+ if ( ::ControlService( schService, SERVICE_CONTROL_STOP, &serviceStatus ) ) {
+ cerr << "Service " << toUtf8String(serviceName) << " is currently running. Stopping service." << endl;
+ while ( ::QueryServiceStatus( schService, &serviceStatus ) ) {
+ if ( serviceStatus.dwCurrentState == SERVICE_STOP_PENDING ) {
+ Sleep( 1000 );
+ }
+ else { break; }
+ }
+ cerr << "Service stopped." << endl;
+ }
+
+ cerr << "Deleting service " << toUtf8String(serviceName) << "." << endl;
+ bool serviceRemoved = ::DeleteService( schService );
+
+ ::CloseServiceHandle( schService );
+ ::CloseServiceHandle( schSCManager );
+
+ if (serviceRemoved) {
+ cerr << "Service deleted successfully." << endl;
+ }
+ else {
+ cerr << "Failed to delete service." << endl;
+ }
+
+ return serviceRemoved;
}
-
+
bool ServiceController::startService( const std::wstring& serviceName, ServiceCallback startService ) {
_serviceName = serviceName;
- _serviceCallback = startService;
-
+ _serviceCallback = startService;
+
SERVICE_TABLE_ENTRY dispTable[] = {
- { (LPTSTR)serviceName.c_str(), (LPSERVICE_MAIN_FUNCTION)ServiceController::initService },
- { NULL, NULL }
- };
+ { (LPTSTR)serviceName.c_str(), (LPSERVICE_MAIN_FUNCTION)ServiceController::initService },
+ { NULL, NULL }
+ };
- return StartServiceCtrlDispatcher( dispTable );
+ return StartServiceCtrlDispatcher( dispTable );
}
-
+
bool ServiceController::reportStatus( DWORD reportState, DWORD waitHint ) {
- if ( _statusHandle == NULL )
- return false;
-
- static DWORD checkPoint = 1;
-
- SERVICE_STATUS ssStatus;
-
- ssStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS;
- ssStatus.dwServiceSpecificExitCode = 0;
- ssStatus.dwControlsAccepted = reportState == SERVICE_START_PENDING ? 0 : SERVICE_ACCEPT_STOP;
- ssStatus.dwCurrentState = reportState;
- ssStatus.dwWin32ExitCode = NO_ERROR;
- ssStatus.dwWaitHint = waitHint;
- ssStatus.dwCheckPoint = ( reportState == SERVICE_RUNNING || reportState == SERVICE_STOPPED ) ? 0 : checkPoint++;
-
- return SetServiceStatus( _statusHandle, &ssStatus );
- }
-
+ if ( _statusHandle == NULL )
+ return false;
+
+ static DWORD checkPoint = 1;
+
+ SERVICE_STATUS ssStatus;
+
+ ssStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS;
+ ssStatus.dwServiceSpecificExitCode = 0;
+ ssStatus.dwControlsAccepted = reportState == SERVICE_START_PENDING ? 0 : SERVICE_ACCEPT_STOP;
+ ssStatus.dwCurrentState = reportState;
+ ssStatus.dwWin32ExitCode = NO_ERROR;
+ ssStatus.dwWaitHint = waitHint;
+ ssStatus.dwCheckPoint = ( reportState == SERVICE_RUNNING || reportState == SERVICE_STOPPED ) ? 0 : checkPoint++;
+
+ return SetServiceStatus( _statusHandle, &ssStatus );
+ }
+
void WINAPI ServiceController::initService( DWORD argc, LPTSTR *argv ) {
- _statusHandle = RegisterServiceCtrlHandler( _serviceName.c_str(), serviceCtrl );
- if ( !_statusHandle )
- return;
-
- reportStatus( SERVICE_START_PENDING, 1000 );
-
- _serviceCallback();
- dbexit( EXIT_CLEAN );
-
- reportStatus( SERVICE_STOPPED );
- }
-
- void WINAPI ServiceController::serviceCtrl( DWORD ctrlCode ) {
- switch ( ctrlCode ) {
- case SERVICE_CONTROL_STOP:
- case SERVICE_CONTROL_SHUTDOWN:
- shutdownServer();
- reportStatus( SERVICE_STOPPED );
- return;
- }
- }
+ _statusHandle = RegisterServiceCtrlHandler( _serviceName.c_str(), serviceCtrl );
+ if ( !_statusHandle )
+ return;
+
+ reportStatus( SERVICE_START_PENDING, 1000 );
+
+ _serviceCallback();
+ dbexit( EXIT_CLEAN );
+
+ reportStatus( SERVICE_STOPPED );
+ }
+
+ void WINAPI ServiceController::serviceCtrl( DWORD ctrlCode ) {
+ switch ( ctrlCode ) {
+ case SERVICE_CONTROL_STOP:
+ case SERVICE_CONTROL_SHUTDOWN:
+ shutdownServer();
+ reportStatus( SERVICE_STOPPED );
+ return;
+ }
+ }
} // namespace mongo
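
The control flow here is the standard NT service skeleton: main() hands the process over to the SCM dispatcher, the service-main callback registers a control handler and reports status transitions, and the handler fields stop/shutdown requests. Stripped to its bones -- the service name and the trimmed error handling are illustrative:

    #include <windows.h>

    static SERVICE_STATUS_HANDLE g_handle = NULL;

    static void report(DWORD state) {
        SERVICE_STATUS s = {};
        s.dwServiceType      = SERVICE_WIN32_OWN_PROCESS;
        s.dwCurrentState     = state;
        s.dwControlsAccepted = (state == SERVICE_START_PENDING) ? 0 : SERVICE_ACCEPT_STOP;
        SetServiceStatus(g_handle, &s);
    }

    static void WINAPI ctrlHandler(DWORD code) {
        if (code == SERVICE_CONTROL_STOP || code == SERVICE_CONTROL_SHUTDOWN)
            report(SERVICE_STOPPED);   // a real service would stop its work first
    }

    static void WINAPI serviceMain(DWORD argc, LPTSTR* argv) {
        (void)argc; (void)argv;
        g_handle = RegisterServiceCtrlHandler(TEXT("demoSvc"), ctrlHandler);
        if (!g_handle) return;
        report(SERVICE_RUNNING);
        // ... the service's real work runs here until told to stop ...
    }

    int main() {
        SERVICE_TABLE_ENTRY dispTable[] = {
            { (LPTSTR)TEXT("demoSvc"), serviceMain },
            { NULL, NULL }
        };
        // blocks on the SCM until all services in the table have stopped
        return StartServiceCtrlDispatcher(dispTable) ? 0 : 1;
    }
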
diff --git a/util/ntservice.h b/util/ntservice.h
index 34918fee5c0..4958d0395aa 100644
--- a/util/ntservice.h
+++ b/util/ntservice.h
@@ -22,26 +22,26 @@
namespace mongo {
- typedef bool ( *ServiceCallback )( void );
+ typedef bool ( *ServiceCallback )( void );
bool serviceParamsCheck( program_options::variables_map& params, const std::string dbpath, int argc, char* argv[] );
class ServiceController {
public:
ServiceController();
virtual ~ServiceController() {}
-
+
static bool installService( const std::wstring& serviceName, const std::wstring& displayName, const std::wstring& serviceDesc, const std::wstring& serviceUser, const std::wstring& servicePassword, const std::string dbpath, int argc, char* argv[] );
static bool removeService( const std::wstring& serviceName );
static bool startService( const std::wstring& serviceName, ServiceCallback startService );
static bool reportStatus( DWORD reportState, DWORD waitHint = 0 );
-
+
static void WINAPI initService( DWORD argc, LPTSTR *argv );
- static void WINAPI serviceCtrl( DWORD ctrlCode );
-
+ static void WINAPI serviceCtrl( DWORD ctrlCode );
+
protected:
- static std::wstring _serviceName;
- static SERVICE_STATUS_HANDLE _statusHandle;
- static ServiceCallback _serviceCallback;
+ static std::wstring _serviceName;
+ static SERVICE_STATUS_HANDLE _statusHandle;
+ static ServiceCallback _serviceCallback;
};
} // namespace mongo
diff --git a/util/optime.h b/util/optime.h
index 4e4340c34bf..6eb55abb86c 100644
--- a/util/optime.h
+++ b/util/optime.h
@@ -21,9 +21,9 @@
namespace mongo {
void exitCleanly( ExitCode code );
-
+
struct ClockSkewException : public DBException {
- ClockSkewException() : DBException( "clock skew exception" , 20001 ){}
+ ClockSkewException() : DBException( "clock skew exception" , 20001 ) {}
};
/* replsets use RSOpTime.
@@ -62,7 +62,7 @@ namespace mongo {
}
static OpTime now() {
unsigned t = (unsigned) time(0);
- if ( t < last.secs ){
+ if ( t < last.secs ) {
bool toLog = false;
ONCE toLog = true;
RARELY toLog = true;
@@ -81,13 +81,13 @@ namespace mongo {
return last;
}
last = OpTime(t, 1);
- return last;
+ return last;
}
-
+
/* We store OpTime's in the database as BSON Date datatype -- we needed some sort of
64 bit "container" for these values. While these are not really "Dates", that seems a
better choice for now than say, Number, which is floating point. Note the BinData type
- is perhaps the cleanest choice, lacking a true unsigned64 datatype, but BinData has 5
+ is perhaps the cleanest choice, lacking a true unsigned64 datatype, but BinData has 5
bytes of overhead.
*/
unsigned long long asDate() const {
@@ -96,9 +96,9 @@ namespace mongo {
long long asLL() const {
return reinterpret_cast<const long long*>(&i)[0];
}
-
+
bool isNull() const { return secs == 0; }
-
+
string toStringLong() const {
char buf[64];
time_t_to_String(secs, buf);
@@ -107,13 +107,13 @@ namespace mongo {
ss << hex << secs << ':' << i;
return ss.str();
}
-
+
string toStringPretty() const {
stringstream ss;
ss << time_t_to_String_short(secs) << ':' << hex << i;
return ss.str();
}
-
+
string toString() const {
stringstream ss;
ss << hex << secs << ':' << i;
@@ -131,10 +131,10 @@ namespace mongo {
return secs < r.secs;
return i < r.i;
}
- bool operator<=(const OpTime& r) const {
+ bool operator<=(const OpTime& r) const {
return *this < r || *this == r;
}
- bool operator>(const OpTime& r) const {
+ bool operator>(const OpTime& r) const {
return !(*this <= r);
}
bool operator>=(const OpTime& r) const {
@@ -142,5 +142,5 @@ namespace mongo {
}
};
#pragma pack()
-
+
} // namespace mongo
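
The packing that comment describes comes down to treating secs as the high 32 bits and the increment as the low 32, which is what makes plain 64-bit comparison agree with operator< above. A sketch of the arithmetic (the struct's actual in-memory layout is endian-dependent, so the explicit shift here is an assumption for the demo):

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint32_t secs = 1234567890, inc = 7;

        // secs in the high word, increment in the low word
        uint64_t packed = ((uint64_t)secs << 32) | inc;
        uint64_t next   = ((uint64_t)secs << 32) | (inc + 1);

        // plain 64-bit comparison now matches operator<: later secs win,
        // and within the same second the increment breaks ties
        std::printf("%d\n", next > packed);   // prints 1
        return 0;
    }
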
diff --git a/util/password.h b/util/password.h
index 18294b2bd0b..519f712ee7e 100644
--- a/util/password.h
+++ b/util/password.h
@@ -39,7 +39,7 @@ namespace mongo {
return false;
}
- void xparse( boost::any& value_store,
+ void xparse( boost::any& value_store,
const std::vector<std::string>& new_tokens ) const {
if ( !value_store.empty() )
#if BOOST_VERSION >= 104200
@@ -49,7 +49,7 @@ namespace mongo {
#endif
else if ( !new_tokens.empty() )
boost::program_options::typed_value<std::string>::xparse
- (value_store, new_tokens);
+ (value_store, new_tokens);
else
value_store = std::string();
}
diff --git a/util/paths.h b/util/paths.h
index 322795c2f51..ce0a3785090 100644
--- a/util/paths.h
+++ b/util/paths.h
@@ -24,37 +24,37 @@ using namespace mongoutils;
namespace mongo {
- extern string dbpath;
+ extern string dbpath;
- /** this is very much like a boost::path. however, we define a new type to get some type
+ /** this is very much like a boost::path. however, we define a new type to get some type
    checking. if you want to say 'my param MUST be a relative path', use this.
*/
- struct RelativePath {
+ struct RelativePath {
string _p;
bool empty() const { return _p.empty(); }
- static RelativePath fromRelativePath(string f) {
+ static RelativePath fromRelativePath(string f) {
RelativePath rp;
rp._p = f;
return rp;
}
/** from a full path */
- static RelativePath fromFullPath(path f) {
+ static RelativePath fromFullPath(path f) {
path dbp(dbpath); // normalizes / and backslash
string fullpath = f.string();
string relative = str::after(fullpath, dbp.string());
- if( relative.empty() ) {
+ if( relative.empty() ) {
log() << "warning file is not under db path? " << fullpath << ' ' << dbp.string() << endl;
RelativePath rp;
rp._p = fullpath;
return rp;
}
- /*uassert(13600,
- str::stream() << "file path is not under the db path? " << fullpath << ' ' << dbpath,
+ /*uassert(13600,
+ str::stream() << "file path is not under the db path? " << fullpath << ' ' << dbpath,
relative != fullpath);*/
- if( str::startsWith(relative, "/") || str::startsWith(relative, "\\") ) {
+ if( str::startsWith(relative, "/") || str::startsWith(relative, "\\") ) {
relative.erase(0, 1);
}
RelativePath rp;
@@ -68,11 +68,11 @@ namespace mongo {
bool operator==(const RelativePath& r) const { return _p == r._p; }
bool operator<(const RelativePath& r) const { return _p < r._p; }
- string asFullPath() const {
+ string asFullPath() const {
path x(dbpath);
x /= _p;
return x.string();
- }
+ }
};
diff --git a/util/processinfo.cpp b/util/processinfo.cpp
index 3257b5edadf..d6ba6baf1f7 100644
--- a/util/processinfo.cpp
+++ b/util/processinfo.cpp
@@ -22,26 +22,26 @@
using namespace std;
namespace mongo {
-
+
class PidFileWiper {
public:
- ~PidFileWiper(){
+ ~PidFileWiper() {
ofstream out( path.c_str() , ios_base::out );
- out.close();
+ out.close();
}
-
- void write( const string& p ){
+
+ void write( const string& p ) {
path = p;
ofstream out( path.c_str() , ios_base::out );
out << getpid() << endl;
out.close();
}
-
+
string path;
} pidFileWiper;
-
- void writePidFile( const string& path ){
+
+ void writePidFile( const string& path ) {
pidFileWiper.write( path );
- }
+ }
}
diff --git a/util/processinfo.h b/util/processinfo.h
index 8e20bebe856..52b7fa3a8e6 100644
--- a/util/processinfo.h
+++ b/util/processinfo.h
@@ -30,12 +30,12 @@ int getpid();
namespace mongo {
class BSONObjBuilder;
-
+
class ProcessInfo {
public:
ProcessInfo( pid_t pid = getpid() );
~ProcessInfo();
-
+
/**
* @return mbytes
*/
@@ -50,7 +50,7 @@ namespace mongo {
* Append platform-specific data to obj
*/
void getExtraInfo(BSONObjBuilder& info);
-
+
bool supported();
bool blockCheckSupported();
@@ -59,7 +59,7 @@ namespace mongo {
private:
pid_t _pid;
};
-
+
void writePidFile( const std::string& path );
-
+
}
diff --git a/util/processinfo_darwin.cpp b/util/processinfo_darwin.cpp
index cb54bed1a42..c1190aec438 100644
--- a/util/processinfo_darwin.cpp
+++ b/util/processinfo_darwin.cpp
@@ -36,58 +36,58 @@
using namespace std;
namespace mongo {
-
- ProcessInfo::ProcessInfo( pid_t pid ) : _pid( pid ){
+
+ ProcessInfo::ProcessInfo( pid_t pid ) : _pid( pid ) {
}
- ProcessInfo::~ProcessInfo(){
+ ProcessInfo::~ProcessInfo() {
}
- bool ProcessInfo::supported(){
+ bool ProcessInfo::supported() {
return true;
}
-
- int ProcessInfo::getVirtualMemorySize(){
+
+ int ProcessInfo::getVirtualMemorySize() {
task_t result;
-
+
mach_port_t task;
-
- if ( ( result = task_for_pid( mach_task_self() , _pid , &task) ) != KERN_SUCCESS ){
+
+ if ( ( result = task_for_pid( mach_task_self() , _pid , &task) ) != KERN_SUCCESS ) {
cout << "error getting task\n";
return 0;
}
-
+
#if !defined(__LP64__)
task_basic_info_32 ti;
#else
task_basic_info_64 ti;
#endif
mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
- if ( ( result = task_info( task , TASK_BASIC_INFO , (task_info_t)&ti, &count ) ) != KERN_SUCCESS ){
+ if ( ( result = task_info( task , TASK_BASIC_INFO , (task_info_t)&ti, &count ) ) != KERN_SUCCESS ) {
cout << "error getting task_info: " << result << endl;
return 0;
}
return (int)((double)ti.virtual_size / (1024.0 * 1024 ) );
}
-
- int ProcessInfo::getResidentSize(){
+
+ int ProcessInfo::getResidentSize() {
task_t result;
-
+
mach_port_t task;
-
- if ( ( result = task_for_pid( mach_task_self() , _pid , &task) ) != KERN_SUCCESS ){
+
+ if ( ( result = task_for_pid( mach_task_self() , _pid , &task) ) != KERN_SUCCESS ) {
cout << "error getting task\n";
return 0;
}
-
-
+
+
#if !defined(__LP64__)
task_basic_info_32 ti;
#else
task_basic_info_64 ti;
#endif
mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
- if ( ( result = task_info( task , TASK_BASIC_INFO , (task_info_t)&ti, &count ) ) != KERN_SUCCESS ){
+ if ( ( result = task_info( task , TASK_BASIC_INFO , (task_info_t)&ti, &count ) ) != KERN_SUCCESS ) {
cout << "error getting task_info: " << result << endl;
return 0;
}
@@ -96,18 +96,18 @@ namespace mongo {
void ProcessInfo::getExtraInfo(BSONObjBuilder& info) {}
- bool ProcessInfo::blockCheckSupported(){
+ bool ProcessInfo::blockCheckSupported() {
return true;
}
-
- bool ProcessInfo::blockInMemory( char * start ){
+
+ bool ProcessInfo::blockInMemory( char * start ) {
static long pageSize = 0;
- if ( pageSize == 0 ){
+ if ( pageSize == 0 ) {
pageSize = sysconf( _SC_PAGESIZE );
}
start = start - ( (unsigned long long)start % pageSize );
char x = 0;
- if ( mincore( start , 128 , &x ) ){
+ if ( mincore( start , 128 , &x ) ) {
log() << "mincore failed: " << errnoWithDescription() << endl;
return 1;
}
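
The mincore pattern above, standalone: align the address down to a page boundary, then ask the kernel whether that page is resident. Darwin flavor shown, where the vector parameter is char* (Linux declares it unsigned char*):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdint>
    #include <cstdio>

    bool pageInCore(void* addr) {
        static long pageSize = sysconf(_SC_PAGESIZE);
        char* start = (char*)((uintptr_t)addr & ~(uintptr_t)(pageSize - 1));
        char vec = 0;
        if (mincore(start, 1, &vec) != 0) {
            perror("mincore");
            return true;   // as above: on error, report "in memory" rather than fault
        }
        return vec & 1;    // low bit set = page resident
    }
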
diff --git a/util/processinfo_linux2.cpp b/util/processinfo_linux2.cpp
index adea1912909..e82e2d154aa 100644
--- a/util/processinfo_linux2.cpp
+++ b/util/processinfo_linux2.cpp
@@ -30,15 +30,15 @@ using namespace std;
#define KLF "l"
namespace mongo {
-
+
class LinuxProc {
public:
- LinuxProc( pid_t pid = getpid() ){
+ LinuxProc( pid_t pid = getpid() ) {
char name[128];
sprintf( name , "/proc/%d/stat" , pid );
-
+
FILE * f = fopen( name , "r");
- if ( ! f ){
+ if ( ! f ) {
stringstream ss;
ss << "couldn't open [" << name << "] " << errnoWithDescription();
string s = ss.str();
@@ -46,173 +46,173 @@ namespace mongo {
msgassertedNoTrace( 13538 , s.c_str() );
}
int found = fscanf(f,
- "%d %s %c "
- "%d %d %d %d %d "
- "%lu %lu %lu %lu %lu "
- "%lu %lu %ld %ld " /* utime stime cutime cstime */
- "%ld %ld "
- "%ld "
- "%ld "
- "%lu " /* start_time */
- "%lu "
- "%ld " // rss
- "%lu %"KLF"u %"KLF"u %"KLF"u %"KLF"u %"KLF"u "
- /*
- "%*s %*s %*s %*s "
- "%"KLF"u %*lu %*lu "
- "%d %d "
- "%lu %lu"
- */
-
- ,
-
- &_pid,
- _comm,
- &_state,
- &_ppid, &_pgrp, &_session, &_tty, &_tpgid,
- &_flags, &_min_flt, &_cmin_flt, &_maj_flt, &_cmaj_flt,
- &_utime, &_stime, &_cutime, &_cstime,
- &_priority, &_nice,
- &_alarm,
- &_nlwp,
- &_start_time,
- &_vsize,
- &_rss,
- &_rss_rlim, &_start_code, &_end_code, &_start_stack, &_kstk_esp, &_kstk_eip
-
- /*
- &_wchan,
- &_exit_signal, &_processor,
- &_rtprio, &_sched
- */
- );
- if ( found == 0 ){
+ "%d %s %c "
+ "%d %d %d %d %d "
+ "%lu %lu %lu %lu %lu "
+ "%lu %lu %ld %ld " /* utime stime cutime cstime */
+ "%ld %ld "
+ "%ld "
+ "%ld "
+ "%lu " /* start_time */
+ "%lu "
+ "%ld " // rss
+ "%lu %"KLF"u %"KLF"u %"KLF"u %"KLF"u %"KLF"u "
+ /*
+ "%*s %*s %*s %*s "
+ "%"KLF"u %*lu %*lu "
+ "%d %d "
+ "%lu %lu"
+ */
+
+ ,
+
+ &_pid,
+ _comm,
+ &_state,
+ &_ppid, &_pgrp, &_session, &_tty, &_tpgid,
+ &_flags, &_min_flt, &_cmin_flt, &_maj_flt, &_cmaj_flt,
+ &_utime, &_stime, &_cutime, &_cstime,
+ &_priority, &_nice,
+ &_alarm,
+ &_nlwp,
+ &_start_time,
+ &_vsize,
+ &_rss,
+ &_rss_rlim, &_start_code, &_end_code, &_start_stack, &_kstk_esp, &_kstk_eip
+
+ /*
+ &_wchan,
+ &_exit_signal, &_processor,
+ &_rtprio, &_sched
+ */
+ );
+ if ( found == 0 ) {
cout << "system error: reading proc info" << endl;
}
fclose( f );
}
-
- unsigned long getVirtualMemorySize(){
+
+ unsigned long getVirtualMemorySize() {
return _vsize;
}
-
- unsigned long getResidentSize(){
+
+ unsigned long getResidentSize() {
return (unsigned long)_rss * 4 * 1024;
}
-
- int _pid;
+
+ int _pid;
// The process ID.
-
- char _comm[128];
+
+ char _comm[128];
// The filename of the executable, in parentheses. This is visible whether or not the executable is swapped out.
-
+
char _state;
//One character from the string "RSDZTW" where R is running, S is sleeping in an interruptible wait, D is waiting in uninterruptible
// disk sleep, Z is zombie, T is traced or stopped (on a signal), and W is paging.
-
+
int _ppid;
// The PID of the parent.
-
+
int _pgrp;
// The process group ID of the process.
-
+
int _session;
// The session ID of the process.
-
+
int _tty;
// The tty the process uses.
-
+
int _tpgid;
// The process group ID of the process which currently owns the tty that the process is connected to.
-
+
unsigned long _flags; // %lu
// The kernel flags word of the process. For bit meanings, see the PF_* defines in <linux/sched.h>. Details depend on the kernel version.
-
+
unsigned long _min_flt; // %lu
// The number of minor faults the process has made which have not required loading a memory page from disk.
-
+
unsigned long _cmin_flt; // %lu
    // The number of minor faults that the process's waited-for children have made.
-
+
unsigned long _maj_flt; // %lu
// The number of major faults the process has made which have required loading a memory page from disk.
-
+
unsigned long _cmaj_flt; // %lu
    // The number of major faults that the process's waited-for children have made.
-
+
unsigned long _utime; // %lu
// The number of jiffies that this process has been scheduled in user mode.
-
+
unsigned long _stime; // %lu
// The number of jiffies that this process has been scheduled in kernel mode.
-
+
long _cutime; // %ld
    // The number of jiffies that this process's waited-for children have been scheduled in user mode.
-
+
long _cstime; // %ld
-
+
long _priority;
long _nice;
-
+
long _nlwp; // %ld
// The time in jiffies before the next SIGALRM is sent to the process due to an interval timer.
-
+
unsigned long _alarm;
-
+
unsigned long _start_time; // %lu
// The time in jiffies the process started after system boot.
-
+
unsigned long _vsize; // %lu
// Virtual memory size in bytes.
-
+
long _rss; // %ld
// Resident Set Size: number of pages the process has in real memory, minus 3 for administrative purposes. This is just the pages which
// count towards text, data, or stack space. This does not include pages which have not been demand-loaded in, or which are swapped out
-
+
unsigned long _rss_rlim; // %lu
// Current limit in bytes on the rss of the process (usually 4294967295 on i386).
-
+
unsigned long _start_code; // %lu
// The address above which program text can run.
-
+
unsigned long _end_code; // %lu
// The address below which program text can run.
-
+
unsigned long _start_stack; // %lu
// The address of the start of the stack.
-
+
unsigned long _kstk_esp; // %lu
// The current value of esp (stack pointer), as found in the kernel stack page for the process.
-
+
unsigned long _kstk_eip; // %lu
// The current EIP (instruction pointer).
-
-
-
+
+
+
};
- ProcessInfo::ProcessInfo( pid_t pid ) : _pid( pid ){
+ ProcessInfo::ProcessInfo( pid_t pid ) : _pid( pid ) {
}
- ProcessInfo::~ProcessInfo(){
+ ProcessInfo::~ProcessInfo() {
}
- bool ProcessInfo::supported(){
+ bool ProcessInfo::supported() {
return true;
}
-
- int ProcessInfo::getVirtualMemorySize(){
+
+ int ProcessInfo::getVirtualMemorySize() {
LinuxProc p(_pid);
return (int)( p.getVirtualMemorySize() / ( 1024.0 * 1024 ) );
}
-
- int ProcessInfo::getResidentSize(){
+
+ int ProcessInfo::getResidentSize() {
LinuxProc p(_pid);
return (int)( p.getResidentSize() / ( 1024.0 * 1024 ) );
}
- void ProcessInfo::getExtraInfo(BSONObjBuilder& info){
+ void ProcessInfo::getExtraInfo(BSONObjBuilder& info) {
struct mallinfo malloc_info = mallinfo(); // structure has same name as function that returns it. (see malloc.h)
info.append("heap_usage_bytes", malloc_info.uordblks);
@@ -220,18 +220,18 @@ namespace mongo {
info.append("page_faults", (int)p._maj_flt);
}
- bool ProcessInfo::blockCheckSupported(){
+ bool ProcessInfo::blockCheckSupported() {
return true;
}
-
- bool ProcessInfo::blockInMemory( char * start ){
+
+ bool ProcessInfo::blockInMemory( char * start ) {
static long pageSize = 0;
- if ( pageSize == 0 ){
+ if ( pageSize == 0 ) {
pageSize = sysconf( _SC_PAGESIZE );
}
start = start - ( (unsigned long long)start % pageSize );
unsigned char x = 0;
- if ( mincore( start , 128 , &x ) ){
+ if ( mincore( start , 128 , &x ) ) {
log() << "mincore failed: " << errnoWithDescription() << endl;
return 1;
}
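
A minimal standalone version of the same parse, pulling out just vsize and rss (fields 23 and 24 of /proc/<pid>/stat). Assumptions: Linux, and a comm field without embedded spaces, since %*s stops at whitespace -- a robust parser would scan to the closing parenthesis instead:

    #include <cstdio>

    int main() {
        std::FILE* f = std::fopen("/proc/self/stat", "r");
        if (!f) return 1;

        unsigned long vsize = 0;   // field 23: virtual size in bytes
        long rss = 0;              // field 24: resident size in pages

        // %*... suppresses a field; 22 fields are skipped before vsize
        int found = std::fscanf(f,
            "%*d %*s %*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u "
            "%*u %*u %*d %*d %*d %*d %*d %*d %*u %lu %ld",
            &vsize, &rss);
        std::fclose(f);
        if (found != 2) return 1;

        // rss is in pages; LinuxProc above converts assuming 4 KB pages
        std::printf("vsize=%lu bytes rss=%ld pages\n", vsize, rss);
        return 0;
    }
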
diff --git a/util/processinfo_none.cpp b/util/processinfo_none.cpp
index b54cb13cdb4..7d1e84d377c 100644
--- a/util/processinfo_none.cpp
+++ b/util/processinfo_none.cpp
@@ -22,32 +22,32 @@
using namespace std;
namespace mongo {
-
- ProcessInfo::ProcessInfo( pid_t pid ){
+
+ ProcessInfo::ProcessInfo( pid_t pid ) {
}
- ProcessInfo::~ProcessInfo(){
+ ProcessInfo::~ProcessInfo() {
}
- bool ProcessInfo::supported(){
+ bool ProcessInfo::supported() {
return false;
}
-
- int ProcessInfo::getVirtualMemorySize(){
+
+ int ProcessInfo::getVirtualMemorySize() {
return -1;
}
-
- int ProcessInfo::getResidentSize(){
+
+ int ProcessInfo::getResidentSize() {
return -1;
}
void ProcessInfo::getExtraInfo(BSONObjBuilder& info) {}
-
- bool ProcessInfo::blockCheckSupported(){
+
+ bool ProcessInfo::blockCheckSupported() {
return false;
}
-
- bool ProcessInfo::blockInMemory( char * start ){
+
+ bool ProcessInfo::blockInMemory( char * start ) {
assert(0);
return true;
}
diff --git a/util/processinfo_win32.cpp b/util/processinfo_win32.cpp
index 5fc6ab51809..d62b21ba435 100644
--- a/util/processinfo_win32.cpp
+++ b/util/processinfo_win32.cpp
@@ -25,27 +25,27 @@
using namespace std;
-int getpid(){
+int getpid() {
return GetCurrentProcessId();
}
namespace mongo {
-
- int _wconvertmtos( SIZE_T s ){
+
+ int _wconvertmtos( SIZE_T s ) {
return (int)( s / ( 1024 * 1024 ) );
}
-
- ProcessInfo::ProcessInfo( pid_t pid ){
+
+ ProcessInfo::ProcessInfo( pid_t pid ) {
}
- ProcessInfo::~ProcessInfo(){
+ ProcessInfo::~ProcessInfo() {
}
- bool ProcessInfo::supported(){
+ bool ProcessInfo::supported() {
return true;
}
-
- int ProcessInfo::getVirtualMemorySize(){
+
+ int ProcessInfo::getVirtualMemorySize() {
MEMORYSTATUSEX mse;
mse.dwLength = sizeof(mse);
assert( GlobalMemoryStatusEx( &mse ) );
@@ -53,8 +53,8 @@ namespace mongo {
assert( x <= 0x7fffffff );
return (int) x;
}
-
- int ProcessInfo::getResidentSize(){
+
+ int ProcessInfo::getResidentSize() {
PROCESS_MEMORY_COUNTERS pmc;
assert( GetProcessMemoryInfo( GetCurrentProcess() , &pmc, sizeof(pmc) ) );
return _wconvertmtos( pmc.WorkingSetSize );
@@ -62,11 +62,11 @@ namespace mongo {
void ProcessInfo::getExtraInfo(BSONObjBuilder& info) {}
- bool ProcessInfo::blockCheckSupported(){
+ bool ProcessInfo::blockCheckSupported() {
return false;
}
-
- bool ProcessInfo::blockInMemory( char * start ){
+
+ bool ProcessInfo::blockInMemory( char * start ) {
assert(0);
return true;
}
diff --git a/util/queue.h b/util/queue.h
index 28af09b934f..6a1e33a302f 100644
--- a/util/queue.h
+++ b/util/queue.h
@@ -24,7 +24,7 @@
#include "../util/timer.h"
namespace mongo {
-
+
/**
* simple blocking queue
*/
@@ -32,47 +32,47 @@ namespace mongo {
public:
BlockingQueue() : _lock("BlockingQueue") { }
- void push(T const& t){
+ void push(T const& t) {
scoped_lock l( _lock );
_queue.push( t );
_condition.notify_one();
}
-
+
bool empty() const {
scoped_lock l( _lock );
return _queue.empty();
}
-
- bool tryPop( T & t ){
+
+ bool tryPop( T & t ) {
scoped_lock l( _lock );
if ( _queue.empty() )
return false;
-
+
t = _queue.front();
_queue.pop();
-
+
return true;
}
-
- T blockingPop(){
+
+ T blockingPop() {
scoped_lock l( _lock );
while( _queue.empty() )
_condition.wait( l.boost() );
-
+
T t = _queue.front();
_queue.pop();
- return t;
+ return t;
}
-
+
/**
* blocks waiting for an object until maxSecondsToWait passes
* if got one, return true and set in t
* otherwise return false and t won't be changed
*/
- bool blockingPop( T& t , int maxSecondsToWait ){
-
+ bool blockingPop( T& t , int maxSecondsToWait ) {
+
Timer timer;
boost::xtime xt;
@@ -80,19 +80,19 @@ namespace mongo {
xt.sec += maxSecondsToWait;
scoped_lock l( _lock );
- while( _queue.empty() ){
+ while( _queue.empty() ) {
if ( ! _condition.timed_wait( l.boost() , xt ) )
return false;
}
-
+
t = _queue.front();
_queue.pop();
return true;
}
-
+
private:
std::queue<T> _queue;
-
+
mutable mongo::mutex _lock;
boost::condition _condition;
};
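
Typical producer/consumer use of the queue -- a sketch in which std::thread stands in for the boost threading the class itself is built on, and the template declaration is assumed from above this hunk:

    #include <iostream>
    #include <thread>
    #include "queue.h"   // include path is an assumption

    int main() {
        mongo::BlockingQueue<int> q;

        std::thread producer([&] {
            for (int i = 0; i < 3; i++)
                q.push(i);                              // notify_one wakes a waiter
        });
        std::thread consumer([&] {
            for (int n = 0; n < 3; n++)
                std::cout << q.blockingPop() << '\n';   // blocks while empty
        });

        producer.join();
        consumer.join();
        return 0;
    }
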
diff --git a/util/ramlog.h b/util/ramlog.h
index 393527dd2e9..fc588e635ca 100644
--- a/util/ramlog.h
+++ b/util/ramlog.h
@@ -23,7 +23,7 @@
namespace mongo {
class RamLog : public Tee {
- enum {
+ enum {
N = 128,
C = 256
};
@@ -31,7 +31,7 @@ namespace mongo {
unsigned h, n;
public:
- RamLog() {
+ RamLog() {
h = 0; n = 0;
for( int i = 0; i < N; i++ )
lines[i][C-1] = 0;
@@ -52,7 +52,7 @@ namespace mongo {
v.push_back(lines[i]);
}
- static int repeats(const vector<const char *>& v, int i) {
+ static int repeats(const vector<const char *>& v, int i) {
for( int j = i-1; j >= 0 && j+8 > i; j-- ) {
if( strcmp(v[i]+20,v[j]+20) == 0 ) {
for( int x = 1; ; x++ ) {
@@ -67,14 +67,14 @@ namespace mongo {
}
- static string clean(const vector<const char *>& v, int i, string line="") {
+ static string clean(const vector<const char *>& v, int i, string line="") {
if( line.empty() ) line = v[i];
if( i > 0 && strncmp(v[i], v[i-1], 11) == 0 )
return string(" ") + line.substr(11);
return v[i];
}
- static string color(string line) {
+ static string color(string line) {
string s = str::after(line, "replSet ");
if( str::startsWith(s, "warning") || startsWith(s, "error") )
return html::red(line);
@@ -85,16 +85,16 @@ namespace mongo {
return html::yellow(line);
return line; //html::blue(line);
}
-
+
return line;
}
/* turn http:... into an anchor */
- string linkify(const char *s) {
+ string linkify(const char *s) {
const char *p = s;
const char *h = strstr(p, "http://");
if( h == 0 ) return s;
-
+
const char *sp = h + 7;
while( *sp && *sp != ' ' ) sp++;
@@ -115,15 +115,15 @@ namespace mongo {
int r = repeats(v, i);
if( r < 0 ) {
s << color( linkify( clean(v,i).c_str() ) );
- }
+ }
else {
stringstream x;
x << string(v[i], 0, 20);
int nr = (i-r);
int last = i+nr-1;
for( ; r < i ; r++ ) x << '.';
- if( 1 ) {
- stringstream r;
+ if( 1 ) {
+ stringstream r;
if( nr == 1 ) r << "repeat last line";
else r << "repeats last " << nr << " lines; ends " << string(v[last]+4,0,15);
first = false; s << html::a("", r.str(), clean(v,i,x.str()));
@@ -135,7 +135,7 @@ namespace mongo {
}
s << "</pre>\n";
}
-
+
};
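
The enum above sizes a fixed ring of N lines, C bytes each, with h and n tracking the write position and fill level. The write path isn't in this hunk, so the indexing below is a guessed sketch of the scheme, not RamLog's actual code:

    #include <cstring>
    #include <cstdio>

    enum { N = 4, C = 16 };        // tiny sizes for the demo
    char lines[N][C];
    unsigned h = 0, n = 0;         // h = next slot, n = lines stored

    void put( const char * msg ) {
        strncpy( lines[h] , msg , C-1 );
        lines[h][C-1] = 0;         // same terminator invariant as the ctor above
        h = ( h + 1 ) % N;         // wrap; the oldest line gets overwritten
        if ( n < N ) n++;
    }

    int main() {
        const char * msgs[] = { "a", "b", "c", "d", "e" };
        for ( int i = 0; i < 5; i++ ) put( msgs[i] );
        for ( unsigned i = 0; i < n; i++ )
            printf( "%s\n" , lines[(h+i)%N] );   // oldest first: b c d e
        return 0;
    }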
diff --git a/util/signal_handlers.cpp b/util/signal_handlers.cpp
index 5eb28cd7dba..0e9ec7a9b15 100644
--- a/util/signal_handlers.cpp
+++ b/util/signal_handlers.cpp
@@ -35,88 +35,88 @@
namespace mongo {
-/*
- * WARNING: PLEASE READ BEFORE CHANGING THIS MODULE
- *
- * All code in this module should be singal-friendly. Before adding any system
- * call or other dependency, please make sure the latter still holds.
- *
- */
-
-static int rawWrite( int fd , char* c , int size ){
+ /*
+ * WARNING: PLEASE READ BEFORE CHANGING THIS MODULE
+ *
+     * All code in this module should be signal-friendly. Before adding any system
+ * call or other dependency, please make sure the latter still holds.
+ *
+ */
+
+ static int rawWrite( int fd , char* c , int size ) {
#if !defined(_WIN32)
- int toWrite = size;
- int writePos = 0;
- int wrote;
- while ( toWrite > 0 ){
- wrote = write( fd , &c[writePos] , toWrite );
- if ( wrote < 1 ) break;
- toWrite -= wrote;
- writePos += wrote;
- }
- return writePos;
+ int toWrite = size;
+ int writePos = 0;
+ int wrote;
+ while ( toWrite > 0 ) {
+ wrote = write( fd , &c[writePos] , toWrite );
+ if ( wrote < 1 ) break;
+ toWrite -= wrote;
+ writePos += wrote;
+ }
+ return writePos;
#else
- return -1;
+ return -1;
#endif
-}
-
-static int formattedWrite( int fd , const char* format, ... ){
- const int MAX_ENTRY = 256;
- static char entryBuf[MAX_ENTRY];
-
- va_list ap;
- va_start( ap , format );
- int entrySize = vsnprintf( entryBuf , MAX_ENTRY-1 , format , ap );
- if ( entrySize < 0 ){
- return -1;
}
- if ( rawWrite( fd , entryBuf , entrySize ) < 0 ){
- return -1;
- }
+ static int formattedWrite( int fd , const char* format, ... ) {
+ const int MAX_ENTRY = 256;
+ static char entryBuf[MAX_ENTRY];
+
+ va_list ap;
+ va_start( ap , format );
+ int entrySize = vsnprintf( entryBuf , MAX_ENTRY-1 , format , ap );
+ if ( entrySize < 0 ) {
+ return -1;
+ }
- return 0;
-}
+ if ( rawWrite( fd , entryBuf , entrySize ) < 0 ) {
+ return -1;
+ }
-static void formattedBacktrace( int fd ){
+ return 0;
+ }
+
+ static void formattedBacktrace( int fd ) {
#if !defined(_WIN32) && !defined(NOEXECINFO)
- int numFrames;
- const int MAX_DEPTH = 20;
- void* stackFrames[MAX_DEPTH];
+ int numFrames;
+ const int MAX_DEPTH = 20;
+ void* stackFrames[MAX_DEPTH];
- numFrames = backtrace( stackFrames , 20 );
- for ( int i = 0; i < numFrames; i++ ){
- formattedWrite( fd , "%p " , stackFrames[i] );
- }
- formattedWrite( fd , "\n" );
+ numFrames = backtrace( stackFrames , 20 );
+ for ( int i = 0; i < numFrames; i++ ) {
+ formattedWrite( fd , "%p " , stackFrames[i] );
+ }
+ formattedWrite( fd , "\n" );
- backtrace_symbols_fd( stackFrames , numFrames , fd );
+ backtrace_symbols_fd( stackFrames , numFrames , fd );
#else
- formattedWrite( fd, "backtracing not implemented for this platform yet\n" );
+ formattedWrite( fd, "backtracing not implemented for this platform yet\n" );
#endif
-}
+ }
-void printStackAndExit( int signalNum ){
- int fd = Logstream::getLogDesc();
+ void printStackAndExit( int signalNum ) {
+ int fd = Logstream::getLogDesc();
- if ( fd >= 0 ){
- formattedWrite( fd , "Received signal %d\n" , signalNum );
- formattedWrite( fd , "Backtrace: " );
- formattedBacktrace( fd );
- formattedWrite( fd , "===\n" );
- }
+ if ( fd >= 0 ) {
+ formattedWrite( fd , "Received signal %d\n" , signalNum );
+ formattedWrite( fd , "Backtrace: " );
+ formattedBacktrace( fd );
+ formattedWrite( fd , "===\n" );
+ }
- ::exit( EXIT_ABRUPT );
-}
+ ::exit( EXIT_ABRUPT );
+ }
} // namespace mongo
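
printStackAndExit has the void(int) shape a signal handler needs; installing it might look like the sketch below (the registration itself is not part of this commit, and the signal set is illustrative, POSIX-flavored):

    #include <csignal>

    namespace mongo { void printStackAndExit( int signalNum ); }

    int main() {
        // route fatal signals through the signal-safe dumper above
        signal( SIGSEGV , mongo::printStackAndExit );
        signal( SIGABRT , mongo::printStackAndExit );
        // ... run the server ...
        return 0;
    }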
diff --git a/util/signal_handlers.h b/util/signal_handlers.h
index 917c6acf5b3..9d3a735a723 100644
--- a/util/signal_handlers.h
+++ b/util/signal_handlers.h
@@ -23,7 +23,7 @@
namespace mongo {
/**
- * Obtains the log file handler and writes the current thread's stack trace to
+     * Obtains the log file handle and writes the current thread's stack trace to
* it. This call issues an exit(). The function can safely be called from within a
* signal handler.
*
diff --git a/util/sock.cpp b/util/sock.cpp
index 8f26c679c9c..ef3ed0e97ca 100644
--- a/util/sock.cpp
+++ b/util/sock.cpp
@@ -26,7 +26,7 @@ namespace mongo {
void enableIPv6(bool state) { ipv6 = state; }
bool IPv6Enabled() { return ipv6; }
- string getAddrInfoStrError(int code) {
+ string getAddrInfoStrError(int code) {
#if !defined(_WIN32)
return gai_strerror(code);
#else
@@ -47,7 +47,7 @@ namespace mongo {
if (!strcmp(iporhost, "localhost"))
iporhost = "127.0.0.1";
- if (strchr(iporhost, '/')){
+ if (strchr(iporhost, '/')) {
#ifdef _WIN32
uassert(13080, "no unix socket support on windows", false);
#endif
@@ -55,7 +55,8 @@ namespace mongo {
as<sockaddr_un>().sun_family = AF_UNIX;
strcpy(as<sockaddr_un>().sun_path, iporhost);
addressSize = sizeof(sockaddr_un);
- }else{
+ }
+ else {
addrinfo* addrs = NULL;
addrinfo hints;
memset(&hints, 0, sizeof(addrinfo));
@@ -74,16 +75,17 @@ namespace mongo {
#else
int nodata = false;
#endif
- if (ret == EAI_NONAME || nodata){
+ if (ret == EAI_NONAME || nodata) {
// iporhost isn't an IP address, allow DNS lookup
hints.ai_flags &= ~AI_NUMERICHOST;
ret = getaddrinfo(iporhost, ss.str().c_str(), &hints, &addrs);
}
- if (ret){
+ if (ret) {
log() << "getaddrinfo(\"" << iporhost << "\") failed: " << gai_strerror(ret) << endl;
- *this = SockAddr(port);
- }else{
+ *this = SockAddr(port);
+ }
+ else {
//TODO: handle other addresses in linked list;
assert(addrs->ai_addrlen <= sizeof(sa));
memcpy(&sa, addrs->ai_addr, addrs->ai_addrlen);
@@ -92,13 +94,13 @@ namespace mongo {
}
}
}
-
+
bool SockAddr::isLocalHost() const {
- switch (getType()){
- case AF_INET: return getAddr() == "127.0.0.1";
- case AF_INET6: return getAddr() == "::1";
- case AF_UNIX: return true;
- default: return false;
+ switch (getType()) {
+ case AF_INET: return getAddr() == "127.0.0.1";
+ case AF_INET6: return getAddr() == "::1";
+ case AF_UNIX: return true;
+ default: return false;
}
assert(false);
return false;
@@ -214,18 +216,18 @@ namespace mongo {
SockAddr unknownAddress( "0.0.0.0", 0 );
ListeningSockets* ListeningSockets::_instance = new ListeningSockets();
-
- ListeningSockets* ListeningSockets::get(){
+
+ ListeningSockets* ListeningSockets::get() {
return _instance;
}
string _hostNameCached;
- static void _hostNameCachedInit(){
+ static void _hostNameCachedInit() {
_hostNameCached = getHostName();
}
boost::once_flag _hostNameCachedInitFlags = BOOST_ONCE_INIT;
- string getHostNameCached(){
+ string getHostNameCached() {
boost::call_once( _hostNameCachedInit , _hostNameCachedInitFlags );
return _hostNameCached;
}
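
getHostNameCached above is the classic once-flag memoization: the first caller pays for the lookup, everyone else reads the cached string. The same pattern with the C++11 equivalents (std::call_once standing in for boost::call_once):

    #include <mutex>
    #include <string>

    static std::string _cached;
    static std::once_flag _flag;

    static void _init() { _cached = "expensive lookup result"; }  // stands in for getHostName()

    std::string getCached() {
        std::call_once( _flag , _init );   // runs _init exactly once, thread-safely
        return _cached;
    }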
diff --git a/util/sock.h b/util/sock.h
index 7f63966102c..98f307375b8 100644
--- a/util/sock.h
+++ b/util/sock.h
@@ -106,7 +106,7 @@ namespace mongo {
#endif
- inline string makeUnixSockPath(int port){
+ inline string makeUnixSockPath(int port) {
return "/tmp/mongodb-" + BSONObjBuilder::numStr(port) + ".sock";
}
@@ -143,7 +143,7 @@ namespace mongo {
template <typename T>
const T& as() const { return *(const T*)(&sa); }
- string toString(bool includePort=true) const{
+ string toString(bool includePort=true) const {
string out = getAddr();
if (includePort && getType() != AF_UNIX && getType() != AF_UNSPEC)
out += ':' + BSONObjBuilder::numStr(getPort());
@@ -156,34 +156,34 @@ namespace mongo {
}
unsigned getPort() const {
- switch (getType()){
- case AF_INET: return ntohs(as<sockaddr_in>().sin_port);
- case AF_INET6: return ntohs(as<sockaddr_in6>().sin6_port);
- case AF_UNIX: return 0;
- case AF_UNSPEC: return 0;
- default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false); return 0;
+ switch (getType()) {
+ case AF_INET: return ntohs(as<sockaddr_in>().sin_port);
+ case AF_INET6: return ntohs(as<sockaddr_in6>().sin6_port);
+ case AF_UNIX: return 0;
+ case AF_UNSPEC: return 0;
+ default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false); return 0;
}
}
string getAddr() const {
- switch (getType()){
- case AF_INET:
- case AF_INET6: {
- const int buflen=128;
- char buffer[buflen];
- int ret = getnameinfo(raw(), addressSize, buffer, buflen, NULL, 0, NI_NUMERICHOST);
- massert(13082, getAddrInfoStrError(ret), ret == 0);
- return buffer;
- }
-
- case AF_UNIX: return (addressSize > 2 ? as<sockaddr_un>().sun_path : "anonymous unix socket");
- case AF_UNSPEC: return "(NONE)";
- default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false); return "";
+ switch (getType()) {
+ case AF_INET:
+ case AF_INET6: {
+ const int buflen=128;
+ char buffer[buflen];
+ int ret = getnameinfo(raw(), addressSize, buffer, buflen, NULL, 0, NI_NUMERICHOST);
+ massert(13082, getAddrInfoStrError(ret), ret == 0);
+ return buffer;
+ }
+
+ case AF_UNIX: return (addressSize > 2 ? as<sockaddr_un>().sun_path : "anonymous unix socket");
+ case AF_UNSPEC: return "(NONE)";
+ default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false); return "";
}
}
bool isLocalHost() const;
-
+
bool operator==(const SockAddr& r) const {
if (getType() != r.getType())
return false;
@@ -191,12 +191,12 @@ namespace mongo {
if (getPort() != r.getPort())
return false;
- switch (getType()){
- case AF_INET: return as<sockaddr_in>().sin_addr.s_addr == r.as<sockaddr_in>().sin_addr.s_addr;
- case AF_INET6: return memcmp(as<sockaddr_in6>().sin6_addr.s6_addr, r.as<sockaddr_in6>().sin6_addr.s6_addr, sizeof(in6_addr)) == 0;
- case AF_UNIX: return strcmp(as<sockaddr_un>().sun_path, r.as<sockaddr_un>().sun_path) == 0;
- case AF_UNSPEC: return true; // assume all unspecified addresses are the same
- default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false);
+ switch (getType()) {
+ case AF_INET: return as<sockaddr_in>().sin_addr.s_addr == r.as<sockaddr_in>().sin_addr.s_addr;
+ case AF_INET6: return memcmp(as<sockaddr_in6>().sin6_addr.s6_addr, r.as<sockaddr_in6>().sin6_addr.s6_addr, sizeof(in6_addr)) == 0;
+ case AF_UNIX: return strcmp(as<sockaddr_un>().sun_path, r.as<sockaddr_un>().sun_path) == 0;
+ case AF_UNSPEC: return true; // assume all unspecified addresses are the same
+ default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false);
}
}
bool operator!=(const SockAddr& r) const {
@@ -213,12 +213,12 @@ namespace mongo {
else if (getPort() > r.getPort())
return false;
- switch (getType()){
- case AF_INET: return as<sockaddr_in>().sin_addr.s_addr < r.as<sockaddr_in>().sin_addr.s_addr;
- case AF_INET6: return memcmp(as<sockaddr_in6>().sin6_addr.s6_addr, r.as<sockaddr_in6>().sin6_addr.s6_addr, sizeof(in6_addr)) < 0;
- case AF_UNIX: return strcmp(as<sockaddr_un>().sun_path, r.as<sockaddr_un>().sun_path) < 0;
- case AF_UNSPEC: return false;
- default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false);
+ switch (getType()) {
+ case AF_INET: return as<sockaddr_in>().sin_addr.s_addr < r.as<sockaddr_in>().sin_addr.s_addr;
+ case AF_INET6: return memcmp(as<sockaddr_in6>().sin6_addr.s6_addr, r.as<sockaddr_in6>().sin6_addr.s6_addr, sizeof(in6_addr)) < 0;
+ case AF_UNIX: return strcmp(as<sockaddr_un>().sun_path, r.as<sockaddr_un>().sun_path) < 0;
+ case AF_UNSPEC: return false;
+ default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false);
}
}
@@ -226,7 +226,7 @@ namespace mongo {
sockaddr* raw() {return (sockaddr*)&sa;}
socklen_t addressSize;
- private:
+ private:
struct sockaddr_storage sa;
};
@@ -248,24 +248,24 @@ namespace mongo {
class ListeningSockets {
public:
- ListeningSockets()
+ ListeningSockets()
: _mutex("ListeningSockets")
, _sockets( new set<int>() )
, _socketPaths( new set<string>() )
{ }
- void add( int sock ){
+ void add( int sock ) {
scoped_lock lk( _mutex );
_sockets->insert( sock );
}
- void addPath( string path ){
+ void addPath( string path ) {
scoped_lock lk( _mutex );
_socketPaths->insert( path );
}
- void remove( int sock ){
+ void remove( int sock ) {
scoped_lock lk( _mutex );
_sockets->erase( sock );
}
- void closeAll(){
+ void closeAll() {
set<int>* sockets;
set<string>* paths;
@@ -281,13 +281,13 @@ namespace mongo {
int sock = *i;
log() << "closing listening socket: " << sock << endl;
closesocket( sock );
- }
+ }
for ( set<string>::iterator i=paths->begin(); i!=paths->end(); i++ ) {
string path = *i;
log() << "removing socket file: " << path << endl;
::remove( path.c_str() );
- }
+ }
}
static ListeningSockets* get();
private:
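
A usage sketch for the SockAddr surface reformatted above; the (host, port) constructor shape is taken from the unknownAddress line in sock.cpp, and the include path is an assumption:

    #include "util/sock.h"   // assumed path
    #include <iostream>

    int main() {
        mongo::SockAddr a( "127.0.0.1" , 27017 );
        mongo::SockAddr b( "::1" , 27017 );

        std::cout << a.toString() << std::endl;      // "127.0.0.1:27017"
        std::cout << a.isLocalHost() << std::endl;   // 1: AF_INET loopback
        std::cout << b.isLocalHost() << std::endl;   // 1: AF_INET6 loopback
        std::cout << ( a == b ) << std::endl;        // 0: families differ
        return 0;
    }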
diff --git a/util/stringutils.cpp b/util/stringutils.cpp
index 3f989fd8560..229f57bb3cb 100644
--- a/util/stringutils.cpp
+++ b/util/stringutils.cpp
@@ -20,13 +20,13 @@
namespace mongo {
- void splitStringDelim( const string& str , vector<string>* res , char delim ){
+ void splitStringDelim( const string& str , vector<string>* res , char delim ) {
if ( str.empty() )
return;
size_t beg = 0;
size_t pos = str.find( delim );
- while ( pos != string::npos ){
+ while ( pos != string::npos ) {
res->push_back( str.substr( beg, pos - beg) );
beg = ++pos;
pos = str.find( delim, beg );
@@ -34,8 +34,8 @@ namespace mongo {
res->push_back( str.substr( beg ) );
}
- void joinStringDelim( const vector<string>& strs , string* res , char delim ){
- for ( vector<string>::const_iterator it = strs.begin(); it != strs.end(); ++it ){
+ void joinStringDelim( const vector<string>& strs , string* res , char delim ) {
+ for ( vector<string>::const_iterator it = strs.begin(); it != strs.end(); ++it ) {
if ( it !=strs.begin() ) res->push_back( delim );
res->append( *it );
}
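
The two helpers round-trip, including empty fields. A sketch using only the signatures in this file:

    #include <string>
    #include <vector>
    #include <iostream>

    namespace mongo {
        void splitStringDelim( const std::string& str , std::vector<std::string>* res , char delim );
        void joinStringDelim( const std::vector<std::string>& strs , std::string* res , char delim );
    }

    int main() {
        std::vector<std::string> parts;
        mongo::splitStringDelim( "a,b,,c" , &parts , ',' );   // "a" "b" "" "c"

        std::string joined;
        mongo::joinStringDelim( parts , &joined , ',' );      // back to "a,b,,c"
        std::cout << joined << std::endl;
        return 0;
    }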
diff --git a/util/stringutils.h b/util/stringutils.h
index 856315787a6..60571e6c125 100644
--- a/util/stringutils.h
+++ b/util/stringutils.h
@@ -26,20 +26,20 @@ namespace mongo {
void joinStringDelim( const vector<string>& strs , string* res , char delim );
- inline string tolowerString( const string& input ){
+ inline string tolowerString( const string& input ) {
string::size_type sz = input.size();
-
+
boost::scoped_array<char> line(new char[sz+1]);
char * copy = line.get();
-
- for ( string::size_type i=0; i<sz; i++ ){
+
+ for ( string::size_type i=0; i<sz; i++ ) {
char c = input[i];
copy[i] = (char)tolower( (int)c );
}
copy[sz] = 0;
return string(copy);
}
-
+
} // namespace mongo
#endif // UTIL_STRING_UTILS_HEADER
diff --git a/util/text.cpp b/util/text.cpp
index f381e01c12d..51a2556afdc 100644
--- a/util/text.cpp
+++ b/util/text.cpp
@@ -19,9 +19,9 @@
#include "text.h"
#include "unittest.h"
-namespace mongo{
+namespace mongo {
- inline int leadingOnes(unsigned char c){
+ inline int leadingOnes(unsigned char c) {
if (c < 0x80) return 0;
static const char _leadingOnes[128] = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x80 - 0x8F
@@ -32,24 +32,25 @@ namespace mongo{
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 0xD0 - 0xD9
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 0xE0 - 0xE9
4, 4, 4, 4, 4, 4, 4, 4, // 0xF0 - 0xF7
- 5, 5, 5, 5, // 0xF8 - 0xFB
- 6, 6, // 0xFC - 0xFD
- 7, // 0xFE
- 8, // 0xFF
+ 5, 5, 5, 5, // 0xF8 - 0xFB
+ 6, 6, // 0xFC - 0xFD
+ 7, // 0xFE
+ 8, // 0xFF
};
return _leadingOnes[c & 0x7f];
}
- bool isValidUTF8(const char *s){
+ bool isValidUTF8(const char *s) {
int left = 0; // how many bytes are left in the current codepoint
- while (*s){
+ while (*s) {
const unsigned char c = (unsigned char) *(s++);
const int ones = leadingOnes(c);
- if (left){
+ if (left) {
if (ones != 1) return false; // should be a continuation byte
left--;
- }else{
+ }
+ else {
if (ones == 0) continue; // ASCII byte
if (ones == 1) return false; // unexpected continuation byte
if (c > 0xF4) return false; // codepoint too large (< 0x10FFFF)
@@ -61,53 +62,50 @@ namespace mongo{
}
if (left!=0) return false; // string ended mid-codepoint
return true;
- }
-
- #if defined(_WIN32)
-
- std::string toUtf8String(const std::wstring& wide)
- {
- if (wide.size() > boost::integer_traits<int>::const_max)
- throw std::length_error(
- "Wide string cannot be more than INT_MAX characters long.");
- if (wide.size() == 0)
- return "";
-
- // Calculate necessary buffer size
- int len = ::WideCharToMultiByte(
- CP_UTF8, 0, wide.c_str(), static_cast<int>(wide.size()),
- NULL, 0, NULL, NULL);
-
- // Perform actual conversion
- if (len > 0)
- {
- std::vector<char> buffer(len);
- len = ::WideCharToMultiByte(
- CP_UTF8, 0, wide.c_str(), static_cast<int>(wide.size()),
- &buffer[0], static_cast<int>(buffer.size()), NULL, NULL);
- if (len > 0)
- {
- assert(len == static_cast<int>(buffer.size()));
- return std::string(&buffer[0], buffer.size());
- }
- }
-
- throw boost::system::system_error(
- ::GetLastError(), boost::system::system_category);
- }
+ }
+
+#if defined(_WIN32)
+
+ std::string toUtf8String(const std::wstring& wide) {
+ if (wide.size() > boost::integer_traits<int>::const_max)
+ throw std::length_error(
+ "Wide string cannot be more than INT_MAX characters long.");
+ if (wide.size() == 0)
+ return "";
+
+ // Calculate necessary buffer size
+ int len = ::WideCharToMultiByte(
+ CP_UTF8, 0, wide.c_str(), static_cast<int>(wide.size()),
+ NULL, 0, NULL, NULL);
+
+ // Perform actual conversion
+ if (len > 0) {
+ std::vector<char> buffer(len);
+ len = ::WideCharToMultiByte(
+ CP_UTF8, 0, wide.c_str(), static_cast<int>(wide.size()),
+ &buffer[0], static_cast<int>(buffer.size()), NULL, NULL);
+ if (len > 0) {
+ assert(len == static_cast<int>(buffer.size()));
+ return std::string(&buffer[0], buffer.size());
+ }
+ }
+
+ throw boost::system::system_error(
+ ::GetLastError(), boost::system::system_category);
+ }
#if defined(_UNICODE)
- std::wstring toWideString(const char *s) {
+ std::wstring toWideString(const char *s) {
std::basic_ostringstream<TCHAR> buf;
buf << s;
return buf.str();
}
#endif
- #endif
+#endif
struct TextUnitTest : public UnitTest {
- void run() {
+ void run() {
assert( parseLL("123") == 123 );
assert( parseLL("-123000000000") == -123000000000LL );
}
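
The validator above works byte by byte: the count of leading one-bits in a lead byte gives the sequence length, and every continuation byte must have exactly one leading one. Spot checks against the function:

    #include <cassert>

    namespace mongo { bool isValidUTF8( const char *s ); }

    int main() {
        assert(   mongo::isValidUTF8( "plain ascii" ) );
        assert(   mongo::isValidUTF8( "\xC3\xA9" ) );          // U+00E9, two-byte sequence
        assert( ! mongo::isValidUTF8( "\xC3" ) );              // ends mid-codepoint
        assert( ! mongo::isValidUTF8( "\xA9" ) );              // stray continuation byte
        assert( ! mongo::isValidUTF8( "\xF5\x80\x80\x80" ) );  // lead byte above 0xF4
        return 0;
    }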
diff --git a/util/text.h b/util/text.h
index 3b311fa4ff4..fc2da214097 100644
--- a/util/text.h
+++ b/util/text.h
@@ -33,57 +33,57 @@
#pragma once
namespace mongo {
-
+
class StringSplitter {
public:
- /** @param big the string to be split
+ /** @param big the string to be split
@param splitter the delimiter
*/
StringSplitter( const char * big , const char * splitter )
- : _big( big ) , _splitter( splitter ){
+ : _big( big ) , _splitter( splitter ) {
}
/** @return true if more to be taken via next() */
- bool more(){
+ bool more() {
return _big[0];
}
/** get next split string fragment */
- string next(){
+ string next() {
const char * foo = strstr( _big , _splitter );
- if ( foo ){
+ if ( foo ) {
string s( _big , foo - _big );
_big = foo + 1;
while ( *_big && strstr( _big , _splitter ) == _big )
_big++;
return s;
}
-
+
string s = _big;
_big += strlen( _big );
return s;
}
-
- void split( vector<string>& l ){
- while ( more() ){
+
+ void split( vector<string>& l ) {
+ while ( more() ) {
l.push_back( next() );
}
}
-
- vector<string> split(){
+
+ vector<string> split() {
vector<string> l;
split( l );
return l;
}
- static vector<string> split( const string& big , const string& splitter ){
+ static vector<string> split( const string& big , const string& splitter ) {
StringSplitter ss( big.c_str() , splitter.c_str() );
return ss.split();
}
- static string join( vector<string>& l , const string& split ){
+ static string join( vector<string>& l , const string& split ) {
stringstream ss;
- for ( unsigned i=0; i<l.size(); i++ ){
+ for ( unsigned i=0; i<l.size(); i++ ) {
if ( i > 0 )
ss << split;
ss << l[i];
@@ -95,20 +95,20 @@ namespace mongo {
const char * _big;
const char * _splitter;
};
-
+
/* This doesn't defend against ALL bad UTF8, but it will guarantee that the
* string can be converted to sequence of codepoints. However, it doesn't
* guarantee that the codepoints are valid.
*/
bool isValidUTF8(const char *s);
- inline bool isValidUTF8(string s) { return isValidUTF8(s.c_str()); }
+ inline bool isValidUTF8(string s) { return isValidUTF8(s.c_str()); }
#if defined(_WIN32)
std::string toUtf8String(const std::wstring& wide);
std::wstring toWideString(const char *s);
-
+
/* like toWideString but UNICODE macro sensitive */
# if !defined(_UNICODE)
#error temp error
@@ -116,9 +116,9 @@ namespace mongo {
# else
inline std::wstring toNativeString(const char *s) { return toWideString(s); }
# endif
-
+
#endif
-
+
// expect that n contains a base ten number and nothing else after it
// NOTE win version hasn't been tested directly
inline long long parseLL( const char *n ) {
@@ -129,11 +129,12 @@ namespace mongo {
errno = 0;
ret = strtoll( n, &endPtr, 10 );
uassert( 13305, "could not convert string to long long", *endPtr == 0 && errno == 0 );
-#elif _MSC_VER>=1600 // 1600 is VS2k10 1500 is VS2k8
+#elif _MSC_VER>=1600 // 1600 is VS2k10 1500 is VS2k8
size_t endLen = 0;
try {
ret = stoll( n, &endLen, 10 );
- } catch ( ... ) {
+ }
+ catch ( ... ) {
endLen = 0;
}
uassert( 13306, "could not convert string to long long", endLen != 0 && n[ endLen ] == 0 );
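
Putting the pieces of this header together (include path assumed; the parseLL checks mirror TextUnitTest in text.cpp):

    #include "util/text.h"   // assumed path
    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
        std::vector<std::string> parts =
            mongo::StringSplitter::split( "a.b.c" , "." );    // "a" "b" "c"
        assert( parts.size() == 3 && parts[2] == "c" );

        std::string joined = mongo::StringSplitter::join( parts , "." );
        assert( joined == "a.b.c" );

        assert( mongo::parseLL( "123" ) == 123 );
        assert( mongo::parseLL( "-123000000000" ) == -123000000000LL );
        return 0;
    }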
diff --git a/util/time_support.h b/util/time_support.h
index 4e606b23f3d..e833a518a5c 100644
--- a/util/time_support.h
+++ b/util/time_support.h
@@ -42,7 +42,7 @@ namespace mongo {
// uses ISO 8601 dates without trailing Z
// colonsOk should be false when creating filenames
- inline string terseCurrentTime(bool colonsOk=true){
+ inline string terseCurrentTime(bool colonsOk=true) {
struct tm t;
time_t_to_Struct( time(0) , &t );
@@ -64,7 +64,7 @@ namespace mongo {
if ( 2 != sscanf( str.c_str() , "%d:%d" , &hh , &mm ) ) {
return false;
}
-
+
// verify that time is well formed
if ( ( hh / 24 ) || ( mm / 60 ) ) {
return false;
@@ -99,7 +99,7 @@ namespace mongo {
if ( xt.nsec >= 1000000000 ) {
xt.nsec -= 1000000000;
xt.sec++;
- }
+ }
boost::thread::sleep(xt);
}
inline void sleepmicros(long long s) {
@@ -112,7 +112,7 @@ namespace mongo {
if ( xt.nsec >= 1000000000 ) {
xt.nsec -= 1000000000;
xt.sec++;
- }
+ }
boost::thread::sleep(xt);
}
#else
@@ -120,7 +120,7 @@ namespace mongo {
struct timespec t;
t.tv_sec = s;
t.tv_nsec = 0;
- if ( nanosleep( &t , 0 ) ){
+ if ( nanosleep( &t , 0 ) ) {
cout << "nanosleep failed" << endl;
}
}
@@ -131,7 +131,7 @@ namespace mongo {
t.tv_sec = (int)(s / 1000000);
t.tv_nsec = 1000 * ( s % 1000000 );
struct timespec out;
- if ( nanosleep( &t , &out ) ){
+ if ( nanosleep( &t , &out ) ) {
cout << "nanosleep failed" << endl;
}
}
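
Both branches above normalize nanosecond overflow before sleeping; callers just pass a count. A minimal sketch, assuming the header path and that the helpers live in namespace mongo as the hunk headers indicate:

    #include "util/time_support.h"   // assumed path

    int main() {
        mongo::sleepmicros( 250000 );   // quarter second; nsec overflow handled above
        return 0;
    }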
diff --git a/util/timer.h b/util/timer.h
index cfd5ea295fc..f5a21f873d0 100644
--- a/util/timer.h
+++ b/util/timer.h
@@ -52,7 +52,7 @@ namespace mongo {
return n - old;
}
- unsigned long long startTime(){
+ unsigned long long startTime() {
return old;
}
diff --git a/util/unittest.h b/util/unittest.h
index 366ca2e16fc..94be444363f 100644
--- a/util/unittest.h
+++ b/util/unittest.h
@@ -26,7 +26,7 @@ namespace mongo {
To define a unit test, inherit from this and implement run. instantiate one object for the new class
as a global.
- These tests are ran on *every* startup of mongod, so they have to be very lightweight. But it is a
+       These tests are run on *every* startup of mongod, so they have to be very lightweight.  But it is a
good quick check for a bad build.
*/
struct UnitTest {
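
Following the comment above, a test is a struct with a run() plus one global instance (the utilTest and VersionCmpTest globals later in this diff use the same pattern; registration via the base constructor is assumed):

    #include "util/unittest.h"   // assumed path
    #include <cassert>

    namespace mongo {
        struct AdditionTest : public UnitTest {
            void run() {
                assert( 1 + 1 == 2 );   // must stay very lightweight
            }
        } additionTest;   // the global instance wires the test in
    }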
diff --git a/util/util.cpp b/util/util.cpp
index 63588dee7dd..adc1fcc9d7b 100644
--- a/util/util.cpp
+++ b/util/util.cpp
@@ -39,27 +39,26 @@ namespace mongo {
}
boost::thread_specific_ptr<string> _threadName;
-
- unsigned _setThreadName( const char * name ){
+
+ unsigned _setThreadName( const char * name ) {
static unsigned N = 0;
- if ( strcmp( name , "conn" ) == 0 ){
+ if ( strcmp( name , "conn" ) == 0 ) {
unsigned n = ++N;
stringstream ss;
ss << name << n;
_threadName.reset( new string( ss.str() ) );
return n;
}
-
- _threadName.reset( new string(name) );
+
+ _threadName.reset( new string(name) );
return 0;
}
#if defined(_WIN32)
#define MS_VC_EXCEPTION 0x406D1388
#pragma pack(push,8)
- typedef struct tagTHREADNAME_INFO
- {
+ typedef struct tagTHREADNAME_INFO {
DWORD dwType; // Must be 0x1000.
LPCSTR szName; // Pointer to name (in user addr space).
DWORD dwThreadID; // Thread ID (-1=caller thread).
@@ -67,7 +66,7 @@ namespace mongo {
} THREADNAME_INFO;
#pragma pack(pop)
- void setWinThreadName(const char *name) {
+ void setWinThreadName(const char *name) {
/* is the sleep here necessary???
Sleep(10);
*/
@@ -76,17 +75,14 @@ namespace mongo {
info.szName = name;
info.dwThreadID = -1;
info.dwFlags = 0;
- __try
- {
+ __try {
RaiseException( MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(ULONG_PTR), (ULONG_PTR*)&info );
}
- __except(EXCEPTION_EXECUTE_HANDLER)
- {
+ __except(EXCEPTION_EXECUTE_HANDLER) {
}
}
- unsigned setThreadName(const char *name)
- {
+ unsigned setThreadName(const char *name) {
unsigned n = _setThreadName( name );
#if !defined(_DEBUG)
// naming might be expensive so don't do "conn*" over and over
@@ -99,13 +95,13 @@ namespace mongo {
#else
- unsigned setThreadName(const char * name ) {
+ unsigned setThreadName(const char * name ) {
return _setThreadName( name );
}
#endif
- string getThreadName(){
+ string getThreadName() {
string * s = _threadName.get();
if ( s )
return *s;
@@ -122,7 +118,7 @@ namespace mongo {
int tlogLevel = 0;
mongo::mutex Logstream::mutex("Logstream");
int Logstream::doneSetup = Logstream::magicNumber();
-
+
bool isPrime(int n) {
int z = 2;
while ( 1 ) {
@@ -171,9 +167,9 @@ namespace mongo {
}
} utilTest;
-
+
OpTime OpTime::last(0, 0);
-
+
/* this is a good place to set a breakpoint when debugging, as lots of warning things
(assert, wassert) call it.
*/
@@ -201,11 +197,11 @@ namespace mongo {
Logstream::logLockless("\n");
}
- ostream& operator<<( ostream &s, const ThreadSafeString &o ){
+ ostream& operator<<( ostream &s, const ThreadSafeString &o ) {
s << o.toString();
return s;
}
bool StaticObserver::_destroyingStatics = false;
-
+
} // namespace mongo
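
The helpers above keep the name in thread-local storage, with "conn" getting an incrementing numeric suffix. Usage, with declarations copied from this file:

    #include <iostream>
    #include <string>

    namespace mongo {
        unsigned setThreadName( const char * name );
        std::string getThreadName();
    }

    int main() {
        unsigned n = mongo::setThreadName( "conn" );     // yields "conn1", "conn2", ...
        std::cout << mongo::getThreadName() << " (#" << n << ")" << std::endl;
        return 0;
    }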
diff --git a/util/version.cpp b/util/version.cpp
index 1bbc603abd4..11ddc2823bb 100644
--- a/util/version.cpp
+++ b/util/version.cpp
@@ -36,14 +36,14 @@ namespace mongo {
#ifndef _SCONS
// only works in scons
- const char * gitVersion(){ return "not-scons"; }
+ const char * gitVersion() { return "not-scons"; }
#endif
void printGitVersion() { log() << "git version: " << gitVersion() << endl; }
#ifndef _SCONS
#if defined(_WIN32)
- string sysInfo(){
+ string sysInfo() {
stringstream ss;
ss << "not-scons win";
ss << " mscver:" << _MSC_FULL_VER << " built:" << __DATE__;
@@ -55,12 +55,12 @@ namespace mongo {
return ss.str();
}
#else
- string sysInfo(){ return ""; }
+ string sysInfo() { return ""; }
#endif
#endif
- void printSysInfo() {
- log() << "sys info: " << sysInfo() << endl;
+ void printSysInfo() {
+ log() << "sys info: " << sysInfo() << endl;
#if defined(_TESTINTENT)
log() << "_TESTINTENT defined - this mode is for qa purposes" << endl;
#endif
@@ -69,7 +69,7 @@ namespace mongo {
//
// 32 bit systems warning
//
- void show_warnings(){
+ void show_warnings() {
// each message adds a leading but not a trailing newline
bool warned = false;
@@ -91,7 +91,7 @@ namespace mongo {
}
#ifdef __linux__
- if (boost::filesystem::exists("/proc/vz") && !boost::filesystem::exists("/proc/bc")){
+ if (boost::filesystem::exists("/proc/vz") && !boost::filesystem::exists("/proc/bc")) {
cout << endl;
cout << "** WARNING: You are running in OpenVZ. This is known to be broken!!!" << endl;
warned = true;
@@ -102,25 +102,26 @@ namespace mongo {
cout << endl;
}
- int versionCmp(StringData rhs, StringData lhs){
+ int versionCmp(StringData rhs, StringData lhs) {
if (strcmp(rhs.data(),lhs.data()) == 0)
return 0;
// handle "1.2.3-" and "1.2.3-pre"
if (rhs.size() < lhs.size()) {
if (strncmp(rhs.data(), lhs.data(), rhs.size()) == 0 && lhs.data()[rhs.size()] == '-')
- return +1;
- } else if (rhs.size() > lhs.size()) {
+ return +1;
+ }
+ else if (rhs.size() > lhs.size()) {
if (strncmp(rhs.data(), lhs.data(), lhs.size()) == 0 && rhs.data()[lhs.size()] == '-')
- return -1;
+ return -1;
}
-
+
return lexNumCmp(rhs.data(), lhs.data());
}
class VersionCmpTest : public UnitTest {
public:
- void run(){
+ void run() {
assert( versionCmp("1.2.3", "1.2.3") == 0 );
assert( versionCmp("1.2.3", "1.2.4") < 0 );
assert( versionCmp("1.2.3", "1.2.20") < 0 );
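
The size-mismatch branches above make a "-" suffix sort before its plain release, while lexNumCmp keeps numeric components numeric. In effect:

    #include "util/version.h"   // assumed path; StringData converts from const char*
    #include <cassert>

    int main() {
        assert( mongo::versionCmp( "1.2.3" , "1.2.3" ) == 0 );
        assert( mongo::versionCmp( "1.2.3" , "1.2.20" ) < 0 );    // 3 < 20 numerically
        assert( mongo::versionCmp( "1.2.3-pre" , "1.2.3" ) < 0 ); // pre-release sorts first
        return 0;
    }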
diff --git a/util/version.h b/util/version.h
index 1dde19f3d65..779fbdc8691 100644
--- a/util/version.h
+++ b/util/version.h
@@ -9,7 +9,7 @@ namespace mongo {
// mongo version
extern const char versionString[];
- string mongodVersion();
+ string mongodVersion();
int versionCmp(StringData rhs, StringData lhs); // like strcmp
const char * gitVersion();