summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEliot Horowitz <eliot@10gen.com>2009-01-15 11:34:04 -0500
committerEliot Horowitz <eliot@10gen.com>2009-01-15 11:34:04 -0500
commitdb0b2adb69ee5ba41d830c0cbfd2d8a73d661c11 (patch)
treece572913795fec4563db5af326e8b7c593ffc7bc
parent85157dfb02e4135716f5efe476c0b95968885ada (diff)
parent261a467aa50cf6c733c89d2e299a8dd63b569750 (diff)
downloadmongo-db0b2adb69ee5ba41d830c0cbfd2d8a73d661c11.tar.gz
Merge branch 'master' of ssh://git.10gen.com/data/gitroot/p
-rw-r--r--client/clientOnly.cpp15
-rw-r--r--client/connpool.cpp38
-rw-r--r--client/connpool.h112
-rw-r--r--client/dbclient.cpp1030
-rw-r--r--client/dbclient.h798
-rw-r--r--client/model.cpp14
-rw-r--r--client/model.h64
-rw-r--r--db/btree.cpp1362
-rw-r--r--db/btree.h462
-rw-r--r--db/btreecursor.cpp332
-rw-r--r--db/clientcursor.cpp346
-rw-r--r--db/clientcursor.h104
-rw-r--r--db/cloner.cpp402
-rw-r--r--db/commands.cpp74
-rw-r--r--db/commands.h60
-rw-r--r--db/cursor.cpp202
-rw-r--r--db/cursor.h356
-rw-r--r--db/database.h194
-rw-r--r--db/db.cpp596
-rw-r--r--db/db.h268
-rw-r--r--db/dbcommands.cpp1256
-rw-r--r--db/dbeval.cpp154
-rw-r--r--db/dbhelpers.cpp52
-rw-r--r--db/dbhelpers.h68
-rw-r--r--db/dbinfo.cpp46
-rw-r--r--db/dbinfo.h84
-rw-r--r--db/dbmessage.h244
-rw-r--r--db/dbwebserver.cpp580
-rw-r--r--db/flushtest.cpp190
-rw-r--r--db/instance.cpp846
-rw-r--r--db/instance.h124
-rw-r--r--db/introspect.cpp56
-rw-r--r--db/introspect.h80
-rw-r--r--db/javajs.cpp1018
-rw-r--r--db/javajs.h268
-rw-r--r--db/jsobj.cpp1564
-rw-r--r--db/jsobj.h1440
-rw-r--r--db/json.cpp862
-rw-r--r--db/json.h4
-rw-r--r--db/lasterror.cpp2
-rw-r--r--db/lasterror.h62
-rw-r--r--db/matcher.cpp804
-rw-r--r--db/matcher.h178
-rw-r--r--db/minilex.h166
-rw-r--r--db/namespace.cpp868
-rw-r--r--db/namespace.h614
-rw-r--r--db/pdfile.cpp1918
-rw-r--r--db/pdfile.h684
-rw-r--r--db/query.cpp1466
-rw-r--r--db/query.h12
-rw-r--r--db/queryoptimizer.cpp18
-rw-r--r--db/queryoptimizer.h58
-rw-r--r--db/repl.cpp1932
-rw-r--r--db/repl.h278
-rw-r--r--db/replset.h260
-rw-r--r--db/scanandorder.h198
-rw-r--r--db/security.cpp98
-rw-r--r--db/security.h20
-rw-r--r--db/storage.h234
-rw-r--r--db/tests.cpp62
-rw-r--r--dbgrid/dbgrid.cpp174
-rw-r--r--dbgrid/dbgrid_commands.cpp78
-rw-r--r--dbgrid/gridconfig.cpp56
-rw-r--r--dbgrid/gridconfig.h144
-rw-r--r--dbgrid/griddatabase.cpp206
-rw-r--r--dbgrid/griddatabase.h28
-rw-r--r--dbgrid/request.cpp146
-rw-r--r--dbgrid/shard.cpp20
-rw-r--r--dbgrid/shard.h30
-rw-r--r--dbtests/btreetests.cpp400
-rw-r--r--dbtests/dbtests.cpp4
-rw-r--r--dbtests/jsobjtests.cpp1848
-rw-r--r--dbtests/namespacetests.cpp1060
-rw-r--r--dbtests/pairingtests.cpp624
-rw-r--r--dbtests/pdfiletests.cpp478
-rw-r--r--grid/message.cpp554
-rw-r--r--grid/message.h282
-rw-r--r--stdafx.cpp116
-rw-r--r--stdafx.h290
-rw-r--r--tools/dump.cpp144
-rw-r--r--tools/import.cpp130
-rw-r--r--util/background.cpp70
-rw-r--r--util/background.h82
-rw-r--r--util/builder.h164
-rw-r--r--util/goodies.h374
-rw-r--r--util/hashtab.h182
-rw-r--r--util/log.h174
-rw-r--r--util/lruishmap.h88
-rw-r--r--util/md5.hpp28
-rw-r--r--util/miniwebserver.cpp274
-rw-r--r--util/miniwebserver.h58
-rw-r--r--util/mmap.cpp288
-rw-r--r--util/mmap.h64
-rw-r--r--util/sock.cpp302
-rw-r--r--util/sock.h302
-rw-r--r--util/unittest.h48
-rw-r--r--util/util.cpp94
97 files changed, 17547 insertions, 17554 deletions
diff --git a/client/clientOnly.cpp b/client/clientOnly.cpp
index eb5fec052da..8711dd713fa 100644
--- a/client/clientOnly.cpp
+++ b/client/clientOnly.cpp
@@ -1,23 +1,20 @@
-
-#include <iostream>
-
-using namespace std;
+#include "../stdafx.h"
namespace mongo {
const char * curNs = "in client mode";
// Database* database = 0;
- void dbexit(int returnCode, const char *whyMsg ){
- cout << "dbexit called" << endl;
+ void dbexit(int returnCode, const char *whyMsg ) {
+ out() << "dbexit called" << endl;
if ( whyMsg )
- cout << " b/c " << whyMsg << endl;
- cout << "exiting" << endl;
+ out() << " b/c " << whyMsg << endl;
+ out() << "exiting" << endl;
exit( returnCode );
}
- string getDbContext(){
+ string getDbContext() {
return "in client only mode";
}
}
diff --git a/client/connpool.cpp b/client/connpool.cpp
index 90271909ed3..95b6067fd9a 100644
--- a/client/connpool.cpp
+++ b/client/connpool.cpp
@@ -24,27 +24,27 @@
namespace mongo {
-DBConnectionPool pool;
-
-DBClientConnection* DBConnectionPool::get(const string& host) {
- boostlock L(poolMutex);
-
- PoolForHost *&p = pools[host];
- if ( p == 0 )
- p = new PoolForHost();
- if ( p->pool.empty() ) {
- string errmsg;
- DBClientConnection *c = new DBClientConnection();
- if ( !c->connect(host.c_str(), errmsg) ) {
- delete c;
- uassert("dbconnectionpool: connect failed", false);
- return 0;
+ DBConnectionPool pool;
+
+ DBClientConnection* DBConnectionPool::get(const string& host) {
+ boostlock L(poolMutex);
+
+ PoolForHost *&p = pools[host];
+ if ( p == 0 )
+ p = new PoolForHost();
+ if ( p->pool.empty() ) {
+ string errmsg;
+ DBClientConnection *c = new DBClientConnection();
+ if ( !c->connect(host.c_str(), errmsg) ) {
+ delete c;
+ uassert("dbconnectionpool: connect failed", false);
+ return 0;
+ }
+ return c;
}
+ DBClientConnection *c = p->pool.front();
+ p->pool.pop();
return c;
}
- DBClientConnection *c = p->pool.front();
- p->pool.pop();
- return c;
-}
} // namespace mongo
diff --git a/client/connpool.h b/client/connpool.h
index c811152e474..c1ad3c409a8 100644
--- a/client/connpool.h
+++ b/client/connpool.h
@@ -23,70 +23,70 @@
namespace mongo {
-struct PoolForHost {
- queue<DBClientConnection*> pool;
-};
+ struct PoolForHost {
+ queue<DBClientConnection*> pool;
+ };
-class DBConnectionPool {
- boost::mutex poolMutex;
- map<string,PoolForHost*> pools;
-public:
+ class DBConnectionPool {
+ boost::mutex poolMutex;
+ map<string,PoolForHost*> pools;
+ public:
- /* generally, use ScopedDbConnection and do not call these directly */
- DBClientConnection *get(const string& host);
- void release(const string& host, DBClientConnection *c) {
- boostlock L(poolMutex);
- pools[host]->pool.push(c);
- }
-};
+ /* generally, use ScopedDbConnection and do not call these directly */
+ DBClientConnection *get(const string& host);
+ void release(const string& host, DBClientConnection *c) {
+ boostlock L(poolMutex);
+ pools[host]->pool.push(c);
+ }
+ };
-extern DBConnectionPool pool;
+ extern DBConnectionPool pool;
-/* Use to get a connection from the pool. On exceptions things
- clean up nicely.
-*/
-class ScopedDbConnection {
- const string host;
- DBClientConnection *_conn;
-public:
- DBClientConnection& conn() {
- return *_conn;
- }
+ /* Use to get a connection from the pool. On exceptions things
+ clean up nicely.
+ */
+ class ScopedDbConnection {
+ const string host;
+ DBClientConnection *_conn;
+ public:
+ DBClientConnection& conn() {
+ return *_conn;
+ }
- /* throws UserAssertionAcception if can't connect */
- ScopedDbConnection(const string& _host) :
- host(_host), _conn( pool.get(_host) ) { }
+ /* throws UserAssertionAcception if can't connect */
+ ScopedDbConnection(const string& _host) :
+ host(_host), _conn( pool.get(_host) ) { }
- /* Force closure of the connection. You should call this if you leave it in
- a bad state. Destructor will do this too, but it is verbose.
- */
- void kill() {
- delete _conn;
- _conn = 0;
- }
+ /* Force closure of the connection. You should call this if you leave it in
+ a bad state. Destructor will do this too, but it is verbose.
+ */
+ void kill() {
+ delete _conn;
+ _conn = 0;
+ }
- /* Call this when you are done with the ocnnection.
- Why? See note in the destructor below.
- */
- void done() {
- if ( _conn->isFailed() )
- kill();
- else
- pool.release(host, _conn);
- _conn = 0;
- }
+ /* Call this when you are done with the ocnnection.
+ Why? See note in the destructor below.
+ */
+ void done() {
+ if ( _conn->isFailed() )
+ kill();
+ else
+ pool.release(host, _conn);
+ _conn = 0;
+ }
- ~ScopedDbConnection() {
- if ( _conn ) {
- /* you are supposed to call done(). if you did that, correctly, we
- only get here if an exception was thrown. in such a scenario, we can't
- be sure we fully read all expected data of a reply on the socket. so
- we don't try to reuse the connection. The cout is just informational.
- */
- cout << "~ScopedDBConnection: _conn != null\n";
- kill();
+ ~ScopedDbConnection() {
+ if ( _conn ) {
+ /* you are supposed to call done(). if you did that, correctly, we
+ only get here if an exception was thrown. in such a scenario, we can't
+ be sure we fully read all expected data of a reply on the socket. so
+ we don't try to reuse the connection. The out() is just informational.
+ */
+ out() << "~ScopedDBConnection: _conn != null\n";
+ kill();
+ }
}
- }
-};
+ };
} // namespace mongo
diff --git a/client/dbclient.cpp b/client/dbclient.cpp
index f740989103a..cfbccd19fb0 100644
--- a/client/dbclient.cpp
+++ b/client/dbclient.cpp
@@ -27,595 +27,595 @@
namespace mongo {
-/* --- dbclientcommands --- */
+ /* --- dbclientcommands --- */
-inline bool DBClientWithCommands::isOk(const BSONObj& o) {
- return o.getIntField("ok") == 1;
-}
+ inline bool DBClientWithCommands::isOk(const BSONObj& o) {
+ return o.getIntField("ok") == 1;
+ }
-inline bool DBClientWithCommands::runCommand(const char *dbname, BSONObj cmd, BSONObj &info) {
- string ns = string(dbname) + ".$cmd";
- info = findOne(ns.c_str(), cmd);
- return isOk(info);
-}
+ inline bool DBClientWithCommands::runCommand(const char *dbname, BSONObj cmd, BSONObj &info) {
+ string ns = string(dbname) + ".$cmd";
+ info = findOne(ns.c_str(), cmd);
+ return isOk(info);
+ }
-/* note - we build a bson obj here -- for something that is super common like getlasterror you
- should have that object prebuilt as that would be faster.
-*/
-bool DBClientWithCommands::simpleCommand(const char *dbname, BSONObj *info, const char *command) {
- BSONObj o;
- if ( info == 0 )
- info = &o;
- BSONObjBuilder b;
- b.appendInt(command, 1);
- return runCommand(dbname, b.done(), *info);
-}
-
-BSONObj ismastercmdobj = fromjson("{\"ismaster\":1}");
-
-bool DBClientWithCommands::isMaster(bool& isMaster, BSONObj *info) {
- BSONObj o;
- if ( info == 0 ) info = &o;
- bool ok = runCommand("admin", ismastercmdobj, *info);
- isMaster = (info->getIntField("ismaster") == 1);
- return ok;
-}
-
-bool DBClientWithCommands::createCollection(const char *ns, unsigned size, bool capped, int max, BSONObj *info) {
- BSONObj o;
- if ( info == 0 ) info = &o;
- BSONObjBuilder b;
- b.append("create", ns);
- if ( size ) b.append("size", size);
- if ( capped ) b.append("capped", true);
- if ( max ) b.append("max", max);
- string db = nsToClient(ns);
- return runCommand(db.c_str(), b.done(), *info);
-}
-
-bool DBClientWithCommands::copyDatabase(const char *fromdb, const char *todb, const char *fromhost, BSONObj *info) {
- assert( *fromdb && *todb );
- BSONObj o;
- if ( info == 0 ) info = &o;
- BSONObjBuilder b;
- b.append("copydb", 1);
- b.append("fromhost", fromhost);
- b.append("fromdb", fromdb);
- b.append("todb", todb);
- return runCommand("admin", b.done(), *info);
-}
-
-bool DBClientWithCommands::setDbProfilingLevel(const char *dbname, ProfilingLevel level, BSONObj *info ) {
- BSONObj o;
- if ( info == 0 ) info = &o;
-
- if ( level ) {
- // Create system.profile collection. If it already exists this does nothing.
- // TODO: move this into the db instead of here so that all
- // drivers don't have to do this.
- string ns = string(dbname) + ".system.profile";
- createCollection(ns.c_str(), 1024 * 1024, true, 0, info);
- }
-
- BSONObjBuilder b;
- b.append("profile", (int) level);
- return runCommand(dbname, b.done(), *info);
-}
-
-BSONObj getprofilingcmdobj = fromjson("{\"profile\":-1}");
-
-bool DBClientWithCommands::getDbProfilingLevel(const char *dbname, ProfilingLevel& level, BSONObj *info) {
- BSONObj o;
- if ( info == 0 ) info = &o;
- if ( runCommand(dbname, getprofilingcmdobj, *info) ) {
- level = (ProfilingLevel) info->getIntField("was");
- return true;
+ /* note - we build a bson obj here -- for something that is super common like getlasterror you
+ should have that object prebuilt as that would be faster.
+ */
+ bool DBClientWithCommands::simpleCommand(const char *dbname, BSONObj *info, const char *command) {
+ BSONObj o;
+ if ( info == 0 )
+ info = &o;
+ BSONObjBuilder b;
+ b.appendInt(command, 1);
+ return runCommand(dbname, b.done(), *info);
}
- return false;
-}
-
-bool DBClientWithCommands::eval(const char *dbname, const char *jscode, BSONObj& info, BSONElement& retValue, BSONObj *args) {
- BSONObjBuilder b;
- b.appendCode("$eval", jscode);
- if ( args )
- b.appendArray("args", *args);
- bool ok = runCommand(dbname, b.done(), info);
- if ( ok )
- retValue = info.getField("retval");
- return ok;
-}
-
-bool DBClientWithCommands::eval(const char *dbname, const char *jscode) {
- BSONObj info;
- BSONElement retValue;
- return eval(dbname, jscode, info, retValue);
-}
-
-/* TODO: unit tests should run this? */
-void testDbEval() {
- DBClientConnection c;
- string err;
- if ( !c.connect("localhost", err) ) {
- cout << "can't connect to server " << err << endl;
- return;
- }
- BSONObj info;
- BSONElement retValue;
- BSONObjBuilder b;
- b.append("0", 99);
- BSONObj args = b.done();
- bool ok = c.eval("dwight", "function() { return args[0]; }", info, retValue, &args);
- cout << "eval ok=" << ok << endl;
- cout << "retvalue=" << retValue.toString() << endl;
- cout << "info=" << info.toString() << endl;
-
- cout << endl;
-
- int x = 3;
- assert( c.eval("dwight", "function() { return 3; }", x) );
-
- cout << "***\n";
-
- BSONObj foo = fromjson("{\"x\":7}");
- cout << foo.toString() << endl;
- int res=0;
- ok = c.eval("dwight", "function(parm1) { return parm1.x; }", foo, res);
- cout << ok << " retval:" << res << endl;
-}
-
-int test2() {
- testDbEval();
- return 0;
-}
-
-/* --- dbclientconnection --- */
-
-BSONObj DBClientBase::findOne(const char *ns, BSONObj query, BSONObj *fieldsToReturn, int queryOptions) {
- auto_ptr<DBClientCursor> c =
- this->query(ns, query, 1, 0, fieldsToReturn, queryOptions);
-
- massert( "DBClientBase::findOne: transport error", c.get() );
-
- if ( !c->more() )
- return BSONObj();
-
- return c->next().copy();
-}
-
-bool DBClientConnection::connect(const char *_serverAddress, string& errmsg) {
- serverAddress = _serverAddress;
-
- int port = DBPort;
- string ip = hostbyname(_serverAddress);
- if ( ip.empty() )
- ip = serverAddress;
-
- size_t idx = ip.find( ":" );
- if ( idx != string::npos ) {
- //cout << "port string:" << ip.substr( idx ) << endl;
- port = atoi( ip.substr( idx + 1 ).c_str() );
- ip = ip.substr( 0 , idx );
- ip = hostbyname(ip.c_str());
-
- }
- if ( ip.empty() )
- ip = serverAddress;
-
- // we keep around SockAddr for connection life -- maybe MessagingPort
- // requires that?
- server = auto_ptr<SockAddr>(new SockAddr(ip.c_str(), port));
- p = auto_ptr<MessagingPort>(new MessagingPort());
-
- if ( !p->connect(*server) ) {
- stringstream ss;
- ss << "couldn't connect to server " << serverAddress << " " << ip << ":" << port;
- errmsg = ss.str();
- failed = true;
+
+ BSONObj ismastercmdobj = fromjson("{\"ismaster\":1}");
+
+ bool DBClientWithCommands::isMaster(bool& isMaster, BSONObj *info) {
+ BSONObj o;
+ if ( info == 0 ) info = &o;
+ bool ok = runCommand("admin", ismastercmdobj, *info);
+ isMaster = (info->getIntField("ismaster") == 1);
+ return ok;
+ }
+
+ bool DBClientWithCommands::createCollection(const char *ns, unsigned size, bool capped, int max, BSONObj *info) {
+ BSONObj o;
+ if ( info == 0 ) info = &o;
+ BSONObjBuilder b;
+ b.append("create", ns);
+ if ( size ) b.append("size", size);
+ if ( capped ) b.append("capped", true);
+ if ( max ) b.append("max", max);
+ string db = nsToClient(ns);
+ return runCommand(db.c_str(), b.done(), *info);
+ }
+
+ bool DBClientWithCommands::copyDatabase(const char *fromdb, const char *todb, const char *fromhost, BSONObj *info) {
+ assert( *fromdb && *todb );
+ BSONObj o;
+ if ( info == 0 ) info = &o;
+ BSONObjBuilder b;
+ b.append("copydb", 1);
+ b.append("fromhost", fromhost);
+ b.append("fromdb", fromdb);
+ b.append("todb", todb);
+ return runCommand("admin", b.done(), *info);
+ }
+
+ bool DBClientWithCommands::setDbProfilingLevel(const char *dbname, ProfilingLevel level, BSONObj *info ) {
+ BSONObj o;
+ if ( info == 0 ) info = &o;
+
+ if ( level ) {
+ // Create system.profile collection. If it already exists this does nothing.
+ // TODO: move this into the db instead of here so that all
+ // drivers don't have to do this.
+ string ns = string(dbname) + ".system.profile";
+ createCollection(ns.c_str(), 1024 * 1024, true, 0, info);
+ }
+
+ BSONObjBuilder b;
+ b.append("profile", (int) level);
+ return runCommand(dbname, b.done(), *info);
+ }
+
+ BSONObj getprofilingcmdobj = fromjson("{\"profile\":-1}");
+
+ bool DBClientWithCommands::getDbProfilingLevel(const char *dbname, ProfilingLevel& level, BSONObj *info) {
+ BSONObj o;
+ if ( info == 0 ) info = &o;
+ if ( runCommand(dbname, getprofilingcmdobj, *info) ) {
+ level = (ProfilingLevel) info->getIntField("was");
+ return true;
+ }
return false;
}
- return true;
-}
-void DBClientConnection::checkConnection() {
- if ( !failed )
- return;
- if ( lastReconnectTry && time(0)-lastReconnectTry < 2 )
- return;
- if ( !autoReconnect )
- return;
+ bool DBClientWithCommands::eval(const char *dbname, const char *jscode, BSONObj& info, BSONElement& retValue, BSONObj *args) {
+ BSONObjBuilder b;
+ b.appendCode("$eval", jscode);
+ if ( args )
+ b.appendArray("args", *args);
+ bool ok = runCommand(dbname, b.done(), info);
+ if ( ok )
+ retValue = info.getField("retval");
+ return ok;
+ }
+
+ bool DBClientWithCommands::eval(const char *dbname, const char *jscode) {
+ BSONObj info;
+ BSONElement retValue;
+ return eval(dbname, jscode, info, retValue);
+ }
+
+ /* TODO: unit tests should run this? */
+ void testDbEval() {
+ DBClientConnection c;
+ string err;
+ if ( !c.connect("localhost", err) ) {
+ out() << "can't connect to server " << err << endl;
+ return;
+ }
+ BSONObj info;
+ BSONElement retValue;
+ BSONObjBuilder b;
+ b.append("0", 99);
+ BSONObj args = b.done();
+ bool ok = c.eval("dwight", "function() { return args[0]; }", info, retValue, &args);
+ out() << "eval ok=" << ok << endl;
+ out() << "retvalue=" << retValue.toString() << endl;
+ out() << "info=" << info.toString() << endl;
+
+ out() << endl;
+
+ int x = 3;
+ assert( c.eval("dwight", "function() { return 3; }", x) );
+
+ out() << "***\n";
+
+ BSONObj foo = fromjson("{\"x\":7}");
+ out() << foo.toString() << endl;
+ int res=0;
+ ok = c.eval("dwight", "function(parm1) { return parm1.x; }", foo, res);
+ out() << ok << " retval:" << res << endl;
+ }
+
+ int test2() {
+ testDbEval();
+ return 0;
+ }
+
+ /* --- dbclientconnection --- */
- lastReconnectTry = time(0);
- log() << "trying reconnect to " << serverAddress << endl;
- string errmsg;
- string tmp = serverAddress;
- failed = false;
- if ( !connect(tmp.c_str(), errmsg) )
- log() << "reconnect " << serverAddress << " failed " << errmsg << endl;
- else
- log() << "reconnect " << serverAddress << " ok" << endl;
-}
+ BSONObj DBClientBase::findOne(const char *ns, BSONObj query, BSONObj *fieldsToReturn, int queryOptions) {
+ auto_ptr<DBClientCursor> c =
+ this->query(ns, query, 1, 0, fieldsToReturn, queryOptions);
-auto_ptr<DBClientCursor> DBClientBase::query(const char *ns, BSONObj query, int nToReturn,
- int nToSkip, BSONObj *fieldsToReturn, int queryOptions) {
- auto_ptr<DBClientCursor> c( new DBClientCursor( this,
- ns, query, nToReturn, nToSkip,
- fieldsToReturn, queryOptions ) );
- if ( c->init() )
- return c;
- return auto_ptr< DBClientCursor >( 0 );
-}
+ massert( "DBClientBase::findOne: transport error", c.get() );
-void DBClientBase::insert( const char * ns , BSONObj obj ) {
- Message toSend;
+ if ( !c->more() )
+ return BSONObj();
- BufBuilder b;
- int opts = 0;
- b.append( opts );
- b.append( ns );
- obj.appendSelfToBufBuilder( b );
+ return c->next().copy();
+ }
+
+ bool DBClientConnection::connect(const char *_serverAddress, string& errmsg) {
+ serverAddress = _serverAddress;
+
+ int port = DBPort;
+ string ip = hostbyname(_serverAddress);
+ if ( ip.empty() )
+ ip = serverAddress;
+
+ size_t idx = ip.find( ":" );
+ if ( idx != string::npos ) {
+ //out() << "port string:" << ip.substr( idx ) << endl;
+ port = atoi( ip.substr( idx + 1 ).c_str() );
+ ip = ip.substr( 0 , idx );
+ ip = hostbyname(ip.c_str());
- toSend.setData( dbInsert , b.buf() , b.len() );
+ }
+ if ( ip.empty() )
+ ip = serverAddress;
+
+ // we keep around SockAddr for connection life -- maybe MessagingPort
+ // requires that?
+ server = auto_ptr<SockAddr>(new SockAddr(ip.c_str(), port));
+ p = auto_ptr<MessagingPort>(new MessagingPort());
+
+ if ( !p->connect(*server) ) {
+ stringstream ss;
+ ss << "couldn't connect to server " << serverAddress << " " << ip << ":" << port;
+ errmsg = ss.str();
+ failed = true;
+ return false;
+ }
+ return true;
+ }
- say( toSend );
-}
+ void DBClientConnection::checkConnection() {
+ if ( !failed )
+ return;
+ if ( lastReconnectTry && time(0)-lastReconnectTry < 2 )
+ return;
+ if ( !autoReconnect )
+ return;
+
+ lastReconnectTry = time(0);
+ log() << "trying reconnect to " << serverAddress << endl;
+ string errmsg;
+ string tmp = serverAddress;
+ failed = false;
+ if ( !connect(tmp.c_str(), errmsg) )
+ log() << "reconnect " << serverAddress << " failed " << errmsg << endl;
+ else
+ log() << "reconnect " << serverAddress << " ok" << endl;
+ }
-void DBClientBase::remove( const char * ns , BSONObj obj , bool justOne ) {
- Message toSend;
+ auto_ptr<DBClientCursor> DBClientBase::query(const char *ns, BSONObj query, int nToReturn,
+ int nToSkip, BSONObj *fieldsToReturn, int queryOptions) {
+ auto_ptr<DBClientCursor> c( new DBClientCursor( this,
+ ns, query, nToReturn, nToSkip,
+ fieldsToReturn, queryOptions ) );
+ if ( c->init() )
+ return c;
+ return auto_ptr< DBClientCursor >( 0 );
+ }
- BufBuilder b;
- int opts = 0;
- b.append( opts );
- b.append( ns );
+ void DBClientBase::insert( const char * ns , BSONObj obj ) {
+ Message toSend;
- int flags = 0;
- if ( justOne || obj.hasField( "_id" ) )
- flags &= 1;
- b.append( flags );
+ BufBuilder b;
+ int opts = 0;
+ b.append( opts );
+ b.append( ns );
+ obj.appendSelfToBufBuilder( b );
- obj.appendSelfToBufBuilder( b );
+ toSend.setData( dbInsert , b.buf() , b.len() );
- toSend.setData( dbDelete , b.buf() , b.len() );
+ say( toSend );
+ }
- say( toSend );
-}
+ void DBClientBase::remove( const char * ns , BSONObj obj , bool justOne ) {
+ Message toSend;
-void DBClientBase::update( const char * ns , BSONObj query , BSONObj obj , bool upsert ) {
+ BufBuilder b;
+ int opts = 0;
+ b.append( opts );
+ b.append( ns );
- BufBuilder b;
- b.append( (int)0 ); // reserverd
- b.append( ns );
+ int flags = 0;
+ if ( justOne || obj.hasField( "_id" ) )
+ flags &= 1;
+ b.append( flags );
- b.append( (int)upsert );
+ obj.appendSelfToBufBuilder( b );
- query.appendSelfToBufBuilder( b );
- obj.appendSelfToBufBuilder( b );
+ toSend.setData( dbDelete , b.buf() , b.len() );
- Message toSend;
- toSend.setData( dbUpdate , b.buf() , b.len() );
+ say( toSend );
+ }
- say( toSend );
-}
+ void DBClientBase::update( const char * ns , BSONObj query , BSONObj obj , bool upsert ) {
-bool DBClientBase::ensureIndex( const char * ns , BSONObj keys , const char * name ) {
- BSONObjBuilder toSave;
- toSave.append( "ns" , ns );
- toSave.append( "key" , keys );
+ BufBuilder b;
+ b.append( (int)0 ); // reserverd
+ b.append( ns );
- string cacheKey(ns);
- cacheKey += "--";
+ b.append( (int)upsert );
- if ( name ) {
- toSave.append( "name" , name );
- cacheKey += name;
+ query.appendSelfToBufBuilder( b );
+ obj.appendSelfToBufBuilder( b );
+
+ Message toSend;
+ toSend.setData( dbUpdate , b.buf() , b.len() );
+
+ say( toSend );
}
- else {
- stringstream ss;
- bool first = 1;
- for ( BSONObjIterator i(keys); i.more(); ) {
- BSONElement f = i.next();
- if ( f.eoo() )
- break;
+ bool DBClientBase::ensureIndex( const char * ns , BSONObj keys , const char * name ) {
+ BSONObjBuilder toSave;
+ toSave.append( "ns" , ns );
+ toSave.append( "key" , keys );
+
+ string cacheKey(ns);
+ cacheKey += "--";
+
+ if ( name ) {
+ toSave.append( "name" , name );
+ cacheKey += name;
+ }
+ else {
+ stringstream ss;
+
+ bool first = 1;
+ for ( BSONObjIterator i(keys); i.more(); ) {
+ BSONElement f = i.next();
+ if ( f.eoo() )
+ break;
- if ( first )
- first = 0;
- else
- ss << "_";
+ if ( first )
+ first = 0;
+ else
+ ss << "_";
- ss << f.fieldName() << "_";
+ ss << f.fieldName() << "_";
- if ( f.type() == NumberInt )
- ss << (int)(f.number() );
- else if ( f.type() == NumberDouble )
- ss << f.number();
+ if ( f.type() == NumberInt )
+ ss << (int)(f.number() );
+ else if ( f.type() == NumberDouble )
+ ss << f.number();
+ }
+
+ toSave.append( "name" , ss.str() );
+ cacheKey += ss.str();
}
- toSave.append( "name" , ss.str() );
- cacheKey += ss.str();
+ if ( _seenIndexes.count( cacheKey ) )
+ return 0;
+ _seenIndexes.insert( cacheKey );
+
+ insert( Namespace( ns ).getSisterNS( "system.indexes" ).c_str() , toSave.doneAndDecouple() );
+ return 1;
}
- if ( _seenIndexes.count( cacheKey ) )
- return 0;
- _seenIndexes.insert( cacheKey );
-
- insert( Namespace( ns ).getSisterNS( "system.indexes" ).c_str() , toSave.doneAndDecouple() );
- return 1;
-}
-
-void DBClientBase::resetIndexCache() {
- _seenIndexes.clear();
-}
-
-/* -- DBClientCursor ---------------------------------------------- */
-
-void assembleRequest( const string &ns, BSONObj query, int nToReturn, int nToSkip, BSONObj *fieldsToReturn, int queryOptions, Message &toSend ) {
- // see query.h for the protocol we are using here.
- BufBuilder b;
- int opts = queryOptions;
- assert( (opts&Option_ALLMASK) == opts );
- b.append(opts);
- b.append(ns.c_str());
- b.append(nToSkip);
- b.append(nToReturn);
- query.appendSelfToBufBuilder(b);
- if ( fieldsToReturn )
- fieldsToReturn->appendSelfToBufBuilder(b);
- toSend.setData(dbQuery, b.buf(), b.len());
-}
-
-void DBClientConnection::say( Message &toSend ) {
- port().say( toSend );
-}
-
-void DBClientConnection::sayPiggyBack( Message &toSend ) {
- port().piggyBack( toSend );
-}
-
-bool DBClientConnection::call( Message &toSend, Message &response, bool assertOk ) {
- if ( !port().call(toSend, response) ) {
- failed = true;
- if ( assertOk )
- massert("dbclient error communicating with server", false);
- return false;
+ void DBClientBase::resetIndexCache() {
+ _seenIndexes.clear();
}
- return true;
-}
-void DBClientConnection::checkResponse( const char *data, int nReturned ) {
- /* check for errors. the only one we really care about at
- this stage is "not master" */
- if ( clientPaired && nReturned ) {
- BSONObj o(data);
- BSONElement e = o.firstElement();
- if ( strcmp(e.fieldName(), "$err") == 0 &&
- e.type() == String && strncmp(e.valuestr(), "not master", 10) == 0 ) {
- clientPaired->isntMaster();
- }
+ /* -- DBClientCursor ---------------------------------------------- */
+
+ void assembleRequest( const string &ns, BSONObj query, int nToReturn, int nToSkip, BSONObj *fieldsToReturn, int queryOptions, Message &toSend ) {
+ // see query.h for the protocol we are using here.
+ BufBuilder b;
+ int opts = queryOptions;
+ assert( (opts&Option_ALLMASK) == opts );
+ b.append(opts);
+ b.append(ns.c_str());
+ b.append(nToSkip);
+ b.append(nToReturn);
+ query.appendSelfToBufBuilder(b);
+ if ( fieldsToReturn )
+ fieldsToReturn->appendSelfToBufBuilder(b);
+ toSend.setData(dbQuery, b.buf(), b.len());
}
-}
-bool DBClientCursor::init() {
- Message toSend;
- assembleRequest( ns, query, nToReturn, nToSkip, fieldsToReturn, opts, toSend );
- if ( !connector->call( toSend, *m, false ) )
- return false;
+ void DBClientConnection::say( Message &toSend ) {
+ port().say( toSend );
+ }
- dataReceived();
- return true;
-}
-
-void DBClientCursor::requestMore() {
- assert( cursorId && pos == nReturned );
-
- BufBuilder b;
- b.append(opts);
- b.append(ns.c_str());
- b.append(nToReturn);
- b.append(cursorId);
-
- Message toSend;
- toSend.setData(dbGetMore, b.buf(), b.len());
- auto_ptr<Message> response(new Message());
- connector->call( toSend, *response );
-
- m = response;
- dataReceived();
-}
-
-void DBClientCursor::dataReceived() {
- QueryResult *qr = (QueryResult *) m->data;
- if ( qr->resultFlags() & QueryResult::ResultFlag_CursorNotFound ) {
- // cursor id no longer valid at the server.
- assert( qr->cursorId == 0 );
- cursorId = 0; // 0 indicates no longer valid (dead)
- }
- if ( cursorId == 0 ) {
- // only set initially: we don't want to kill it on end of data
- // if it's a tailable cursor
- cursorId = qr->cursorId;
- }
- nReturned = qr->nReturned;
- pos = 0;
- data = qr->data();
-
- connector->checkResponse( data, nReturned );
- /* this assert would fire the way we currently work:
- assert( nReturned || cursorId == 0 );
- */
-}
+ void DBClientConnection::sayPiggyBack( Message &toSend ) {
+ port().piggyBack( toSend );
+ }
-bool DBClientCursor::more() {
- if ( pos < nReturned )
+ bool DBClientConnection::call( Message &toSend, Message &response, bool assertOk ) {
+ if ( !port().call(toSend, response) ) {
+ failed = true;
+ if ( assertOk )
+ massert("dbclient error communicating with server", false);
+ return false;
+ }
return true;
+ }
- if ( cursorId == 0 )
- return false;
+ void DBClientConnection::checkResponse( const char *data, int nReturned ) {
+ /* check for errors. the only one we really care about at
+ this stage is "not master" */
+ if ( clientPaired && nReturned ) {
+ BSONObj o(data);
+ BSONElement e = o.firstElement();
+ if ( strcmp(e.fieldName(), "$err") == 0 &&
+ e.type() == String && strncmp(e.valuestr(), "not master", 10) == 0 ) {
+ clientPaired->isntMaster();
+ }
+ }
+ }
- requestMore();
- return pos < nReturned;
-}
+ bool DBClientCursor::init() {
+ Message toSend;
+ assembleRequest( ns, query, nToReturn, nToSkip, fieldsToReturn, opts, toSend );
+ if ( !connector->call( toSend, *m, false ) )
+ return false;
-BSONObj DBClientCursor::next() {
- assert( more() );
- pos++;
- BSONObj o(data);
- data += o.objsize();
- return o;
-}
+ dataReceived();
+ return true;
+ }
+
+ void DBClientCursor::requestMore() {
+ assert( cursorId && pos == nReturned );
-DBClientCursor::~DBClientCursor() {
- if ( cursorId ) {
BufBuilder b;
- b.append( (int)0 ); // reserved
- b.append( (int)1 ); // number
- b.append( cursorId );
+ b.append(opts);
+ b.append(ns.c_str());
+ b.append(nToReturn);
+ b.append(cursorId);
+
+ Message toSend;
+ toSend.setData(dbGetMore, b.buf(), b.len());
+ auto_ptr<Message> response(new Message());
+ connector->call( toSend, *response );
+
+ m = response;
+ dataReceived();
+ }
- Message m;
- m.setData( dbKillCursors , b.buf() , b.len() );
+ void DBClientCursor::dataReceived() {
+ QueryResult *qr = (QueryResult *) m->data;
+ if ( qr->resultFlags() & QueryResult::ResultFlag_CursorNotFound ) {
+ // cursor id no longer valid at the server.
+ assert( qr->cursorId == 0 );
+ cursorId = 0; // 0 indicates no longer valid (dead)
+ }
+ if ( cursorId == 0 ) {
+ // only set initially: we don't want to kill it on end of data
+ // if it's a tailable cursor
+ cursorId = qr->cursorId;
+ }
+ nReturned = qr->nReturned;
+ pos = 0;
+ data = qr->data();
+
+ connector->checkResponse( data, nReturned );
+ /* this assert would fire the way we currently work:
+ assert( nReturned || cursorId == 0 );
+ */
+ }
+
+ bool DBClientCursor::more() {
+ if ( pos < nReturned )
+ return true;
- connector->sayPiggyBack( m );
+ if ( cursorId == 0 )
+ return false;
+
+ requestMore();
+ return pos < nReturned;
+ }
+
+ BSONObj DBClientCursor::next() {
+ assert( more() );
+ pos++;
+ BSONObj o(data);
+ data += o.objsize();
+ return o;
}
-}
+ DBClientCursor::~DBClientCursor() {
+ if ( cursorId ) {
+ BufBuilder b;
+ b.append( (int)0 ); // reserved
+ b.append( (int)1 ); // number
+ b.append( cursorId );
-/* ------------------------------------------------------ */
+ Message m;
+ m.setData( dbKillCursors , b.buf() , b.len() );
+
+ connector->sayPiggyBack( m );
+ }
+
+ }
+
+ /* ------------------------------------------------------ */
// "./db testclient" to invoke
-extern BSONObj emptyObj;
-void testClient() {
- cout << "testClient()" << endl;
+ extern BSONObj emptyObj;
+ void testClient() {
+ out() << "testClient()" << endl;
// DBClientConnection c(true);
- DBClientPaired c;
- string err;
- if ( !c.connect("10.211.55.2", "1.2.3.4") ) {
+ DBClientPaired c;
+ string err;
+ if ( !c.connect("10.211.55.2", "1.2.3.4") ) {
// if( !c.connect("10.211.55.2", err) ) {
- cout << "testClient: connect() failed" << endl;
- }
- else {
- // temp:
- cout << "test query returns: " << c.findOne("foo.bar", fromjson("{}")).toString() << endl;
- }
+ out() << "testClient: connect() failed" << endl;
+ }
+ else {
+ // temp:
+ out() << "test query returns: " << c.findOne("foo.bar", fromjson("{}")).toString() << endl;
+ }
again:
- cout << "query foo.bar..." << endl;
- auto_ptr<DBClientCursor> cursor =
- c.query("foo.bar", emptyObj, 0, 0, 0, Option_CursorTailable);
- DBClientCursor *cc = cursor.get();
- if ( cc == 0 ) {
- cout << "query() returned 0, sleeping 10 secs" << endl;
- sleepsecs(10);
- goto again;
- }
- while ( 1 ) {
- bool m;
- try {
- m = cc->more();
- } catch (AssertionException&) {
- cout << "more() asserted, sleeping 10 sec" << endl;
+ out() << "query foo.bar..." << endl;
+ auto_ptr<DBClientCursor> cursor =
+ c.query("foo.bar", emptyObj, 0, 0, 0, Option_CursorTailable);
+ DBClientCursor *cc = cursor.get();
+ if ( cc == 0 ) {
+ out() << "query() returned 0, sleeping 10 secs" << endl;
+ sleepsecs(10);
goto again;
}
- cout << "more: " << m << " dead:" << cc->isDead() << endl;
- if ( !m ) {
- if ( cc->isDead() )
- cout << "cursor dead, stopping" << endl;
- else {
- cout << "Sleeping 10 seconds" << endl;
- sleepsecs(10);
- continue;
- }
- break;
- }
- cout << cc->next().toString() << endl;
- }
-}
-
-/* --- class dbclientpaired --- */
-
-string DBClientPaired::toString() {
- stringstream ss;
- ss << "state: " << master << '\n';
- ss << "left: " << left.toStringLong() << '\n';
- ss << "right: " << right.toStringLong() << '\n';
- return ss.str();
-}
-
-DBClientPaired::DBClientPaired() :
- left(true), right(true)
-{
- master = NotSetL;
-}
-
-/* find which server, the left or right, is currently master mode */
-void DBClientPaired::_checkMaster() {
- for ( int retry = 0; retry < 2; retry++ ) {
- int x = master;
- for ( int pass = 0; pass < 2; pass++ ) {
- DBClientConnection& c = x == 0 ? left : right;
+ while ( 1 ) {
+ bool m;
try {
- bool im;
- BSONObj o;
- c.isMaster(im, &o);
- if ( retry )
- log() << "checkmaster: " << c.toString() << ' ' << o.toString() << '\n';
- if ( im ) {
- master = (State) (x + 2);
- return;
+ m = cc->more();
+ } catch (AssertionException&) {
+ out() << "more() asserted, sleeping 10 sec" << endl;
+ goto again;
+ }
+ out() << "more: " << m << " dead:" << cc->isDead() << endl;
+ if ( !m ) {
+ if ( cc->isDead() )
+ out() << "cursor dead, stopping" << endl;
+ else {
+ out() << "Sleeping 10 seconds" << endl;
+ sleepsecs(10);
+ continue;
}
+ break;
}
- catch (AssertionException&) {
- if ( retry )
- log() << "checkmaster: caught exception " << c.toString() << '\n';
+ out() << cc->next().toString() << endl;
+ }
+ }
+
+ /* --- class dbclientpaired --- */
+
+ string DBClientPaired::toString() {
+ stringstream ss;
+ ss << "state: " << master << '\n';
+ ss << "left: " << left.toStringLong() << '\n';
+ ss << "right: " << right.toStringLong() << '\n';
+ return ss.str();
+ }
+
+ DBClientPaired::DBClientPaired() :
+ left(true), right(true)
+ {
+ master = NotSetL;
+ }
+
+ /* find which server, the left or right, is currently master mode */
+ void DBClientPaired::_checkMaster() {
+ for ( int retry = 0; retry < 2; retry++ ) {
+ int x = master;
+ for ( int pass = 0; pass < 2; pass++ ) {
+ DBClientConnection& c = x == 0 ? left : right;
+ try {
+ bool im;
+ BSONObj o;
+ c.isMaster(im, &o);
+ if ( retry )
+ log() << "checkmaster: " << c.toString() << ' ' << o.toString() << '\n';
+ if ( im ) {
+ master = (State) (x + 2);
+ return;
+ }
+ }
+ catch (AssertionException&) {
+ if ( retry )
+ log() << "checkmaster: caught exception " << c.toString() << '\n';
+ }
+ x = x^1;
}
- x = x^1;
+ sleepsecs(1);
}
- sleepsecs(1);
+
+ uassert("checkmaster: no master found", false);
}
- uassert("checkmaster: no master found", false);
-}
+ inline DBClientConnection& DBClientPaired::checkMaster() {
+ if ( master > NotSetR ) {
+ // a master is selected. let's just make sure connection didn't die
+ DBClientConnection& c = master == Left ? left : right;
+ if ( !c.isFailed() )
+ return c;
+ // after a failure, on the next checkMaster, start with the other
+ // server -- presumably it took over. (not critical which we check first,
+ // just will make the failover slightly faster if we guess right)
+ master = master == Left ? NotSetR : NotSetL;
+ }
-inline DBClientConnection& DBClientPaired::checkMaster() {
- if ( master > NotSetR ) {
- // a master is selected. let's just make sure connection didn't die
- DBClientConnection& c = master == Left ? left : right;
- if ( !c.isFailed() )
- return c;
- // after a failure, on the next checkMaster, start with the other
- // server -- presumably it took over. (not critical which we check first,
- // just will make the failover slightly faster if we guess right)
- master = master == Left ? NotSetR : NotSetL;
- }
-
- _checkMaster();
- assert( master > NotSetR );
- return master == Left ? left : right;
-}
-
-bool DBClientPaired::connect(const char *serverHostname1, const char *serverHostname2) {
- string errmsg;
- bool l = left.connect(serverHostname1, errmsg);
- bool r = right.connect(serverHostname2, errmsg);
- master = l ? NotSetL : NotSetR;
- if ( !l && !r ) // it would be ok to fall through, but checkMaster will then try an immediate reconnect which is slow
- return false;
- try {
- checkMaster();
+ _checkMaster();
+ assert( master > NotSetR );
+ return master == Left ? left : right;
}
- catch (UserAssertionException&) {
- return false;
+
+ bool DBClientPaired::connect(const char *serverHostname1, const char *serverHostname2) {
+ string errmsg;
+ bool l = left.connect(serverHostname1, errmsg);
+ bool r = right.connect(serverHostname2, errmsg);
+ master = l ? NotSetL : NotSetR;
+ if ( !l && !r ) // it would be ok to fall through, but checkMaster will then try an immediate reconnect which is slow
+ return false;
+ try {
+ checkMaster();
+ }
+ catch (UserAssertionException&) {
+ return false;
+ }
+ return true;
}
- return true;
-}
-auto_ptr<DBClientCursor> DBClientPaired::query(const char *a, BSONObj b, int c, int d,
- BSONObj *e, int f)
-{
- return checkMaster().query(a,b,c,d,e,f);
-}
+ auto_ptr<DBClientCursor> DBClientPaired::query(const char *a, BSONObj b, int c, int d,
+ BSONObj *e, int f)
+ {
+ return checkMaster().query(a,b,c,d,e,f);
+ }
-BSONObj DBClientPaired::findOne(const char *a, BSONObj b, BSONObj *c, int d) {
- return checkMaster().findOne(a,b,c,d);
-}
+ BSONObj DBClientPaired::findOne(const char *a, BSONObj b, BSONObj *c, int d) {
+ return checkMaster().findOne(a,b,c,d);
+ }
diff --git a/client/dbclient.h b/client/dbclient.h
index 6135a595bc8..dd3b4ac5716 100644
--- a/client/dbclient.h
+++ b/client/dbclient.h
@@ -24,424 +24,424 @@
namespace mongo {
-/* the query field 'options' can have these bits set: */
-enum QueryOptions {
- /* Tailable means cursor is not closed when the last data is retrieved. rather, the cursor marks
- the final object's position. you can resume using the cursor later, from where it was located,
- if more data were received. Set on dbQuery and dbGetMore.
-
- like any "latent cursor", the cursor may become invalid at some point -- for example if that
- final object it references were deleted. Thus, you should be prepared to requery if you get back
- ResultFlag_CursorNotFound.
- */
- Option_CursorTailable = 2,
-
- /* allow query of replica slave. normally these return an error except for namespace "local".
- */
- Option_SlaveOk = 4,
-
- Option_ALLMASK = 6
-};
+ /* the query field 'options' can have these bits set: */
+ enum QueryOptions {
+ /* Tailable means cursor is not closed when the last data is retrieved. rather, the cursor marks
+ the final object's position. you can resume using the cursor later, from where it was located,
+ if more data were received. Set on dbQuery and dbGetMore.
+
+ like any "latent cursor", the cursor may become invalid at some point -- for example if that
+ final object it references were deleted. Thus, you should be prepared to requery if you get back
+ ResultFlag_CursorNotFound.
+ */
+ Option_CursorTailable = 2,
+
+ /* allow query of replica slave. normally these return an error except for namespace "local".
+ */
+ Option_SlaveOk = 4,
+
+ Option_ALLMASK = 6
+ };
-class BSONObj;
+ class BSONObj;
-/* db response format
+ /* db response format
- Query or GetMore: // see struct QueryResult
- int resultFlags;
- int64 cursorID;
- int startingFrom;
- int nReturned;
- list of marshalled JSObjects;
-*/
+ Query or GetMore: // see struct QueryResult
+ int resultFlags;
+ int64 cursorID;
+ int startingFrom;
+ int nReturned;
+ list of marshalled JSObjects;
+ */
#pragma pack(push,1)
-struct QueryResult : public MsgData {
- enum {
- ResultFlag_CursorNotFound = 1, /* returned, with zero results, when getMore is called but the cursor id is not valid at the server. */
- ResultFlag_ErrSet = 2 /* { $err : ... } is being returned */
+ struct QueryResult : public MsgData {
+ enum {
+ ResultFlag_CursorNotFound = 1, /* returned, with zero results, when getMore is called but the cursor id is not valid at the server. */
+ ResultFlag_ErrSet = 2 /* { $err : ... } is being returned */
+ };
+
+ long long cursorId;
+ int startingFrom;
+ int nReturned;
+ const char *data() {
+ return (char *) (((int *)&nReturned)+1);
+ }
+ int& resultFlags() {
+ return dataAsInt();
+ }
};
-
- long long cursorId;
- int startingFrom;
- int nReturned;
- const char *data() {
- return (char *) (((int *)&nReturned)+1);
- }
- int& resultFlags() {
- return dataAsInt();
- }
-};
#pragma pack(pop)
-class DBConnector {
-public:
- virtual bool call( Message &toSend, Message &response, bool assertOk=true ) = 0;
- virtual void say( Message &toSend ) = 0;
- virtual void sayPiggyBack( Message &toSend ) = 0;
- virtual void checkResponse( const char *data, int nReturned ) {}
-};
-
-class DBClientCursor : boost::noncopyable {
-public:
- bool more(); // if true, safe to call next()
-
- /* returns next object in the result cursor.
- on an error at the remote server, you will get back:
- { $err: <string> }
- if you do not want to handle that yourself, call nextSafe().
- */
- BSONObj next();
-
- BSONObj nextSafe() {
- BSONObj o = next();
- BSONElement e = o.firstElement();
- assert( strcmp(e.fieldName(), "$err") != 0 );
- return o;
- }
-
- /* cursor no longer valid -- use with tailable cursors.
- note you should only rely on this once more() returns false;
- 'dead' may be preset yet some data still queued and locally
- available from the dbclientcursor.
- */
- bool isDead() const {
- return cursorId == 0;
- }
-
- bool tailable() const {
- return (opts & Option_CursorTailable) != 0;
- }
-
- bool init();
-
- DBClientCursor( DBConnector *_connector, const char * _ns, BSONObj _query, int _nToReturn,
- int _nToSkip, BSONObj *_fieldsToReturn, int queryOptions ) :
- connector(_connector),
- ns(_ns),
- query(_query),
- nToReturn(_nToReturn),
- nToSkip(_nToSkip),
- fieldsToReturn(_fieldsToReturn),
- opts(queryOptions),
- m(new Message()) {
- cursorId = 0;
- }
-
- virtual ~DBClientCursor();
-
-private:
- DBConnector *connector;
- string ns;
- BSONObj query;
- int nToReturn;
- int nToSkip;
- BSONObj *fieldsToReturn;
- int opts;
- auto_ptr<Message> m;
-
- long long cursorId;
- int nReturned;
- int pos;
- const char *data;
- void dataReceived();
- void requestMore();
-};
-
-class DBClientInterface : boost::noncopyable {
-public:
- virtual
- auto_ptr<DBClientCursor> query(const char *ns, BSONObj query, int nToReturn = 0, int nToSkip = 0,
- BSONObj *fieldsToReturn = 0, int queryOptions = 0) = 0;
-
- virtual
- BSONObj findOne(const char *ns, BSONObj query, BSONObj *fieldsToReturn = 0, int queryOptions = 0) = 0;
-
- virtual void insert( const char * ns , BSONObj obj ) = 0;
-};
-
-/* db "commands"
- basically just invocations of connection.$cmd.findOne({...});
-*/
-class DBClientWithCommands : public DBClientInterface {
- bool isOk(const BSONObj&);
- bool simpleCommand(const char *dbname, BSONObj *info, const char *command);
-public:
- /* Run a database command. Database commands are represented as BSON objects. Common database
- commands have prebuilt helper functions -- see below. If a helper is not available you can
- directly call runCommand.
-
- dbname - database name. Use "admin" for global administrative commands.
- cmd - the command object to execute. For example, { ismaster : 1 }
- info - the result object the database returns. Typically has { ok : ..., errmsg : ... } fields
- set.
-
- returns: true if the command returned "ok".
- */
- bool runCommand(const char *dbname, BSONObj cmd, BSONObj &info);
-
- /* returns true in isMaster parm if this db is the current master
- of a replica pair.
-
- pass in info for more details e.g.:
- { "ismaster" : 1.0 , "msg" : "not paired" , "ok" : 1.0 }
-
- returns true if command invoked successfully.
- */
- virtual bool isMaster(bool& isMaster, BSONObj *info=0);
-
- /*
- Create a new collection in the database. Normally, collection creation is automatic. You would
- use this function if you wish to specify special options on creation.
+ class DBConnector {
+ public:
+ virtual bool call( Message &toSend, Message &response, bool assertOk=true ) = 0;
+ virtual void say( Message &toSend ) = 0;
+ virtual void sayPiggyBack( Message &toSend ) = 0;
+ virtual void checkResponse( const char *data, int nReturned ) {}
+ };
- If the collection already exists, no action occurs.
+ class DBClientCursor : boost::noncopyable {
+ public:
+ bool more(); // if true, safe to call next()
+
+ /* returns next object in the result cursor.
+ on an error at the remote server, you will get back:
+ { $err: <string> }
+ if you do not want to handle that yourself, call nextSafe().
+ */
+ BSONObj next();
+
+ BSONObj nextSafe() {
+ BSONObj o = next();
+ BSONElement e = o.firstElement();
+ assert( strcmp(e.fieldName(), "$err") != 0 );
+ return o;
+ }
+
+ /* cursor no longer valid -- use with tailable cursors.
+ note you should only rely on this once more() returns false;
+ 'dead' may be preset yet some data still queued and locally
+ available from the dbclientcursor.
+ */
+ bool isDead() const {
+ return cursorId == 0;
+ }
+
+ bool tailable() const {
+ return (opts & Option_CursorTailable) != 0;
+ }
+
+ bool init();
+
+ DBClientCursor( DBConnector *_connector, const char * _ns, BSONObj _query, int _nToReturn,
+ int _nToSkip, BSONObj *_fieldsToReturn, int queryOptions ) :
+ connector(_connector),
+ ns(_ns),
+ query(_query),
+ nToReturn(_nToReturn),
+ nToSkip(_nToSkip),
+ fieldsToReturn(_fieldsToReturn),
+ opts(queryOptions),
+ m(new Message()) {
+ cursorId = 0;
+ }
+
+ virtual ~DBClientCursor();
+
+ private:
+ DBConnector *connector;
+ string ns;
+ BSONObj query;
+ int nToReturn;
+ int nToSkip;
+ BSONObj *fieldsToReturn;
+ int opts;
+ auto_ptr<Message> m;
+
+ long long cursorId;
+ int nReturned;
+ int pos;
+ const char *data;
+ void dataReceived();
+ void requestMore();
+ };
- ns: fully qualified collection name
- size: desired initial extent size for the collection.
- Must be <= 1000000000 for normal collections.
- For fixed size (capped) collections, this size is the total/max size of the
- collection.
- capped: if true, this is a fixed size collection (where old data rolls out).
- max: maximum number of objects if capped (optional).
+ class DBClientInterface : boost::noncopyable {
+ public:
+ virtual
+ auto_ptr<DBClientCursor> query(const char *ns, BSONObj query, int nToReturn = 0, int nToSkip = 0,
+ BSONObj *fieldsToReturn = 0, int queryOptions = 0) = 0;
- returns true if successful.
- */
- bool createCollection(const char *ns, unsigned size = 0, bool capped = false, int max = 0, BSONObj *info = 0);
+ virtual
+ BSONObj findOne(const char *ns, BSONObj query, BSONObj *fieldsToReturn = 0, int queryOptions = 0) = 0;
- /* Erase / drop an entire database */
- bool dropDatabase(const char *dbname, BSONObj *info = 0) {
- return simpleCommand(dbname, info, "dropDatabase");
- }
+ virtual void insert( const char * ns , BSONObj obj ) = 0;
+ };
- /* Perform a repair and compaction of the specified database. May take a long time to run. Disk space
- must be available equal to the size of the database while repairing.
+ /* db "commands"
+ basically just invocations of connection.$cmd.findOne({...});
*/
- bool repairDatabase(const char *dbname, BSONObj *info = 0) {
- return simpleCommand(dbname, info, "repairDatabase");
- }
-
- /* Copy database from one server or name to another server or name.
-
- Generally, you should dropDatabase() first as otherwise the copied information will MERGE
- into whatever data is already present in this database.
-
- For security reasons this function only works when you are authorized to access the "admin" db. However,
- if you have access to said db, you can copy any database from one place to another.
- TODO: this needs enhancement to be more flexible in terms of security.
-
- This method provides a way to "rename" a database by copying it to a new db name and
- location. The copy is "repaired" and compacted.
+ class DBClientWithCommands : public DBClientInterface {
+ bool isOk(const BSONObj&);
+ bool simpleCommand(const char *dbname, BSONObj *info, const char *command);
+ public:
+ /* Run a database command. Database commands are represented as BSON objects. Common database
+ commands have prebuilt helper functions -- see below. If a helper is not available you can
+ directly call runCommand.
+
+ dbname - database name. Use "admin" for global administrative commands.
+ cmd - the command object to execute. For example, { ismaster : 1 }
+ info - the result object the database returns. Typically has { ok : ..., errmsg : ... } fields
+ set.
+
+ returns: true if the command returned "ok".
+ */
+ bool runCommand(const char *dbname, BSONObj cmd, BSONObj &info);
+
+ /* returns true in isMaster parm if this db is the current master
+ of a replica pair.
+
+ pass in info for more details e.g.:
+ { "ismaster" : 1.0 , "msg" : "not paired" , "ok" : 1.0 }
+
+ returns true if command invoked successfully.
+ */
+ virtual bool isMaster(bool& isMaster, BSONObj *info=0);
+
+ /*
+ Create a new collection in the database. Normally, collection creation is automatic. You would
+ use this function if you wish to specify special options on creation.
+
+ If the collection already exists, no action occurs.
+
+ ns: fully qualified collection name
+ size: desired initial extent size for the collection.
+ Must be <= 1000000000 for normal collections.
+ For fixed size (capped) collections, this size is the total/max size of the
+ collection.
+ capped: if true, this is a fixed size collection (where old data rolls out).
+ max: maximum number of objects if capped (optional).
+
+ returns true if successful.
+ */
+ bool createCollection(const char *ns, unsigned size = 0, bool capped = false, int max = 0, BSONObj *info = 0);
+
+ /* Erase / drop an entire database */
+ bool dropDatabase(const char *dbname, BSONObj *info = 0) {
+ return simpleCommand(dbname, info, "dropDatabase");
+ }
+
+ /* Perform a repair and compaction of the specified database. May take a long time to run. Disk space
+ must be available equal to the size of the database while repairing.
+ */
+ bool repairDatabase(const char *dbname, BSONObj *info = 0) {
+ return simpleCommand(dbname, info, "repairDatabase");
+ }
+
+ /* Copy database from one server or name to another server or name.
+
+ Generally, you should dropDatabase() first as otherwise the copied information will MERGE
+ into whatever data is already present in this database.
+
+ For security reasons this function only works when you are authorized to access the "admin" db. However,
+ if you have access to said db, you can copy any database from one place to another.
+ TODO: this needs enhancement to be more flexible in terms of security.
+
+ This method provides a way to "rename" a database by copying it to a new db name and
+ location. The copy is "repaired" and compacted.
+
+ fromdb database name from which to copy.
+ todb database name to copy to.
+ fromhost hostname of the database (and optionally, ":port") from which to
+ copy the data. copies from self if "".
+
+ returns true if successful
+ */
+ bool copyDatabase(const char *fromdb, const char *todb, const char *fromhost = "", BSONObj *info = 0);
+
+ /* The Mongo database provides built-in performance profiling capabilities. Use setDbProfilingLevel()
+ to enable. Profiling information is then written to the system.profiling collection, which one can
+ then query.
+ */
+ enum ProfilingLevel {
+ ProfileOff = 0,
+ ProfileSlow = 1, // log very slow (>100ms) operations
+ ProfileAll = 2
+ };
+ bool setDbProfilingLevel(const char *dbname, ProfilingLevel level, BSONObj *info = 0);
+ bool getDbProfilingLevel(const char *dbname, ProfilingLevel& level, BSONObj *info = 0);
+
+ /* Run javascript code on the database server.
+ dbname database context in which the code runs. The javascript variable 'db' will be assigned
+ to this database when the function is invoked.
+ jscode source code for a javascript function.
+ info the command object which contains any information on the invocation result including
+ the return value and other information. If an error occurs running the jscode, error
+ information will be in info. (try "out() << info.toString()")
+ retValue return value from the jscode function.
+ args args to pass to the jscode function. when invoked, the 'args' variable will be defined
+ for use by the jscode.
+
+ returns true if runs ok.
+
+ See testDbEval() in dbclient.cpp for an example of usage.
+ */
+ bool eval(const char *dbname, const char *jscode, BSONObj& info, BSONElement& retValue, BSONObj *args = 0);
+
+ /* The following helpers are simply more convenient forms of eval() for certain common cases */
+
+ /* invocation with no return value of interest -- with or without one simple parameter */
+ bool eval(const char *dbname, const char *jscode);
+ template< class T >
+ bool eval(const char *dbname, const char *jscode, T parm1) {
+ BSONObj info;
+ BSONElement retValue;
+ BSONObjBuilder b;
+ b.append("0", parm1);
+ BSONObj args = b.done();
+ return eval(dbname, jscode, info, retValue, &args);
+ }
+
+ /* invocation with one parm to server and one numeric field (either int or double) returned */
+ template< class T, class NumType >
+ bool eval(const char *dbname, const char *jscode, T parm1, NumType& ret) {
+ BSONObj info;
+ BSONElement retValue;
+ BSONObjBuilder b;
+ b.append("0", parm1);
+ BSONObj args = b.done();
+ if ( !eval(dbname, jscode, info, retValue, &args) )
+ return false;
+ ret = (NumType) retValue.number();
+ return true;
+ }
+
+ virtual string toString() = 0;
+ };
- fromdb database name from which to copy.
- todb database name to copy to.
- fromhost hostname of the database (and optionally, ":port") from which to
- copy the data. copies from self if "".
+ class DBClientBase : public DBClientWithCommands, public DBConnector {
+ public:
+ /* send a query to the database.
+ ns: namespace to query, format is <dbname>.<collectname>[.<collectname>]*
+ query: query to perform on the collection. this is a BSONObj (binary JSON)
+ You may format as
+ { query: { ... }, order: { ... } }
+ to specify a sort order.
+ nToReturn: n to return. 0 = unlimited
+ nToSkip: start with the nth item
+ fieldsToReturn:
+ optional template of which fields to select. if unspecified, returns all fields
+ queryOptions: see options enum at top of this file
+
+ returns: cursor.
+ 0 if error (connection failure)
+ */
+ /*throws AssertionException*/
+ virtual
+ auto_ptr<DBClientCursor> query(const char *ns, BSONObj query, int nToReturn = 0, int nToSkip = 0,
+ BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+
+ /*throws AssertionException*/
+ virtual
+ BSONObj findOne(const char *ns, BSONObj query, BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+
+ virtual void insert( const char * ns , BSONObj obj );
+
+ virtual void remove( const char * ns , BSONObj obj , bool justOne = 0 );
+
+ virtual void update( const char * ns , BSONObj query , BSONObj obj , bool upsert = 0 );
+
+ /**
+ if name isn't specified, it will be created from the keys (recommended)
+ @return whether or not sent message to db
+ should be true on first call, false on subsequent unless resetIndexCache was called
+ */
+ virtual bool ensureIndex( const char * ns , BSONObj keys , const char * name = 0 );
+ virtual void resetIndexCache();
+
+ private:
+ set<string> _seenIndexes;
+ };
- returns true if successful
- */
- bool copyDatabase(const char *fromdb, const char *todb, const char *fromhost = "", BSONObj *info = 0);
+ class DBClientPaired;
+
+ /* A basic connection to the database. */
+ class DBClientConnection : public DBClientBase {
+ DBClientPaired *clientPaired;
+ auto_ptr<MessagingPort> p;
+ auto_ptr<SockAddr> server;
+ bool failed; // true if some sort of fatal error has ever happened
+ bool autoReconnect;
+ time_t lastReconnectTry;
+ string serverAddress; // remember for reconnects
+ void checkConnection();
+ public:
+ string toStringLong() const {
+ stringstream ss;
+ ss << serverAddress;
+ if ( failed ) ss << " failed";
+ return ss.str();
+ }
+ string toString() {
+ return serverAddress;
+ }
+ MessagingPort& port() {
+ return *p.get();
+ }
+ bool isFailed() const {
+ return failed;
+ }
+ DBClientConnection(bool _autoReconnect=false,DBClientPaired* cp=0) :
+ clientPaired(cp), failed(false), autoReconnect(_autoReconnect), lastReconnectTry(0) { }
+
+ virtual auto_ptr<DBClientCursor> query(const char *ns, BSONObj query, int nToReturn = 0, int nToSkip = 0,
+ BSONObj *fieldsToReturn = 0, int queryOptions = 0) {
+ checkConnection();
+ return DBClientBase::query( ns, query, nToReturn, nToSkip, fieldsToReturn, queryOptions );
+ }
+
+ /* Returns false if fails to connect.
+ If autoReconnect is true, you can try to use the DBClientConnection even when
+ false was returned -- it will try to connect again.
+ */
+ virtual
+ bool connect(const char *serverHostname, string& errmsg);
+
+ protected:
+ virtual bool call( Message &toSend, Message &response, bool assertOk = true );
+ virtual void say( Message &toSend );
+ virtual void sayPiggyBack( Message &toSend );
+ virtual void checkResponse( const char *data, int nReturned );
+ };
- /* The Mongo database provides built-in performance profiling capabilities. Uset setDbProfilingLevel()
- to enable. Profiling information is then written to the system.profiling collection, which one can
- then query.
+ /* Use this class to connect to a replica pair of servers. The class will manage
+ checking for which is master, and do failover automatically.
*/
- enum ProfilingLevel {
- ProfileOff = 0,
- ProfileSlow = 1, // log very slow (>100ms) operations
- ProfileAll = 2
+ class DBClientPaired : public DBClientWithCommands {
+ DBClientConnection left,right;
+ enum State {
+ NotSetL=0,
+ NotSetR=1,
+ Left, Right
+ } master;
+
+ void _checkMaster();
+ DBClientConnection& checkMaster();
+
+ public:
+ DBClientPaired();
+
+ /* Returns false if neither member of the pair was reachable, or neither is
+ master, although,
+ when false returned, you can still try to use this connection object, it will
+ try reconnects.
+ */
+ bool connect(const char *serverHostname1, const char *serverHostname2);
+
+ /* throws userassertion "no master found" */
+ virtual
+ auto_ptr<DBClientCursor> query(const char *ns, BSONObj query, int nToReturn = 0, int nToSkip = 0,
+ BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+
+ /* throws userassertion "no master found" */
+ virtual
+ BSONObj findOne(const char *ns, BSONObj query, BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+
+ // Not implemented
+ virtual void insert( const char * ns , BSONObj obj ) {
+ assert( false );
+ }
+
+ string toString();
+
+ /* notification that we got a "not master" error.
+ */
+ void isntMaster() {
+ master = ( ( master == Left ) ? NotSetR : NotSetL );
+ }
};
- bool setDbProfilingLevel(const char *dbname, ProfilingLevel level, BSONObj *info = 0);
- bool getDbProfilingLevel(const char *dbname, ProfilingLevel& level, BSONObj *info = 0);
-
- /* Run javascript code on the database server.
- dbname database context in which the code runs. The javascript variable 'db' will be assigned
- to this database when the function is invoked.
- jscode source code for a javascript function.
- info the command object which contains any information on the invocation result including
- the return value and other information. If an error occurs running the jscode, error
- information will be in info. (try "cout << info.toString()")
- retValue return value from the jscode function.
- args args to pass to the jscode function. when invoked, the 'args' variable will be defined
- for use by the jscode.
-
- returns true if runs ok.
-
- See testDbEval() in dbclient.cpp for an example of usage.
- */
- bool eval(const char *dbname, const char *jscode, BSONObj& info, BSONElement& retValue, BSONObj *args = 0);
-
- /* The following helpers are simply more convenient forms of eval() for certain common cases */
-
- /* invocation with no return value of interest -- with or without one simple parameter */
- bool eval(const char *dbname, const char *jscode);
- template< class T >
- bool eval(const char *dbname, const char *jscode, T parm1) {
- BSONObj info;
- BSONElement retValue;
- BSONObjBuilder b;
- b.append("0", parm1);
- BSONObj args = b.done();
- return eval(dbname, jscode, info, retValue, &args);
- }
-
- /* invocation with one parm to server and one numeric field (either int or double) returned */
- template< class T, class NumType >
- bool eval(const char *dbname, const char *jscode, T parm1, NumType& ret) {
- BSONObj info;
- BSONElement retValue;
- BSONObjBuilder b;
- b.append("0", parm1);
- BSONObj args = b.done();
- if ( !eval(dbname, jscode, info, retValue, &args) )
- return false;
- ret = (NumType) retValue.number();
- return true;
- }
-
- virtual string toString() = 0;
-};
-
-class DBClientBase : public DBClientWithCommands, public DBConnector {
-public:
- /* send a query to the database.
- ns: namespace to query, format is <dbname>.<collectname>[.<collectname>]*
- query: query to perform on the collection. this is a BSONObj (binary JSON)
- You may format as
- { query: { ... }, order: { ... } }
- to specify a sort order.
- nToReturn: n to return. 0 = unlimited
- nToSkip: start with the nth item
- fieldsToReturn:
- optional template of which fields to select. if unspecified, returns all fields
- queryOptions: see options enum at top of this file
-
- returns: cursor.
- 0 if error (connection failure)
- */
- /*throws AssertionException*/
- virtual
- auto_ptr<DBClientCursor> query(const char *ns, BSONObj query, int nToReturn = 0, int nToSkip = 0,
- BSONObj *fieldsToReturn = 0, int queryOptions = 0);
-
- /*throws AssertionException*/
- virtual
- BSONObj findOne(const char *ns, BSONObj query, BSONObj *fieldsToReturn = 0, int queryOptions = 0);
-
- virtual void insert( const char * ns , BSONObj obj );
-
- virtual void remove( const char * ns , BSONObj obj , bool justOne = 0 );
-
- virtual void update( const char * ns , BSONObj query , BSONObj obj , bool upsert = 0 );
-
- /**
- if name isn't specified, it will be created from the keys (reccomended)
- @return whether or not sent message to db
- should be true on first call, false on subsequent unless resetIndexCache was called
- */
- virtual bool ensureIndex( const char * ns , BSONObj keys , const char * name = 0 );
- virtual void resetIndexCache();
-
-private:
- set<string> _seenIndexes;
-};
-
-class DBClientPaired;
-
-/* A basic connection to the database. */
-class DBClientConnection : public DBClientBase {
- DBClientPaired *clientPaired;
- auto_ptr<MessagingPort> p;
- auto_ptr<SockAddr> server;
- bool failed; // true if some sort of fatal error has ever happened
- bool autoReconnect;
- time_t lastReconnectTry;
- string serverAddress; // remember for reconnects
- void checkConnection();
-public:
- string toStringLong() const {
- stringstream ss;
- ss << serverAddress;
- if ( failed ) ss << " failed";
- return ss.str();
- }
- string toString() {
- return serverAddress;
- }
- MessagingPort& port() {
- return *p.get();
- }
- bool isFailed() const {
- return failed;
- }
- DBClientConnection(bool _autoReconnect=false,DBClientPaired* cp=0) :
- clientPaired(cp), failed(false), autoReconnect(_autoReconnect), lastReconnectTry(0) { }
-
- virtual auto_ptr<DBClientCursor> query(const char *ns, BSONObj query, int nToReturn = 0, int nToSkip = 0,
- BSONObj *fieldsToReturn = 0, int queryOptions = 0) {
- checkConnection();
- return DBClientBase::query( ns, query, nToReturn, nToSkip, fieldsToReturn, queryOptions );
- }
-
- /* Returns false if fails to connect.
- If autoReconnect is true, you can try to use the DBClientConnection even when
- false was returned -- it will try to connect again.
- */
- virtual
- bool connect(const char *serverHostname, string& errmsg);
-
-protected:
- virtual bool call( Message &toSend, Message &response, bool assertOk = true );
- virtual void say( Message &toSend );
- virtual void sayPiggyBack( Message &toSend );
- virtual void checkResponse( const char *data, int nReturned );
-};
-
-/* Use this class to connect to a replica pair of servers. The class will manage
- checking for which is master, and do failover automatically.
-*/
-class DBClientPaired : public DBClientWithCommands {
- DBClientConnection left,right;
- enum State {
- NotSetL=0,
- NotSetR=1,
- Left, Right
- } master;
-
- void _checkMaster();
- DBClientConnection& checkMaster();
-
-public:
- DBClientPaired();
-
- /* Returns false is neither member of the pair were reachable, or neither is
- master, although,
- when false returned, you can still try to use this connection object, it will
- try reconnects.
- */
- bool connect(const char *serverHostname1, const char *serverHostname2);
-
- /* throws userassertion "no master found" */
- virtual
- auto_ptr<DBClientCursor> query(const char *ns, BSONObj query, int nToReturn = 0, int nToSkip = 0,
- BSONObj *fieldsToReturn = 0, int queryOptions = 0);
-
- /* throws userassertion "no master found" */
- virtual
- BSONObj findOne(const char *ns, BSONObj query, BSONObj *fieldsToReturn = 0, int queryOptions = 0);
-
- // Not implemented
- virtual void insert( const char * ns , BSONObj obj ) {
- assert( false );
- }
-
- string toString();
-
- /* notification that we got a "not master" error.
- */
- void isntMaster() {
- master = ( ( master == Left ) ? NotSetR : NotSetL );
- }
-};
diff --git a/client/model.cpp b/client/model.cpp
index 19860d8b963..dd7049e7fa3 100644
--- a/client/model.cpp
+++ b/client/model.cpp
@@ -21,13 +21,13 @@
namespace mongo {
-bool Model::load(BSONObj& query) {
- BSONObj b = conn()->findOne(getNS(), query);
- if ( b.isEmpty() )
- return false;
+ bool Model::load(BSONObj& query) {
+ BSONObj b = conn()->findOne(getNS(), query);
+ if ( b.isEmpty() )
+ return false;
- unserialize(b);
- return true;
-}
+ unserialize(b);
+ return true;
+ }
} // namespace mongo
diff --git a/client/model.h b/client/model.h
index 88723dce7ac..f3bd69a5bb7 100644
--- a/client/model.h
+++ b/client/model.h
@@ -22,37 +22,37 @@
namespace mongo {
-/* Model is a base class for defining objects which are serializable to the Mongo
- database via the database driver.
-
- *Definition*
- Your serializable class should inherit from Model and implement the abstract methods
- below.
-
- *Loading*
- To load, first construct an (empty) objet. Then call load(). Do not load an object
- more than once.
-*/
-
-class Model {
-public:
- Model() { }
- virtual ~Model() { }
-
- virtual const char * getNS() = 0;
- virtual void serialize(BSONObjBuilder& to) = 0;
- virtual void unserialize(BSONObj& from) = 0;
-
- /* Define this as you see fit if you are using the default conn() implementation. */
- static DBClientWithCommands *globalConn;
-
- /* Override this if you need to do fancier connection management than simply using globalConn. */
- virtual DBClientWithCommands* conn() {
- return globalConn;
- }
-
- /* true if successful */
- bool load(BSONObj& query);
-};
+ /* Model is a base class for defining objects which are serializable to the Mongo
+ database via the database driver.
+
+ *Definition*
+ Your serializable class should inherit from Model and implement the abstract methods
+ below.
+
+ *Loading*
+ To load, first construct an (empty) objet. Then call load(). Do not load an object
+ more than once.
+ */
+
+ class Model {
+ public:
+ Model() { }
+ virtual ~Model() { }
+
+ virtual const char * getNS() = 0;
+ virtual void serialize(BSONObjBuilder& to) = 0;
+ virtual void unserialize(BSONObj& from) = 0;
+
+ /* Define this as you see fit if you are using the default conn() implementation. */
+ static DBClientWithCommands *globalConn;
+
+ /* Override this if you need to do fancier connection management than simply using globalConn. */
+ virtual DBClientWithCommands* conn() {
+ return globalConn;
+ }
+
+ /* true if successful */
+ bool load(BSONObj& query);
+ };
} // namespace mongo
diff --git a/db/btree.cpp b/db/btree.cpp
index 3a68f6ec88a..14f0addad18 100644
--- a/db/btree.cpp
+++ b/db/btree.cpp
@@ -22,792 +22,792 @@
namespace mongo {
-/* it is easy to do custom sizes for a namespace - all the same for now */
-const int BucketSize = 8192;
-const int KeyMax = BucketSize / 10;
-
-int ninserts = 0;
-extern int otherTraceLevel;
-int split_debug = 0;
-int insert_debug = 0;
-
-KeyNode::KeyNode(BucketBasics& bb, _KeyNode &k) :
- prevChildBucket(k.prevChildBucket),
- recordLoc(k.recordLoc), key(bb.data+k.keyDataOfs())
-{ }
-
-/* BucketBasics --------------------------------------------------- */
-
-int BucketBasics::Size() const {
- assert( _Size == BucketSize );
- return _Size;
-}
-inline void BucketBasics::setNotPacked() {
- flags &= ~Packed;
-}
-inline void BucketBasics::setPacked() {
- flags |= Packed;
-}
-
-void BucketBasics::_shape(int level, stringstream& ss) {
- for ( int i = 0; i < level; i++ ) ss << ' ';
- ss << "*\n";
- for ( int i = 0; i < n; i++ )
- if ( !k(i).prevChildBucket.isNull() )
- k(i).prevChildBucket.btree()->_shape(level+1,ss);
- if ( !nextChild.isNull() )
- nextChild.btree()->_shape(level+1,ss);
-}
-
-int bt_fv=0;
-int bt_dmp=0;
-
-void BucketBasics::dumpTree(DiskLoc thisLoc, const BSONObj &order) {
- bt_dmp=1;
- fullValidate(thisLoc, order);
- bt_dmp=0;
-}
-
-int BucketBasics::fullValidate(const DiskLoc& thisLoc, const BSONObj &order) {
- assertValid(order, true);
+ /* it is easy to do custom sizes for a namespace - all the same for now */
+ const int BucketSize = 8192;
+ const int KeyMax = BucketSize / 10;
+
+ int ninserts = 0;
+ extern int otherTraceLevel;
+ int split_debug = 0;
+ int insert_debug = 0;
+
+ KeyNode::KeyNode(BucketBasics& bb, _KeyNode &k) :
+ prevChildBucket(k.prevChildBucket),
+ recordLoc(k.recordLoc), key(bb.data+k.keyDataOfs())
+ { }
+
+ /* BucketBasics --------------------------------------------------- */
+
+ int BucketBasics::Size() const {
+ assert( _Size == BucketSize );
+ return _Size;
+ }
+ inline void BucketBasics::setNotPacked() {
+ flags &= ~Packed;
+ }
+ inline void BucketBasics::setPacked() {
+ flags |= Packed;
+ }
+
+ void BucketBasics::_shape(int level, stringstream& ss) {
+ for ( int i = 0; i < level; i++ ) ss << ' ';
+ ss << "*\n";
+ for ( int i = 0; i < n; i++ )
+ if ( !k(i).prevChildBucket.isNull() )
+ k(i).prevChildBucket.btree()->_shape(level+1,ss);
+ if ( !nextChild.isNull() )
+ nextChild.btree()->_shape(level+1,ss);
+ }
+
+ int bt_fv=0;
+ int bt_dmp=0;
+
+ void BucketBasics::dumpTree(DiskLoc thisLoc, const BSONObj &order) {
+ bt_dmp=1;
+ fullValidate(thisLoc, order);
+ bt_dmp=0;
+ }
+
+ int BucketBasics::fullValidate(const DiskLoc& thisLoc, const BSONObj &order) {
+ assertValid(order, true);
// if( bt_fv==0 )
// return;
- if ( bt_dmp ) {
- cout << thisLoc.toString() << ' ';
- ((BtreeBucket *) this)->dump();
- }
+ if ( bt_dmp ) {
+ out() << thisLoc.toString() << ' ';
+ ((BtreeBucket *) this)->dump();
+ }
- // keycount
- int kc = 0;
+ // keycount
+ int kc = 0;
- for ( int i = 0; i < n; i++ ) {
- _KeyNode& kn = k(i);
+ for ( int i = 0; i < n; i++ ) {
+ _KeyNode& kn = k(i);
- if ( kn.isUsed() ) kc++;
- if ( !kn.prevChildBucket.isNull() ) {
- DiskLoc left = kn.prevChildBucket;
- BtreeBucket *b = left.btree();
+ if ( kn.isUsed() ) kc++;
+ if ( !kn.prevChildBucket.isNull() ) {
+ DiskLoc left = kn.prevChildBucket;
+ BtreeBucket *b = left.btree();
+ wassert( b->parent == thisLoc );
+ kc += b->fullValidate(kn.prevChildBucket, order);
+ }
+ }
+ if ( !nextChild.isNull() ) {
+ BtreeBucket *b = nextChild.btree();
wassert( b->parent == thisLoc );
- kc += b->fullValidate(kn.prevChildBucket, order);
- }
- }
- if ( !nextChild.isNull() ) {
- BtreeBucket *b = nextChild.btree();
- wassert( b->parent == thisLoc );
- kc += b->fullValidate(nextChild, order);
- }
-
- return kc;
-}
-
-int nDumped = 0;
-
-void BucketBasics::assertValid(const BSONObj &order, bool force) {
- if ( !debug && !force )
- return;
- wassert( n >= 0 && n < Size() );
- wassert( emptySize >= 0 && emptySize < BucketSize );
- wassert( topSize >= n && topSize <= BucketSize );
- DEV {
- // slow:
- for ( int i = 0; i < n-1; i++ ) {
- BSONObj k1 = keyNode(i).key;
- BSONObj k2 = keyNode(i+1).key;
- int z = k1.woCompare(k2, order); //OK
- if ( z > 0 ) {
- cout << "ERROR: btree key order corrupt. Keys:" << endl;
- if ( ++nDumped < 5 ) {
- for ( int j = 0; j < n; j++ ) {
- cout << " " << keyNode(j).key.toString() << endl;
+ kc += b->fullValidate(nextChild, order);
+ }
+
+ return kc;
+ }
+
+ int nDumped = 0;
+
+ void BucketBasics::assertValid(const BSONObj &order, bool force) {
+ if ( !debug && !force )
+ return;
+ wassert( n >= 0 && n < Size() );
+ wassert( emptySize >= 0 && emptySize < BucketSize );
+ wassert( topSize >= n && topSize <= BucketSize );
+ DEV {
+ // slow:
+ for ( int i = 0; i < n-1; i++ ) {
+ BSONObj k1 = keyNode(i).key;
+ BSONObj k2 = keyNode(i+1).key;
+ int z = k1.woCompare(k2, order); //OK
+ if ( z > 0 ) {
+ out() << "ERROR: btree key order corrupt. Keys:" << endl;
+ if ( ++nDumped < 5 ) {
+ for ( int j = 0; j < n; j++ ) {
+ out() << " " << keyNode(j).key.toString() << endl;
+ }
+ ((BtreeBucket *) this)->dump();
}
- ((BtreeBucket *) this)->dump();
+ wassert(false);
+ break;
}
- wassert(false);
- break;
- }
- else if ( z == 0 ) {
- if ( !(k(i).recordLoc < k(i+1).recordLoc) ) {
- cout << "ERROR: btree key order corrupt (recordloc's wrong). Keys:" << endl;
- cout << " k(" << i << "):" << keyNode(i).key.toString() << " RL:" << k(i).recordLoc.toString() << endl;
- cout << " k(" << i+1 << "):" << keyNode(i+1).key.toString() << " RL:" << k(i+1).recordLoc.toString() << endl;
- wassert( k(i).recordLoc < k(i+1).recordLoc );
+ else if ( z == 0 ) {
+ if ( !(k(i).recordLoc < k(i+1).recordLoc) ) {
+ out() << "ERROR: btree key order corrupt (recordloc's wrong). Keys:" << endl;
+ out() << " k(" << i << "):" << keyNode(i).key.toString() << " RL:" << k(i).recordLoc.toString() << endl;
+ out() << " k(" << i+1 << "):" << keyNode(i+1).key.toString() << " RL:" << k(i+1).recordLoc.toString() << endl;
+ wassert( k(i).recordLoc < k(i+1).recordLoc );
+ }
}
}
}
- }
- else {
- //faster:
- if ( n > 1 ) {
- BSONObj k1 = keyNode(0).key;
- BSONObj k2 = keyNode(n-1).key;
- int z = k1.woCompare(k2, order);
- //wassert( z <= 0 );
- if ( z > 0 ) {
- problem() << "btree keys out of order" << '\n';
- ONCE {
- ((BtreeBucket *) this)->dump();
+ else {
+ //faster:
+ if ( n > 1 ) {
+ BSONObj k1 = keyNode(0).key;
+ BSONObj k2 = keyNode(n-1).key;
+ int z = k1.woCompare(k2, order);
+ //wassert( z <= 0 );
+ if ( z > 0 ) {
+ problem() << "btree keys out of order" << '\n';
+ ONCE {
+ ((BtreeBucket *) this)->dump();
+ }
+ assert(false);
}
- assert(false);
}
}
}
-}
-inline void BucketBasics::markUnused(int keypos) {
- assert( keypos >= 0 && keypos < n );
- k(keypos).setUnused();
-}
+ inline void BucketBasics::markUnused(int keypos) {
+ assert( keypos >= 0 && keypos < n );
+ k(keypos).setUnused();
+ }
+
+ inline int BucketBasics::totalDataSize() const {
+ return Size() - (data-(char*)this);
+ }
+
+ void BucketBasics::init() {
+ parent.Null();
+ nextChild.Null();
+ _Size = BucketSize;
+ flags = Packed;
+ n = 0;
+ emptySize = totalDataSize();
+ topSize = 0;
+ reserved = 0;
+ }
+
+ /* we allocate space from the end of the buffer for data.
+ the keynodes grow from the front.
+ */
+ inline int BucketBasics::_alloc(int bytes) {
+ topSize += bytes;
+ emptySize -= bytes;
+ int ofs = totalDataSize() - topSize;
+ assert( ofs > 0 );
+ return ofs;
+ }
+
+ void BucketBasics::_delKeyAtPos(int keypos) {
+ assert( keypos >= 0 && keypos <= n );
+ assert( childForPos(keypos).isNull() );
+ n--;
+ assert( n > 0 || nextChild.isNull() );
+ for ( int j = keypos; j < n; j++ )
+ k(j) = k(j+1);
+ emptySize += sizeof(_KeyNode);
+ setNotPacked();
+ }
+
+ /* add a key. must be > all existing. be careful to set next ptr right. */
+ void BucketBasics::pushBack(const DiskLoc& recordLoc, BSONObj& key, const BSONObj &order, DiskLoc prevChild) {
+ int bytesNeeded = key.objsize() + sizeof(_KeyNode);
+ assert( bytesNeeded <= emptySize );
+ assert( n == 0 || keyNode(n-1).key.woCompare(key, order) <= 0 );
+ emptySize -= sizeof(_KeyNode);
+ _KeyNode& kn = k(n++);
+ kn.prevChildBucket = prevChild;
+ kn.recordLoc = recordLoc;
+ kn.setKeyDataOfs( (short) _alloc(key.objsize()) );
+ char *p = dataAt(kn.keyDataOfs());
+ memcpy(p, key.objdata(), key.objsize());
+ }
+
+ bool BucketBasics::basicInsert(int keypos, const DiskLoc& recordLoc, BSONObj& key, const BSONObj &order) {
+ assert( keypos >= 0 && keypos <= n );
+ int bytesNeeded = key.objsize() + sizeof(_KeyNode);
+ if ( bytesNeeded > emptySize ) {
+ pack( order );
+ if ( bytesNeeded > emptySize )
+ return false;
+ }
+ for ( int j = n; j > keypos; j-- ) // make room
+ k(j) = k(j-1);
+ n++;
+ emptySize -= sizeof(_KeyNode);
+ _KeyNode& kn = k(keypos);
+ kn.prevChildBucket.Null();
+ kn.recordLoc = recordLoc;
+ kn.setKeyDataOfs((short) _alloc(key.objsize()) );
+ char *p = dataAt(kn.keyDataOfs());
+ memcpy(p, key.objdata(), key.objsize());
+ return true;
+ }
-inline int BucketBasics::totalDataSize() const {
- return Size() - (data-(char*)this);
-}
+ /* when we delete things we just leave empty space until the node is
+ full and then we repack it.
+ */
+ void BucketBasics::pack( const BSONObj &order ) {
+ if ( flags & Packed )
+ return;
-void BucketBasics::init() {
- parent.Null();
- nextChild.Null();
- _Size = BucketSize;
- flags = Packed;
- n = 0;
- emptySize = totalDataSize();
- topSize = 0;
- reserved = 0;
-}
+ int tdz = totalDataSize();
+ char temp[BucketSize];
+ int ofs = tdz;
+ topSize = 0;
+ for ( int j = 0; j < n; j++ ) {
+ short ofsold = k(j).keyDataOfs();
+ int sz = keyNode(j).key.objsize();
+ ofs -= sz;
+ topSize += sz;
+ memcpy(temp+ofs, dataAt(ofsold), sz);
+ k(j).setKeyDataOfsSavingUse( ofs );
+ }
+ int dataUsed = tdz - ofs;
+ memcpy(data + ofs, temp + ofs, dataUsed);
+ emptySize = tdz - dataUsed - n * sizeof(_KeyNode);
+ assert( emptySize >= 0 );
-/* we allocate space from the end of the buffer for data.
- the keynodes grow from the front.
-*/
-inline int BucketBasics::_alloc(int bytes) {
- topSize += bytes;
- emptySize -= bytes;
- int ofs = totalDataSize() - topSize;
- assert( ofs > 0 );
- return ofs;
-}
-
-void BucketBasics::_delKeyAtPos(int keypos) {
- assert( keypos >= 0 && keypos <= n );
- assert( childForPos(keypos).isNull() );
- n--;
- assert( n > 0 || nextChild.isNull() );
- for ( int j = keypos; j < n; j++ )
- k(j) = k(j+1);
- emptySize += sizeof(_KeyNode);
- setNotPacked();
-}
-
-/* add a key. must be > all existing. be careful to set next ptr right. */
-void BucketBasics::pushBack(const DiskLoc& recordLoc, BSONObj& key, const BSONObj &order, DiskLoc prevChild) {
- int bytesNeeded = key.objsize() + sizeof(_KeyNode);
- assert( bytesNeeded <= emptySize );
- assert( n == 0 || keyNode(n-1).key.woCompare(key, order) <= 0 );
- emptySize -= sizeof(_KeyNode);
- _KeyNode& kn = k(n++);
- kn.prevChildBucket = prevChild;
- kn.recordLoc = recordLoc;
- kn.setKeyDataOfs( (short) _alloc(key.objsize()) );
- char *p = dataAt(kn.keyDataOfs());
- memcpy(p, key.objdata(), key.objsize());
-}
-
-bool BucketBasics::basicInsert(int keypos, const DiskLoc& recordLoc, BSONObj& key, const BSONObj &order) {
- assert( keypos >= 0 && keypos <= n );
- int bytesNeeded = key.objsize() + sizeof(_KeyNode);
- if ( bytesNeeded > emptySize ) {
+ setPacked();
+ assertValid( order );
+ }
+
+ inline void BucketBasics::truncateTo(int N, const BSONObj &order) {
+ n = N;
+ setNotPacked();
pack( order );
- if ( bytesNeeded > emptySize )
- return false;
}
- for ( int j = n; j > keypos; j-- ) // make room
- k(j) = k(j-1);
- n++;
- emptySize -= sizeof(_KeyNode);
- _KeyNode& kn = k(keypos);
- kn.prevChildBucket.Null();
- kn.recordLoc = recordLoc;
- kn.setKeyDataOfs((short) _alloc(key.objsize()) );
- char *p = dataAt(kn.keyDataOfs());
- memcpy(p, key.objdata(), key.objsize());
- return true;
-}
-
-/* when we delete things we just leave empty space until the node is
- full and then we repack it.
-*/
-void BucketBasics::pack( const BSONObj &order ) {
- if ( flags & Packed )
- return;
-
- int tdz = totalDataSize();
- char temp[BucketSize];
- int ofs = tdz;
- topSize = 0;
- for ( int j = 0; j < n; j++ ) {
- short ofsold = k(j).keyDataOfs();
- int sz = keyNode(j).key.objsize();
- ofs -= sz;
- topSize += sz;
- memcpy(temp+ofs, dataAt(ofsold), sz);
- k(j).setKeyDataOfsSavingUse( ofs );
- }
- int dataUsed = tdz - ofs;
- memcpy(data + ofs, temp + ofs, dataUsed);
- emptySize = tdz - dataUsed - n * sizeof(_KeyNode);
- assert( emptySize >= 0 );
-
- setPacked();
- assertValid( order );
-}
-
-inline void BucketBasics::truncateTo(int N, const BSONObj &order) {
- n = N;
- setNotPacked();
- pack( order );
-}
-
-/* - BtreeBucket --------------------------------------------------- */
-
-/* return largest key in the subtree. */
-void BtreeBucket::findLargestKey(const DiskLoc& thisLoc, DiskLoc& largestLoc, int& largestKey) {
- DiskLoc loc = thisLoc;
- while ( 1 ) {
- BtreeBucket *b = loc.btree();
- if ( !b->nextChild.isNull() ) {
- loc = b->nextChild;
- continue;
- }
-
- assert(b->n>0);
- largestLoc = loc;
- largestKey = b->n-1;
-
- break;
- }
-}
-
-/* pos: for existing keys k0...kn-1.
- returns # it goes BEFORE. so key[pos-1] < key < key[pos]
- returns n if it goes after the last existing key.
- note result might be Unused!
-*/
-bool BtreeBucket::find(BSONObj& key, DiskLoc recordLoc, const BSONObj &order, int& pos) {
- /* binary search for this key */
- int l=0;
- int h=n-1;
- while ( l <= h ) {
- int m = (l+h)/2;
- KeyNode M = keyNode(m);
- int x = key.woCompare(M.key, order);
- if ( x == 0 )
- x = recordLoc.compare(M.recordLoc);
- if ( x < 0 ) // key < M.key
- h = m-1;
- else if ( x > 0 )
- l = m+1;
- else {
- // found it. however, if dup keys are here, be careful we might have
- // found one in the middle. we want find() to return the leftmost instance.
- /*
- while( m >= 1 && keyNode(m-1).key.woEqual(key) )
- m--;
- */
- pos = m;
+ /* - BtreeBucket --------------------------------------------------- */
- /*
- DiskLoc ch = k(m).prevChildBucket;
- if( !ch.isNull() ) {
- // if dup keys, might be dups to the left.
- DiskLoc largestLoc;
- int largestKey;
- ch.btree()->findLargestKey(ch, largestLoc, largestKey);
- if( !largestLoc.isNull() ) {
- if( largestLoc.btree()->keyAt(largestKey).woEqual(key) )
- return false;
- }
- }
- */
+ /* return largest key in the subtree. */
+ void BtreeBucket::findLargestKey(const DiskLoc& thisLoc, DiskLoc& largestLoc, int& largestKey) {
+ DiskLoc loc = thisLoc;
+ while ( 1 ) {
+ BtreeBucket *b = loc.btree();
+ if ( !b->nextChild.isNull() ) {
+ loc = b->nextChild;
+ continue;
+ }
- return true;
+ assert(b->n>0);
+ largestLoc = loc;
+ largestKey = b->n-1;
+
+ break;
}
}
- // not found
- pos = l;
- if ( pos != n ) {
- BSONObj keyatpos = keyNode(pos).key;
- wassert( key.woCompare(keyatpos, order) <= 0 );
- if ( pos > 0 ) {
- wassert( keyNode(pos-1).key.woCompare(key, order) <= 0 );
+
+ /* pos: for existing keys k0...kn-1.
+ returns # it goes BEFORE. so key[pos-1] < key < key[pos]
+ returns n if it goes after the last existing key.
+ note result might be Unused!
+ */
+ bool BtreeBucket::find(BSONObj& key, DiskLoc recordLoc, const BSONObj &order, int& pos) {
+ /* binary search for this key */
+ int l=0;
+ int h=n-1;
+ while ( l <= h ) {
+ int m = (l+h)/2;
+ KeyNode M = keyNode(m);
+ int x = key.woCompare(M.key, order);
+ if ( x == 0 )
+ x = recordLoc.compare(M.recordLoc);
+ if ( x < 0 ) // key < M.key
+ h = m-1;
+ else if ( x > 0 )
+ l = m+1;
+ else {
+ // found it. however, if dup keys are here, be careful we might have
+ // found one in the middle. we want find() to return the leftmost instance.
+ /*
+ while( m >= 1 && keyNode(m-1).key.woEqual(key) )
+ m--;
+ */
+
+ pos = m;
+
+ /*
+ DiskLoc ch = k(m).prevChildBucket;
+ if( !ch.isNull() ) {
+ // if dup keys, might be dups to the left.
+ DiskLoc largestLoc;
+ int largestKey;
+ ch.btree()->findLargestKey(ch, largestLoc, largestKey);
+ if( !largestLoc.isNull() ) {
+ if( largestLoc.btree()->keyAt(largestKey).woEqual(key) )
+ return false;
+ }
+ }
+ */
+
+ return true;
+ }
+ }
+ // not found
+ pos = l;
+ if ( pos != n ) {
+ BSONObj keyatpos = keyNode(pos).key;
+ wassert( key.woCompare(keyatpos, order) <= 0 );
+ if ( pos > 0 ) {
+ wassert( keyNode(pos-1).key.woCompare(key, order) <= 0 );
+ }
}
- }
- return false;
-}
+ return false;
+ }
-void aboutToDeleteBucket(const DiskLoc&);
-void BtreeBucket::delBucket(const DiskLoc& thisLoc, IndexDetails& id) {
- aboutToDeleteBucket(thisLoc);
+ void aboutToDeleteBucket(const DiskLoc&);
+ void BtreeBucket::delBucket(const DiskLoc& thisLoc, IndexDetails& id) {
+ aboutToDeleteBucket(thisLoc);
- assert( !isHead() );
+ assert( !isHead() );
- BtreeBucket *p = parent.btree();
- if ( p->nextChild == thisLoc ) {
- p->nextChild.Null();
- }
- else {
- for ( int i = 0; i < p->n; i++ ) {
- if ( p->k(i).prevChildBucket == thisLoc ) {
- p->k(i).prevChildBucket.Null();
- goto found;
+ BtreeBucket *p = parent.btree();
+ if ( p->nextChild == thisLoc ) {
+ p->nextChild.Null();
+ }
+ else {
+ for ( int i = 0; i < p->n; i++ ) {
+ if ( p->k(i).prevChildBucket == thisLoc ) {
+ p->k(i).prevChildBucket.Null();
+ goto found;
+ }
}
+ out() << "ERROR: can't find ref to deleted bucket.\n";
+ out() << "To delete:\n";
+ dump();
+ out() << "Parent:\n";
+ p->dump();
+ assert(false);
}
- cout << "ERROR: can't find ref to deleted bucket.\n";
- cout << "To delete:\n";
- dump();
- cout << "Parent:\n";
- p->dump();
- assert(false);
- }
found:
#if 1
- /* as a temporary defensive measure, we zap the whole bucket, AND don't truly delete
- it (meaning it is ineligible for reuse). temporary to see if it helps with some
- issues.
- */
- memset(this, 0, Size());
+ /* as a temporary defensive measure, we zap the whole bucket, AND don't truly delete
+ it (meaning it is ineligible for reuse). temporary to see if it helps with some
+ issues.
+ */
+ memset(this, 0, Size());
#else
- //defensive:
- n = -1;
- parent.Null();
- theDataFileMgr.deleteRecord(id.indexNamespace().c_str(), thisLoc.rec(), thisLoc);
+ //defensive:
+ n = -1;
+ parent.Null();
+ theDataFileMgr.deleteRecord(id.indexNamespace().c_str(), thisLoc.rec(), thisLoc);
#endif
-}
-
-/* note: may delete the entire bucket! this invalid upon return sometimes. */
-void BtreeBucket::delKeyAtPos(const DiskLoc& thisLoc, IndexDetails& id, int p) {
- dassert( thisLoc.btree() == this );
- assert(n>0);
- DiskLoc left = childForPos(p);
-
- if ( n == 1 ) {
- if ( left.isNull() && nextChild.isNull() ) {
- if ( isHead() )
- _delKeyAtPos(p); // we don't delete the top bucket ever
- else
- delBucket(thisLoc, id);
+ }
+
+ /* note: may delete the entire bucket! this invalid upon return sometimes. */
+ void BtreeBucket::delKeyAtPos(const DiskLoc& thisLoc, IndexDetails& id, int p) {
+ dassert( thisLoc.btree() == this );
+ assert(n>0);
+ DiskLoc left = childForPos(p);
+
+ if ( n == 1 ) {
+ if ( left.isNull() && nextChild.isNull() ) {
+ if ( isHead() )
+ _delKeyAtPos(p); // we don't delete the top bucket ever
+ else
+ delBucket(thisLoc, id);
+ return;
+ }
+ markUnused(p);
return;
}
- markUnused(p);
- return;
+
+ if ( left.isNull() )
+ _delKeyAtPos(p);
+ else
+ markUnused(p);
}
- if ( left.isNull() )
- _delKeyAtPos(p);
- else
- markUnused(p);
-}
+ int qqq = 0;
-int qqq = 0;
+ bool BtreeBucket::unindex(const DiskLoc& thisLoc, IndexDetails& id, BSONObj& key, const DiskLoc& recordLoc ) {
+ if ( key.objsize() > KeyMax ) {
+ OCCASIONALLY problem() << "unindex: key too large to index, skipping " << id.indexNamespace() << /* ' ' << key.toString() << */ '\n';
+ return false;
+ }
-bool BtreeBucket::unindex(const DiskLoc& thisLoc, IndexDetails& id, BSONObj& key, const DiskLoc& recordLoc ) {
- if ( key.objsize() > KeyMax ) {
- OCCASIONALLY problem() << "unindex: key too large to index, skipping " << id.indexNamespace() << /* ' ' << key.toString() << */ '\n';
+ int pos;
+ bool found;
+ DiskLoc loc = locate(thisLoc, key, id.keyPattern(), pos, found, recordLoc, 1);
+ if ( found ) {
+ loc.btree()->delKeyAtPos(loc, id, pos);
+ return true;
+ }
return false;
}
- int pos;
- bool found;
- DiskLoc loc = locate(thisLoc, key, id.keyPattern(), pos, found, recordLoc, 1);
- if ( found ) {
- loc.btree()->delKeyAtPos(loc, id, pos);
- return true;
+ BtreeBucket* BtreeBucket::allocTemp() {
+ BtreeBucket *b = (BtreeBucket*) malloc(BucketSize);
+ b->init();
+ return b;
}
- return false;
-}
-
-BtreeBucket* BtreeBucket::allocTemp() {
- BtreeBucket *b = (BtreeBucket*) malloc(BucketSize);
- b->init();
- return b;
-}
-inline void fix(const DiskLoc& thisLoc, const DiskLoc& child) {
- if ( !child.isNull() ) {
- if ( insert_debug )
- cout << " " << child.toString() << ".parent=" << thisLoc.toString() << endl;
- child.btree()->parent = thisLoc;
+ inline void fix(const DiskLoc& thisLoc, const DiskLoc& child) {
+ if ( !child.isNull() ) {
+ if ( insert_debug )
+ out() << " " << child.toString() << ".parent=" << thisLoc.toString() << endl;
+ child.btree()->parent = thisLoc;
+ }
}
-}
-/* this sucks. maybe get rid of parent ptrs. */
-void BtreeBucket::fixParentPtrs(const DiskLoc& thisLoc) {
- dassert( thisLoc.btree() == this );
- fix(thisLoc, nextChild);
- for ( int i = 0; i < n; i++ )
- fix(thisLoc, k(i).prevChildBucket);
-}
+ /* this sucks. maybe get rid of parent ptrs. */
+ void BtreeBucket::fixParentPtrs(const DiskLoc& thisLoc) {
+ dassert( thisLoc.btree() == this );
+ fix(thisLoc, nextChild);
+ for ( int i = 0; i < n; i++ )
+ fix(thisLoc, k(i).prevChildBucket);
+ }
-/* keypos - where to insert the key i3n range 0..n. 0=make leftmost, n=make rightmost.
-*/
-void BtreeBucket::insertHere(DiskLoc thisLoc, int keypos,
- DiskLoc recordLoc, BSONObj& key, const BSONObj& order,
- DiskLoc lchild, DiskLoc rchild, IndexDetails& idx)
-{
- dassert( thisLoc.btree() == this );
- if ( insert_debug )
- cout << " " << thisLoc.toString() << ".insertHere " << key.toString() << '/' << recordLoc.toString() << ' '
- << lchild.toString() << ' ' << rchild.toString() << " keypos:" << keypos << endl;
-
- DiskLoc oldLoc = thisLoc;
-
- if ( basicInsert(keypos, recordLoc, key, order) ) {
- _KeyNode& kn = k(keypos);
- if ( keypos+1 == n ) { // last key
- if ( nextChild != lchild ) {
- cout << "ERROR nextChild != lchild" << endl;
- cout << " thisLoc: " << thisLoc.toString() << ' ' << idx.indexNamespace() << endl;
- cout << " keyPos: " << keypos << " n:" << n << endl;
- cout << " nextChild: " << nextChild.toString() << " lchild: " << lchild.toString() << endl;
- cout << " recordLoc: " << recordLoc.toString() << " rchild: " << rchild.toString() << endl;
- cout << " key: " << key.toString() << endl;
- dump();
+ /* keypos - where to insert the key i3n range 0..n. 0=make leftmost, n=make rightmost.
+ */
+ void BtreeBucket::insertHere(DiskLoc thisLoc, int keypos,
+ DiskLoc recordLoc, BSONObj& key, const BSONObj& order,
+ DiskLoc lchild, DiskLoc rchild, IndexDetails& idx)
+ {
+ dassert( thisLoc.btree() == this );
+ if ( insert_debug )
+ out() << " " << thisLoc.toString() << ".insertHere " << key.toString() << '/' << recordLoc.toString() << ' '
+ << lchild.toString() << ' ' << rchild.toString() << " keypos:" << keypos << endl;
+
+ DiskLoc oldLoc = thisLoc;
+
+ if ( basicInsert(keypos, recordLoc, key, order) ) {
+ _KeyNode& kn = k(keypos);
+ if ( keypos+1 == n ) { // last key
+ if ( nextChild != lchild ) {
+ out() << "ERROR nextChild != lchild" << endl;
+ out() << " thisLoc: " << thisLoc.toString() << ' ' << idx.indexNamespace() << endl;
+ out() << " keyPos: " << keypos << " n:" << n << endl;
+ out() << " nextChild: " << nextChild.toString() << " lchild: " << lchild.toString() << endl;
+ out() << " recordLoc: " << recordLoc.toString() << " rchild: " << rchild.toString() << endl;
+ out() << " key: " << key.toString() << endl;
+ dump();
#if defined(_WIN32)
- cout << "\n\nDUMPING FULL INDEX" << endl;
- bt_dmp=1;
- bt_fv=1;
- idx.head.btree()->fullValidate(idx.head);
+ out() << "\n\nDUMPING FULL INDEX" << endl;
+ bt_dmp=1;
+ bt_fv=1;
+ idx.head.btree()->fullValidate(idx.head);
#endif
- assert(false);
+ assert(false);
+ }
+ kn.prevChildBucket = nextChild;
+ assert( kn.prevChildBucket == lchild );
+ nextChild = rchild;
+ if ( !rchild.isNull() )
+ rchild.btree()->parent = thisLoc;
}
- kn.prevChildBucket = nextChild;
- assert( kn.prevChildBucket == lchild );
- nextChild = rchild;
- if ( !rchild.isNull() )
- rchild.btree()->parent = thisLoc;
- }
- else {
- k(keypos).prevChildBucket = lchild;
- if ( k(keypos+1).prevChildBucket != lchild ) {
- cout << "ERROR k(keypos+1).prevChildBucket != lchild" << endl;
- cout << " thisLoc: " << thisLoc.toString() << ' ' << idx.indexNamespace() << endl;
- cout << " keyPos: " << keypos << " n:" << n << endl;
- cout << " k(keypos+1).pcb: " << k(keypos+1).prevChildBucket.toString() << " lchild: " << lchild.toString() << endl;
- cout << " recordLoc: " << recordLoc.toString() << " rchild: " << rchild.toString() << endl;
- cout << " key: " << key.toString() << endl;
- dump();
+ else {
+ k(keypos).prevChildBucket = lchild;
+ if ( k(keypos+1).prevChildBucket != lchild ) {
+ out() << "ERROR k(keypos+1).prevChildBucket != lchild" << endl;
+ out() << " thisLoc: " << thisLoc.toString() << ' ' << idx.indexNamespace() << endl;
+ out() << " keyPos: " << keypos << " n:" << n << endl;
+ out() << " k(keypos+1).pcb: " << k(keypos+1).prevChildBucket.toString() << " lchild: " << lchild.toString() << endl;
+ out() << " recordLoc: " << recordLoc.toString() << " rchild: " << rchild.toString() << endl;
+ out() << " key: " << key.toString() << endl;
+ dump();
#if defined(_WIN32)
- cout << "\n\nDUMPING FULL INDEX" << endl;
- bt_dmp=1;
- bt_fv=1;
- idx.head.btree()->fullValidate(idx.head);
+ out() << "\n\nDUMPING FULL INDEX" << endl;
+ bt_dmp=1;
+ bt_fv=1;
+ idx.head.btree()->fullValidate(idx.head);
#endif
- assert(false);
+ assert(false);
+ }
+ k(keypos+1).prevChildBucket = rchild;
+ if ( !rchild.isNull() )
+ rchild.btree()->parent = thisLoc;
}
- k(keypos+1).prevChildBucket = rchild;
- if ( !rchild.isNull() )
- rchild.btree()->parent = thisLoc;
+ return;
}
- return;
- }
-
- // split
- if ( split_debug )
- cout << " " << thisLoc.toString() << ".split" << endl;
-
- int mid = n / 2;
- /* on duplicate key, we need to ensure that they all end up on the RHS */
- if ( 0 ) {
- assert(mid>0);
- while ( 1 ) {
- KeyNode mn = keyNode(mid);
- KeyNode left = keyNode(mid-1);
- if ( left.key.woCompare( mn.key, order ) < 0 )
- break;
- mid--;
- if ( mid < 3 ) {
- problem() << "Assertion failure - mid<3: duplicate key bug not fixed yet" << endl;
- cout << "Assertion failure - mid<3: duplicate key bug not fixed yet" << endl;
- cout << " ns:" << idx.indexNamespace() << endl;
- cout << " key:" << mn.key.toString() << endl;
- break;
+ // split
+ if ( split_debug )
+ out() << " " << thisLoc.toString() << ".split" << endl;
+
+ int mid = n / 2;
+
+ /* on duplicate key, we need to ensure that they all end up on the RHS */
+ if ( 0 ) {
+ assert(mid>0);
+ while ( 1 ) {
+ KeyNode mn = keyNode(mid);
+ KeyNode left = keyNode(mid-1);
+ if ( left.key.woCompare( mn.key, order ) < 0 )
+ break;
+ mid--;
+ if ( mid < 3 ) {
+ problem() << "Assertion failure - mid<3: duplicate key bug not fixed yet" << endl;
+ out() << "Assertion failure - mid<3: duplicate key bug not fixed yet" << endl;
+ out() << " ns:" << idx.indexNamespace() << endl;
+ out() << " key:" << mn.key.toString() << endl;
+ break;
+ }
}
}
- }
- BtreeBucket *r = allocTemp();
- DiskLoc rLoc;
+ BtreeBucket *r = allocTemp();
+ DiskLoc rLoc;
- if ( split_debug )
- cout << " mid:" << mid << ' ' << keyNode(mid).key.toString() << " n:" << n << endl;
- for ( int i = mid+1; i < n; i++ ) {
- KeyNode kn = keyNode(i);
- r->pushBack(kn.recordLoc, kn.key, order, kn.prevChildBucket);
- }
- r->nextChild = nextChild;
- r->assertValid( order );
+ if ( split_debug )
+ out() << " mid:" << mid << ' ' << keyNode(mid).key.toString() << " n:" << n << endl;
+ for ( int i = mid+1; i < n; i++ ) {
+ KeyNode kn = keyNode(i);
+ r->pushBack(kn.recordLoc, kn.key, order, kn.prevChildBucket);
+ }
+ r->nextChild = nextChild;
+ r->assertValid( order );
//r->dump();
- rLoc = theDataFileMgr.insert(idx.indexNamespace().c_str(), r, r->Size(), true);
- if ( split_debug )
- cout << " new rLoc:" << rLoc.toString() << endl;
- free(r);
- r = 0;
- rLoc.btree()->fixParentPtrs(rLoc);
+ rLoc = theDataFileMgr.insert(idx.indexNamespace().c_str(), r, r->Size(), true);
+ if ( split_debug )
+ out() << " new rLoc:" << rLoc.toString() << endl;
+ free(r);
+ r = 0;
+ rLoc.btree()->fixParentPtrs(rLoc);
+
+ {
+ KeyNode middle = keyNode(mid);
+ nextChild = middle.prevChildBucket; // middle key gets promoted, its children will be thisLoc (l) and rLoc (r)
+ if ( split_debug ) {
+ //rLoc.btree()->dump();
+ out() << " middle key:" << middle.key.toString() << endl;
+ }
- {
- KeyNode middle = keyNode(mid);
- nextChild = middle.prevChildBucket; // middle key gets promoted, its children will be thisLoc (l) and rLoc (r)
- if ( split_debug ) {
- //rLoc.btree()->dump();
- cout << " middle key:" << middle.key.toString() << endl;
- }
-
- // promote middle to a parent node
- if ( parent.isNull() ) {
- // make a new parent if we were the root
- BtreeBucket *p = allocTemp();
- p->pushBack(middle.recordLoc, middle.key, order, thisLoc);
- p->nextChild = rLoc;
- p->assertValid( order );
- parent = idx.head = theDataFileMgr.insert(idx.indexNamespace().c_str(), p, p->Size(), true);
- if ( split_debug )
- cout << " we were root, making new root:" << hex << parent.getOfs() << dec << endl;
- free(p);
- rLoc.btree()->parent = parent;
- }
- else {
- /* set this before calling _insert - if it splits it will do fixParent() logic and fix the value,
- so we don't want to overwrite that if it happens.
- */
- rLoc.btree()->parent = parent;
- if ( split_debug )
- cout << " promoting middle key " << middle.key.toString() << endl;
- parent.btree()->_insert(parent, middle.recordLoc, middle.key, order, false, thisLoc, rLoc, idx);
- }
+ // promote middle to a parent node
+ if ( parent.isNull() ) {
+ // make a new parent if we were the root
+ BtreeBucket *p = allocTemp();
+ p->pushBack(middle.recordLoc, middle.key, order, thisLoc);
+ p->nextChild = rLoc;
+ p->assertValid( order );
+ parent = idx.head = theDataFileMgr.insert(idx.indexNamespace().c_str(), p, p->Size(), true);
+ if ( split_debug )
+ out() << " we were root, making new root:" << hex << parent.getOfs() << dec << endl;
+ free(p);
+ rLoc.btree()->parent = parent;
+ }
+ else {
+ /* set this before calling _insert - if it splits it will do fixParent() logic and fix the value,
+ so we don't want to overwrite that if it happens.
+ */
+ rLoc.btree()->parent = parent;
+ if ( split_debug )
+ out() << " promoting middle key " << middle.key.toString() << endl;
+ parent.btree()->_insert(parent, middle.recordLoc, middle.key, order, false, thisLoc, rLoc, idx);
+ }
//BtreeBucket *br = rLoc.btree();
//br->dump();
//parent.btree()->dump();
//idx.head.btree()->dump();
- }
+ }
- truncateTo(mid, order); // note this may trash middle.key! thus we had to promote it before finishing up here.
+ truncateTo(mid, order); // note this may trash middle.key! thus we had to promote it before finishing up here.
- // add our new key, there is room now
- {
+ // add our new key, there is room now
+ {
//dump();
- if ( keypos <= mid ) {
+ if ( keypos <= mid ) {
// if( keypos < mid ) {
- if ( split_debug )
- cout << " keypos<mid, insertHere() the new key" << endl;
- insertHere(thisLoc, keypos, recordLoc, key, order, lchild, rchild, idx);
+ if ( split_debug )
+ out() << " keypos<mid, insertHere() the new key" << endl;
+ insertHere(thisLoc, keypos, recordLoc, key, order, lchild, rchild, idx);
//dump();
- } else {
- int kp = keypos-mid-1;
- assert(kp>=0);
- rLoc.btree()->insertHere(rLoc, kp, recordLoc, key, order, lchild, rchild, idx);
+ } else {
+ int kp = keypos-mid-1;
+ assert(kp>=0);
+ rLoc.btree()->insertHere(rLoc, kp, recordLoc, key, order, lchild, rchild, idx);
// set a bp here.
-// if( !lchild.isNull() ) cout << lchild.btree()->parent.toString() << endl;
-// if( !rchild.isNull() ) cout << rchild.btree()->parent.toString() << endl;
- }
- }
-
- if ( split_debug )
- cout << " split end " << hex << thisLoc.getOfs() << dec << endl;
-}
-
-/* start a new index off, empty */
-DiskLoc BtreeBucket::addHead(IndexDetails& id) {
- BtreeBucket *p = allocTemp();
- DiskLoc loc = theDataFileMgr.insert(id.indexNamespace().c_str(), p, p->Size(), true);
- free(p);
- return loc;
-}
-
-DiskLoc BtreeBucket::getHead(const DiskLoc& thisLoc) {
- DiskLoc p = thisLoc;
- while ( !p.btree()->isHead() )
- p = p.btree()->parent;
- return p;
-}
-
-DiskLoc BtreeBucket::advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) {
- if ( keyOfs < 0 || keyOfs >= n ) {
- cout << "ASSERT failure BtreeBucket::advance, caller: " << caller << endl;
- cout << " thisLoc: " << thisLoc.toString() << endl;
- cout << " keyOfs: " << keyOfs << " n:" << n << " direction: " << direction << endl;
- cout << bucketSummary() << endl;
- assert(false);
- }
- int adj = direction < 0 ? 1 : 0;
- int ko = keyOfs + direction;
- DiskLoc nextDown = childForPos(ko+adj);
- if ( !nextDown.isNull() ) {
- while ( 1 ) {
- keyOfs = direction>0 ? 0 : nextDown.btree()->n - 1;
- DiskLoc loc= nextDown.btree()->childForPos(keyOfs + adj);
- if ( loc.isNull() )
- break;
- nextDown = loc;
+// if( !lchild.isNull() ) out() << lchild.btree()->parent.toString() << endl;
+// if( !rchild.isNull() ) out() << rchild.btree()->parent.toString() << endl;
+ }
}
- return nextDown;
+
+ if ( split_debug )
+ out() << " split end " << hex << thisLoc.getOfs() << dec << endl;
}
- if ( ko < n && ko >= 0 ) {
- keyOfs = ko;
- return thisLoc;
+ /* start a new index off, empty */
+ DiskLoc BtreeBucket::addHead(IndexDetails& id) {
+ BtreeBucket *p = allocTemp();
+ DiskLoc loc = theDataFileMgr.insert(id.indexNamespace().c_str(), p, p->Size(), true);
+ free(p);
+ return loc;
}
- // end of bucket. traverse back up.
- DiskLoc childLoc = thisLoc;
- DiskLoc ancestor = parent;
- while ( 1 ) {
- if ( ancestor.isNull() )
- break;
- BtreeBucket *an = ancestor.btree();
- for ( int i = 0; i < an->n; i++ ) {
- if ( an->childForPos(i+adj) == childLoc ) {
- keyOfs = i;
- return ancestor;
- }
- }
- assert( direction<0 || an->nextChild == childLoc );
- // parent exhausted also, keep going up
- childLoc = ancestor;
- ancestor = an->parent;
+ DiskLoc BtreeBucket::getHead(const DiskLoc& thisLoc) {
+ DiskLoc p = thisLoc;
+ while ( !p.btree()->isHead() )
+ p = p.btree()->parent;
+ return p;
}
- return DiskLoc();
-}
+ DiskLoc BtreeBucket::advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) {
+ if ( keyOfs < 0 || keyOfs >= n ) {
+ out() << "ASSERT failure BtreeBucket::advance, caller: " << caller << endl;
+ out() << " thisLoc: " << thisLoc.toString() << endl;
+ out() << " keyOfs: " << keyOfs << " n:" << n << " direction: " << direction << endl;
+ out() << bucketSummary() << endl;
+ assert(false);
+ }
+ int adj = direction < 0 ? 1 : 0;
+ int ko = keyOfs + direction;
+ DiskLoc nextDown = childForPos(ko+adj);
+ if ( !nextDown.isNull() ) {
+ while ( 1 ) {
+ keyOfs = direction>0 ? 0 : nextDown.btree()->n - 1;
+ DiskLoc loc= nextDown.btree()->childForPos(keyOfs + adj);
+ if ( loc.isNull() )
+ break;
+ nextDown = loc;
+ }
+ return nextDown;
+ }
-DiskLoc BtreeBucket::locate(const DiskLoc& thisLoc, BSONObj& key, const BSONObj &order, int& pos, bool& found, DiskLoc recordLoc, int direction) {
- int p;
- found = find(key, recordLoc, order, p);
- if ( found ) {
- pos = p;
- return thisLoc;
- }
+ if ( ko < n && ko >= 0 ) {
+ keyOfs = ko;
+ return thisLoc;
+ }
- DiskLoc child = childForPos(p);
+ // end of bucket. traverse back up.
+ DiskLoc childLoc = thisLoc;
+ DiskLoc ancestor = parent;
+ while ( 1 ) {
+ if ( ancestor.isNull() )
+ break;
+ BtreeBucket *an = ancestor.btree();
+ for ( int i = 0; i < an->n; i++ ) {
+ if ( an->childForPos(i+adj) == childLoc ) {
+ keyOfs = i;
+ return ancestor;
+ }
+ }
+ assert( direction<0 || an->nextChild == childLoc );
+ // parent exhausted also, keep going up
+ childLoc = ancestor;
+ ancestor = an->parent;
+ }
- if ( !child.isNull() ) {
- DiskLoc l = child.btree()->locate(child, key, order, pos, found, recordLoc, direction);
- if ( !l.isNull() )
- return l;
+ return DiskLoc();
}
- pos = p;
- if ( direction < 0 )
- return --pos == -1 ? DiskLoc() /*theend*/ : thisLoc;
- else
- return pos == n ? DiskLoc() /*theend*/ : thisLoc;
-}
+ DiskLoc BtreeBucket::locate(const DiskLoc& thisLoc, BSONObj& key, const BSONObj &order, int& pos, bool& found, DiskLoc recordLoc, int direction) {
+ int p;
+ found = find(key, recordLoc, order, p);
+ if ( found ) {
+ pos = p;
+ return thisLoc;
+ }
-/* thisloc is the location of this bucket object. you must pass that in. */
-int BtreeBucket::_insert(DiskLoc thisLoc, DiskLoc recordLoc,
- BSONObj& key, const BSONObj &order, bool dupsAllowed,
- DiskLoc lChild, DiskLoc rChild, IndexDetails& idx) {
- if ( key.objsize() > KeyMax ) {
- problem() << "ERROR: key too large len:" << key.objsize() << " max:" << KeyMax << ' ' << idx.indexNamespace() << endl;
- return 2;
- }
- assert( key.objsize() > 0 );
+ DiskLoc child = childForPos(p);
+
+ if ( !child.isNull() ) {
+ DiskLoc l = child.btree()->locate(child, key, order, pos, found, recordLoc, direction);
+ if ( !l.isNull() )
+ return l;
+ }
- int pos;
- bool found = find(key, recordLoc, order, pos);
- if ( insert_debug ) {
- cout << " " << thisLoc.toString() << '.' << "_insert " <<
- key.toString() << '/' << recordLoc.toString() <<
- " l:" << lChild.toString() << " r:" << rChild.toString() << endl;
- cout << " found:" << found << " pos:" << pos << " n:" << n << endl;
+ pos = p;
+ if ( direction < 0 )
+ return --pos == -1 ? DiskLoc() /*theend*/ : thisLoc;
+ else
+ return pos == n ? DiskLoc() /*theend*/ : thisLoc;
}
- if ( found ) {
- if ( k(pos).isUnused() ) {
- cout << "an unused already occupying keyslot, write more code.\n";
- cout << " index may be corrupt (missing data) now.\n";
+ /* thisloc is the location of this bucket object. you must pass that in. */
+ int BtreeBucket::_insert(DiskLoc thisLoc, DiskLoc recordLoc,
+ BSONObj& key, const BSONObj &order, bool dupsAllowed,
+ DiskLoc lChild, DiskLoc rChild, IndexDetails& idx) {
+ if ( key.objsize() > KeyMax ) {
+ problem() << "ERROR: key too large len:" << key.objsize() << " max:" << KeyMax << ' ' << idx.indexNamespace() << endl;
+ return 2;
}
+ assert( key.objsize() > 0 );
+
+ int pos;
+ bool found = find(key, recordLoc, order, pos);
+ if ( insert_debug ) {
+ out() << " " << thisLoc.toString() << '.' << "_insert " <<
+ key.toString() << '/' << recordLoc.toString() <<
+ " l:" << lChild.toString() << " r:" << rChild.toString() << endl;
+ out() << " found:" << found << " pos:" << pos << " n:" << n << endl;
+ }
+
+ if ( found ) {
+ if ( k(pos).isUnused() ) {
+ out() << "an unused already occupying keyslot, write more code.\n";
+ out() << " index may be corrupt (missing data) now.\n";
+ }
- cout << "_insert(): key already exists in index\n";
- cout << " " << idx.indexNamespace().c_str() << " thisLoc:" << thisLoc.toString() << '\n';
- cout << " " << key.toString() << '\n';
- cout << " " << "recordLoc:" << recordLoc.toString() << " pos:" << pos << endl;
- cout << " old l r: " << childForPos(pos).toString() << ' ' << childForPos(pos+1).toString() << endl;
- cout << " new l r: " << lChild.toString() << ' ' << rChild.toString() << endl;
- assert(false);
+ out() << "_insert(): key already exists in index\n";
+ out() << " " << idx.indexNamespace().c_str() << " thisLoc:" << thisLoc.toString() << '\n';
+ out() << " " << key.toString() << '\n';
+ out() << " " << "recordLoc:" << recordLoc.toString() << " pos:" << pos << endl;
+ out() << " old l r: " << childForPos(pos).toString() << ' ' << childForPos(pos+1).toString() << endl;
+ out() << " new l r: " << lChild.toString() << ' ' << rChild.toString() << endl;
+ assert(false);
- // on a dup key always insert on the right or else you will be broken.
+ // on a dup key always insert on the right or else you will be broken.
// pos++;
- // on a promotion, find the right point to update if dup keys.
- /* not needed: we always insert right after the first key so we are ok with just pos++...
- if( !rChild.isNull() ) {
- while( pos < n && k(pos).prevChildBucket != lchild ) {
- pos++;
- cout << "looking for the right dup key" << endl;
- }
- }
- */
- }
-
- DEBUGGING cout << "TEMP: key: " << key.toString() << endl;
- DiskLoc& child = getChild(pos);
- if ( insert_debug )
- cout << " getChild(" << pos << "): " << child.toString() << endl;
- if ( child.isNull() || !rChild.isNull() /* means an 'internal' insert */ ) {
- insertHere(thisLoc, pos, recordLoc, key, order, lChild, rChild, idx);
- return 0;
- }
-
- return child.btree()->insert(child, recordLoc, key, order, dupsAllowed, idx, false);
-}
-
-void BtreeBucket::dump() {
- cout << "DUMP btreebucket: ";
- cout << " parent:" << hex << parent.getOfs() << dec;
- for ( int i = 0; i < n; i++ ) {
- cout << '\n';
- KeyNode k = keyNode(i);
- cout << '\t' << i << '\t' << k.key.toString() << "\tleft:" << hex <<
- k.prevChildBucket.getOfs() << "\trec:" << k.recordLoc.getOfs() << dec;
- if ( this->k(i).isUnused() )
- cout << " UNUSED";
- }
- cout << " right:" << hex << nextChild.getOfs() << dec << endl;
-}
-
-/* todo: meaning of return code unclear clean up */
-int BtreeBucket::insert(DiskLoc thisLoc, DiskLoc recordLoc,
- BSONObj& key, const BSONObj &order, bool dupsAllowed,
- IndexDetails& idx, bool toplevel)
-{
- if ( toplevel ) {
- if ( key.objsize() > KeyMax ) {
- problem() << "Btree::insert: key too large to index, skipping " << idx.indexNamespace().c_str() << ' ' << key.toString() << '\n';
- return 3;
+ // on a promotion, find the right point to update if dup keys.
+ /* not needed: we always insert right after the first key so we are ok with just pos++...
+ if( !rChild.isNull() ) {
+ while( pos < n && k(pos).prevChildBucket != lchild ) {
+ pos++;
+ out() << "looking for the right dup key" << endl;
+ }
+ }
+ */
}
- ++ninserts;
- /*
- if( ninserts % 1000 == 0 ) {
- cout << "ninserts: " << ninserts << endl;
- if( 0 && ninserts >= 127287 ) {
- cout << "debug?" << endl;
- split_debug = 1;
- }
+
+ DEBUGGING out() << "TEMP: key: " << key.toString() << endl;
+ DiskLoc& child = getChild(pos);
+ if ( insert_debug )
+ out() << " getChild(" << pos << "): " << child.toString() << endl;
+ if ( child.isNull() || !rChild.isNull() /* means an 'internal' insert */ ) {
+ insertHere(thisLoc, pos, recordLoc, key, order, lChild, rChild, idx);
+ return 0;
}
- */
+
+ return child.btree()->insert(child, recordLoc, key, order, dupsAllowed, idx, false);
}
- int x = _insert(thisLoc, recordLoc, key, order, dupsAllowed, DiskLoc(), DiskLoc(), idx);
- assertValid( order );
+ void BtreeBucket::dump() {
+ out() << "DUMP btreebucket: ";
+ out() << " parent:" << hex << parent.getOfs() << dec;
+ for ( int i = 0; i < n; i++ ) {
+ out() << '\n';
+ KeyNode k = keyNode(i);
+ out() << '\t' << i << '\t' << k.key.toString() << "\tleft:" << hex <<
+ k.prevChildBucket.getOfs() << "\trec:" << k.recordLoc.getOfs() << dec;
+ if ( this->k(i).isUnused() )
+ out() << " UNUSED";
+ }
+ out() << " right:" << hex << nextChild.getOfs() << dec << endl;
+ }
- return x;
-}
+ /* todo: meaning of return code unclear clean up */
+ int BtreeBucket::insert(DiskLoc thisLoc, DiskLoc recordLoc,
+ BSONObj& key, const BSONObj &order, bool dupsAllowed,
+ IndexDetails& idx, bool toplevel)
+ {
+ if ( toplevel ) {
+ if ( key.objsize() > KeyMax ) {
+ problem() << "Btree::insert: key too large to index, skipping " << idx.indexNamespace().c_str() << ' ' << key.toString() << '\n';
+ return 3;
+ }
+ ++ninserts;
+ /*
+ if( ninserts % 1000 == 0 ) {
+ out() << "ninserts: " << ninserts << endl;
+ if( 0 && ninserts >= 127287 ) {
+ out() << "debug?" << endl;
+ split_debug = 1;
+ }
+ }
+ */
+ }
-void BtreeBucket::shape(stringstream& ss) {
- _shape(0, ss);
-}
+ int x = _insert(thisLoc, recordLoc, key, order, dupsAllowed, DiskLoc(), DiskLoc(), idx);
+ assertValid( order );
+
+ return x;
+ }
+
+ void BtreeBucket::shape(stringstream& ss) {
+ _shape(0, ss);
+ }
} // namespace mongo
diff --git a/db/btree.h b/db/btree.h
index c05955c776f..b79c595e0d9 100644
--- a/db/btree.h
+++ b/db/btree.h
@@ -27,245 +27,245 @@ namespace mongo {
#pragma pack(push,1)
-struct _KeyNode {
- DiskLoc prevChildBucket;
- DiskLoc recordLoc;
- short keyDataOfs() {
- return (short) _kdo;
- }
- unsigned short _kdo;
- void setKeyDataOfs(short s) {
- _kdo = s;
- assert(s>=0);
- }
- void setKeyDataOfsSavingUse(short s) {
- _kdo = s;
- assert(s>=0);
- }
- void setUnused() {
- /* Setting ofs to odd is the sentinel for unused, as real recordLoc's are always
- even numbers.
- Note we need to keep its value basically the same as we use the recordLoc
- as part of the key in the index (to handle duplicate keys efficiently).
- */
- recordLoc.GETOFS() |= 1;
- }
- int isUnused() {
- return recordLoc.getOfs() & 1;
- }
- int isUsed() {
- return !isUnused();
- }
-};
+ struct _KeyNode {
+ DiskLoc prevChildBucket;
+ DiskLoc recordLoc;
+ short keyDataOfs() {
+ return (short) _kdo;
+ }
+ unsigned short _kdo;
+ void setKeyDataOfs(short s) {
+ _kdo = s;
+ assert(s>=0);
+ }
+ void setKeyDataOfsSavingUse(short s) {
+ _kdo = s;
+ assert(s>=0);
+ }
+ void setUnused() {
+ /* Setting ofs to odd is the sentinel for unused, as real recordLoc's are always
+ even numbers.
+ Note we need to keep its value basically the same as we use the recordLoc
+ as part of the key in the index (to handle duplicate keys efficiently).
+ */
+ recordLoc.GETOFS() |= 1;
+ }
+ int isUnused() {
+ return recordLoc.getOfs() & 1;
+ }
+ int isUsed() {
+ return !isUnused();
+ }
+ };
#pragma pack(pop)
-class BucketBasics;
+ class BucketBasics;
-/* wrapper - this is our in memory representation of the key. _KeyNode is the disk representation. */
-class KeyNode {
-public:
- KeyNode(BucketBasics& bb, _KeyNode &k);
- DiskLoc& prevChildBucket;
- DiskLoc& recordLoc;
- BSONObj key;
-};
+ /* wrapper - this is our in memory representation of the key. _KeyNode is the disk representation. */
+ class KeyNode {
+ public:
+ KeyNode(BucketBasics& bb, _KeyNode &k);
+ DiskLoc& prevChildBucket;
+ DiskLoc& recordLoc;
+ BSONObj key;
+ };
#pragma pack(push,1)
-/* this class is all about the storage management */
-class BucketBasics {
- friend class KeyNode;
-public:
- void dumpTree(DiskLoc thisLoc, const BSONObj &order);
- bool isHead() {
- return parent.isNull();
- }
- void assertValid(const BSONObj &order, bool force = false);
- int fullValidate(const DiskLoc& thisLoc, const BSONObj &order); /* traverses everything */
-protected:
- DiskLoc& getChild(int pos) {
- assert( pos >= 0 && pos <= n );
- return pos == n ? nextChild : k(pos).prevChildBucket;
- }
- KeyNode keyNode(int i) {
- assert( i < n );
- return KeyNode(*this, k(i));
- }
-
- char * dataAt(short ofs) {
- return data + ofs;
- }
-
- void init(); // initialize a new node
-
- /* returns false if node is full and must be split
- keypos is where to insert -- inserted after that key #. so keypos=0 is the leftmost one.
- */
- bool basicInsert(int keypos, const DiskLoc& recordLoc, BSONObj& key, const BSONObj &order);
- void pushBack(const DiskLoc& recordLoc, BSONObj& key, const BSONObj &order, DiskLoc prevChild);
- void _delKeyAtPos(int keypos); // low level version that doesn't deal with child ptrs.
-
- /* !Packed means there is deleted fragment space within the bucket.
- We "repack" when we run out of space before considering the node
- to be full.
- */
- enum Flags { Packed=1 };
-
- DiskLoc childForPos(int p) {
- return p == n ? nextChild : k(p).prevChildBucket;
- }
-
- int totalDataSize() const;
- void pack( const BSONObj &order );
- void setNotPacked();
- void setPacked();
- int _alloc(int bytes);
- void truncateTo(int N, const BSONObj &order);
- void markUnused(int keypos);
-public:
- DiskLoc parent;
-
- string bucketSummary() const {
- stringstream ss;
- ss << " Bucket info:" << endl;
- ss << " n: " << n << endl;
- ss << " parent: " << parent.toString() << endl;
- ss << " nextChild: " << parent.toString() << endl;
- ss << " Size: " << _Size << " flags:" << flags << endl;
- ss << " emptySize: " << emptySize << " topSize: " << topSize << endl;
- return ss.str();
- }
-
-protected:
- void _shape(int level, stringstream&);
- DiskLoc nextChild; // child bucket off and to the right of the highest key.
- int _Size; // total size of this btree node in bytes. constant.
- int Size() const;
- int flags;
- int emptySize; // size of the empty region
- int topSize; // size of the data at the top of the bucket (keys are at the beginning or 'bottom')
- int n; // # of keys so far.
- int reserved;
- _KeyNode& k(int i) {
- return ((_KeyNode*)data)[i];
- }
- char data[4];
-};
-
-class BtreeBucket : public BucketBasics {
- friend class BtreeCursor;
-public:
- void dump();
-
- static DiskLoc addHead(IndexDetails&); /* start a new index off, empty */
- int insert(DiskLoc thisLoc, DiskLoc recordLoc,
- BSONObj& key, const BSONObj &order, bool dupsAllowed,
- IndexDetails& idx, bool toplevel);
-
- bool unindex(const DiskLoc& thisLoc, IndexDetails& id, BSONObj& key, const DiskLoc& recordLoc);
-
- /* locate may return an "unused" key that is just a marker. so be careful.
- looks for a key:recordloc pair.
- */
- DiskLoc locate(const DiskLoc& thisLoc, BSONObj& key, const BSONObj &order, int& pos, bool& found, DiskLoc recordLoc, int direction=1);
-
- /* advance one key position in the index: */
- DiskLoc advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller);
- DiskLoc getHead(const DiskLoc& thisLoc);
-
- /* get tree shape */
- void shape(stringstream&);
-private:
- void fixParentPtrs(const DiskLoc& thisLoc);
- void delBucket(const DiskLoc& thisLoc, IndexDetails&);
- void delKeyAtPos(const DiskLoc& thisLoc, IndexDetails& id, int p);
- BSONObj keyAt(int keyOfs) {
- return keyOfs >= n ? BSONObj() : keyNode(keyOfs).key;
- }
- static BtreeBucket* allocTemp(); /* caller must release with free() */
- void insertHere(DiskLoc thisLoc, int keypos,
- DiskLoc recordLoc, BSONObj& key, const BSONObj &order,
- DiskLoc lchild, DiskLoc rchild, IndexDetails&);
- int _insert(DiskLoc thisLoc, DiskLoc recordLoc,
- BSONObj& key, const BSONObj &order, bool dupsAllowed,
- DiskLoc lChild, DiskLoc rChild, IndexDetails&);
- bool find(BSONObj& key, DiskLoc recordLoc, const BSONObj &order, int& pos);
- static void findLargestKey(const DiskLoc& thisLoc, DiskLoc& largestLoc, int& largestKey);
-};
-
-class BtreeCursor : public Cursor {
- friend class BtreeBucket;
- BSONObj startKey;
- BSONObj endKey;
+ /* this class is all about the storage management */
+ class BucketBasics {
+ friend class KeyNode;
+ public:
+ void dumpTree(DiskLoc thisLoc, const BSONObj &order);
+ bool isHead() {
+ return parent.isNull();
+ }
+ void assertValid(const BSONObj &order, bool force = false);
+ int fullValidate(const DiskLoc& thisLoc, const BSONObj &order); /* traverses everything */
+ protected:
+ DiskLoc& getChild(int pos) {
+ assert( pos >= 0 && pos <= n );
+ return pos == n ? nextChild : k(pos).prevChildBucket;
+ }
+ KeyNode keyNode(int i) {
+ assert( i < n );
+ return KeyNode(*this, k(i));
+ }
+
+ char * dataAt(short ofs) {
+ return data + ofs;
+ }
+
+ void init(); // initialize a new node
+
+ /* returns false if node is full and must be split
+ keypos is where to insert -- inserted after that key #. so keypos=0 is the leftmost one.
+ */
+ bool basicInsert(int keypos, const DiskLoc& recordLoc, BSONObj& key, const BSONObj &order);
+ void pushBack(const DiskLoc& recordLoc, BSONObj& key, const BSONObj &order, DiskLoc prevChild);
+ void _delKeyAtPos(int keypos); // low level version that doesn't deal with child ptrs.
+
+ /* !Packed means there is deleted fragment space within the bucket.
+ We "repack" when we run out of space before considering the node
+ to be full.
+ */
+ enum Flags { Packed=1 };
+
+ DiskLoc childForPos(int p) {
+ return p == n ? nextChild : k(p).prevChildBucket;
+ }
+
+ int totalDataSize() const;
+ void pack( const BSONObj &order );
+ void setNotPacked();
+ void setPacked();
+ int _alloc(int bytes);
+ void truncateTo(int N, const BSONObj &order);
+ void markUnused(int keypos);
+ public:
+ DiskLoc parent;
+
+ string bucketSummary() const {
+ stringstream ss;
+ ss << " Bucket info:" << endl;
+ ss << " n: " << n << endl;
+ ss << " parent: " << parent.toString() << endl;
+ ss << " nextChild: " << parent.toString() << endl;
+ ss << " Size: " << _Size << " flags:" << flags << endl;
+ ss << " emptySize: " << emptySize << " topSize: " << topSize << endl;
+ return ss.str();
+ }
+
+ protected:
+ void _shape(int level, stringstream&);
+ DiskLoc nextChild; // child bucket off and to the right of the highest key.
+ int _Size; // total size of this btree node in bytes. constant.
+ int Size() const;
+ int flags;
+ int emptySize; // size of the empty region
+ int topSize; // size of the data at the top of the bucket (keys are at the beginning or 'bottom')
+ int n; // # of keys so far.
+ int reserved;
+ _KeyNode& k(int i) {
+ return ((_KeyNode*)data)[i];
+ }
+ char data[4];
+ };
+
+ class BtreeBucket : public BucketBasics {
+ friend class BtreeCursor;
+ public:
+ void dump();
+
+ static DiskLoc addHead(IndexDetails&); /* start a new index off, empty */
+ int insert(DiskLoc thisLoc, DiskLoc recordLoc,
+ BSONObj& key, const BSONObj &order, bool dupsAllowed,
+ IndexDetails& idx, bool toplevel);
+
+ bool unindex(const DiskLoc& thisLoc, IndexDetails& id, BSONObj& key, const DiskLoc& recordLoc);
+
+ /* locate may return an "unused" key that is just a marker. so be careful.
+ looks for a key:recordloc pair.
+ */
+ DiskLoc locate(const DiskLoc& thisLoc, BSONObj& key, const BSONObj &order, int& pos, bool& found, DiskLoc recordLoc, int direction=1);
+
+ /* advance one key position in the index: */
+ DiskLoc advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller);
+ DiskLoc getHead(const DiskLoc& thisLoc);
+
+ /* get tree shape */
+ void shape(stringstream&);
+ private:
+ void fixParentPtrs(const DiskLoc& thisLoc);
+ void delBucket(const DiskLoc& thisLoc, IndexDetails&);
+ void delKeyAtPos(const DiskLoc& thisLoc, IndexDetails& id, int p);
+ BSONObj keyAt(int keyOfs) {
+ return keyOfs >= n ? BSONObj() : keyNode(keyOfs).key;
+ }
+ static BtreeBucket* allocTemp(); /* caller must release with free() */
+ void insertHere(DiskLoc thisLoc, int keypos,
+ DiskLoc recordLoc, BSONObj& key, const BSONObj &order,
+ DiskLoc lchild, DiskLoc rchild, IndexDetails&);
+ int _insert(DiskLoc thisLoc, DiskLoc recordLoc,
+ BSONObj& key, const BSONObj &order, bool dupsAllowed,
+ DiskLoc lChild, DiskLoc rChild, IndexDetails&);
+ bool find(BSONObj& key, DiskLoc recordLoc, const BSONObj &order, int& pos);
+ static void findLargestKey(const DiskLoc& thisLoc, DiskLoc& largestLoc, int& largestKey);
+ };
+
+ class BtreeCursor : public Cursor {
+ friend class BtreeBucket;
+ BSONObj startKey;
+ BSONObj endKey;
// BSONObj query; // the query we are working on in association with the cursor -- see noMoreMatches()
-public:
- BtreeCursor(IndexDetails&, const BSONObj& startKey, int direction, BSONObj& query);
- virtual bool ok() {
- return !bucket.isNull();
- }
- bool eof() {
- return !ok();
- }
- virtual bool advance();
-
- virtual void noteLocation(); // updates keyAtKeyOfs...
- virtual void checkLocation();
-
- _KeyNode& _currKeyNode() {
- assert( !bucket.isNull() );
- _KeyNode& kn = bucket.btree()->k(keyOfs);
- assert( kn.isUsed() );
- return kn;
- }
- KeyNode currKeyNode() {
- assert( !bucket.isNull() );
- return bucket.btree()->keyNode(keyOfs);
- }
- BSONObj currKey() {
- return currKeyNode().key;
- }
-
- virtual BSONObj indexKeyPattern() {
- return indexDetails.keyPattern();
- }
-
- virtual void aboutToDeleteBucket(const DiskLoc& b) {
- if ( bucket == b )
- keyOfs = -1;
- }
-
- virtual DiskLoc currLoc() {
- return !bucket.isNull() ? _currKeyNode().recordLoc : DiskLoc();
- }
- virtual Record* _current() {
- return currLoc().rec();
- }
- virtual BSONObj current() {
- return BSONObj(_current());
- }
- virtual string toString() {
- string s = string("BtreeCursor ") + indexDetails.indexName();
- if ( direction < 0 ) s += " reverse";
- return s;
- }
-
-private:
- void findExtremeKeys( const BSONObj &query );
- void findExtremeInequalityValues( const BSONElement &e,
- BSONElement &lowest,
- BSONElement &highest );
- static void getFields( const BSONObj &key, set< string > &fields );
- void checkUnused();
- void checkEnd();
- IndexDetails& indexDetails;
- BSONObj order;
- DiskLoc bucket;
- int keyOfs;
- int direction; // 1=fwd,-1=reverse
- BSONObj keyAtKeyOfs; // so we can tell if things moved around on us between the query and the getMore call
- DiskLoc locAtKeyOfs;
-};
+ public:
+ BtreeCursor(IndexDetails&, const BSONObj& startKey, int direction, BSONObj& query);
+ virtual bool ok() {
+ return !bucket.isNull();
+ }
+ bool eof() {
+ return !ok();
+ }
+ virtual bool advance();
+
+ virtual void noteLocation(); // updates keyAtKeyOfs...
+ virtual void checkLocation();
+
+ _KeyNode& _currKeyNode() {
+ assert( !bucket.isNull() );
+ _KeyNode& kn = bucket.btree()->k(keyOfs);
+ assert( kn.isUsed() );
+ return kn;
+ }
+ KeyNode currKeyNode() {
+ assert( !bucket.isNull() );
+ return bucket.btree()->keyNode(keyOfs);
+ }
+ BSONObj currKey() {
+ return currKeyNode().key;
+ }
+
+ virtual BSONObj indexKeyPattern() {
+ return indexDetails.keyPattern();
+ }
+
+ virtual void aboutToDeleteBucket(const DiskLoc& b) {
+ if ( bucket == b )
+ keyOfs = -1;
+ }
+
+ virtual DiskLoc currLoc() {
+ return !bucket.isNull() ? _currKeyNode().recordLoc : DiskLoc();
+ }
+ virtual Record* _current() {
+ return currLoc().rec();
+ }
+ virtual BSONObj current() {
+ return BSONObj(_current());
+ }
+ virtual string toString() {
+ string s = string("BtreeCursor ") + indexDetails.indexName();
+ if ( direction < 0 ) s += " reverse";
+ return s;
+ }
+
+ private:
+ void findExtremeKeys( const BSONObj &query );
+ void findExtremeInequalityValues( const BSONElement &e,
+ BSONElement &lowest,
+ BSONElement &highest );
+ static void getFields( const BSONObj &key, set< string > &fields );
+ void checkUnused();
+ void checkEnd();
+ IndexDetails& indexDetails;
+ BSONObj order;
+ DiskLoc bucket;
+ int keyOfs;
+ int direction; // 1=fwd,-1=reverse
+ BSONObj keyAtKeyOfs; // so we can tell if things moved around on us between the query and the getMore call
+ DiskLoc locAtKeyOfs;
+ };
#pragma pack(pop)
diff --git a/db/btreecursor.cpp b/db/btreecursor.cpp
index b141cc24c36..735c9dc441a 100644
--- a/db/btreecursor.cpp
+++ b/db/btreecursor.cpp
@@ -23,213 +23,213 @@
namespace mongo {
-extern int otherTraceLevel;
+ extern int otherTraceLevel;
-DiskLoc maxDiskLoc(0x7fffffff, 0x7fffffff);
-DiskLoc minDiskLoc(0, 1);
+ DiskLoc maxDiskLoc(0x7fffffff, 0x7fffffff);
+ DiskLoc minDiskLoc(0, 1);
-BtreeCursor::BtreeCursor(IndexDetails& _id, const BSONObj& k, int _direction, BSONObj& _query) :
+ BtreeCursor::BtreeCursor(IndexDetails& _id, const BSONObj& k, int _direction, BSONObj& _query) :
// query(_query),
- indexDetails(_id),
- order(_id.keyPattern()),
- direction(_direction)
-{
+ indexDetails(_id),
+ order(_id.keyPattern()),
+ direction(_direction)
+ {
//otherTraceLevel = 999;
- bool found;
- if ( otherTraceLevel >= 12 ) {
- if ( otherTraceLevel >= 200 ) {
- cout << "::BtreeCursor() qtl>200. validating entire index." << endl;
- indexDetails.head.btree()->fullValidate(indexDetails.head, order);
- }
- else {
- cout << "BTreeCursor(). dumping head bucket" << endl;
- indexDetails.head.btree()->dump();
+ bool found;
+ if ( otherTraceLevel >= 12 ) {
+ if ( otherTraceLevel >= 200 ) {
+ out() << "::BtreeCursor() qtl>200. validating entire index." << endl;
+ indexDetails.head.btree()->fullValidate(indexDetails.head, order);
+ }
+ else {
+ out() << "BTreeCursor(). dumping head bucket" << endl;
+ indexDetails.head.btree()->dump();
+ }
}
- }
- findExtremeKeys( _query );
- if ( !k.isEmpty() )
- startKey = k;
+ findExtremeKeys( _query );
+ if ( !k.isEmpty() )
+ startKey = k;
- bucket = indexDetails.head.btree()->
- locate(indexDetails.head, startKey, order, keyOfs, found, direction > 0 ? minDiskLoc : maxDiskLoc, direction);
+ bucket = indexDetails.head.btree()->
+ locate(indexDetails.head, startKey, order, keyOfs, found, direction > 0 ? minDiskLoc : maxDiskLoc, direction);
- checkUnused();
-}
+ checkUnused();
+ }
// Given a query, find the lowest and highest keys along our index that could
// potentially match the query. These lowest and highest keys will be mapped
// to startKey and endKey based on the value of direction.
-void BtreeCursor::findExtremeKeys( const BSONObj &query ) {
- BSONObjBuilder startBuilder;
- BSONObjBuilder endBuilder;
- set< string >fields;
- getFields( indexDetails.keyPattern(), fields );
- for ( set<string>::iterator i = fields.begin(); i != fields.end(); ++i ) {
- const char * field = i->c_str();
- BSONElement k = indexDetails.keyPattern().getFieldDotted( field );
- int number = (int) k.number(); // returns 0.0 if not numeric
- bool forward = ( ( number >= 0 ? 1 : -1 ) * direction > 0 );
- BSONElement lowest = minKey.firstElement();
- BSONElement highest = maxKey.firstElement();
- BSONElement e = query.getFieldDotted( field );
- if ( !e.eoo() && e.type() != RegEx ) {
- if ( getGtLtOp( e ) == JSMatcher::Equality )
- lowest = highest = e;
- else
- findExtremeInequalityValues( e, lowest, highest );
+ void BtreeCursor::findExtremeKeys( const BSONObj &query ) {
+ BSONObjBuilder startBuilder;
+ BSONObjBuilder endBuilder;
+ set< string >fields;
+ getFields( indexDetails.keyPattern(), fields );
+ for ( set<string>::iterator i = fields.begin(); i != fields.end(); ++i ) {
+ const char * field = i->c_str();
+ BSONElement k = indexDetails.keyPattern().getFieldDotted( field );
+ int number = (int) k.number(); // returns 0.0 if not numeric
+ bool forward = ( ( number >= 0 ? 1 : -1 ) * direction > 0 );
+ BSONElement lowest = minKey.firstElement();
+ BSONElement highest = maxKey.firstElement();
+ BSONElement e = query.getFieldDotted( field );
+ if ( !e.eoo() && e.type() != RegEx ) {
+ if ( getGtLtOp( e ) == JSMatcher::Equality )
+ lowest = highest = e;
+ else
+ findExtremeInequalityValues( e, lowest, highest );
+ }
+ startBuilder.appendAs( forward ? lowest : highest, "" );
+ endBuilder.appendAs( forward ? highest : lowest, "" );
}
- startBuilder.appendAs( forward ? lowest : highest, "" );
- endBuilder.appendAs( forward ? highest : lowest, "" );
+ startKey = startBuilder.doneAndDecouple();
+ endKey = endBuilder.doneAndDecouple();
}
- startKey = startBuilder.doneAndDecouple();
- endKey = endBuilder.doneAndDecouple();
-}
// Find lowest and highest possible key values given all $gt, $gte, $lt, and
// $lte elements in e. The values of lowest and highest should be
// preinitialized, for example to minKey.firstElement() and maxKey.firstElement().
-void BtreeCursor::findExtremeInequalityValues( const BSONElement &e,
- BSONElement &lowest,
- BSONElement &highest ) {
- BSONObjIterator i( e.embeddedObject() );
- while ( 1 ) {
- BSONElement s = i.next();
- if ( s.eoo() )
- break;
- int op = s.getGtLtOp();
- if ( ( op == JSMatcher::LT || op == JSMatcher::LTE ) &&
- ( s.woCompare( highest, false ) < 0 ) )
- highest = s;
- else if ( ( op == JSMatcher::GT || op == JSMatcher::GTE ) &&
- ( s.woCompare( lowest, false ) > 0 ) )
- lowest = s;
+ void BtreeCursor::findExtremeInequalityValues( const BSONElement &e,
+ BSONElement &lowest,
+ BSONElement &highest ) {
+ BSONObjIterator i( e.embeddedObject() );
+ while ( 1 ) {
+ BSONElement s = i.next();
+ if ( s.eoo() )
+ break;
+ int op = s.getGtLtOp();
+ if ( ( op == JSMatcher::LT || op == JSMatcher::LTE ) &&
+ ( s.woCompare( highest, false ) < 0 ) )
+ highest = s;
+ else if ( ( op == JSMatcher::GT || op == JSMatcher::GTE ) &&
+ ( s.woCompare( lowest, false ) > 0 ) )
+ lowest = s;
+ }
}
-}
// Expand all field names in key to use dotted notation.
-void BtreeCursor::getFields( const BSONObj &key, set< string > &fields ) {
- BSONObjIterator i( key );
- while ( 1 ) {
- BSONElement k = i.next();
- if ( k.eoo() )
- break;
- bool addedSubfield = false;
- if ( k.type() == Object ) {
- set< string > subFields;
- getFields( k.embeddedObject(), subFields );
- for ( set< string >::iterator i = subFields.begin(); i != subFields.end(); ++i ) {
- addedSubfield = true;
- fields.insert( k.fieldName() + string( "." ) + *i );
+ void BtreeCursor::getFields( const BSONObj &key, set< string > &fields ) {
+ BSONObjIterator i( key );
+ while ( 1 ) {
+ BSONElement k = i.next();
+ if ( k.eoo() )
+ break;
+ bool addedSubfield = false;
+ if ( k.type() == Object ) {
+ set< string > subFields;
+ getFields( k.embeddedObject(), subFields );
+ for ( set< string >::iterator i = subFields.begin(); i != subFields.end(); ++i ) {
+ addedSubfield = true;
+ fields.insert( k.fieldName() + string( "." ) + *i );
+ }
}
+ if ( !addedSubfield )
+ fields.insert( k.fieldName() );
}
- if ( !addedSubfield )
- fields.insert( k.fieldName() );
}
-}
-
-/* skip unused keys. */
-void BtreeCursor::checkUnused() {
- int u = 0;
- while ( 1 ) {
- if ( !ok() )
- break;
- BtreeBucket *b = bucket.btree();
- _KeyNode& kn = b->k(keyOfs);
- if ( kn.isUsed() )
- break;
- bucket = b->advance(bucket, keyOfs, direction, "checkUnused");
- u++;
+
+ /* skip unused keys. */
+ void BtreeCursor::checkUnused() {
+ int u = 0;
+ while ( 1 ) {
+ if ( !ok() )
+ break;
+ BtreeBucket *b = bucket.btree();
+ _KeyNode& kn = b->k(keyOfs);
+ if ( kn.isUsed() )
+ break;
+ bucket = b->advance(bucket, keyOfs, direction, "checkUnused");
+ u++;
+ }
+ if ( u > 10 )
+ OCCASIONALLY log() << "btree unused skipped:" << u << '\n';
}
- if ( u > 10 )
- OCCASIONALLY log() << "btree unused skipped:" << u << '\n';
-}
// Return a value in the set {-1, 0, 1} to represent the sign of parameter i.
-int sgn( int i ) {
- if ( i == 0 )
- return 0;
- return i > 0 ? 1 : -1;
-}
+ int sgn( int i ) {
+ if ( i == 0 )
+ return 0;
+ return i > 0 ? 1 : -1;
+ }
// Check if the current key is beyond endKey.
-void BtreeCursor::checkEnd() {
- if ( bucket.isNull() )
- return;
- int cmp = sgn( endKey.woCompare( currKey(), order ) );
- if ( cmp != 0 && cmp != direction )
- bucket = DiskLoc();
-}
-
-bool BtreeCursor::advance() {
- if ( bucket.isNull() )
- return false;
- bucket = bucket.btree()->advance(bucket, keyOfs, direction, "BtreeCursor::advance");
- checkUnused();
- checkEnd();
- return !bucket.isNull();
-}
-
-void BtreeCursor::noteLocation() {
- if ( !eof() ) {
- BSONObj o = bucket.btree()->keyAt(keyOfs).copy();
- keyAtKeyOfs = o;
- locAtKeyOfs = bucket.btree()->k(keyOfs).recordLoc;
+ void BtreeCursor::checkEnd() {
+ if ( bucket.isNull() )
+ return;
+ int cmp = sgn( endKey.woCompare( currKey(), order ) );
+ if ( cmp != 0 && cmp != direction )
+ bucket = DiskLoc();
}
-}
-/* Since the last noteLocation(), our key may have moved around, and that old cached
- information may thus be stale and wrong (although often it is right). We check
- that here; if we have moved, we have to search back for where we were at.
+ bool BtreeCursor::advance() {
+ if ( bucket.isNull() )
+ return false;
+ bucket = bucket.btree()->advance(bucket, keyOfs, direction, "BtreeCursor::advance");
+ checkUnused();
+ checkEnd();
+ return !bucket.isNull();
+ }
- i.e., after operations on the index, the BtreeCursor's cached location info may
- be invalid. This function ensures validity, so you should call it before using
- the cursor if other writers have used the database since the last noteLocation
- call.
-*/
-void BtreeCursor::checkLocation() {
- if ( eof() )
- return;
-
- if ( keyOfs >= 0 ) {
- BtreeBucket *b = bucket.btree();
-
- assert( !keyAtKeyOfs.isEmpty() );
-
- // Note keyAt() returns an empty BSONObj if keyOfs is now out of range,
- // which is possible as keys may have been deleted.
- if ( b->keyAt(keyOfs).woEqual(keyAtKeyOfs) &&
- b->k(keyOfs).recordLoc == locAtKeyOfs ) {
- if ( !b->k(keyOfs).isUsed() ) {
- /* we were deleted but still exist as an unused
- marker key. advance.
- */
- checkUnused();
- }
- return;
+ void BtreeCursor::noteLocation() {
+ if ( !eof() ) {
+ BSONObj o = bucket.btree()->keyAt(keyOfs).copy();
+ keyAtKeyOfs = o;
+ locAtKeyOfs = bucket.btree()->k(keyOfs).recordLoc;
}
}
- /* normally we don't get to here. when we do, old position is no longer
- valid and we must refind where we left off (which is expensive)
+ /* Since the last noteLocation(), our key may have moved around, and that old cached
+ information may thus be stale and wrong (although often it is right). We check
+ that here; if we have moved, we have to search back for where we were at.
+
+ i.e., after operations on the index, the BtreeCursor's cached location info may
+ be invalid. This function ensures validity, so you should call it before using
+ the cursor if other writers have used the database since the last noteLocation
+ call.
*/
+ void BtreeCursor::checkLocation() {
+ if ( eof() )
+ return;
- bool found;
+ if ( keyOfs >= 0 ) {
+ BtreeBucket *b = bucket.btree();
+
+ assert( !keyAtKeyOfs.isEmpty() );
+
+ // Note keyAt() returns an empty BSONObj if keyOfs is now out of range,
+ // which is possible as keys may have been deleted.
+ if ( b->keyAt(keyOfs).woEqual(keyAtKeyOfs) &&
+ b->k(keyOfs).recordLoc == locAtKeyOfs ) {
+ if ( !b->k(keyOfs).isUsed() ) {
+ /* we were deleted but still exist as an unused
+ marker key. advance.
+ */
+ checkUnused();
+ }
+ return;
+ }
+ }
- /* TODO: Switch to keep indexdetails and do idx.head! */
- bucket = indexDetails.head.btree()->locate(indexDetails.head, keyAtKeyOfs, order, keyOfs, found, locAtKeyOfs, direction);
- RARELY log() << " key seems to have moved in the index, refinding. found:" << found << endl;
- if ( found )
- checkUnused();
-}
+ /* normally we don't get to here. when we do, old position is no longer
+ valid and we must refind where we left off (which is expensive)
+ */
-/* ----------------------------------------------------------------------------- */
+ bool found;
-struct BtreeUnitTest {
- BtreeUnitTest() {
- assert( minDiskLoc.compare(maxDiskLoc) < 0 );
+ /* TODO: Switch to keep indexdetails and do idx.head! */
+ bucket = indexDetails.head.btree()->locate(indexDetails.head, keyAtKeyOfs, order, keyOfs, found, locAtKeyOfs, direction);
+ RARELY log() << " key seems to have moved in the index, refinding. found:" << found << endl;
+ if ( found )
+ checkUnused();
}
-} btut;
+
+ /* ----------------------------------------------------------------------------- */
+
+ struct BtreeUnitTest {
+ BtreeUnitTest() {
+ assert( minDiskLoc.compare(maxDiskLoc) < 0 );
+ }
+ } btut;
} // namespace mongo
diff --git a/db/clientcursor.cpp b/db/clientcursor.cpp
index 2ec0d7bfe97..400c912038e 100644
--- a/db/clientcursor.cpp
+++ b/db/clientcursor.cpp
@@ -30,201 +30,201 @@
namespace mongo {
-/* TODO: FIX cleanup of clientCursors when hit the end. (ntoreturn insufficient) */
+ /* TODO: FIX cleanup of clientCursors when hit the end. (ntoreturn insufficient) */
-CCById clientCursorsById;
+ CCById clientCursorsById;
-/* ------------------------------------------- */
+ /* ------------------------------------------- */
-typedef multimap<DiskLoc, ClientCursor*> ByLoc;
-ByLoc byLoc;
-unsigned byLocSize() {
- return byLoc.size();
-}
+ typedef multimap<DiskLoc, ClientCursor*> ByLoc;
+ ByLoc byLoc;
+ unsigned byLocSize() {
+ return byLoc.size();
+ }
-void ClientCursor::setLastLoc(DiskLoc L) {
- if ( L == _lastLoc )
- return;
+ void ClientCursor::setLastLoc(DiskLoc L) {
+ if ( L == _lastLoc )
+ return;
- if ( !_lastLoc.isNull() ) {
- ByLoc::iterator i = kv_find(byLoc, _lastLoc, this);
- if ( i != byLoc.end() )
- byLoc.erase(i);
- }
+ if ( !_lastLoc.isNull() ) {
+ ByLoc::iterator i = kv_find(byLoc, _lastLoc, this);
+ if ( i != byLoc.end() )
+ byLoc.erase(i);
+ }
- if ( !L.isNull() )
- byLoc.insert( make_pair(L, this) );
- _lastLoc = L;
-}
+ if ( !L.isNull() )
+ byLoc.insert( make_pair(L, this) );
+ _lastLoc = L;
+ }
-/* ------------------------------------------- */
+ /* ------------------------------------------- */
-/* must call this when a btree node is updated */
+ /* must call this when a btree node is updated */
//void removedKey(const DiskLoc& btreeLoc, int keyPos) {
//}
-/* todo: this implementation is incomplete. we use it as a prefix for dropDatabase, which
- works fine as the prefix will end with '.'. however, when used with drop and
- deleteIndexes, this could take out cursors that belong to something else -- if you
- drop "foo", currently, this will kill cursors for "foobar".
-*/
-void ClientCursor::invalidate(const char *nsPrefix) {
- vector<ClientCursor*> toDelete;
-
- int len = strlen(nsPrefix);
- assert( len > 0 && strchr(nsPrefix, '.') );
- for ( ByLoc::iterator i = byLoc.begin(); i != byLoc.end(); ++i ) {
- ClientCursor *cc = i->second;
- if ( strncmp(nsPrefix, cc->ns.c_str(), len) == 0 )
- toDelete.push_back(i->second);
- }
+ /* todo: this implementation is incomplete. we use it as a prefix for dropDatabase, which
+ works fine as the prefix will end with '.'. however, when used with drop and
+ deleteIndexes, this could take out cursors that belong to something else -- if you
+ drop "foo", currently, this will kill cursors for "foobar".
+ */
+ void ClientCursor::invalidate(const char *nsPrefix) {
+ vector<ClientCursor*> toDelete;
+
+ int len = strlen(nsPrefix);
+ assert( len > 0 && strchr(nsPrefix, '.') );
+ for ( ByLoc::iterator i = byLoc.begin(); i != byLoc.end(); ++i ) {
+ ClientCursor *cc = i->second;
+ if ( strncmp(nsPrefix, cc->ns.c_str(), len) == 0 )
+ toDelete.push_back(i->second);
+ }
- for ( vector<ClientCursor*>::iterator i = toDelete.begin(); i != toDelete.end(); ++i )
- delete (*i);
-}
+ for ( vector<ClientCursor*>::iterator i = toDelete.begin(); i != toDelete.end(); ++i )
+ delete (*i);
+ }
-/* must call when a btree bucket going away.
- note this is potentially slow
-*/
-void aboutToDeleteBucket(const DiskLoc& b) {
- RARELY if ( byLoc.size() > 70 ) {
- log() << "perf warning: byLoc.size=" << byLoc.size() << " in aboutToDeleteBucket\n";
+ /* must call when a btree bucket going away.
+ note this is potentially slow
+ */
+ void aboutToDeleteBucket(const DiskLoc& b) {
+ RARELY if ( byLoc.size() > 70 ) {
+ log() << "perf warning: byLoc.size=" << byLoc.size() << " in aboutToDeleteBucket\n";
+ }
+ for ( ByLoc::iterator i = byLoc.begin(); i != byLoc.end(); i++ )
+ i->second->c->aboutToDeleteBucket(b);
}
- for ( ByLoc::iterator i = byLoc.begin(); i != byLoc.end(); i++ )
- i->second->c->aboutToDeleteBucket(b);
-}
-
-/* must call this on a delete so we clean up the cursors. */
-void aboutToDelete(const DiskLoc& dl) {
- ByLoc::iterator j = byLoc.lower_bound(dl);
- ByLoc::iterator stop = byLoc.upper_bound(dl);
- if ( j == stop )
- return;
-
- assert( dbMutexInfo.isLocked() );
- vector<ClientCursor*> toAdvance;
-
- while ( 1 ) {
- toAdvance.push_back(j->second);
- WIN assert( j->first == dl );
- ++j;
+
+ /* must call this on a delete so we clean up the cursors. */
+ void aboutToDelete(const DiskLoc& dl) {
+ ByLoc::iterator j = byLoc.lower_bound(dl);
+ ByLoc::iterator stop = byLoc.upper_bound(dl);
if ( j == stop )
- break;
- }
+ return;
- wassert( toAdvance.size() < 5000 );
+ assert( dbMutexInfo.isLocked() );
+ vector<ClientCursor*> toAdvance;
- for ( vector<ClientCursor*>::iterator i = toAdvance.begin();
- i != toAdvance.end(); ++i )
- {
- Cursor *c = (*i)->c.get();
- DiskLoc tmp1 = c->currLoc();
- if ( tmp1 != dl ) {
- /* this might indicate a failure to call ClientCursor::updateLocation() */
- problem() << "warning: cursor loc does not match byLoc position!" << endl;
- }
- c->checkLocation();
- if ( c->tailing() ) {
- DEV cout << "killing cursor as we would have to advance it and it is tailable" << endl;
- delete *i;
- continue;
- }
- c->advance();
- DiskLoc newLoc = c->currLoc();
- if ( newLoc.isNull() ) {
- // advanced to end -- delete cursor
- delete *i;
+ while ( 1 ) {
+ toAdvance.push_back(j->second);
+ WIN assert( j->first == dl );
+ ++j;
+ if ( j == stop )
+ break;
}
- else {
- wassert( newLoc != dl );
- (*i)->updateLocation();
+
+ wassert( toAdvance.size() < 5000 );
+
+ for ( vector<ClientCursor*>::iterator i = toAdvance.begin();
+ i != toAdvance.end(); ++i )
+ {
+ Cursor *c = (*i)->c.get();
+ DiskLoc tmp1 = c->currLoc();
+ if ( tmp1 != dl ) {
+ /* this might indicate a failure to call ClientCursor::updateLocation() */
+ problem() << "warning: cursor loc does not match byLoc position!" << endl;
+ }
+ c->checkLocation();
+ if ( c->tailing() ) {
+ DEV out() << "killing cursor as we would have to advance it and it is tailable" << endl;
+ delete *i;
+ continue;
+ }
+ c->advance();
+ DiskLoc newLoc = c->currLoc();
+ if ( newLoc.isNull() ) {
+ // advanced to end -- delete cursor
+ delete *i;
+ }
+ else {
+ wassert( newLoc != dl );
+ (*i)->updateLocation();
+ }
}
}
-}
-
-ClientCursor::~ClientCursor() {
- assert( pos != -2 );
- setLastLoc( DiskLoc() ); // removes us from bylocation multimap
- clientCursorsById.erase(cursorid);
- // defensive:
- (CursorId&) cursorid = -1;
- pos = -2;
-}
-
-/* call when cursor's location changes so that we can update the
- cursorsbylocation map. if you are locked and internally iterating, only
- need to call when you are ready to "unlock".
-*/
-void ClientCursor::updateLocation() {
- assert( cursorid );
- DiskLoc cl = c->currLoc();
- if ( lastLoc() == cl ) {
- //log() << "info: lastloc==curloc " << ns << '\n';
- return;
- }
- setLastLoc(cl);
- c->noteLocation();
-}
-
-int ctmLast = 0; // so we don't have to do find() which is a little slow very often.
-long long ClientCursor::allocCursorId() {
- long long x;
- int ctm = (int) curTimeMillis();
- while ( 1 ) {
- x = (((long long)rand()) << 32);
- x = x | ctm | 0x80000000; // OR to make sure not zero
- if ( ctm != ctmLast || ClientCursor::find(x, false) == 0 )
- break;
- }
- ctmLast = ctm;
- DEV cout << " alloccursorid " << x << endl;
- return x;
-}
-
-class CursInspector : public SingleResultObjCursor {
- Cursor* clone() {
- return new CursInspector();
+
+ ClientCursor::~ClientCursor() {
+ assert( pos != -2 );
+ setLastLoc( DiskLoc() ); // removes us from bylocation multimap
+ clientCursorsById.erase(cursorid);
+ // defensive:
+ (CursorId&) cursorid = -1;
+ pos = -2;
}
- void fill() {
- b.append("byLocation_size", byLoc.size());
- b.append("clientCursors_size", clientCursorsById.size());
- /* todo update for new impl:
- stringstream ss;
- ss << '\n';
- int x = 40;
- DiskToCC::iterator it = clientCursorsByLocation.begin();
- while( it != clientCursorsByLocation.end() ) {
- DiskLoc dl = it->first;
- ss << dl.toString() << " -> \n";
- set<ClientCursor*>::iterator j = it->second.begin();
- while( j != it->second.end() ) {
- ss << " cid:" << j->second->cursorid << ' ' << j->second->ns << " pos:" << j->second->pos << " LL:" << j->second->lastLoc.toString();
- try {
- setClient(j->second->ns.c_str());
- Record *r = dl.rec();
- ss << " lwh:" << hex << r->lengthWithHeaders << " nxt:" << r->nextOfs << " prv:" << r->prevOfs << dec << ' ' << j->second->c->toString();
- if( r->nextOfs >= 0 && r->nextOfs < 16 )
- ss << " DELETED??? (!)";
- }
- catch(...) {
- ss << " EXCEPTION";
- }
- ss << "\n";
- j++;
- }
- if( --x <= 0 ) {
- ss << "only first 40 shown\n" << endl;
- break;
- }
- it++;
- }
- b.append("dump", ss.str().c_str());
- */
+
+ /* call when cursor's location changes so that we can update the
+ cursorsbylocation map. if you are locked and internally iterating, only
+ need to call when you are ready to "unlock".
+ */
+ void ClientCursor::updateLocation() {
+ assert( cursorid );
+ DiskLoc cl = c->currLoc();
+ if ( lastLoc() == cl ) {
+ //log() << "info: lastloc==curloc " << ns << '\n';
+ return;
+ }
+ setLastLoc(cl);
+ c->noteLocation();
}
-public:
- CursInspector() {
- reg("intr.cursors");
+
+ int ctmLast = 0; // so we don't have to do find() which is a little slow very often.
+ long long ClientCursor::allocCursorId() {
+ long long x;
+ int ctm = (int) curTimeMillis();
+ while ( 1 ) {
+ x = (((long long)rand()) << 32);
+ x = x | ctm | 0x80000000; // OR to make sure not zero
+ if ( ctm != ctmLast || ClientCursor::find(x, false) == 0 )
+ break;
+ }
+ ctmLast = ctm;
+ DEV out() << " alloccursorid " << x << endl;
+ return x;
}
-} _ciproto;
+
+ class CursInspector : public SingleResultObjCursor {
+ Cursor* clone() {
+ return new CursInspector();
+ }
+ void fill() {
+ b.append("byLocation_size", byLoc.size());
+ b.append("clientCursors_size", clientCursorsById.size());
+ /* todo update for new impl:
+ stringstream ss;
+ ss << '\n';
+ int x = 40;
+ DiskToCC::iterator it = clientCursorsByLocation.begin();
+ while( it != clientCursorsByLocation.end() ) {
+ DiskLoc dl = it->first;
+ ss << dl.toString() << " -> \n";
+ set<ClientCursor*>::iterator j = it->second.begin();
+ while( j != it->second.end() ) {
+ ss << " cid:" << j->second->cursorid << ' ' << j->second->ns << " pos:" << j->second->pos << " LL:" << j->second->lastLoc.toString();
+ try {
+ setClient(j->second->ns.c_str());
+ Record *r = dl.rec();
+ ss << " lwh:" << hex << r->lengthWithHeaders << " nxt:" << r->nextOfs << " prv:" << r->prevOfs << dec << ' ' << j->second->c->toString();
+ if( r->nextOfs >= 0 && r->nextOfs < 16 )
+ ss << " DELETED??? (!)";
+ }
+ catch(...) {
+ ss << " EXCEPTION";
+ }
+ ss << "\n";
+ j++;
+ }
+ if( --x <= 0 ) {
+ ss << "only first 40 shown\n" << endl;
+ break;
+ }
+ it++;
+ }
+ b.append("dump", ss.str().c_str());
+ */
+ }
+ public:
+ CursInspector() {
+ reg("intr.cursors");
+ }
+ } _ciproto;
} // namespace mongo
diff --git a/db/clientcursor.h b/db/clientcursor.h
index 49d14ecdae0..b04085ec2b2 100644
--- a/db/clientcursor.h
+++ b/db/clientcursor.h
@@ -28,65 +28,65 @@
namespace mongo {
-typedef long long CursorId;
-class Cursor;
-class ClientCursor;
-typedef map<CursorId, ClientCursor*> CCById;
-extern CCById clientCursorsById;
+ typedef long long CursorId;
+ class Cursor;
+ class ClientCursor;
+ typedef map<CursorId, ClientCursor*> CCById;
+ extern CCById clientCursorsById;
-class ClientCursor {
- friend class CursInspector;
- DiskLoc _lastLoc; // use getter and setter not this.
- static CursorId allocCursorId();
-public:
- ClientCursor() : cursorid( allocCursorId() ), pos(0) {
- clientCursorsById.insert( make_pair(cursorid, this) );
- }
- ~ClientCursor();
- const CursorId cursorid;
- string ns;
- //BSONObj pattern; // the query object
- auto_ptr<JSMatcher> matcher;
- auto_ptr<Cursor> c;
- int pos;
- DiskLoc lastLoc() const {
- return _lastLoc;
- }
- void setLastLoc(DiskLoc);
- auto_ptr< set<string> > filter; // which fields query wants returned
- Message originalMessage; // this is effectively an auto ptr for data the matcher points to.
+ class ClientCursor {
+ friend class CursInspector;
+ DiskLoc _lastLoc; // use getter and setter not this.
+ static CursorId allocCursorId();
+ public:
+ ClientCursor() : cursorid( allocCursorId() ), pos(0) {
+ clientCursorsById.insert( make_pair(cursorid, this) );
+ }
+ ~ClientCursor();
+ const CursorId cursorid;
+ string ns;
+ //BSONObj pattern; // the query object
+ auto_ptr<JSMatcher> matcher;
+ auto_ptr<Cursor> c;
+ int pos;
+ DiskLoc lastLoc() const {
+ return _lastLoc;
+ }
+ void setLastLoc(DiskLoc);
+ auto_ptr< set<string> > filter; // which fields query wants returned
+ Message originalMessage; // this is effectively an auto ptr for data the matcher points to.
- /* Get rid of cursors for namespaces that begin with nsprefix.
- Used by drop, deleteIndexes, dropDatabase.
- */
- static void invalidate(const char *nsPrefix);
+ /* Get rid of cursors for namespaces that begin with nsprefix.
+ Used by drop, deleteIndexes, dropDatabase.
+ */
+ static void invalidate(const char *nsPrefix);
- static bool erase(CursorId id) {
- ClientCursor *cc = find(id);
- if ( cc ) {
- delete cc;
- return true;
+ static bool erase(CursorId id) {
+ ClientCursor *cc = find(id);
+ if ( cc ) {
+ delete cc;
+ return true;
+ }
+ return false;
}
- return false;
- }
- static ClientCursor* find(CursorId id, bool warn = true) {
- CCById::iterator it = clientCursorsById.find(id);
- if ( it == clientCursorsById.end() ) {
- if ( warn )
- OCCASIONALLY cout << "ClientCursor::find(): cursor not found in map " << id << " (ok after a drop)\n";
- return 0;
+ static ClientCursor* find(CursorId id, bool warn = true) {
+ CCById::iterator it = clientCursorsById.find(id);
+ if ( it == clientCursorsById.end() ) {
+ if ( warn )
+ OCCASIONALLY out() << "ClientCursor::find(): cursor not found in map " << id << " (ok after a drop)\n";
+ return 0;
+ }
+ return it->second;
}
- return it->second;
- }
- /* call when cursor's location changes so that we can update the
- cursorsbylocation map. if you are locked and internally iterating, only
- need to call when you are ready to "unlock".
- */
- void updateLocation();
+ /* call when cursor's location changes so that we can update the
+ cursorsbylocation map. if you are locked and internally iterating, only
+ need to call when you are ready to "unlock".
+ */
+ void updateLocation();
- void cleanupByLocation(DiskLoc loc);
-};
+ void cleanupByLocation(DiskLoc loc);
+ };
} // namespace mongo
diff --git a/db/cloner.cpp b/db/cloner.cpp
index ddde078313e..6f09ccdae72 100644
--- a/db/cloner.cpp
+++ b/db/cloner.cpp
@@ -29,241 +29,241 @@
namespace mongo {
-void ensureHaveIdIndex(const char *ns);
-extern int port;
+ void ensureHaveIdIndex(const char *ns);
+ extern int port;
-class Cloner: boost::noncopyable {
- auto_ptr< DBClientInterface > conn;
- void copy(const char *from_ns, const char *to_ns, bool isindex, bool logForRepl,
- bool masterSameProcess, bool slaveOk);
-public:
- Cloner() { }
+ class Cloner: boost::noncopyable {
+ auto_ptr< DBClientInterface > conn;
+ void copy(const char *from_ns, const char *to_ns, bool isindex, bool logForRepl,
+ bool masterSameProcess, bool slaveOk);
+ public:
+ Cloner() { }
- /* slaveOk - if true it is ok if the source of the data is !ismaster.
- */
- bool go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk);
-};
+ /* slaveOk - if true it is ok if the source of the data is !ismaster.
+ */
+ bool go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk);
+ };
-/* for index info object:
- { "name" : "name_1" , "ns" : "foo.index3" , "key" : { "name" : 1.0 } }
- we need to fix up the value in the "ns" parameter so that the name prefix is correct on a
- copy to a new name.
-*/
-BSONObj fixindex(BSONObj o) {
- BSONObjBuilder b;
- BSONObjIterator i(o);
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( e.eoo() )
- break;
- if ( string("ns") == e.fieldName() ) {
- uassert("bad ns field for index during dbcopy", e.type() == String);
- const char *p = strchr(e.valuestr(), '.');
- uassert("bad ns field for index during dbcopy [2]", p);
- string newname = database->name + p;
- b.append("ns", newname);
+ /* for index info object:
+ { "name" : "name_1" , "ns" : "foo.index3" , "key" : { "name" : 1.0 } }
+ we need to fix up the value in the "ns" parameter so that the name prefix is correct on a
+ copy to a new name.
+ */
+ BSONObj fixindex(BSONObj o) {
+ BSONObjBuilder b;
+ BSONObjIterator i(o);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ if ( string("ns") == e.fieldName() ) {
+ uassert("bad ns field for index during dbcopy", e.type() == String);
+ const char *p = strchr(e.valuestr(), '.');
+ uassert("bad ns field for index during dbcopy [2]", p);
+ string newname = database->name + p;
+ b.append("ns", newname);
+ }
+ else
+ b.append(e);
}
- else
- b.append(e);
- }
- BSONObj res= b.doneAndDecouple();
-
- /* if( mod ) {
- cout << "before: " << o.toString() << endl;
- o.dump();
- cout << "after: " << res.toString() << endl;
- res.dump();
- }*/
+ BSONObj res= b.doneAndDecouple();
- return res;
-}
+ /* if( mod ) {
+ out() << "before: " << o.toString() << endl;
+ o.dump();
+ out() << "after: " << res.toString() << endl;
+ res.dump();
+ }*/
-/* copy the specified collection
- isindex - if true, this is system.indexes collection, in which we do some transformation when copying.
-*/
-void Cloner::copy(const char *from_collection, const char *to_collection, bool isindex, bool logForRepl, bool masterSameProcess, bool slaveOk) {
- auto_ptr<DBClientCursor> c;
- {
- dbtemprelease r;
- c = conn->query( from_collection, emptyObj, 0, 0, 0, slaveOk ? Option_SlaveOk : 0 );
+ return res;
}
- assert( c.get() );
- while ( 1 ) {
+
+ /* copy the specified collection
+ isindex - if true, this is system.indexes collection, in which we do some transformation when copying.
+ */
+ void Cloner::copy(const char *from_collection, const char *to_collection, bool isindex, bool logForRepl, bool masterSameProcess, bool slaveOk) {
+ auto_ptr<DBClientCursor> c;
{
dbtemprelease r;
- if ( !c->more() )
- break;
+ c = conn->query( from_collection, emptyObj, 0, 0, 0, slaveOk ? Option_SlaveOk : 0 );
}
- BSONObj tmp = c->next();
+ assert( c.get() );
+ while ( 1 ) {
+ {
+ dbtemprelease r;
+ if ( !c->more() )
+ break;
+ }
+ BSONObj tmp = c->next();
- /* assure object is valid. note this will slow us down a good bit. */
- if ( !tmp.valid() ) {
- cout << "skipping corrupt object from " << from_collection << '\n';
- continue;
- }
+ /* assure object is valid. note this will slow us down a good bit. */
+ if ( !tmp.valid() ) {
+ out() << "skipping corrupt object from " << from_collection << '\n';
+ continue;
+ }
- BSONObj js = tmp;
- if ( isindex ) {
- assert( strstr(from_collection, "system.indexes") );
- js = fixindex(tmp);
- }
+ BSONObj js = tmp;
+ if ( isindex ) {
+ assert( strstr(from_collection, "system.indexes") );
+ js = fixindex(tmp);
+ }
- theDataFileMgr.insert(to_collection, (void*) js.objdata(), js.objsize());
- if ( logForRepl )
- logOp("i", to_collection, js);
- }
-}
-
-bool Cloner::go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk) {
- string todb = database->name;
- stringstream a,b;
- a << "localhost:" << port;
- b << "127.0.0.1:" << port;
- bool masterSameProcess = ( a.str() == masterHost || b.str() == masterHost );
- if ( masterSameProcess ) {
- if ( fromdb == todb && database->path == dbpath ) {
- // guard against an "infinite" loop
- /* if you are replicating, the local.sources config may be wrong if you get this */
- errmsg = "can't clone from self (localhost).";
- return false;
+ theDataFileMgr.insert(to_collection, (void*) js.objdata(), js.objsize());
+ if ( logForRepl )
+ logOp("i", to_collection, js);
}
}
- /* todo: we can put thesee releases inside dbclient or a dbclient specialization.
- or just wait until we get rid of global lock anyway.
- */
- string ns = fromdb + ".system.namespaces";
- auto_ptr<DBClientCursor> c;
- {
- dbtemprelease r;
- if ( !masterSameProcess ) {
- auto_ptr< DBClientConnection > c( new DBClientConnection() );
- if ( !c->connect( masterHost, errmsg ) )
+
+ bool Cloner::go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk) {
+ string todb = database->name;
+ stringstream a,b;
+ a << "localhost:" << port;
+ b << "127.0.0.1:" << port;
+ bool masterSameProcess = ( a.str() == masterHost || b.str() == masterHost );
+ if ( masterSameProcess ) {
+ if ( fromdb == todb && database->path == dbpath ) {
+ // guard against an "infinite" loop
+ /* if you are replicating, the local.sources config may be wrong if you get this */
+ errmsg = "can't clone from self (localhost).";
return false;
- conn = c;
- } else {
- conn = auto_ptr< DBClientInterface >( new DBDirectClient() );
+ }
}
- c = conn->query( ns.c_str(), emptyObj, 0, 0, 0, slaveOk ? Option_SlaveOk : 0 );
- }
- if ( c.get() == 0 ) {
- errmsg = "query failed " + ns;
- return false;
- }
-
- while ( 1 ) {
+ /* todo: we can put thesee releases inside dbclient or a dbclient specialization.
+ or just wait until we get rid of global lock anyway.
+ */
+ string ns = fromdb + ".system.namespaces";
+ auto_ptr<DBClientCursor> c;
{
dbtemprelease r;
- if ( !c->more() )
- break;
- }
- BSONObj collection = c->next();
- BSONElement e = collection.findElement("name");
- if ( e.eoo() ) {
- string s = "bad system.namespaces object " + collection.toString();
-
- /* temp
- cout << masterHost << endl;
- cout << ns << endl;
- cout << e.toString() << endl;
- exit(1);*/
-
- massert(s.c_str(), false);
+ if ( !masterSameProcess ) {
+ auto_ptr< DBClientConnection > c( new DBClientConnection() );
+ if ( !c->connect( masterHost, errmsg ) )
+ return false;
+ conn = c;
+ } else {
+ conn = auto_ptr< DBClientInterface >( new DBDirectClient() );
+ }
+ c = conn->query( ns.c_str(), emptyObj, 0, 0, 0, slaveOk ? Option_SlaveOk : 0 );
}
- assert( !e.eoo() );
- assert( e.type() == String );
- const char *from_name = e.valuestr();
- if ( strstr(from_name, ".system.") || strchr(from_name, '$') ) {
- continue;
+ if ( c.get() == 0 ) {
+ errmsg = "query failed " + ns;
+ return false;
}
- BSONObj options = collection.getObjectField("options");
- /* change name "<fromdb>.collection" -> <todb>.collection */
- const char *p = strchr(from_name, '.');
- assert(p);
- string to_name = todb + p;
+ while ( 1 ) {
+ {
+ dbtemprelease r;
+ if ( !c->more() )
+ break;
+ }
+ BSONObj collection = c->next();
+ BSONElement e = collection.findElement("name");
+ if ( e.eoo() ) {
+ string s = "bad system.namespaces object " + collection.toString();
- //if( !options.isEmpty() )
- {
- string err;
- const char *toname = to_name.c_str();
- userCreateNS(toname, options, err, logForRepl);
+ /* temp
+ out() << masterHost << endl;
+ out() << ns << endl;
+ out() << e.toString() << endl;
+ exit(1);*/
- /* chunks are big enough that we should create the _id index up front, that should
- be faster. perhaps we should do that for everything? Not doing that yet -- not sure
- how we want to handle _id-less collections, and we might not want to create the index
- there.
- */
- if ( strstr(toname, "._chunks") )
- ensureHaveIdIndex(toname);
- }
- copy(from_name, to_name.c_str(), false, logForRepl, masterSameProcess, slaveOk);
- }
+ massert(s.c_str(), false);
+ }
+ assert( !e.eoo() );
+ assert( e.type() == String );
+ const char *from_name = e.valuestr();
+ if ( strstr(from_name, ".system.") || strchr(from_name, '$') ) {
+ continue;
+ }
+ BSONObj options = collection.getObjectField("options");
- // now build the indexes
- string system_indexes_from = fromdb + ".system.indexes";
- string system_indexes_to = todb + ".system.indexes";
- copy(system_indexes_from.c_str(), system_indexes_to.c_str(), true, logForRepl, masterSameProcess, slaveOk);
+ /* change name "<fromdb>.collection" -> <todb>.collection */
+ const char *p = strchr(from_name, '.');
+ assert(p);
+ string to_name = todb + p;
- return true;
-}
+ //if( !options.isEmpty() )
+ {
+ string err;
+ const char *toname = to_name.c_str();
+ userCreateNS(toname, options, err, logForRepl);
-bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication, bool slaveOk)
-{
- Cloner c;
- return c.go(masterHost, errmsg, fromdb, logForReplication, slaveOk);
-}
+ /* chunks are big enough that we should create the _id index up front, that should
+ be faster. perhaps we should do that for everything? Not doing that yet -- not sure
+ how we want to handle _id-less collections, and we might not want to create the index
+ there.
+ */
+ if ( strstr(toname, "._chunks") )
+ ensureHaveIdIndex(toname);
+ }
+ copy(from_name, to_name.c_str(), false, logForRepl, masterSameProcess, slaveOk);
+ }
-/* Usage:
- mydb.$cmd.findOne( { clone: "fromhost" } );
-*/
-class CmdClone : public Command {
-public:
- virtual bool slaveOk() {
- return false;
- }
- CmdClone() : Command("clone") { }
- virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- string from = cmdObj.getStringField("clone");
- if ( from.empty() )
- return false;
- /* replication note: we must logOp() not the command, but the cloned data -- if the slave
- were to clone it would get a different point-in-time and not match.
- */
- return cloneFrom(from.c_str(), errmsg, database->name, /*logForReplication=*/!fromRepl, /*slaveok*/false);
- }
-} cmdclone;
+ // now build the indexes
+ string system_indexes_from = fromdb + ".system.indexes";
+ string system_indexes_to = todb + ".system.indexes";
+ copy(system_indexes_from.c_str(), system_indexes_to.c_str(), true, logForRepl, masterSameProcess, slaveOk);
-/* Usage:
- admindb.$cmd.findOne( { copydb: 1, fromhost: <hostname>, fromdb: <db>, todb: <db> } );
-*/
-class CmdCopyDb : public Command {
-public:
- CmdCopyDb() : Command("copydb") { }
- virtual bool adminOnly() {
return true;
}
- virtual bool slaveOk() {
- return false;
+
+ bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication, bool slaveOk)
+ {
+ Cloner c;
+ return c.go(masterHost, errmsg, fromdb, logForReplication, slaveOk);
}
- virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- string fromhost = cmdObj.getStringField("fromhost");
- if ( fromhost.empty() ) {
- /* copy from self */
- stringstream ss;
- ss << "localhost:" << port;
- fromhost = ss.str();
+
+ /* Usage:
+ mydb.$cmd.findOne( { clone: "fromhost" } );
+ */
+ class CmdClone : public Command {
+ public:
+ virtual bool slaveOk() {
+ return false;
}
- string fromdb = cmdObj.getStringField("fromdb");
- string todb = cmdObj.getStringField("todb");
- if ( fromhost.empty() || todb.empty() || fromdb.empty() ) {
- errmsg = "parms missing - {copydb: 1, fromhost: <hostname>, fromdb: <db>, todb: <db>}";
+ CmdClone() : Command("clone") { }
+ virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ string from = cmdObj.getStringField("clone");
+ if ( from.empty() )
+ return false;
+ /* replication note: we must logOp() not the command, but the cloned data -- if the slave
+ were to clone it would get a different point-in-time and not match.
+ */
+ return cloneFrom(from.c_str(), errmsg, database->name, /*logForReplication=*/!fromRepl, /*slaveok*/false);
+ }
+ } cmdclone;
+
+ /* Usage:
+ admindb.$cmd.findOne( { copydb: 1, fromhost: <hostname>, fromdb: <db>, todb: <db> } );
+ */
+ class CmdCopyDb : public Command {
+ public:
+ CmdCopyDb() : Command("copydb") { }
+ virtual bool adminOnly() {
+ return true;
+ }
+ virtual bool slaveOk() {
return false;
}
- setClient(todb.c_str());
- bool res = cloneFrom(fromhost.c_str(), errmsg, fromdb, /*logForReplication=*/!fromRepl, /*slaveok*/false);
- database = 0;
- return res;
- }
-} cmdcopydb;
+ virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ string fromhost = cmdObj.getStringField("fromhost");
+ if ( fromhost.empty() ) {
+ /* copy from self */
+ stringstream ss;
+ ss << "localhost:" << port;
+ fromhost = ss.str();
+ }
+ string fromdb = cmdObj.getStringField("fromdb");
+ string todb = cmdObj.getStringField("todb");
+ if ( fromhost.empty() || todb.empty() || fromdb.empty() ) {
+ errmsg = "parms missing - {copydb: 1, fromhost: <hostname>, fromdb: <db>, todb: <db>}";
+ return false;
+ }
+ setClient(todb.c_str());
+ bool res = cloneFrom(fromhost.c_str(), errmsg, fromdb, /*logForReplication=*/!fromRepl, /*slaveok*/false);
+ database = 0;
+ return res;
+ }
+ } cmdcopydb;
} // namespace mongo
diff --git a/db/commands.cpp b/db/commands.cpp
index b61c149b947..42c7246286b 100644
--- a/db/commands.cpp
+++ b/db/commands.cpp
@@ -23,50 +23,50 @@
namespace mongo {
-map<string,Command*> *commands;
+ map<string,Command*> *commands;
-Command::Command(const char *_name) : name(_name) {
- // register ourself.
- if ( commands == 0 )
- commands = new map<string,Command*>;
- (*commands)[name] = this;
-}
+ Command::Command(const char *_name) : name(_name) {
+ // register ourself.
+ if ( commands == 0 )
+ commands = new map<string,Command*>;
+ (*commands)[name] = this;
+ }
-bool runCommandAgainstRegistered(const char *ns, BSONObj& jsobj, BSONObjBuilder& anObjBuilder) {
- const char *p = strchr(ns, '.');
- if ( !p ) return false;
- if ( strcmp(p, ".$cmd") != 0 ) return false;
+ bool runCommandAgainstRegistered(const char *ns, BSONObj& jsobj, BSONObjBuilder& anObjBuilder) {
+ const char *p = strchr(ns, '.');
+ if ( !p ) return false;
+ if ( strcmp(p, ".$cmd") != 0 ) return false;
- bool ok = false;
- bool valid = false;
+ bool ok = false;
+ bool valid = false;
- BSONElement e;
- e = jsobj.firstElement();
+ BSONElement e;
+ e = jsobj.firstElement();
- map<string,Command*>::iterator i;
+ map<string,Command*>::iterator i;
- if ( e.eoo() )
- ;
- /* check for properly registered command objects. Note that all the commands below should be
- migrated over to the command object format.
- */
- else if ( (i = commands->find(e.fieldName())) != commands->end() ) {
- valid = true;
- string errmsg;
- Command *c = i->second;
- if ( c->adminOnly() && strncmp(ns, "admin", p-ns) != 0 ) {
- ok = false;
- errmsg = "access denied";
- }
- else {
- ok = c->run(ns, jsobj, errmsg, anObjBuilder, false);
+ if ( e.eoo() )
+ ;
+ /* check for properly registered command objects. Note that all the commands below should be
+ migrated over to the command object format.
+ */
+ else if ( (i = commands->find(e.fieldName())) != commands->end() ) {
+ valid = true;
+ string errmsg;
+ Command *c = i->second;
+ if ( c->adminOnly() && strncmp(ns, "admin", p-ns) != 0 ) {
+ ok = false;
+ errmsg = "access denied";
+ }
+ else {
+ ok = c->run(ns, jsobj, errmsg, anObjBuilder, false);
+ }
+ if ( !ok )
+ anObjBuilder.append("errmsg", errmsg);
+ return true;
}
- if ( !ok )
- anObjBuilder.append("errmsg", errmsg);
- return true;
- }
- return false;
-}
+ return false;
+ }
} // namespace mongo
diff --git a/db/commands.h b/db/commands.h
index e0719a7e00d..47a49e75a71 100644
--- a/db/commands.h
+++ b/db/commands.h
@@ -19,47 +19,47 @@
namespace mongo {
-class BSONObj;
-class BSONObjBuilder;
+ class BSONObj;
+ class BSONObjBuilder;
// db "commands" (sent via db.$cmd.findOne(...))
// subclass to make a command.
-class Command {
-public:
- string name;
+ class Command {
+ public:
+ string name;
- /* run the given command
- implement this...
+ /* run the given command
+ implement this...
- fromRepl - command is being invoked as part of replication syncing. In this situation you
- normally do not want to log the command to the local oplog.
+ fromRepl - command is being invoked as part of replication syncing. In this situation you
+ normally do not want to log the command to the local oplog.
- return value is true if succeeded. if false, set errmsg text.
- */
- virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) = 0;
+ return value is true if succeeded. if false, set errmsg text.
+ */
+ virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) = 0;
- /* Return true if only the admin ns has privileges to run this command. */
- virtual bool adminOnly() {
- return false;
- }
+ /* Return true if only the admin ns has privileges to run this command. */
+ virtual bool adminOnly() {
+ return false;
+ }
- /* Return true if slaves of a replication pair are allowed to execute the command
- (the command directly from a client -- if fromRepl, always allowed).
- */
- virtual bool slaveOk() = 0;
+ /* Return true if slaves of a replication pair are allowed to execute the command
+ (the command directly from a client -- if fromRepl, always allowed).
+ */
+ virtual bool slaveOk() = 0;
- /* Override and return true to if true,log the operation (logOp()) to the replication log.
- (not done if fromRepl of course)
+ /* Override and return true to if true,log the operation (logOp()) to the replication log.
+ (not done if fromRepl of course)
- Note if run() returns false, we do NOT log.
- */
- virtual bool logTheOp() {
- return false;
- }
+ Note if run() returns false, we do NOT log.
+ */
+ virtual bool logTheOp() {
+ return false;
+ }
- Command(const char *_name);
-};
+ Command(const char *_name);
+ };
-bool runCommandAgainstRegistered(const char *ns, BSONObj& jsobj, BSONObjBuilder& anObjBuilder);
+ bool runCommandAgainstRegistered(const char *ns, BSONObj& jsobj, BSONObjBuilder& anObjBuilder);
} // namespace mongo
diff --git a/db/cursor.cpp b/db/cursor.cpp
index 3223195f412..2471cfb8973 100644
--- a/db/cursor.cpp
+++ b/db/cursor.cpp
@@ -18,120 +18,120 @@
namespace mongo {
-class Forward : public AdvanceStrategy {
- virtual DiskLoc next( const DiskLoc &prev ) const {
- return prev.rec()->getNext( prev );
- }
-} _forward;
+ class Forward : public AdvanceStrategy {
+ virtual DiskLoc next( const DiskLoc &prev ) const {
+ return prev.rec()->getNext( prev );
+ }
+ } _forward;
-class Reverse : public AdvanceStrategy {
- virtual DiskLoc next( const DiskLoc &prev ) const {
- return prev.rec()->getPrev( prev );
- }
-} _reverse;
+ class Reverse : public AdvanceStrategy {
+ virtual DiskLoc next( const DiskLoc &prev ) const {
+ return prev.rec()->getPrev( prev );
+ }
+ } _reverse;
-AdvanceStrategy *forward() {
- return &_forward;
-}
-AdvanceStrategy *reverse() {
- return &_reverse;
-}
+ AdvanceStrategy *forward() {
+ return &_forward;
+ }
+ AdvanceStrategy *reverse() {
+ return &_reverse;
+ }
-DiskLoc nextLoop( NamespaceDetails *nsd, const DiskLoc &prev ) {
- assert( nsd->capLooped() );
- DiskLoc next = forward()->next( prev );
- if ( !next.isNull() )
- return next;
- return nsd->firstRecord();
-}
+ DiskLoc nextLoop( NamespaceDetails *nsd, const DiskLoc &prev ) {
+ assert( nsd->capLooped() );
+ DiskLoc next = forward()->next( prev );
+ if ( !next.isNull() )
+ return next;
+ return nsd->firstRecord();
+ }
-DiskLoc prevLoop( NamespaceDetails *nsd, const DiskLoc &curr ) {
- assert( nsd->capLooped() );
- DiskLoc prev = reverse()->next( curr );
- if ( !prev.isNull() )
- return prev;
- return nsd->lastRecord();
-}
+ DiskLoc prevLoop( NamespaceDetails *nsd, const DiskLoc &curr ) {
+ assert( nsd->capLooped() );
+ DiskLoc prev = reverse()->next( curr );
+ if ( !prev.isNull() )
+ return prev;
+ return nsd->lastRecord();
+ }
-ForwardCappedCursor::ForwardCappedCursor( NamespaceDetails *_nsd ) :
- BasicCursor( DiskLoc(), this ),
- nsd( _nsd ) {
- if ( !nsd )
- return;
- DiskLoc start;
- if ( !nsd->capLooped() )
- start = nsd->firstRecord();
- else {
- start = nsd->capExtent.ext()->firstRecord;
- if ( !start.isNull() && start == nsd->capFirstNewRecord ) {
- start = nsd->capExtent.ext()->lastRecord;
- start = nextLoop( nsd, start );
+ ForwardCappedCursor::ForwardCappedCursor( NamespaceDetails *_nsd ) :
+ BasicCursor( DiskLoc(), this ),
+ nsd( _nsd ) {
+ if ( !nsd )
+ return;
+ DiskLoc start;
+ if ( !nsd->capLooped() )
+ start = nsd->firstRecord();
+ else {
+ start = nsd->capExtent.ext()->firstRecord;
+ if ( !start.isNull() && start == nsd->capFirstNewRecord ) {
+ start = nsd->capExtent.ext()->lastRecord;
+ start = nextLoop( nsd, start );
+ }
}
+ curr = start;
}
- curr = start;
-}
-DiskLoc ForwardCappedCursor::next( const DiskLoc &prev ) const {
- assert( nsd );
- if ( !nsd->capLooped() )
- return forward()->next( prev );
+ DiskLoc ForwardCappedCursor::next( const DiskLoc &prev ) const {
+ assert( nsd );
+ if ( !nsd->capLooped() )
+ return forward()->next( prev );
- DiskLoc i = prev;
- // Last record
- if ( i == nsd->capExtent.ext()->lastRecord )
- return DiskLoc();
- i = nextLoop( nsd, i );
- // If we become capFirstNewRecord from same extent, advance to next extent.
- if ( i == nsd->capFirstNewRecord &&
- i != nsd->capExtent.ext()->firstRecord )
- i = nextLoop( nsd, nsd->capExtent.ext()->lastRecord );
- // If we have just gotten to beginning of capExtent, skip to capFirstNewRecord
- if ( i == nsd->capExtent.ext()->firstRecord )
- i = nsd->capFirstNewRecord;
+ DiskLoc i = prev;
+ // Last record
+ if ( i == nsd->capExtent.ext()->lastRecord )
+ return DiskLoc();
+ i = nextLoop( nsd, i );
+ // If we become capFirstNewRecord from same extent, advance to next extent.
+ if ( i == nsd->capFirstNewRecord &&
+ i != nsd->capExtent.ext()->firstRecord )
+ i = nextLoop( nsd, nsd->capExtent.ext()->lastRecord );
+ // If we have just gotten to beginning of capExtent, skip to capFirstNewRecord
+ if ( i == nsd->capExtent.ext()->firstRecord )
+ i = nsd->capFirstNewRecord;
- return i;
-}
+ return i;
+ }
-ReverseCappedCursor::ReverseCappedCursor( NamespaceDetails *_nsd ) :
- BasicCursor( DiskLoc(), this ),
- nsd( _nsd ) {
- if ( !nsd )
- return;
- DiskLoc start;
- if ( !nsd->capLooped() )
- start = nsd->lastRecord();
- else
- start = nsd->capExtent.ext()->lastRecord;
- curr = start;
-}
+ ReverseCappedCursor::ReverseCappedCursor( NamespaceDetails *_nsd ) :
+ BasicCursor( DiskLoc(), this ),
+ nsd( _nsd ) {
+ if ( !nsd )
+ return;
+ DiskLoc start;
+ if ( !nsd->capLooped() )
+ start = nsd->lastRecord();
+ else
+ start = nsd->capExtent.ext()->lastRecord;
+ curr = start;
+ }
-DiskLoc ReverseCappedCursor::next( const DiskLoc &prev ) const {
- assert( nsd );
- if ( !nsd->capLooped() )
- return reverse()->next( prev );
+ DiskLoc ReverseCappedCursor::next( const DiskLoc &prev ) const {
+ assert( nsd );
+ if ( !nsd->capLooped() )
+ return reverse()->next( prev );
- DiskLoc i = prev;
- // Last record
- if ( nsd->capFirstNewRecord == nsd->capExtent.ext()->firstRecord ) {
- if ( i == nextLoop( nsd, nsd->capExtent.ext()->lastRecord ) ) {
- return DiskLoc();
+ DiskLoc i = prev;
+ // Last record
+ if ( nsd->capFirstNewRecord == nsd->capExtent.ext()->firstRecord ) {
+ if ( i == nextLoop( nsd, nsd->capExtent.ext()->lastRecord ) ) {
+ return DiskLoc();
+ }
+ } else {
+ if ( i == nsd->capExtent.ext()->firstRecord ) {
+ return DiskLoc();
+ }
}
- } else {
- if ( i == nsd->capExtent.ext()->firstRecord ) {
- return DiskLoc();
- }
- }
- // If we are capFirstNewRecord, advance to prev extent, otherwise just get prev.
- if ( i == nsd->capFirstNewRecord )
- i = prevLoop( nsd, nsd->capExtent.ext()->firstRecord );
- else
- i = prevLoop( nsd, i );
- // If we just became last in cap extent, advance past capFirstNewRecord
- // (We know capExtent.ext()->firstRecord != capFirstNewRecord, since would
- // have returned DiskLoc() earlier otherwise.)
- if ( i == nsd->capExtent.ext()->lastRecord )
- i = reverse()->next( nsd->capFirstNewRecord );
+ // If we are capFirstNewRecord, advance to prev extent, otherwise just get prev.
+ if ( i == nsd->capFirstNewRecord )
+ i = prevLoop( nsd, nsd->capExtent.ext()->firstRecord );
+ else
+ i = prevLoop( nsd, i );
+ // If we just became last in cap extent, advance past capFirstNewRecord
+ // (We know capExtent.ext()->firstRecord != capFirstNewRecord, since would
+ // have returned DiskLoc() earlier otherwise.)
+ if ( i == nsd->capExtent.ext()->lastRecord )
+ i = reverse()->next( nsd->capFirstNewRecord );
- return i;
-}
+ return i;
+ }
} // namespace mongo
diff --git a/db/cursor.h b/db/cursor.h
index cdbbfffc79b..a80a0db36cf 100644
--- a/db/cursor.h
+++ b/db/cursor.h
@@ -20,184 +20,184 @@
namespace mongo {
-/* Query cursors, base class. This is for our internal cursors. "ClientCursor" is a separate
- concept and is for the user's cursor.
-*/
-class Cursor {
-public:
- virtual bool ok() = 0;
- bool eof() {
- return !ok();
- }
- virtual Record* _current() = 0;
- virtual BSONObj current() = 0;
- virtual DiskLoc currLoc() = 0;
- virtual bool advance() = 0; /*true=ok*/
-
- /* Implement these if you want the cursor to be "tailable" */
- /* tailable(): if true, cursor has tailable capability AND
- the user requested use of those semantics. */
- virtual bool tailable() {
- return false;
- }
- /* indicates we should mark where we are and go into tail mode. */
- virtual void setAtTail() {
- assert(false);
- }
- /* you must call tailResume before reusing the cursor */
- virtual void tailResume() { }
- /* indicates ifi we are actively tailing. once it goes active,
- this should return treu even after tailResume(). */
- virtual bool tailing() {
- return false;
- }
-
- virtual void aboutToDeleteBucket(const DiskLoc& b) { }
-
- /* optional to implement. if implemented, means 'this' is a prototype */
- virtual Cursor* clone() {
- return 0;
- }
-
- virtual BSONObj indexKeyPattern() {
- return BSONObj();
- }
-
- /* called after every query block is iterated -- i.e. between getMore() blocks
- so you can note where we are, if necessary.
- */
- virtual void noteLocation() { }
-
- /* called before query getmore block is iterated */
- virtual void checkLocation() { }
-
- virtual string toString() {
- return "abstract?";
- }
-
- /* used for multikey index traversal to avoid sending back dups. see JSMatcher::matches() */
- set<DiskLoc> dups;
- bool getsetdup(DiskLoc loc) {
- /* to save mem only call this when there is risk of dups (e.g. when 'deep'/multikey) */
- if ( dups.count(loc) > 0 )
- return true;
- dups.insert(loc);
- return false;
- }
-};
-
-class AdvanceStrategy {
-public:
- virtual DiskLoc next( const DiskLoc &prev ) const = 0;
-};
-
-AdvanceStrategy *forward();
-AdvanceStrategy *reverse();
-
-/* table-scan style cursor */
-class BasicCursor : public Cursor {
-protected:
- DiskLoc curr, last;
- AdvanceStrategy *s;
-
-private:
- // for tailing:
- enum State { Normal, TailPoint, TailResumed } state;
- void init() {
- state = Normal;
- }
-
-public:
- bool ok() {
- return !curr.isNull();
- }
- Record* _current() {
- assert( ok() );
- return curr.rec();
- }
- BSONObj current() {
- Record *r = _current();
- BSONObj j(r);
- return j;
- }
- virtual DiskLoc currLoc() {
- return curr;
- }
-
- bool advance() {
- if ( eof() )
+ /* Query cursors, base class. This is for our internal cursors. "ClientCursor" is a separate
+ concept and is for the user's cursor.
+ */
+ class Cursor {
+ public:
+ virtual bool ok() = 0;
+ bool eof() {
+ return !ok();
+ }
+ virtual Record* _current() = 0;
+ virtual BSONObj current() = 0;
+ virtual DiskLoc currLoc() = 0;
+ virtual bool advance() = 0; /*true=ok*/
+
+ /* Implement these if you want the cursor to be "tailable" */
+ /* tailable(): if true, cursor has tailable capability AND
+ the user requested use of those semantics. */
+ virtual bool tailable() {
+ return false;
+ }
+ /* indicates we should mark where we are and go into tail mode. */
+ virtual void setAtTail() {
+ assert(false);
+ }
+ /* you must call tailResume before reusing the cursor */
+ virtual void tailResume() { }
+ /* indicates ifi we are actively tailing. once it goes active,
+ this should return treu even after tailResume(). */
+ virtual bool tailing() {
+ return false;
+ }
+
+ virtual void aboutToDeleteBucket(const DiskLoc& b) { }
+
+ /* optional to implement. if implemented, means 'this' is a prototype */
+ virtual Cursor* clone() {
+ return 0;
+ }
+
+ virtual BSONObj indexKeyPattern() {
+ return BSONObj();
+ }
+
+ /* called after every query block is iterated -- i.e. between getMore() blocks
+ so you can note where we are, if necessary.
+ */
+ virtual void noteLocation() { }
+
+ /* called before query getmore block is iterated */
+ virtual void checkLocation() { }
+
+ virtual string toString() {
+ return "abstract?";
+ }
+
+ /* used for multikey index traversal to avoid sending back dups. see JSMatcher::matches() */
+ set<DiskLoc> dups;
+ bool getsetdup(DiskLoc loc) {
+ /* to save mem only call this when there is risk of dups (e.g. when 'deep'/multikey) */
+ if ( dups.count(loc) > 0 )
+ return true;
+ dups.insert(loc);
return false;
- _current();
- last = curr;
- curr = s->next( curr );
- return ok();
- }
-
- BasicCursor(DiskLoc dl, AdvanceStrategy *_s = forward()) : curr(dl), s( _s ) {
- init();
- }
- BasicCursor(AdvanceStrategy *_s = forward()) : s( _s ) {
- init();
- }
- virtual string toString() {
- return "BasicCursor";
- }
-
- virtual void tailResume() {
- if ( state == TailPoint ) {
- state = TailResumed;
- advance();
- }
- }
- virtual void setAtTail() {
- assert( state != TailPoint );
- assert( curr.isNull() );
- assert( !last.isNull() );
- curr = last;
- last.Null();
- state = TailPoint;
- }
- virtual bool tailable() {
- // to go into tail mode we need a non-null point of reference for resumption
- return !last.isNull();
- }
- virtual bool tailing() {
- return state != Normal;
- }
-};
-
-/* used for order { $natural: -1 } */
-class ReverseCursor : public BasicCursor {
-public:
- ReverseCursor(DiskLoc dl) : BasicCursor( dl, reverse() ) { }
- ReverseCursor() : BasicCursor( reverse() ) { }
- virtual string toString() {
- return "ReverseCursor";
- }
-};
-
-class NamespaceDetails;
-
-class ForwardCappedCursor : public BasicCursor, public AdvanceStrategy {
-public:
- ForwardCappedCursor( NamespaceDetails *nsd = 0 );
- virtual string toString() {
- return "ForwardCappedCursor";
- }
- virtual DiskLoc next( const DiskLoc &prev ) const;
-private:
- NamespaceDetails *nsd;
-};
-
-class ReverseCappedCursor : public BasicCursor, public AdvanceStrategy {
-public:
- ReverseCappedCursor( NamespaceDetails *nsd = 0 );
- virtual string toString() {
- return "ReverseCappedCursor";
- }
- virtual DiskLoc next( const DiskLoc &prev ) const;
-private:
- NamespaceDetails *nsd;
-};
+ }
+ };
+
+ class AdvanceStrategy {
+ public:
+ virtual DiskLoc next( const DiskLoc &prev ) const = 0;
+ };
+
+ AdvanceStrategy *forward();
+ AdvanceStrategy *reverse();
+
+ /* table-scan style cursor */
+ class BasicCursor : public Cursor {
+ protected:
+ DiskLoc curr, last;
+ AdvanceStrategy *s;
+
+ private:
+ // for tailing:
+ enum State { Normal, TailPoint, TailResumed } state;
+ void init() {
+ state = Normal;
+ }
+
+ public:
+ bool ok() {
+ return !curr.isNull();
+ }
+ Record* _current() {
+ assert( ok() );
+ return curr.rec();
+ }
+ BSONObj current() {
+ Record *r = _current();
+ BSONObj j(r);
+ return j;
+ }
+ virtual DiskLoc currLoc() {
+ return curr;
+ }
+
+ bool advance() {
+ if ( eof() )
+ return false;
+ _current();
+ last = curr;
+ curr = s->next( curr );
+ return ok();
+ }
+
+ BasicCursor(DiskLoc dl, AdvanceStrategy *_s = forward()) : curr(dl), s( _s ) {
+ init();
+ }
+ BasicCursor(AdvanceStrategy *_s = forward()) : s( _s ) {
+ init();
+ }
+ virtual string toString() {
+ return "BasicCursor";
+ }
+
+ virtual void tailResume() {
+ if ( state == TailPoint ) {
+ state = TailResumed;
+ advance();
+ }
+ }
+ virtual void setAtTail() {
+ assert( state != TailPoint );
+ assert( curr.isNull() );
+ assert( !last.isNull() );
+ curr = last;
+ last.Null();
+ state = TailPoint;
+ }
+ virtual bool tailable() {
+ // to go into tail mode we need a non-null point of reference for resumption
+ return !last.isNull();
+ }
+ virtual bool tailing() {
+ return state != Normal;
+ }
+ };
+
+ /* used for order { $natural: -1 } */
+ class ReverseCursor : public BasicCursor {
+ public:
+ ReverseCursor(DiskLoc dl) : BasicCursor( dl, reverse() ) { }
+ ReverseCursor() : BasicCursor( reverse() ) { }
+ virtual string toString() {
+ return "ReverseCursor";
+ }
+ };
+
+ class NamespaceDetails;
+
+ class ForwardCappedCursor : public BasicCursor, public AdvanceStrategy {
+ public:
+ ForwardCappedCursor( NamespaceDetails *nsd = 0 );
+ virtual string toString() {
+ return "ForwardCappedCursor";
+ }
+ virtual DiskLoc next( const DiskLoc &prev ) const;
+ private:
+ NamespaceDetails *nsd;
+ };
+
+ class ReverseCappedCursor : public BasicCursor, public AdvanceStrategy {
+ public:
+ ReverseCappedCursor( NamespaceDetails *nsd = 0 );
+ virtual string toString() {
+ return "ReverseCappedCursor";
+ }
+ virtual DiskLoc next( const DiskLoc &prev ) const;
+ private:
+ NamespaceDetails *nsd;
+ };
} // namespace mongo
diff --git a/db/database.h b/db/database.h
index fdb6f2f3ea8..fd3080dc6bc 100644
--- a/db/database.h
+++ b/db/database.h
@@ -24,111 +24,111 @@
namespace mongo {
-class Database {
-public:
- Database(const char *nm, bool& justCreated, const char *_path = dbpath) :
- name(nm),
- path(_path)
- {
+ class Database {
+ public:
+ Database(const char *nm, bool& justCreated, const char *_path = dbpath) :
+ name(nm),
+ path(_path)
{
- int L = strlen(nm);
- uassert( "db name is empty", L > 0 );
- uassert( "bad db name [1]", *nm != '.' );
- uassert( "bad db name [2]", nm[L-1] != '.' );
- uassert( "bad char(s) in db name", strchr(nm, ' ') == 0 );
- uassert( "db name too long", L < 64 );
- }
+ {
+ int L = strlen(nm);
+ uassert( "db name is empty", L > 0 );
+ uassert( "bad db name [1]", *nm != '.' );
+ uassert( "bad db name [2]", nm[L-1] != '.' );
+ uassert( "bad char(s) in db name", strchr(nm, ' ') == 0 );
+ uassert( "db name too long", L < 64 );
+ }
- justCreated = namespaceIndex.init(_path, nm);
- profile = 0;
- profileName = name + ".system.profile";
- }
- ~Database() {
- int n = files.size();
- for ( int i = 0; i < n; i++ )
- delete files[i];
- }
-
- PhysicalDataFile* getFile( int n, int sizeNeeded = 0 ) {
- assert(this);
-
- if ( n < 0 || n >= DiskLoc::MaxFiles ) {
- cout << "getFile(): n=" << n << endl;
- assert( n >= 0 && n < DiskLoc::MaxFiles );
+ justCreated = namespaceIndex.init(_path, nm);
+ profile = 0;
+ profileName = name + ".system.profile";
}
- DEV {
- if ( n > 100 )
- cout << "getFile(): n=" << n << "?" << endl;
+ ~Database() {
+ int n = files.size();
+ for ( int i = 0; i < n; i++ )
+ delete files[i];
}
- while ( n >= (int) files.size() )
- files.push_back(0);
- PhysicalDataFile* p = files[n];
- if ( p == 0 ) {
- stringstream ss;
- ss << name << '.' << n;
- boost::filesystem::path fullName;
- fullName = boost::filesystem::path(path) / ss.str();
- string fullNameString = fullName.string();
- p = new PhysicalDataFile(n);
- int minSize = 0;
- if ( n != 0 && files[ n - 1 ] )
- minSize = files[ n - 1 ]->getHeader()->fileLength;
- if ( sizeNeeded + PDFHeader::headerSize() > minSize )
- minSize = sizeNeeded + PDFHeader::headerSize();
- try {
- p->open( fullNameString.c_str(), minSize );
+
+ PhysicalDataFile* getFile( int n, int sizeNeeded = 0 ) {
+ assert(this);
+
+ if ( n < 0 || n >= DiskLoc::MaxFiles ) {
+ out() << "getFile(): n=" << n << endl;
+ assert( n >= 0 && n < DiskLoc::MaxFiles );
}
- catch ( AssertionException& u ) {
- delete p;
- throw u;
+ DEV {
+ if ( n > 100 )
+ out() << "getFile(): n=" << n << "?" << endl;
+ }
+ while ( n >= (int) files.size() )
+ files.push_back(0);
+ PhysicalDataFile* p = files[n];
+ if ( p == 0 ) {
+ stringstream ss;
+ ss << name << '.' << n;
+ boost::filesystem::path fullName;
+ fullName = boost::filesystem::path(path) / ss.str();
+ string fullNameString = fullName.string();
+ p = new PhysicalDataFile(n);
+ int minSize = 0;
+ if ( n != 0 && files[ n - 1 ] )
+ minSize = files[ n - 1 ]->getHeader()->fileLength;
+ if ( sizeNeeded + PDFHeader::headerSize() > minSize )
+ minSize = sizeNeeded + PDFHeader::headerSize();
+ try {
+ p->open( fullNameString.c_str(), minSize );
+ }
+ catch ( AssertionException& u ) {
+ delete p;
+ throw u;
+ }
+ files[n] = p;
+ }
+ return p;
+ }
+
+ PhysicalDataFile* addAFile( int sizeNeeded = 0 ) {
+ int n = (int) files.size();
+ return getFile( n, sizeNeeded );
+ }
+
+ PhysicalDataFile* suitableFile( int sizeNeeded ) {
+ PhysicalDataFile* f = newestFile();
+ for ( int i = 0; i < 8; i++ ) {
+ if ( f->getHeader()->unusedLength >= sizeNeeded )
+ break;
+ f = addAFile( sizeNeeded );
+ if ( f->getHeader()->fileLength >= PhysicalDataFile::maxSize() ) // this is as big as they get so might as well stop
+ break;
}
- files[n] = p;
+ return f;
+ }
+
+ PhysicalDataFile* newestFile() {
+ int n = (int) files.size();
+ if ( n > 0 ) n--;
+ return getFile(n);
}
- return p;
- }
-
- PhysicalDataFile* addAFile( int sizeNeeded = 0 ) {
- int n = (int) files.size();
- return getFile( n, sizeNeeded );
- }
-
- PhysicalDataFile* suitableFile( int sizeNeeded ) {
- PhysicalDataFile* f = newestFile();
- for ( int i = 0; i < 8; i++ ) {
- if ( f->getHeader()->unusedLength >= sizeNeeded )
- break;
- f = addAFile( sizeNeeded );
- if ( f->getHeader()->fileLength >= PhysicalDataFile::maxSize() ) // this is as big as they get so might as well stop
- break;
+
+ void finishInit(); // ugly...
+
+ vector<PhysicalDataFile*> files;
+ string name; // "alleyinsider"
+ string path;
+ NamespaceIndex namespaceIndex;
+ int profile; // 0=off.
+ string profileName; // "alleyinsider.system.profile"
+ QueryOptimizer optimizer;
+
+ bool haveLogged() {
+ return _haveLogged;
}
- return f;
- }
-
- PhysicalDataFile* newestFile() {
- int n = (int) files.size();
- if ( n > 0 ) n--;
- return getFile(n);
- }
-
- void finishInit(); // ugly...
-
- vector<PhysicalDataFile*> files;
- string name; // "alleyinsider"
- string path;
- NamespaceIndex namespaceIndex;
- int profile; // 0=off.
- string profileName; // "alleyinsider.system.profile"
- QueryOptimizer optimizer;
-
- bool haveLogged() {
- return _haveLogged;
- }
- void setHaveLogged();
-
-private:
- // see dbinfo.h description. if true, we have logged to the replication log.
- bool _haveLogged;
-};
+ void setHaveLogged();
+
+ private:
+ // see dbinfo.h description. if true, we have logged to the replication log.
+ bool _haveLogged;
+ };
extern Database *database;
diff --git a/db/db.cpp b/db/db.cpp
index 47d66601806..503e22e9033 100644
--- a/db/db.cpp
+++ b/db/db.cpp
@@ -29,116 +29,116 @@
namespace mongo {
-extern bool objcheck, quiet, quota, verbose, cpu;
-bool useJNI = true;
-
-/* only off if --nocursors which is for debugging. */
-extern bool useCursors;
-
-extern int port;
-extern int curOp;
-extern string dashDashSource;
-extern int opLogging;
-extern long long oplogSize;
-extern OpLog _oplog;
-
-extern int ctr;
-extern int callDepth;
-
-void setupSignals();
-void closeAllSockets();
-void startReplication();
-void pairWith(const char *remoteEnd, const char *arb);
-
-struct MyStartupTests {
- MyStartupTests() {
- assert( sizeof(OID) == 12 );
- }
-} mystartupdbcpp;
-
-void quicktest() {
- cout << "quicktest()\n";
+ extern bool objcheck, quiet, quota, verbose, cpu;
+ bool useJNI = true;
+
+ /* only off if --nocursors which is for debugging. */
+ extern bool useCursors;
+
+ extern int port;
+ extern int curOp;
+ extern string dashDashSource;
+ extern int opLogging;
+ extern long long oplogSize;
+ extern OpLog _oplog;
+
+ extern int ctr;
+ extern int callDepth;
+
+ void setupSignals();
+ void closeAllSockets();
+ void startReplication();
+ void pairWith(const char *remoteEnd, const char *arb);
+
+ struct MyStartupTests {
+ MyStartupTests() {
+ assert( sizeof(OID) == 12 );
+ }
+ } mystartupdbcpp;
- MemoryMappedFile mmf;
- char *m = (char *) mmf.map("/tmp/quicktest", 16384);
- // cout << "mmf reads: " << m << endl;
- strcpy_s(m, 1000, "hello worldz");
-}
+ void quicktest() {
+ out() << "quicktest()\n";
-QueryResult* emptyMoreResult(long long);
+ MemoryMappedFile mmf;
+ char *m = (char *) mmf.map("/tmp/quicktest", 16384);
+ // out() << "mmf reads: " << m << endl;
+ strcpy_s(m, 1000, "hello worldz");
+ }
+ QueryResult* emptyMoreResult(long long);
-void testTheDb() {
- stringstream ss;
- setClient("sys.unittest.pdfile");
+ void testTheDb() {
+ stringstream ss;
- /* this is not validly formatted, if you query this namespace bad things will happen */
- theDataFileMgr.insert("sys.unittest.pdfile", (void *) "hello worldx", 13);
- theDataFileMgr.insert("sys.unittest.pdfile", (void *) "hello worldx", 13);
+ setClient("sys.unittest.pdfile");
- BSONObj j1((const char *) &js1);
- deleteObjects("sys.unittest.delete", j1, false);
- theDataFileMgr.insert("sys.unittest.delete", &js1, sizeof(js1));
- deleteObjects("sys.unittest.delete", j1, false);
- updateObjects("sys.unittest.delete", j1, j1, true,ss);
- updateObjects("sys.unittest.delete", j1, j1, false,ss);
+ /* this is not validly formatted, if you query this namespace bad things will happen */
+ theDataFileMgr.insert("sys.unittest.pdfile", (void *) "hello worldx", 13);
+ theDataFileMgr.insert("sys.unittest.pdfile", (void *) "hello worldx", 13);
- auto_ptr<Cursor> c = theDataFileMgr.findAll("sys.unittest.pdfile");
- while ( c->ok() ) {
- c->_current();
- c->advance();
- }
- cout << endl;
+ BSONObj j1((const char *) &js1);
+ deleteObjects("sys.unittest.delete", j1, false);
+ theDataFileMgr.insert("sys.unittest.delete", &js1, sizeof(js1));
+ deleteObjects("sys.unittest.delete", j1, false);
+ updateObjects("sys.unittest.delete", j1, j1, true,ss);
+ updateObjects("sys.unittest.delete", j1, j1, false,ss);
- database = 0;
-}
+ auto_ptr<Cursor> c = theDataFileMgr.findAll("sys.unittest.pdfile");
+ while ( c->ok() ) {
+ c->_current();
+ c->advance();
+ }
+ out() << endl;
-MessagingPort *grab = 0;
-void connThread();
-
-class OurListener : public Listener {
-public:
- OurListener(int p) : Listener(p) { }
- virtual void accepted(MessagingPort *mp) {
- assert( grab == 0 );
- grab = mp;
- boost::thread thr(connThread);
- while ( grab )
- sleepmillis(1);
+ database = 0;
}
-};
-void webServerThread();
-void pdfileInit();
-
-/* versions
- 114 bad memory bug fixed
- 115 replay, opLogging
-*/
-void listen(int port) {
- const char *Version = "db version 122";
- log() << Version << ", pdfile version " << VERSION << "." << VERSION_MINOR << endl;
- pdfileInit();
- //testTheDb();
- log() << "waiting for connections on port " << port << "..." << endl;
- OurListener l(port);
- startReplication();
- boost::thread thr(webServerThread);
- l.listen();
-}
-
-class JniMessagingPort : public AbstractMessagingPort {
-public:
- JniMessagingPort(Message& _container) : container(_container) { }
- void reply(Message& received, Message& response, MSGID) {
- container = response;
- }
- void reply(Message& received, Message& response) {
- container = response;
+ MessagingPort *grab = 0;
+ void connThread();
+
+ class OurListener : public Listener {
+ public:
+ OurListener(int p) : Listener(p) { }
+ virtual void accepted(MessagingPort *mp) {
+ assert( grab == 0 );
+ grab = mp;
+ boost::thread thr(connThread);
+ while ( grab )
+ sleepmillis(1);
+ }
+ };
+
+ void webServerThread();
+ void pdfileInit();
+
+ /* versions
+ 114 bad memory bug fixed
+ 115 replay, opLogging
+ */
+ void listen(int port) {
+ const char *Version = "db version 122";
+ log() << Version << ", pdfile version " << VERSION << "." << VERSION_MINOR << endl;
+ pdfileInit();
+ //testTheDb();
+ log() << "waiting for connections on port " << port << "..." << endl;
+ OurListener l(port);
+ startReplication();
+ boost::thread thr(webServerThread);
+ l.listen();
}
- Message & container;
-};
+
+ class JniMessagingPort : public AbstractMessagingPort {
+ public:
+ JniMessagingPort(Message& _container) : container(_container) { }
+ void reply(Message& received, Message& response, MSGID) {
+ container = response;
+ }
+ void reply(Message& received, Message& response) {
+ container = response;
+ }
+ Message & container;
+ };
} // namespace mongo
@@ -147,186 +147,186 @@ public:
namespace mongo {
-/* we create one thread for each connection from an app server database.
- app server will open a pool of threads.
-*/
-void connThread()
-{
- AuthenticationInfo *ai = new AuthenticationInfo();
- authInfo.reset(ai);
- LastError *le = new LastError();
- lastError.reset(le);
- try {
-
- MessagingPort& dbMsgPort = *grab;
- grab = 0;
-
- Message m;
- while ( 1 ) {
- m.reset();
-
- if ( !dbMsgPort.recv(m) ) {
- log() << "end connection " << dbMsgPort.farEnd.toString() << endl;
- dbMsgPort.shutdown();
- break;
- }
+ /* we create one thread for each connection from an app server database.
+ app server will open a pool of threads.
+ */
+ void connThread()
+ {
+ AuthenticationInfo *ai = new AuthenticationInfo();
+ authInfo.reset(ai);
+ LastError *le = new LastError();
+ lastError.reset(le);
+ try {
+
+ MessagingPort& dbMsgPort = *grab;
+ grab = 0;
- le->nPrev++;
+ Message m;
+ while ( 1 ) {
+ m.reset();
- DbResponse dbresponse;
- if ( !assembleResponse( m, dbresponse ) ) {
- cout << curTimeMillis() % 10000 << " end msg " << dbMsgPort.farEnd.toString() << endl;
- if ( dbMsgPort.farEnd.isLocalHost() ) {
+ if ( !dbMsgPort.recv(m) ) {
+ log() << "end connection " << dbMsgPort.farEnd.toString() << endl;
dbMsgPort.shutdown();
- sleepmillis(50);
- problem() << "exiting end msg" << endl;
- exit(EXIT_SUCCESS);
+ break;
}
- else {
- cout << " (not from localhost, ignoring end msg)" << endl;
+
+ le->nPrev++;
+
+ DbResponse dbresponse;
+ if ( !assembleResponse( m, dbresponse ) ) {
+ out() << curTimeMillis() % 10000 << " end msg " << dbMsgPort.farEnd.toString() << endl;
+ if ( dbMsgPort.farEnd.isLocalHost() ) {
+ dbMsgPort.shutdown();
+ sleepmillis(50);
+ problem() << "exiting end msg" << endl;
+ exit(EXIT_SUCCESS);
+ }
+ else {
+ out() << " (not from localhost, ignoring end msg)" << endl;
+ }
}
+
+ if ( dbresponse.response )
+ dbMsgPort.reply(m, *dbresponse.response, dbresponse.responseTo);
}
- if ( dbresponse.response )
- dbMsgPort.reply(m, *dbresponse.response, dbresponse.responseTo);
}
-
- }
- catch ( AssertionException& ) {
- problem() << "Uncaught AssertionException, terminating" << endl;
- exit(15);
- }
- catch ( std::exception &e ) {
- problem() << "Uncaught std::exception: " << e.what() << ", terminating" << endl;
- exit( 15 );
- }
- catch ( ... ) {
- problem() << "Uncaught exception, terminating" << endl;
- exit( 15 );
+ catch ( AssertionException& ) {
+ problem() << "Uncaught AssertionException, terminating" << endl;
+ exit(15);
+ }
+ catch ( std::exception &e ) {
+ problem() << "Uncaught std::exception: " << e.what() << ", terminating" << endl;
+ exit( 15 );
+ }
+ catch ( ... ) {
+ problem() << "Uncaught exception, terminating" << endl;
+ exit( 15 );
+ }
}
-}
-void msg(const char *m, const char *address, int port, int extras = 0) {
+ void msg(const char *m, const char *address, int port, int extras = 0) {
- SockAddr db(address, port);
+ SockAddr db(address, port);
// SockAddr db("127.0.0.1", DBPort);
// SockAddr db("192.168.37.1", MessagingPort::DBPort);
// SockAddr db("10.0.21.60", MessagingPort::DBPort);
// SockAddr db("172.16.0.179", MessagingPort::DBPort);
- MessagingPort p;
- if ( !p.connect(db) )
- return;
-
- const int Loops = 1;
- for ( int q = 0; q < Loops; q++ ) {
- Message send;
- Message response;
-
- send.setData( dbMsg , m);
- int len = send.data->dataLen();
-
- for ( int i = 0; i < extras; i++ )
- p.say(/*db, */send);
-
- Timer t;
- bool ok = p.call(send, response);
- double tm = t.micros() + 1;
- cout << " ****ok. response.data:" << ok << " time:" << tm / 1000.0 << "ms " <<
- ((double) len) * 8 / 1000000 / (tm/1000000) << "Mbps" << endl;
- if ( q+1 < Loops ) {
- cout << "\t\tSLEEP 8 then sending again as a test" << endl;
- sleepsecs(8);
+ MessagingPort p;
+ if ( !p.connect(db) )
+ return;
+
+ const int Loops = 1;
+ for ( int q = 0; q < Loops; q++ ) {
+ Message send;
+ Message response;
+
+ send.setData( dbMsg , m);
+ int len = send.data->dataLen();
+
+ for ( int i = 0; i < extras; i++ )
+ p.say(/*db, */send);
+
+ Timer t;
+ bool ok = p.call(send, response);
+ double tm = t.micros() + 1;
+ out() << " ****ok. response.data:" << ok << " time:" << tm / 1000.0 << "ms " <<
+ ((double) len) * 8 / 1000000 / (tm/1000000) << "Mbps" << endl;
+ if ( q+1 < Loops ) {
+ out() << "\t\tSLEEP 8 then sending again as a test" << endl;
+ sleepsecs(8);
+ }
}
- }
- sleepsecs(1);
+ sleepsecs(1);
- p.shutdown();
-}
+ p.shutdown();
+ }
-void msg(const char *m, int extras = 0) {
- msg(m, "127.0.0.1", DBPort, extras);
-}
+ void msg(const char *m, int extras = 0) {
+ msg(m, "127.0.0.1", DBPort, extras);
+ }
-void repairDatabases() {
- dblock lk;
- vector< string > dbNames;
- getDatabaseNames( dbNames );
- for ( vector< string >::iterator i = dbNames.begin(); i != dbNames.end(); ++i ) {
- string dbName = *i;
- assert( !setClientTempNs( dbName.c_str() ) );
- PhysicalDataFile *p = database->getFile( 0 );
- PDFHeader *h = p->getHeader();
- if ( !h->currentVersion() ) {
- // QUESTION: Repair even if file format is higher version than code?
- log() << "repairing database " << dbName << " with pdfile version " << h->version << "." << h->versionMinor << ", "
- << "new version: " << VERSION << "." << VERSION_MINOR << endl;
- string errmsg;
- assert( repairDatabase( dbName.c_str(), errmsg ) );
- } else {
- closeClient( dbName.c_str() );
+ void repairDatabases() {
+ dblock lk;
+ vector< string > dbNames;
+ getDatabaseNames( dbNames );
+ for ( vector< string >::iterator i = dbNames.begin(); i != dbNames.end(); ++i ) {
+ string dbName = *i;
+ assert( !setClientTempNs( dbName.c_str() ) );
+ PhysicalDataFile *p = database->getFile( 0 );
+ PDFHeader *h = p->getHeader();
+ if ( !h->currentVersion() ) {
+ // QUESTION: Repair even if file format is higher version than code?
+ log() << "repairing database " << dbName << " with pdfile version " << h->version << "." << h->versionMinor << ", "
+ << "new version: " << VERSION << "." << VERSION_MINOR << endl;
+ string errmsg;
+ assert( repairDatabase( dbName.c_str(), errmsg ) );
+ } else {
+ closeClient( dbName.c_str() );
+ }
}
}
-}
-void clearTmpFiles() {
- boost::filesystem::path path( dbpath );
- for ( boost::filesystem::directory_iterator i( path );
- i != boost::filesystem::directory_iterator(); ++i ) {
- string fileName = i->leaf();
- if ( boost::filesystem::is_directory( *i ) &&
- fileName.length() > 2 && fileName.substr( 0, 3 ) == "tmp" )
- boost::filesystem::remove_all( *i );
+ void clearTmpFiles() {
+ boost::filesystem::path path( dbpath );
+ for ( boost::filesystem::directory_iterator i( path );
+ i != boost::filesystem::directory_iterator(); ++i ) {
+ string fileName = i->leaf();
+ if ( boost::filesystem::is_directory( *i ) &&
+ fileName.length() > 2 && fileName.substr( 0, 3 ) == "tmp" )
+ boost::filesystem::remove_all( *i );
+ }
}
-}
-Timer startupSrandTimer;
+ Timer startupSrandTimer;
-void segvhandler(int x);
-void initAndListen(int listenPort, const char *appserverLoc = null) {
- clearTmpFiles();
+ void segvhandler(int x);
+ void initAndListen(int listenPort, const char *appserverLoc = null) {
+ clearTmpFiles();
- if ( opLogging )
- log() << "opLogging = " << opLogging << endl;
- _oplog.init();
+ if ( opLogging )
+ log() << "opLogging = " << opLogging << endl;
+ _oplog.init();
#if !defined(_WIN32)
- assert( signal(SIGSEGV, segvhandler) != SIG_ERR );
+ assert( signal(SIGSEGV, segvhandler) != SIG_ERR );
#endif
#if !defined(_WIN32)
- pid_t pid = 0;
- pid = getpid();
+ pid_t pid = 0;
+ pid = getpid();
#else
- int pid=0;
+ int pid=0;
#endif
- log() << "Mongo DB : starting : pid = " << pid << " port = " << port << " dbpath = " << dbpath
- << " master = " << master << " slave = " << slave << endl;
+ log() << "Mongo DB : starting : pid = " << pid << " port = " << port << " dbpath = " << dbpath
+ << " master = " << master << " slave = " << slave << endl;
#if !defined(NOJNI)
- if ( useJNI ) {
- JavaJS = new JavaJSImpl(appserverLoc);
- javajstest();
- }
+ if ( useJNI ) {
+ JavaJS = new JavaJSImpl(appserverLoc);
+ javajstest();
+ }
#endif
- setupSignals();
+ setupSignals();
- repairDatabases();
+ repairDatabases();
- /* this is for security on certain platforms */
- srand(curTimeMillis() ^ startupSrandTimer.micros());
+ /* this is for security on certain platforms */
+ srand(curTimeMillis() ^ startupSrandTimer.micros());
- listen(listenPort);
-}
+ listen(listenPort);
+ }
//ofstream problems("dbproblems.log", ios_base::app | ios_base::out);
-int test2();
-void testClient();
-void pipeSigHandler( int signal );
+ int test2();
+ void testClient();
+ void pipeSigHandler( int signal );
} // namespace mongo
@@ -341,12 +341,12 @@ int main(int argc, char* argv[], char *envp[] )
unsigned x = 0x12345678;
unsigned char& b = (unsigned char&) x;
if ( b != 0x78 ) {
- cout << "big endian cpus not yet supported" << endl;
+ out() << "big endian cpus not yet supported" << endl;
return 33;
}
}
- DEV cout << "warning: DEV mode enabled\n";
+ DEV out() << "warning: DEV mode enabled\n";
#if !defined(_WIN32)
signal(SIGPIPE, pipeSigHandler);
@@ -363,7 +363,7 @@ int main(int argc, char* argv[], char *envp[] )
JavaJS = new JavaJSImpl();
javajstest();
#else
- cout << "NOJNI build cannot test" << endl;
+ out() << "NOJNI build cannot test" << endl;
#endif
return 0;
}
@@ -467,7 +467,7 @@ int main(int argc, char* argv[], char *envp[] )
else if ( strncmp(s.c_str(), "--oplog", 7) == 0 ) {
int x = s[7] - '0';
if ( x < 0 || x > 7 ) {
- cout << "can't interpret --oplog setting" << endl;
+ out() << "can't interpret --oplog setting" << endl;
exit(13);
}
opLogging = x;
@@ -480,50 +480,50 @@ int main(int argc, char* argv[], char *envp[] )
}
usage:
- cout << "Mongo db ";
+ out() << "Mongo db ";
#if defined(NOJNI)
- cout << "[nojni build] ";
+ out() << "[nojni build] ";
#endif
- cout << "usage:\n";
- cout << " run run db" << endl;
- cout << " msg end [port] shut down db server listening on port (or default)" << endl;
- cout << " msg [msg] [port] send a request to the db server listening on port (or default)" << endl;
- cout << " msglots send a bunch of test messages, and then wait for answer on the last one" << endl;
- cout << " longmsg send a long test message to the db server" << endl;
- cout << " quicktest just check basic assertions and exit" << endl;
- cout << " test2 run test2() - see code" << endl;
- cout << "\nOptions:\n";
- cout << " --help show this usage information\n";
- cout << " --port <portno> specify port number, default is 27017\n";
- cout << " --dbpath <root> directory for datafiles, default is /data/db/\n";
- cout << " --quiet quieter output\n";
- cout << " --cpu show cpu+iowait utilization periodically\n";
- cout << " --verbose\n";
- cout << " --objcheck inspect client data for validity on receipt\n";
- cout << " --quota enable db quota management\n";
- cout << " --appsrvpath <path> root directory for the babble app server\n";
- cout << " --nocursors diagnostic/debugging option\n";
- cout << " --nojni" << endl;
- cout << " --oplog<n> 0=off 1=W 2=R 3=both 7=W+some reads" << endl;
- cout << " --oplogSize <size> custom size for operation log" << endl;
- cout << "\nReplication:" << endl;
- cout << " --master\n";
- cout << " --slave" << endl;
- cout << " --source <server:port>" << endl;
- cout << " --pairwith <server:port> <arbiter>" << endl;
- cout << endl;
+ out() << "usage:\n";
+ out() << " run run db" << endl;
+ out() << " msg end [port] shut down db server listening on port (or default)" << endl;
+ out() << " msg [msg] [port] send a request to the db server listening on port (or default)" << endl;
+ out() << " msglots send a bunch of test messages, and then wait for answer on the last one" << endl;
+ out() << " longmsg send a long test message to the db server" << endl;
+ out() << " quicktest just check basic assertions and exit" << endl;
+ out() << " test2 run test2() - see code" << endl;
+ out() << "\nOptions:\n";
+ out() << " --help show this usage information\n";
+ out() << " --port <portno> specify port number, default is 27017\n";
+ out() << " --dbpath <root> directory for datafiles, default is /data/db/\n";
+ out() << " --quiet quieter output\n";
+ out() << " --cpu show cpu+iowait utilization periodically\n";
+ out() << " --verbose\n";
+ out() << " --objcheck inspect client data for validity on receipt\n";
+ out() << " --quota enable db quota management\n";
+ out() << " --appsrvpath <path> root directory for the babble app server\n";
+ out() << " --nocursors diagnostic/debugging option\n";
+ out() << " --nojni" << endl;
+ out() << " --oplog<n> 0=off 1=W 2=R 3=both 7=W+some reads" << endl;
+ out() << " --oplogSize <size> custom size for operation log" << endl;
+ out() << "\nReplication:" << endl;
+ out() << " --master\n";
+ out() << " --slave" << endl;
+ out() << " --source <server:port>" << endl;
+ out() << " --pairwith <server:port> <arbiter>" << endl;
+ out() << endl;
return 0;
}
namespace mongo {
-/* we do not use log() below as it uses a mutex and that could cause deadlocks.
-*/
+ /* we do not use log() below as it uses a mutex and that could cause deadlocks.
+ */
-string getDbContext();
+ string getDbContext();
-#undef cout
+#undef out()
#if !defined(_WIN32)
@@ -533,45 +533,45 @@ string getDbContext();
namespace mongo {
-void pipeSigHandler( int signal ) {
- psignal( signal, "Signal Received : ");
-}
+ void pipeSigHandler( int signal ) {
+ psignal( signal, "Signal Received : ");
+ }
-int segvs = 0;
-void segvhandler(int x) {
- if ( ++segvs > 1 ) {
- signal(x, SIG_DFL);
- if ( segvs == 2 ) {
- cout << "\n\n\n got 2nd SIGSEGV" << endl;
- sayDbContext();
+ int segvs = 0;
+ void segvhandler(int x) {
+ if ( ++segvs > 1 ) {
+ signal(x, SIG_DFL);
+ if ( segvs == 2 ) {
+ out() << "\n\n\n got 2nd SIGSEGV" << endl;
+ sayDbContext();
+ }
+ return;
}
- return;
- }
- cout << "got SIGSEGV " << x << ", terminating :-(" << endl;
- sayDbContext();
+ out() << "got SIGSEGV " << x << ", terminating :-(" << endl;
+ sayDbContext();
// closeAllSockets();
// MemoryMappedFile::closeAllFiles();
// flushOpLog();
- dbexit(14);
-}
+ dbexit(14);
+ }
-void mysighandler(int x) {
- signal(x, SIG_IGN);
- cout << "got kill or ctrl c signal " << x << ", will terminate after current cmd ends" << endl;
- {
- dblock lk;
- log() << "now exiting" << endl;
- exit(12);
+ void mysighandler(int x) {
+ signal(x, SIG_IGN);
+ out() << "got kill or ctrl c signal " << x << ", will terminate after current cmd ends" << endl;
+ {
+ dblock lk;
+ log() << "now exiting" << endl;
+ exit(12);
+ }
}
-}
-void setupSignals() {
- assert( signal(SIGINT, mysighandler) != SIG_ERR );
- assert( signal(SIGTERM, mysighandler) != SIG_ERR );
-}
+ void setupSignals() {
+ assert( signal(SIGINT, mysighandler) != SIG_ERR );
+ assert( signal(SIGTERM, mysighandler) != SIG_ERR );
+ }
#else
-void setupSignals() {}
+ void setupSignals() {}
#endif
} // namespace mongo
diff --git a/db/db.h b/db/db.h
index 72c5bc8a4c6..cc292112537 100644
--- a/db/db.h
+++ b/db/db.h
@@ -21,57 +21,57 @@
namespace mongo {
-void jniCallback(Message& m, Message& out);
+ void jniCallback(Message& m, Message& out);
-class MutexInfo {
- unsigned long long start, enter, timeLocked; // all in microseconds
- int locked;
+ class MutexInfo {
+ unsigned long long start, enter, timeLocked; // all in microseconds
+ int locked;
-public:
- MutexInfo() : locked(0) {
- start = curTimeMicros64();
- }
- void entered() {
- enter = curTimeMicros64();
- locked++;
- assert( locked == 1 );
- }
- void leaving() {
- locked--;
- assert( locked == 0 );
- timeLocked += curTimeMicros64() - enter;
- }
- int isLocked() const {
- return locked;
- }
- void timingInfo(unsigned long long &s, unsigned long long &tl) {
- s = start;
- tl = timeLocked;
- }
-};
+ public:
+ MutexInfo() : locked(0) {
+ start = curTimeMicros64();
+ }
+ void entered() {
+ enter = curTimeMicros64();
+ locked++;
+ assert( locked == 1 );
+ }
+ void leaving() {
+ locked--;
+ assert( locked == 0 );
+ timeLocked += curTimeMicros64() - enter;
+ }
+ int isLocked() const {
+ return locked;
+ }
+ void timingInfo(unsigned long long &s, unsigned long long &tl) {
+ s = start;
+ tl = timeLocked;
+ }
+ };
-extern boost::mutex dbMutex;
-extern MutexInfo dbMutexInfo;
+ extern boost::mutex dbMutex;
+ extern MutexInfo dbMutexInfo;
//extern int dbLocked;
-struct lock {
- boostlock bl_;
- MutexInfo& info_;
- lock( boost::mutex &mutex, MutexInfo &info ) :
- bl_( mutex ),
- info_( info ) {
- info_.entered();
- }
- ~lock() {
- info_.leaving();
- }
-};
+ struct lock {
+ boostlock bl_;
+ MutexInfo& info_;
+ lock( boost::mutex &mutex, MutexInfo &info ) :
+ bl_( mutex ),
+ info_( info ) {
+ info_.entered();
+ }
+ ~lock() {
+ info_.leaving();
+ }
+ };
-struct dblock : public lock {
- dblock() :
- lock( dbMutex, dbMutexInfo ) {
- }
-};
+ struct dblock : public lock {
+ dblock() :
+ lock( dbMutex, dbMutexInfo ) {
+ }
+ };
} // namespace mongo
@@ -79,25 +79,25 @@ struct dblock : public lock {
namespace mongo {
-/* a scoped release of a mutex temporarily -- like a scopedlock but reversed.
-*/
-struct temprelease {
- boost::mutex& m;
- temprelease(boost::mutex& _m) : m(_m) {
+ /* a scoped release of a mutex temporarily -- like a scopedlock but reversed.
+ */
+ struct temprelease {
+ boost::mutex& m;
+ temprelease(boost::mutex& _m) : m(_m) {
#if BOOST_VERSION >= 103500
- m.unlock();
+ m.unlock();
#else
- boost::detail::thread::lock_ops<boost::mutex>::unlock(m);
+ boost::detail::thread::lock_ops<boost::mutex>::unlock(m);
#endif
- }
- ~temprelease() {
+ }
+ ~temprelease() {
#if BOOST_VERSION >= 103500
- m.lock();
+ m.lock();
#else
- boost::detail::thread::lock_ops<boost::mutex>::lock(m);
+ boost::detail::thread::lock_ops<boost::mutex>::lock(m);
#endif
- }
-};
+ }
+ };
} // namespace mongo
@@ -106,99 +106,99 @@ struct temprelease {
namespace mongo {
// tempish...move to TLS or pass all the way down as a parm
-extern map<string,Database*> databases;
-extern Database *database;
-extern const char *curNs;
-extern bool master;
-
-inline string getKey( const char *ns, const char *path ) {
- char cl[256];
- nsToClient(ns, cl);
- return string( cl ) + ":" + path;
-}
-
-/* returns true if the database ("database") did not exist, and it was created on this call */
-inline bool setClient(const char *ns, const char *path=dbpath) {
- /* we must be in critical section at this point as these are global
- variables.
- */
- assert( dbMutexInfo.isLocked() );
-
- curNs = ns;
- string key = getKey( ns, path );
- map<string,Database*>::iterator it = databases.find(key);
- if ( it != databases.end() ) {
- database = it->second;
- return false;
+ extern map<string,Database*> databases;
+ extern Database *database;
+ extern const char *curNs;
+ extern bool master;
+
+ inline string getKey( const char *ns, const char *path ) {
+ char cl[256];
+ nsToClient(ns, cl);
+ return string( cl ) + ":" + path;
}
- // when master for replication, we advertise all the db's, and that
- // looks like a 'first operation'. so that breaks this log message's
- // meaningfulness. instead of fixing (which would be better), we just
- // stop showing for now.
- // 2008-12-22 We now open every database on startup, so this log is
- // no longer helpful. Commenting.
+ /* returns true if the database ("database") did not exist, and it was created on this call */
+ inline bool setClient(const char *ns, const char *path=dbpath) {
+ /* we must be in critical section at this point as these are global
+ variables.
+ */
+ assert( dbMutexInfo.isLocked() );
+
+ curNs = ns;
+ string key = getKey( ns, path );
+ map<string,Database*>::iterator it = databases.find(key);
+ if ( it != databases.end() ) {
+ database = it->second;
+ return false;
+ }
+
+ // when master for replication, we advertise all the db's, and that
+ // looks like a 'first operation'. so that breaks this log message's
+ // meaningfulness. instead of fixing (which would be better), we just
+ // stop showing for now.
+ // 2008-12-22 We now open every database on startup, so this log is
+ // no longer helpful. Commenting.
// if( !master )
// log() << "first operation for database " << key << endl;
- char cl[256];
- nsToClient(ns, cl);
- bool justCreated;
- Database *c = new Database(cl, justCreated, path);
- databases[key] = c;
- database = c;
- database->finishInit();
+ char cl[256];
+ nsToClient(ns, cl);
+ bool justCreated;
+ Database *c = new Database(cl, justCreated, path);
+ databases[key] = c;
+ database = c;
+ database->finishInit();
- return justCreated;
-}
+ return justCreated;
+ }
// shared functionality for removing references to a database from this program instance
// does not delete the files on disk
-void closeClient( const char *cl, const char *path = dbpath );
+ void closeClient( const char *cl, const char *path = dbpath );
+
+ inline void eraseDatabase( const char *ns, const char *path=dbpath ) {
+ string key = getKey( ns, path );
+ databases.erase( key );
+ }
-inline void eraseDatabase( const char *ns, const char *path=dbpath ) {
- string key = getKey( ns, path );
- databases.erase( key );
-}
+ /* We normally keep around a curNs ptr -- if this ns is temporary,
+ use this instead so we don't have a bad ptr. we could have made a copy,
+ but trying to be fast as we call setClient this for every single operation.
+ */
+ inline bool setClientTempNs(const char *ns) {
+ bool jc = setClient(ns);
+ curNs = "";
+ return jc;
+ }
-/* We normally keep around a curNs ptr -- if this ns is temporary,
- use this instead so we don't have a bad ptr. we could have made a copy,
- but trying to be fast as we call setClient this for every single operation.
-*/
-inline bool setClientTempNs(const char *ns) {
- bool jc = setClient(ns);
- curNs = "";
- return jc;
-}
-
-struct dbtemprelease {
- string clientname;
- string clientpath;
- dbtemprelease() {
- if ( database ) {
- clientname = database->name;
- clientpath = database->path;
- }
- dbMutexInfo.leaving();
+ struct dbtemprelease {
+ string clientname;
+ string clientpath;
+ dbtemprelease() {
+ if ( database ) {
+ clientname = database->name;
+ clientpath = database->path;
+ }
+ dbMutexInfo.leaving();
#if BOOST_VERSION >= 103500
- dbMutex.unlock();
+ dbMutex.unlock();
#else
- boost::detail::thread::lock_ops<boost::mutex>::unlock(dbMutex);
+ boost::detail::thread::lock_ops<boost::mutex>::unlock(dbMutex);
#endif
- }
- ~dbtemprelease() {
+ }
+ ~dbtemprelease() {
#if BOOST_VERSION >= 103500
- dbMutex.lock();
+ dbMutex.lock();
#else
- boost::detail::thread::lock_ops<boost::mutex>::lock(dbMutex);
+ boost::detail::thread::lock_ops<boost::mutex>::lock(dbMutex);
#endif
- dbMutexInfo.entered();
- if ( clientname.empty() )
- database = 0;
- else
- setClient(clientname.c_str(), clientpath.c_str());
- }
-};
+ dbMutexInfo.entered();
+ if ( clientname.empty() )
+ database = 0;
+ else
+ setClient(clientname.c_str(), clientpath.c_str());
+ }
+ };
} // namespace mongo
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index a466db6256e..7e6504b4669 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -34,703 +34,703 @@
namespace mongo {
-extern bool quiet;
-extern int queryTraceLevel;
-extern int otherTraceLevel;
-extern int opLogging;
-void flushOpLog();
-int runCount(const char *ns, BSONObj& cmd, string& err);
-
-void clean(const char *ns, NamespaceDetails *d) {
- for ( int i = 0; i < Buckets; i++ )
- d->deletedList[i].Null();
-}
-
-string validateNS(const char *ns, NamespaceDetails *d) {
- bool valid = true;
- stringstream ss;
- ss << "\nvalidate\n";
- ss << " details: " << hex << d << " ofs:" << nsindex(ns)->detailsOffset(d) << dec << endl;
- if ( d->capped )
- ss << " capped:" << d->capped << " max:" << d->max << '\n';
-
- ss << " firstExtent:" << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->ns.buf << '\n';
- ss << " lastExtent:" << d->lastExtent.toString() << " ns:" << d->lastExtent.ext()->ns.buf << '\n';
- try {
- d->firstExtent.ext()->assertOk();
- d->lastExtent.ext()->assertOk();
- } catch (...) {
- valid=false;
- ss << " extent asserted ";
- }
-
- ss << " datasize?:" << d->datasize << " nrecords?:" << d->nrecords << " lastExtentSize:" << d->lastExtentSize << '\n';
- ss << " padding:" << d->paddingFactor << '\n';
- try {
+ extern bool quiet;
+ extern int queryTraceLevel;
+ extern int otherTraceLevel;
+ extern int opLogging;
+ void flushOpLog();
+ int runCount(const char *ns, BSONObj& cmd, string& err);
+
+ void clean(const char *ns, NamespaceDetails *d) {
+ for ( int i = 0; i < Buckets; i++ )
+ d->deletedList[i].Null();
+ }
+
+ string validateNS(const char *ns, NamespaceDetails *d) {
+ bool valid = true;
+ stringstream ss;
+ ss << "\nvalidate\n";
+ ss << " details: " << hex << d << " ofs:" << nsindex(ns)->detailsOffset(d) << dec << endl;
+ if ( d->capped )
+ ss << " capped:" << d->capped << " max:" << d->max << '\n';
+
+ ss << " firstExtent:" << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->ns.buf << '\n';
+ ss << " lastExtent:" << d->lastExtent.toString() << " ns:" << d->lastExtent.ext()->ns.buf << '\n';
+ try {
+ d->firstExtent.ext()->assertOk();
+ d->lastExtent.ext()->assertOk();
+ } catch (...) {
+ valid=false;
+ ss << " extent asserted ";
+ }
+ ss << " datasize?:" << d->datasize << " nrecords?:" << d->nrecords << " lastExtentSize:" << d->lastExtentSize << '\n';
+ ss << " padding:" << d->paddingFactor << '\n';
try {
- ss << " first extent:\n";
- d->firstExtent.ext()->dump(ss);
- valid = valid && d->firstExtent.ext()->validates();
- }
- catch (...) {
- ss << "\n exception firstextent\n" << endl;
- }
-
- auto_ptr<Cursor> c = theDataFileMgr.findAll(ns);
- int n = 0;
- long long len = 0;
- long long nlen = 0;
- set<DiskLoc> recs;
- int outOfOrder = 0;
- DiskLoc cl_last;
- while ( c->ok() ) {
- n++;
-
- DiskLoc cl = c->currLoc();
- if ( n < 1000000 )
- recs.insert(cl);
+
+ try {
+ ss << " first extent:\n";
+ d->firstExtent.ext()->dump(ss);
+ valid = valid && d->firstExtent.ext()->validates();
+ }
+ catch (...) {
+ ss << "\n exception firstextent\n" << endl;
+ }
+
+ auto_ptr<Cursor> c = theDataFileMgr.findAll(ns);
+ int n = 0;
+ long long len = 0;
+ long long nlen = 0;
+ set<DiskLoc> recs;
+ int outOfOrder = 0;
+ DiskLoc cl_last;
+ while ( c->ok() ) {
+ n++;
+
+ DiskLoc cl = c->currLoc();
+ if ( n < 1000000 )
+ recs.insert(cl);
+ if ( d->capped ) {
+ if ( cl < cl_last )
+ outOfOrder++;
+ cl_last = cl;
+ }
+
+ Record *r = c->_current();
+ len += r->lengthWithHeaders;
+ nlen += r->netLength();
+ c->advance();
+ }
if ( d->capped ) {
- if ( cl < cl_last )
- outOfOrder++;
- cl_last = cl;
+ ss << " capped outOfOrder:" << outOfOrder;
+ if ( outOfOrder > 1 ) {
+ valid = false;
+ ss << " ???";
+ }
+ else ss << " (OK)";
+ ss << '\n';
}
+ ss << " " << n << " objects found, nobj:" << d->nrecords << "\n";
+ ss << " " << len << " bytes data w/headers\n";
+ ss << " " << nlen << " bytes data wout/headers\n";
- Record *r = c->_current();
- len += r->lengthWithHeaders;
- nlen += r->netLength();
- c->advance();
- }
- if ( d->capped ) {
- ss << " capped outOfOrder:" << outOfOrder;
- if ( outOfOrder > 1 ) {
- valid = false;
- ss << " ???";
+ ss << " deletedList: ";
+ for ( int i = 0; i < Buckets; i++ ) {
+ ss << (d->deletedList[i].isNull() ? '0' : '1');
}
- else ss << " (OK)";
- ss << '\n';
- }
- ss << " " << n << " objects found, nobj:" << d->nrecords << "\n";
- ss << " " << len << " bytes data w/headers\n";
- ss << " " << nlen << " bytes data wout/headers\n";
-
- ss << " deletedList: ";
- for ( int i = 0; i < Buckets; i++ ) {
- ss << (d->deletedList[i].isNull() ? '0' : '1');
- }
- ss << endl;
- int ndel = 0;
- long long delSize = 0;
- int incorrect = 0;
- for ( int i = 0; i < Buckets; i++ ) {
- DiskLoc loc = d->deletedList[i];
- try {
- int k = 0;
- while ( !loc.isNull() ) {
- if ( recs.count(loc) )
- incorrect++;
- ndel++;
-
- if ( loc.questionable() ) {
- if ( loc.a() <= 0 || strstr(ns, "hudsonSmall") == 0 ) {
- ss << " ?bad deleted loc: " << loc.toString() << " bucket:" << i << " k:" << k << endl;
- valid = false;
- break;
+ ss << endl;
+ int ndel = 0;
+ long long delSize = 0;
+ int incorrect = 0;
+ for ( int i = 0; i < Buckets; i++ ) {
+ DiskLoc loc = d->deletedList[i];
+ try {
+ int k = 0;
+ while ( !loc.isNull() ) {
+ if ( recs.count(loc) )
+ incorrect++;
+ ndel++;
+
+ if ( loc.questionable() ) {
+ if ( loc.a() <= 0 || strstr(ns, "hudsonSmall") == 0 ) {
+ ss << " ?bad deleted loc: " << loc.toString() << " bucket:" << i << " k:" << k << endl;
+ valid = false;
+ break;
+ }
}
- }
- DeletedRecord *d = loc.drec();
- delSize += d->lengthWithHeaders;
- loc = d->nextDeleted;
- k++;
+ DeletedRecord *d = loc.drec();
+ delSize += d->lengthWithHeaders;
+ loc = d->nextDeleted;
+ k++;
+ }
+ } catch (...) {
+ ss <<" ?exception in deleted chain for bucket " << i << endl;
+ valid = false;
}
- } catch (...) {
- ss <<" ?exception in deleted chain for bucket " << i << endl;
+ }
+ ss << " deleted: n: " << ndel << " size: " << delSize << endl;
+ if ( incorrect ) {
+ ss << " ?corrupt: " << incorrect << " records from datafile are in deleted list\n";
valid = false;
}
- }
- ss << " deleted: n: " << ndel << " size: " << delSize << endl;
- if ( incorrect ) {
- ss << " ?corrupt: " << incorrect << " records from datafile are in deleted list\n";
- valid = false;
- }
- int idxn = 0;
- try {
- ss << " nIndexes:" << d->nIndexes << endl;
- for ( ; idxn < d->nIndexes; idxn++ ) {
- ss << " " << d->indexes[idxn].indexNamespace() << " keys:" <<
- d->indexes[idxn].head.btree()->fullValidate(d->indexes[idxn].head, d->indexes[idxn].keyPattern()) << endl;
+ int idxn = 0;
+ try {
+ ss << " nIndexes:" << d->nIndexes << endl;
+ for ( ; idxn < d->nIndexes; idxn++ ) {
+ ss << " " << d->indexes[idxn].indexNamespace() << " keys:" <<
+ d->indexes[idxn].head.btree()->fullValidate(d->indexes[idxn].head, d->indexes[idxn].keyPattern()) << endl;
+ }
}
+ catch (...) {
+ ss << "\n exception during index validate idxn:" << idxn << endl;
+ valid=false;
+ }
+
}
- catch (...) {
- ss << "\n exception during index validate idxn:" << idxn << endl;
- valid=false;
+ catch (AssertionException) {
+ ss << "\n exception during validate\n" << endl;
+ valid = false;
}
- }
- catch (AssertionException) {
- ss << "\n exception during validate\n" << endl;
- valid = false;
- }
-
- if ( !valid )
- ss << " ns corrupt, requires dbchk\n";
-
- return ss.str();
-}
+ if ( !valid )
+ ss << " ns corrupt, requires dbchk\n";
-/* reset any errors so that getlasterror comes back clean.
-
- useful before performing a long series of operations where we want to
- see if any of the operations triggered an error, but don't want to check
- after each op as that woudl be a client/server turnaround.
-*/
-class CmdResetError : public Command {
-public:
- virtual bool logTheOp() {
- return false;
+ return ss.str();
}
- virtual bool slaveOk() {
- return true;
- }
- CmdResetError() : Command("reseterror") {}
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- LastError *le = lastError.get();
- assert( le );
- le->resetError();
- return true;
- }
-} cmdResetError;
-class CmdGetLastError : public Command {
-public:
- virtual bool logTheOp() {
- return false;
- }
- virtual bool slaveOk() {
- return true;
- }
- CmdGetLastError() : Command("getlasterror") {}
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- LastError *le = lastError.get();
- assert( le );
- le->nPrev--; // we don't count as an operation
- if ( le->nPrev != 1 || !le->haveError() ) {
- result.appendNull("err");
+ /* reset any errors so that getlasterror comes back clean.
+
+ useful before performing a long series of operations where we want to
+ see if any of the operations triggered an error, but don't want to check
+ after each op as that woudl be a client/server turnaround.
+ */
+ class CmdResetError : public Command {
+ public:
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() {
return true;
}
- result.append("err", le->msg);
- return true;
- }
-} cmdGetLastError;
+ CmdResetError() : Command("reseterror") {}
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ LastError *le = lastError.get();
+ assert( le );
+ le->resetError();
+ return true;
+ }
+ } cmdResetError;
-/* for testing purposes only */
-class CmdForceError : public Command {
-public:
- virtual bool logTheOp() {
- return false;
- }
- virtual bool slaveOk() {
- return true;
- }
- CmdForceError() : Command("forceerror") {}
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- uassert("forced error", false);
- return true;
- }
-} cmdForceError;
+ class CmdGetLastError : public Command {
+ public:
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() {
+ return true;
+ }
+ CmdGetLastError() : Command("getlasterror") {}
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ LastError *le = lastError.get();
+ assert( le );
+ le->nPrev--; // we don't count as an operation
+ if ( le->nPrev != 1 || !le->haveError() ) {
+ result.appendNull("err");
+ return true;
+ }
+ result.append("err", le->msg);
+ return true;
+ }
+ } cmdGetLastError;
-class CmdGetPrevError : public Command {
-public:
- virtual bool logTheOp() {
- return false;
- }
- virtual bool slaveOk() {
- return true;
- }
- CmdGetPrevError() : Command("getpreverror") {}
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- LastError *le = lastError.get();
- assert( le );
- le->nPrev--; // we don't count as an operation
- if ( !le->haveError() ) {
- result.appendNull("err");
- result.append("nPrev", 1);
- return true;
- }
- result.append("err", le->msg);
- result.append("nPrev", le->nPrev);
- return true;
- }
-} cmdGetPrevError;
+ /* for testing purposes only */
+ class CmdForceError : public Command {
+ public:
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() {
+ return true;
+ }
+ CmdForceError() : Command("forceerror") {}
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ uassert("forced error", false);
+ return true;
+ }
+ } cmdForceError;
-class CmdDropDatabase : public Command {
-public:
- virtual bool logTheOp() {
- return true;
- }
- virtual bool slaveOk() {
- return false;
- }
- CmdDropDatabase() : Command("dropDatabase") {}
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- BSONElement e = cmdObj.findElement(name);
- log() << "dropDatabase " << ns << endl;
- int p = (int) e.number();
- if ( p != 1 )
+ class CmdGetPrevError : public Command {
+ public:
+ virtual bool logTheOp() {
return false;
- dropDatabase(ns);
- return true;
- }
-} cmdDropDatabase;
+ }
+ virtual bool slaveOk() {
+ return true;
+ }
+ CmdGetPrevError() : Command("getpreverror") {}
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ LastError *le = lastError.get();
+ assert( le );
+ le->nPrev--; // we don't count as an operation
+ if ( !le->haveError() ) {
+ result.appendNull("err");
+ result.append("nPrev", 1);
+ return true;
+ }
+ result.append("err", le->msg);
+ result.append("nPrev", le->nPrev);
+ return true;
+ }
+ } cmdGetPrevError;
-class CmdRepairDatabase : public Command {
-public:
- virtual bool logTheOp() {
- return false;
- }
- virtual bool slaveOk() {
- return true;
- }
- CmdRepairDatabase() : Command("repairDatabase") {}
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- BSONElement e = cmdObj.findElement(name);
- log() << "repairDatabase " << ns << endl;
- int p = (int) e.number();
- if ( p != 1 )
+ class CmdDropDatabase : public Command {
+ public:
+ virtual bool logTheOp() {
+ return true;
+ }
+ virtual bool slaveOk() {
return false;
- e = cmdObj.findElement( "preserveClonedFilesOnFailure" );
- bool preserveClonedFilesOnFailure = e.isBoolean() && e.boolean();
- e = cmdObj.findElement( "backupOriginalFiles" );
- bool backupOriginalFiles = e.isBoolean() && e.boolean();
- return repairDatabase( ns, errmsg, preserveClonedFilesOnFailure, backupOriginalFiles );
- }
-} cmdRepairDatabase;
+ }
+ CmdDropDatabase() : Command("dropDatabase") {}
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ BSONElement e = cmdObj.findElement(name);
+ log() << "dropDatabase " << ns << endl;
+ int p = (int) e.number();
+ if ( p != 1 )
+ return false;
+ dropDatabase(ns);
+ return true;
+ }
+ } cmdDropDatabase;
-/* set db profiling level
- todo: how do we handle profiling information put in the db with replication?
- sensibly or not?
-*/
-class CmdProfile : public Command {
-public:
- virtual bool slaveOk() {
- return true;
- }
- CmdProfile() : Command("profile") {}
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- BSONElement e = cmdObj.findElement(name);
- result.append("was", (double) database->profile);
- int p = (int) e.number();
- bool ok = false;
- if ( p == -1 )
- ok = true;
- else if ( p >= 0 && p <= 2 ) {
- ok = true;
- database->profile = p;
+ class CmdRepairDatabase : public Command {
+ public:
+ virtual bool logTheOp() {
+ return false;
}
- return ok;
- }
-} cmdProfile;
+ virtual bool slaveOk() {
+ return true;
+ }
+ CmdRepairDatabase() : Command("repairDatabase") {}
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ BSONElement e = cmdObj.findElement(name);
+ log() << "repairDatabase " << ns << endl;
+ int p = (int) e.number();
+ if ( p != 1 )
+ return false;
+ e = cmdObj.findElement( "preserveClonedFilesOnFailure" );
+ bool preserveClonedFilesOnFailure = e.isBoolean() && e.boolean();
+ e = cmdObj.findElement( "backupOriginalFiles" );
+ bool backupOriginalFiles = e.isBoolean() && e.boolean();
+ return repairDatabase( ns, errmsg, preserveClonedFilesOnFailure, backupOriginalFiles );
+ }
+ } cmdRepairDatabase;
+
+ /* set db profiling level
+ todo: how do we handle profiling information put in the db with replication?
+ sensibly or not?
+ */
+ class CmdProfile : public Command {
+ public:
+ virtual bool slaveOk() {
+ return true;
+ }
+ CmdProfile() : Command("profile") {}
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ BSONElement e = cmdObj.findElement(name);
+ result.append("was", (double) database->profile);
+ int p = (int) e.number();
+ bool ok = false;
+ if ( p == -1 )
+ ok = true;
+ else if ( p >= 0 && p <= 2 ) {
+ ok = true;
+ database->profile = p;
+ }
+ return ok;
+ }
+ } cmdProfile;
-class CmdTimeInfo : public Command {
-public:
- virtual bool slaveOk() {
- return true;
- }
- CmdTimeInfo() : Command("timeinfo") {}
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- unsigned long long last, start, timeLocked;
- dbMutexInfo.timingInfo(start, timeLocked);
- last = curTimeMicros64();
- double tt = (double) last-start;
- double tl = (double) timeLocked;
- result.append("totalTime", tt);
- result.append("lockTime", tl);
- result.append("ratio", tl/tt);
- return true;
- }
-} cmdTimeInfo;
+ class CmdTimeInfo : public Command {
+ public:
+ virtual bool slaveOk() {
+ return true;
+ }
+ CmdTimeInfo() : Command("timeinfo") {}
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ unsigned long long last, start, timeLocked;
+ dbMutexInfo.timingInfo(start, timeLocked);
+ last = curTimeMicros64();
+ double tt = (double) last-start;
+ double tl = (double) timeLocked;
+ result.append("totalTime", tt);
+ result.append("lockTime", tl);
+ result.append("ratio", tl/tt);
+ return true;
+ }
+ } cmdTimeInfo;
-/* just to check if the db has asserted */
-class CmdAssertInfo : public Command {
-public:
- virtual bool slaveOk() {
- return true;
- }
- CmdAssertInfo() : Command("assertinfo") {}
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- result.appendBool("dbasserted", lastAssert[0].isSet() || lastAssert[1].isSet() || lastAssert[2].isSet());
- result.appendBool("asserted", lastAssert[0].isSet() || lastAssert[1].isSet() || lastAssert[2].isSet() || lastAssert[3].isSet());
- result.append("assert", lastAssert[AssertRegular].toString());
- result.append("assertw", lastAssert[AssertW].toString());
- result.append("assertmsg", lastAssert[AssertMsg].toString());
- result.append("assertuser", lastAssert[AssertUser].toString());
- return true;
- }
-} cmdAsserts;
+ /* just to check if the db has asserted */
+ class CmdAssertInfo : public Command {
+ public:
+ virtual bool slaveOk() {
+ return true;
+ }
+ CmdAssertInfo() : Command("assertinfo") {}
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ result.appendBool("dbasserted", lastAssert[0].isSet() || lastAssert[1].isSet() || lastAssert[2].isSet());
+ result.appendBool("asserted", lastAssert[0].isSet() || lastAssert[1].isSet() || lastAssert[2].isSet() || lastAssert[3].isSet());
+ result.append("assert", lastAssert[AssertRegular].toString());
+ result.append("assertw", lastAssert[AssertW].toString());
+ result.append("assertmsg", lastAssert[AssertMsg].toString());
+ result.append("assertuser", lastAssert[AssertUser].toString());
+ return true;
+ }
+ } cmdAsserts;
-class CmdGetOpTime : public Command {
-public:
- virtual bool slaveOk() {
- return true;
- }
- CmdGetOpTime() : Command("getoptime") { }
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- result.appendDate("optime", OpTime::now().asDate());
- return true;
- }
-} cmdgetoptime;
-
-/*
-class Cmd : public Command {
-public:
- Cmd() : Command("") { }
- bool adminOnly() { return true; }
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result) {
- return true;
- }
-} cmd;
-*/
+ class CmdGetOpTime : public Command {
+ public:
+ virtual bool slaveOk() {
+ return true;
+ }
+ CmdGetOpTime() : Command("getoptime") { }
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ result.appendDate("optime", OpTime::now().asDate());
+ return true;
+ }
+ } cmdgetoptime;
+
+ /*
+ class Cmd : public Command {
+ public:
+ Cmd() : Command("") { }
+ bool adminOnly() { return true; }
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result) {
+ return true;
+ }
+ } cmd;
+ */
-class CmdOpLogging : public Command {
-public:
- virtual bool slaveOk() {
- return true;
- }
- CmdOpLogging() : Command("opLogging") { }
- bool adminOnly() {
- return true;
- }
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
- opLogging = (int) cmdObj.findElement(name).number();
- flushOpLog();
- if ( !quiet )
- log() << "CMD: opLogging set to " << opLogging << endl;
- return true;
- }
-} cmdoplogging;
+ class CmdOpLogging : public Command {
+ public:
+ virtual bool slaveOk() {
+ return true;
+ }
+ CmdOpLogging() : Command("opLogging") { }
+ bool adminOnly() {
+ return true;
+ }
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ opLogging = (int) cmdObj.findElement(name).number();
+ flushOpLog();
+ if ( !quiet )
+ log() << "CMD: opLogging set to " << opLogging << endl;
+ return true;
+ }
+ } cmdoplogging;
-/* drop collection */
-class CmdDrop : public Command {
-public:
- CmdDrop() : Command("drop") { }
- virtual bool logTheOp() {
- return true;
- }
- virtual bool slaveOk() {
- return false;
- }
- virtual bool adminOnly() {
- return false;
- }
- virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
- string nsToDrop = database->name + '.' + cmdObj.findElement(name).valuestr();
- NamespaceDetails *d = nsdetails(nsToDrop.c_str());
- if ( !quiet )
- log() << "CMD: drop " << nsToDrop << endl;
- if ( d == 0 ) {
- errmsg = "ns not found";
+ /* drop collection */
+ class CmdDrop : public Command {
+ public:
+ CmdDrop() : Command("drop") { }
+ virtual bool logTheOp() {
+ return true;
+ }
+ virtual bool slaveOk() {
return false;
}
- if ( d->nIndexes != 0 ) {
- // client helper function is supposed to drop the indexes first
- errmsg = "ns has indexes (not permitted on drop)";
+ virtual bool adminOnly() {
return false;
}
- result.append("ns", nsToDrop.c_str());
- ClientCursor::invalidate(nsToDrop.c_str());
- dropNS(nsToDrop);
- return true;
- }
-} cmdDrop;
+ virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ string nsToDrop = database->name + '.' + cmdObj.findElement(name).valuestr();
+ NamespaceDetails *d = nsdetails(nsToDrop.c_str());
+ if ( !quiet )
+ log() << "CMD: drop " << nsToDrop << endl;
+ if ( d == 0 ) {
+ errmsg = "ns not found";
+ return false;
+ }
+ if ( d->nIndexes != 0 ) {
+ // client helper function is supposed to drop the indexes first
+ errmsg = "ns has indexes (not permitted on drop)";
+ return false;
+ }
+ result.append("ns", nsToDrop.c_str());
+ ClientCursor::invalidate(nsToDrop.c_str());
+ dropNS(nsToDrop);
+ return true;
+ }
+ } cmdDrop;
-class CmdQueryTraceLevel : public Command {
-public:
- virtual bool slaveOk() {
- return true;
- }
- CmdQueryTraceLevel() : Command("queryTraceLevel") { }
- bool adminOnly() {
- return true;
- }
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
- queryTraceLevel = (int) cmdObj.findElement(name).number();
- return true;
- }
-} cmdquerytracelevel;
+ class CmdQueryTraceLevel : public Command {
+ public:
+ virtual bool slaveOk() {
+ return true;
+ }
+ CmdQueryTraceLevel() : Command("queryTraceLevel") { }
+ bool adminOnly() {
+ return true;
+ }
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ queryTraceLevel = (int) cmdObj.findElement(name).number();
+ return true;
+ }
+ } cmdquerytracelevel;
-class CmdTraceAll : public Command {
-public:
- virtual bool slaveOk() {
- return true;
- }
- CmdTraceAll() : Command("traceAll") { }
- bool adminOnly() {
- return true;
- }
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
- queryTraceLevel = otherTraceLevel = (int) cmdObj.findElement(name).number();
- return true;
- }
-} cmdtraceall;
-
-/* select count(*) */
-class CmdCount : public Command {
-public:
- CmdCount() : Command("count") { }
- virtual bool logTheOp() {
- return false;
- }
- virtual bool slaveOk() {
- return false;
- }
- virtual bool adminOnly() {
- return false;
- }
- virtual bool run(const char *_ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
- string ns = database->name + '.' + cmdObj.findElement(name).valuestr();
- string err;
- int n = runCount(ns.c_str(), cmdObj, err);
- int nn = n;
- bool ok = true;
- if ( n < 0 ) {
- ok = false;
- nn = 0;
- if ( !err.empty() )
+ class CmdTraceAll : public Command {
+ public:
+ virtual bool slaveOk() {
+ return true;
+ }
+ CmdTraceAll() : Command("traceAll") { }
+ bool adminOnly() {
+ return true;
+ }
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ queryTraceLevel = otherTraceLevel = (int) cmdObj.findElement(name).number();
+ return true;
+ }
+ } cmdtraceall;
+
+ /* select count(*) */
+ class CmdCount : public Command {
+ public:
+ CmdCount() : Command("count") { }
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() {
+ return false;
+ }
+ virtual bool adminOnly() {
+ return false;
+ }
+ virtual bool run(const char *_ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ string ns = database->name + '.' + cmdObj.findElement(name).valuestr();
+ string err;
+ int n = runCount(ns.c_str(), cmdObj, err);
+ int nn = n;
+ bool ok = true;
+ if ( n < 0 ) {
+ ok = false;
+ nn = 0;
+ if ( !err.empty() )
+ errmsg = err;
+ }
+ result.append("n", (double) nn);
+ return ok;
+ }
+ } cmdCount;
+
+ /* create collection */
+ class CmdCreate : public Command {
+ public:
+ CmdCreate() : Command("create") { }
+ virtual bool logTheOp() {
+ return true;
+ }
+ virtual bool slaveOk() {
+ return false;
+ }
+ virtual bool adminOnly() {
+ return false;
+ }
+ virtual bool run(const char *_ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ string ns = database->name + '.' + cmdObj.findElement(name).valuestr();
+ string err;
+ bool ok = userCreateNS(ns.c_str(), cmdObj, err, true);
+ if ( !ok && !err.empty() )
errmsg = err;
+ return ok;
}
- result.append("n", (double) nn);
- return ok;
- }
-} cmdCount;
+ } cmdCreate;
-/* create collection */
-class CmdCreate : public Command {
-public:
- CmdCreate() : Command("create") { }
- virtual bool logTheOp() {
- return true;
- }
- virtual bool slaveOk() {
- return false;
- }
- virtual bool adminOnly() {
- return false;
- }
- virtual bool run(const char *_ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
- string ns = database->name + '.' + cmdObj.findElement(name).valuestr();
- string err;
- bool ok = userCreateNS(ns.c_str(), cmdObj, err, true);
- if ( !ok && !err.empty() )
- errmsg = err;
- return ok;
- }
-} cmdCreate;
+ class CmdDeleteIndexes : public Command {
+ public:
+ virtual bool logTheOp() {
+ return true;
+ }
+ virtual bool slaveOk() {
+ return false;
+ }
+ CmdDeleteIndexes() : Command("deleteIndexes") { }
+ bool run(const char *ns, BSONObj& jsobj, string& errmsg, BSONObjBuilder& anObjBuilder, bool /*fromRepl*/) {
+ /* note: temp implementation. space not reclaimed! */
+ BSONElement e = jsobj.findElement(name.c_str());
+ string toDeleteNs = database->name + '.' + e.valuestr();
+ NamespaceDetails *d = nsdetails(toDeleteNs.c_str());
+ if ( !quiet )
+ log() << "CMD: deleteIndexes " << toDeleteNs << endl;
+ if ( d ) {
+ BSONElement f = jsobj.findElement("index");
+ if ( !f.eoo() ) {
-class CmdDeleteIndexes : public Command {
-public:
- virtual bool logTheOp() {
- return true;
- }
- virtual bool slaveOk() {
- return false;
- }
- CmdDeleteIndexes() : Command("deleteIndexes") { }
- bool run(const char *ns, BSONObj& jsobj, string& errmsg, BSONObjBuilder& anObjBuilder, bool /*fromRepl*/) {
- /* note: temp implementation. space not reclaimed! */
- BSONElement e = jsobj.findElement(name.c_str());
- string toDeleteNs = database->name + '.' + e.valuestr();
- NamespaceDetails *d = nsdetails(toDeleteNs.c_str());
- if ( !quiet )
- log() << "CMD: deleteIndexes " << toDeleteNs << endl;
- if ( d ) {
- BSONElement f = jsobj.findElement("index");
- if ( !f.eoo() ) {
-
- d->aboutToDeleteAnIndex();
-
- ClientCursor::invalidate(toDeleteNs.c_str());
-
- // delete a specific index or all?
- if ( f.type() == String ) {
- const char *idxName = f.valuestr();
- if ( *idxName == '*' && idxName[1] == 0 ) {
- log() << " d->nIndexes was " << d->nIndexes << '\n';
- anObjBuilder.append("nIndexesWas", (double)d->nIndexes);
- anObjBuilder.append("msg", "all indexes deleted for collection");
- for ( int i = 0; i < d->nIndexes; i++ )
- d->indexes[i].kill();
- d->nIndexes = 0;
- log() << " alpha implementation, space not reclaimed" << endl;
- }
- else {
- // delete just one index
- int x = d->findIndexByName(idxName);
- if ( x >= 0 ) {
- cout << " d->nIndexes was " << d->nIndexes << endl;
- anObjBuilder.append("nIndexesWas", (double)d->nIndexes);
+ d->aboutToDeleteAnIndex();
- /* note it is important we remove the IndexDetails with this
- call, otherwise, on recreate, the old one would be reused, and its
- IndexDetails::info ptr would be bad info.
- */
- d->indexes[x].kill();
-
- d->nIndexes--;
- for ( int i = x; i < d->nIndexes; i++ )
- d->indexes[i] = d->indexes[i+1];
- log() << "deleteIndexes: alpha implementation, space not reclaimed\n";
- } else {
- log() << "deleteIndexes: " << idxName << " not found" << endl;
- errmsg = "index not found";
- return false;
+ ClientCursor::invalidate(toDeleteNs.c_str());
+
+ // delete a specific index or all?
+ if ( f.type() == String ) {
+ const char *idxName = f.valuestr();
+ if ( *idxName == '*' && idxName[1] == 0 ) {
+ log() << " d->nIndexes was " << d->nIndexes << '\n';
+ anObjBuilder.append("nIndexesWas", (double)d->nIndexes);
+ anObjBuilder.append("msg", "all indexes deleted for collection");
+ for ( int i = 0; i < d->nIndexes; i++ )
+ d->indexes[i].kill();
+ d->nIndexes = 0;
+ log() << " alpha implementation, space not reclaimed" << endl;
+ }
+ else {
+ // delete just one index
+ int x = d->findIndexByName(idxName);
+ if ( x >= 0 ) {
+ out() << " d->nIndexes was " << d->nIndexes << endl;
+ anObjBuilder.append("nIndexesWas", (double)d->nIndexes);
+
+ /* note it is important we remove the IndexDetails with this
+ call, otherwise, on recreate, the old one would be reused, and its
+ IndexDetails::info ptr would be bad info.
+ */
+ d->indexes[x].kill();
+
+ d->nIndexes--;
+ for ( int i = x; i < d->nIndexes; i++ )
+ d->indexes[i] = d->indexes[i+1];
+ log() << "deleteIndexes: alpha implementation, space not reclaimed\n";
+ } else {
+ log() << "deleteIndexes: " << idxName << " not found" << endl;
+ errmsg = "index not found";
+ return false;
+ }
}
}
}
}
+ else {
+ errmsg = "ns not found";
+ return false;
+ }
+ return true;
}
- else {
- errmsg = "ns not found";
+ } cmdDeleteIndexes;
+
+ class CmdListDatabases : public Command {
+ public:
+ virtual bool logTheOp() {
return false;
}
- return true;
- }
-} cmdDeleteIndexes;
-
-class CmdListDatabases : public Command {
-public:
- virtual bool logTheOp() {
- return false;
- }
- virtual bool slaveOk() {
- return true;
- }
- virtual bool adminOnly() {
- return true;
- }
- CmdListDatabases() : Command("listDatabases") {}
- bool run(const char *ns, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
- vector< string > dbNames;
- getDatabaseNames( dbNames );
- vector< BSONObj > dbInfos;
- for ( vector< string >::iterator i = dbNames.begin(); i != dbNames.end(); ++i ) {
- BSONObjBuilder b;
- b.append( "name", i->c_str() );
- b.append( "sizeOnDisk", (double) dbSize( i->c_str() ) );
- dbInfos.push_back( b.doneAndDecouple() );
- }
- result.append( "databases", dbInfos );
- return true;
- }
-} cmdListDatabases;
+ virtual bool slaveOk() {
+ return true;
+ }
+ virtual bool adminOnly() {
+ return true;
+ }
+ CmdListDatabases() : Command("listDatabases") {}
+ bool run(const char *ns, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
+ vector< string > dbNames;
+ getDatabaseNames( dbNames );
+ vector< BSONObj > dbInfos;
+ for ( vector< string >::iterator i = dbNames.begin(); i != dbNames.end(); ++i ) {
+ BSONObjBuilder b;
+ b.append( "name", i->c_str() );
+ b.append( "sizeOnDisk", (double) dbSize( i->c_str() ) );
+ dbInfos.push_back( b.doneAndDecouple() );
+ }
+ result.append( "databases", dbInfos );
+ return true;
+ }
+ } cmdListDatabases;
-extern map<string,Command*> *commands;
+ extern map<string,Command*> *commands;
-/* TODO make these all command objects -- legacy stuff here
+ /* TODO make these all command objects -- legacy stuff here
- usage:
- abc.$cmd.findOne( { ismaster:1 } );
+ usage:
+ abc.$cmd.findOne( { ismaster:1 } );
- returns true if ran a cmd
-*/
-bool _runCommands(const char *ns, BSONObj& _cmdobj, stringstream& ss, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl) {
- if ( verbose )
- log() << "run command " << ns << ' ' << _cmdobj.toString() << endl;
+ returns true if ran a cmd
+ */
+ bool _runCommands(const char *ns, BSONObj& _cmdobj, stringstream& ss, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl) {
+ if ( verbose )
+ log() << "run command " << ns << ' ' << _cmdobj.toString() << endl;
- const char *p = strchr(ns, '.');
- if ( !p ) return false;
- if ( strcmp(p, ".$cmd") != 0 ) return false;
+ const char *p = strchr(ns, '.');
+ if ( !p ) return false;
+ if ( strcmp(p, ".$cmd") != 0 ) return false;
- BSONObj jsobj;
- {
- BSONElement e = _cmdobj.firstElement();
- if ( e.type() == Object && string("query") == e.fieldName() ) {
- jsobj = e.embeddedObject();
- }
- else {
- jsobj = _cmdobj;
+ BSONObj jsobj;
+ {
+ BSONElement e = _cmdobj.firstElement();
+ if ( e.type() == Object && string("query") == e.fieldName() ) {
+ jsobj = e.embeddedObject();
+ }
+ else {
+ jsobj = _cmdobj;
+ }
}
- }
- bool ok = false;
- bool valid = false;
-
- BSONElement e;
- e = jsobj.firstElement();
-
- map<string,Command*>::iterator i;
-
- if ( e.eoo() )
- ;
- /* check for properly registered command objects. Note that all the commands below should be
- migrated over to the command object format.
- */
- else if ( (i = commands->find(e.fieldName())) != commands->end() ) {
- valid = true;
- string errmsg;
- Command *c = i->second;
- if ( c->adminOnly() && !fromRepl && strncmp(ns, "admin", p-ns) != 0 ) {
- ok = false;
- errmsg = "access denied";
- }
- else if ( !isMaster() && !c->slaveOk() && !fromRepl ) {
- /* todo: allow if Option_SlaveOk was set on the query */
- ok = false;
- errmsg = "not master";
- }
- else {
- ok = c->run(ns, jsobj, errmsg, anObjBuilder, fromRepl);
- if ( ok && c->logTheOp() && !fromRepl )
- logOp("c", ns, jsobj);
- }
- if ( !ok )
- anObjBuilder.append("errmsg", errmsg);
- }
- else if ( e.type() == String ) {
- /* { count: "collectionname"[, query: <query>] } */
- string us(ns, p-ns);
+ bool ok = false;
+ bool valid = false;
+
+ BSONElement e;
+ e = jsobj.firstElement();
+
+ map<string,Command*>::iterator i;
- /* we allow clean and validate on slaves */
- if ( strcmp( e.fieldName(), "clean") == 0 ) {
+ if ( e.eoo() )
+ ;
+ /* check for properly registered command objects. Note that all the commands below should be
+ migrated over to the command object format.
+ */
+ else if ( (i = commands->find(e.fieldName())) != commands->end() ) {
valid = true;
- string dropNs = us + '.' + e.valuestr();
- NamespaceDetails *d = nsdetails(dropNs.c_str());
- if ( !quiet )
- log() << "CMD: clean " << dropNs << endl;
- if ( d ) {
- ok = true;
- anObjBuilder.append("ns", dropNs.c_str());
- clean(dropNs.c_str(), d);
+ string errmsg;
+ Command *c = i->second;
+ if ( c->adminOnly() && !fromRepl && strncmp(ns, "admin", p-ns) != 0 ) {
+ ok = false;
+ errmsg = "access denied";
+ }
+ else if ( !isMaster() && !c->slaveOk() && !fromRepl ) {
+ /* todo: allow if Option_SlaveOk was set on the query */
+ ok = false;
+ errmsg = "not master";
}
else {
- anObjBuilder.append("errmsg", "ns not found");
+ ok = c->run(ns, jsobj, errmsg, anObjBuilder, fromRepl);
+ if ( ok && c->logTheOp() && !fromRepl )
+ logOp("c", ns, jsobj);
}
+ if ( !ok )
+ anObjBuilder.append("errmsg", errmsg);
}
- else if ( strcmp( e.fieldName(), "validate") == 0 ) {
- valid = true;
- string toValidateNs = us + '.' + e.valuestr();
- NamespaceDetails *d = nsdetails(toValidateNs.c_str());
- if ( !quiet )
- log() << "CMD: validate " << toValidateNs << endl;
- if ( d ) {
- ok = true;
- anObjBuilder.append("ns", toValidateNs.c_str());
- string s = validateNS(toValidateNs.c_str(), d);
- anObjBuilder.append("result", s.c_str());
+ else if ( e.type() == String ) {
+ /* { count: "collectionname"[, query: <query>] } */
+ string us(ns, p-ns);
+
+ /* we allow clean and validate on slaves */
+ if ( strcmp( e.fieldName(), "clean") == 0 ) {
+ valid = true;
+ string dropNs = us + '.' + e.valuestr();
+ NamespaceDetails *d = nsdetails(dropNs.c_str());
+ if ( !quiet )
+ log() << "CMD: clean " << dropNs << endl;
+ if ( d ) {
+ ok = true;
+ anObjBuilder.append("ns", dropNs.c_str());
+ clean(dropNs.c_str(), d);
+ }
+ else {
+ anObjBuilder.append("errmsg", "ns not found");
+ }
}
- else {
- anObjBuilder.append("errmsg", "ns not found");
+ else if ( strcmp( e.fieldName(), "validate") == 0 ) {
+ valid = true;
+ string toValidateNs = us + '.' + e.valuestr();
+ NamespaceDetails *d = nsdetails(toValidateNs.c_str());
+ if ( !quiet )
+ log() << "CMD: validate " << toValidateNs << endl;
+ if ( d ) {
+ ok = true;
+ anObjBuilder.append("ns", toValidateNs.c_str());
+ string s = validateNS(toValidateNs.c_str(), d);
+ anObjBuilder.append("result", s.c_str());
+ }
+ else {
+ anObjBuilder.append("errmsg", "ns not found");
+ }
}
}
- }
- if ( !valid )
- anObjBuilder.append("errmsg", "no such cmd");
- anObjBuilder.append("ok", ok?1.0:0.0);
- BSONObj x = anObjBuilder.done();
- b.append((void*) x.objdata(), x.objsize());
- return true;
-}
+ if ( !valid )
+ anObjBuilder.append("errmsg", "no such cmd");
+ anObjBuilder.append("ok", ok?1.0:0.0);
+ BSONObj x = anObjBuilder.done();
+ b.append((void*) x.objdata(), x.objsize());
+ return true;
+ }
} // namespace mongo
diff --git a/db/dbeval.cpp b/db/dbeval.cpp
index de05394d10a..79d571ca1a4 100644
--- a/db/dbeval.cpp
+++ b/db/dbeval.cpp
@@ -33,94 +33,94 @@
namespace mongo {
-const int edebug=0;
+ const int edebug=0;
-bool dbEval(const char *ns, BSONObj& cmd, BSONObjBuilder& result, string& errmsg) {
- BSONElement e = cmd.firstElement();
- assert( e.type() == Code || e.type() == CodeWScope || e.type() == String );
+ bool dbEval(const char *ns, BSONObj& cmd, BSONObjBuilder& result, string& errmsg) {
+ BSONElement e = cmd.firstElement();
+ assert( e.type() == Code || e.type() == CodeWScope || e.type() == String );
- const char *code = 0;
- switch ( e.type() ) {
- case String:
- case Code:
- code = e.valuestr();
- break;
- case CodeWScope:
- code = e.codeWScopeCode();
- break;
- default:
- assert(0);
- }
- assert( code );
+ const char *code = 0;
+ switch ( e.type() ) {
+ case String:
+ case Code:
+ code = e.valuestr();
+ break;
+ case CodeWScope:
+ code = e.codeWScopeCode();
+ break;
+ default:
+ assert(0);
+ }
+ assert( code );
- if ( ! JavaJS ) {
- errmsg = "db side execution is disabled";
- return false;
- }
+ if ( ! JavaJS ) {
+ errmsg = "db side execution is disabled";
+ return false;
+ }
#if !defined(NOJNI)
- jlong f = JavaJS->functionCreate(code);
- if ( f == 0 ) {
- errmsg = "compile failed";
- return false;
- }
+ jlong f = JavaJS->functionCreate(code);
+ if ( f == 0 ) {
+ errmsg = "compile failed";
+ return false;
+ }
- Scope s;
- if ( e.type() == CodeWScope )
- s.init( e.codeWScopeScopeData() );
- s.setString("$client", database->name.c_str());
- BSONElement args = cmd.findElement("args");
- if ( args.type() == Array ) {
- BSONObj eo = args.embeddedObject();
- if ( edebug ) {
- cout << "args:" << eo.toString() << endl;
- cout << "code:\n" << code << endl;
+ Scope s;
+ if ( e.type() == CodeWScope )
+ s.init( e.codeWScopeScopeData() );
+ s.setString("$client", database->name.c_str());
+ BSONElement args = cmd.findElement("args");
+ if ( args.type() == Array ) {
+ BSONObj eo = args.embeddedObject();
+ if ( edebug ) {
+ out() << "args:" << eo.toString() << endl;
+ out() << "code:\n" << code << endl;
+ }
+ s.setObject("args", eo);
}
- s.setObject("args", eo);
- }
- int res;
- {
- Timer t;
- res = s.invoke(f);
- int m = t.millis();
- if ( m > 100 ) {
- stdcout() << "TEMP: dbeval too slow:" << endl;
- problem() << "dbeval time: " << dec << m << "ms " << ns << endl;
- OCCASIONALLY log() << code << endl;
- else if ( m >= 1000 ) log() << code << endl;
+ int res;
+ {
+ Timer t;
+ res = s.invoke(f);
+ int m = t.millis();
+ if ( m > 100 ) {
+ out() << "TEMP: dbeval too slow:" << endl;
+ problem() << "dbeval time: " << dec << m << "ms " << ns << endl;
+ OCCASIONALLY log() << code << endl;
+ else if ( m >= 1000 ) log() << code << endl;
+ }
+ }
+ if ( res ) {
+ result.append("errno", (double) res);
+ errmsg = "invoke failed: ";
+ errmsg += s.getString( "error" );
+ return false;
}
- }
- if ( res ) {
- result.append("errno", (double) res);
- errmsg = "invoke failed: ";
- errmsg += s.getString( "error" );
- return false;
- }
- int type = s.type("return");
- if ( type == Object || type == Array )
- result.append("retval", s.getObject("return"));
- else if ( type == NumberDouble )
- result.append("retval", s.getNumber("return"));
- else if ( type == String )
- result.append("retval", s.getString("return").c_str());
- else if ( type == Bool ) {
- result.appendBool("retval", s.getBoolean("return"));
- }
+ int type = s.type("return");
+ if ( type == Object || type == Array )
+ result.append("retval", s.getObject("return"));
+ else if ( type == NumberDouble )
+ result.append("retval", s.getNumber("return"));
+ else if ( type == String )
+ result.append("retval", s.getString("return").c_str());
+ else if ( type == Bool ) {
+ result.appendBool("retval", s.getBoolean("return"));
+ }
#endif
- return true;
-}
-
-class CmdEval : public Command {
-public:
- virtual bool slaveOk() {
- return false;
+ return true;
}
- CmdEval() : Command("$eval") { }
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- return dbEval(ns, cmdObj, result, errmsg);
- }
-} cmdeval;
+
+ class CmdEval : public Command {
+ public:
+ virtual bool slaveOk() {
+ return false;
+ }
+ CmdEval() : Command("$eval") { }
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ return dbEval(ns, cmdObj, result, errmsg);
+ }
+ } cmdeval;
} // namespace mongo
diff --git a/db/dbhelpers.cpp b/db/dbhelpers.cpp
index 6616fd14314..a290887eee5 100644
--- a/db/dbhelpers.cpp
+++ b/db/dbhelpers.cpp
@@ -23,31 +23,31 @@
namespace mongo {
-/* Get the first object from a collection. Generally only useful if the collection
- only ever has a single object -- which is a "singleton collection.
-
- Returns: true if object exists.
-*/
-bool getSingleton(const char *ns, BSONObj& result) {
- DBContext context(ns);
-
- auto_ptr<Cursor> c = DataFileMgr::findAll(ns);
- if ( !c->ok() )
- return false;
-
- result = c->current();
- return true;
-}
-
-void putSingleton(const char *ns, BSONObj obj) {
- DBContext context(ns);
- stringstream ss;
- updateObjects(ns, obj, /*pattern=*/emptyObj, /*upsert=*/true, ss);
-}
-
-void emptyCollection(const char *ns) {
- DBContext context(ns);
- deleteObjects(ns, emptyObj, false);
-}
+ /* Get the first object from a collection. Generally only useful if the collection
+ only ever has a single object -- which is a "singleton collection.
+
+ Returns: true if object exists.
+ */
+ bool getSingleton(const char *ns, BSONObj& result) {
+ DBContext context(ns);
+
+ auto_ptr<Cursor> c = DataFileMgr::findAll(ns);
+ if ( !c->ok() )
+ return false;
+
+ result = c->current();
+ return true;
+ }
+
+ void putSingleton(const char *ns, BSONObj obj) {
+ DBContext context(ns);
+ stringstream ss;
+ updateObjects(ns, obj, /*pattern=*/emptyObj, /*upsert=*/true, ss);
+ }
+
+ void emptyCollection(const char *ns) {
+ DBContext context(ns);
+ deleteObjects(ns, emptyObj, false);
+ }
} // namespace mongo
diff --git a/db/dbhelpers.h b/db/dbhelpers.h
index c10689d8b04..59210dc2234 100644
--- a/db/dbhelpers.h
+++ b/db/dbhelpers.h
@@ -25,39 +25,39 @@
namespace mongo {
-/* Get/put the first object from a collection. Generally only useful if the collection
- only ever has a single object -- which is a "singleton collection".
-
- You do not need to set the database before calling.
-
- Returns: true if object exists.
-*/
-bool getSingleton(const char *ns, BSONObj& result);
-void putSingleton(const char *ns, BSONObj obj);
-
-
-/* Remove all objects from a collection.
- You do not need to set the database before calling.
-*/
-void emptyCollection(const char *ns);
-
-/* Set database we want to use, then, restores when we finish (are out of scope)
- Note this is also helpful if an exception happens as the state if fixed up.
-*/
-class DBContext {
- Database *old;
-public:
- DBContext(const char *ns) {
- old = database;
- setClientTempNs(ns);
- }
- DBContext(string ns) {
- old = database;
- setClientTempNs(ns.c_str());
- }
- ~DBContext() {
- database = old;
- }
-};
+ /* Get/put the first object from a collection. Generally only useful if the collection
+ only ever has a single object -- which is a "singleton collection".
+
+ You do not need to set the database before calling.
+
+ Returns: true if object exists.
+ */
+ bool getSingleton(const char *ns, BSONObj& result);
+ void putSingleton(const char *ns, BSONObj obj);
+
+
+ /* Remove all objects from a collection.
+ You do not need to set the database before calling.
+ */
+ void emptyCollection(const char *ns);
+
+ /* Set database we want to use, then, restores when we finish (are out of scope)
+ Note this is also helpful if an exception happens as the state if fixed up.
+ */
+ class DBContext {
+ Database *old;
+ public:
+ DBContext(const char *ns) {
+ old = database;
+ setClientTempNs(ns);
+ }
+ DBContext(string ns) {
+ old = database;
+ setClientTempNs(ns.c_str());
+ }
+ ~DBContext() {
+ database = old;
+ }
+ };
} // namespace mongo
diff --git a/db/dbinfo.cpp b/db/dbinfo.cpp
index 20396d3170c..5b23d0564cf 100644
--- a/db/dbinfo.cpp
+++ b/db/dbinfo.cpp
@@ -21,29 +21,29 @@
namespace mongo {
-void DBInfo::setHaveLogged() {
- if ( haveLogged() )
- return;
-
- NamespaceDetails *d = nsdetails(ns.c_str());
- assert( d == 0 || d->nrecords == 0 );
- BSONObjBuilder b;
- b.appendBool("haveLogged", true);
- BSONObj o = b.done();
- theDataFileMgr.insert(ns.c_str(), (void *) o.objdata(), o.objsize());
-}
-
-int deleteObjects(const char *ns, BSONObj pattern, bool justOne, bool god);
-
-void DBInfo::dbDropped() {
- BSONObj empty;
- deleteObjects(ns.c_str(), empty, false, false);
-
- /* do we also need to clear the info in 'dbs' in local.sources if we
- are a slave?
- TODO if so. need to be careful not to mess up replications of dropDatabase().
- */
-}
+ void DBInfo::setHaveLogged() {
+ if ( haveLogged() )
+ return;
+
+ NamespaceDetails *d = nsdetails(ns.c_str());
+ assert( d == 0 || d->nrecords == 0 );
+ BSONObjBuilder b;
+ b.appendBool("haveLogged", true);
+ BSONObj o = b.done();
+ theDataFileMgr.insert(ns.c_str(), (void *) o.objdata(), o.objsize());
+ }
+
+ int deleteObjects(const char *ns, BSONObj pattern, bool justOne, bool god);
+
+ void DBInfo::dbDropped() {
+ BSONObj empty;
+ deleteObjects(ns.c_str(), empty, false, false);
+
+ /* do we also need to clear the info in 'dbs' in local.sources if we
+ are a slave?
+ TODO if so. need to be careful not to mess up replications of dropDatabase().
+ */
+ }
} // namespace mongo
diff --git a/db/dbinfo.h b/db/dbinfo.h
index e78776b8de7..7c72ea84624 100644
--- a/db/dbinfo.h
+++ b/db/dbinfo.h
@@ -20,57 +20,57 @@
namespace mongo {
-/* this is an "accessor" class to data held in local.dbinfo.<dbname>
+ /* this is an "accessor" class to data held in local.dbinfo.<dbname>
- system.dbinfo contains:
+ system.dbinfo contains:
- { haveLogged : true }
+ { haveLogged : true }
- haveLogged -- if true, we have already logged events to the oplog for this
- database. missing implies false.
+ haveLogged -- if true, we have already logged events to the oplog for this
+ database. missing implies false.
- other attributes will be added later.
+ other attributes will be added later.
- Note that class Database caches the DBInfo::haveLogged() value to keep things fast.
-*/
-class DBInfo {
- string ns;
- Database *dbold;
-public:
- ~DBInfo() {
- database = dbold;
- }
- DBInfo(const char *db) {
- dbold = database;
- ns = string("local.dbinfo.") + db;
- setClientTempNs(ns.c_str());
- }
+ Note that class Database caches the DBInfo::haveLogged() value to keep things fast.
+ */
+ class DBInfo {
+ string ns;
+ Database *dbold;
+ public:
+ ~DBInfo() {
+ database = dbold;
+ }
+ DBInfo(const char *db) {
+ dbold = database;
+ ns = string("local.dbinfo.") + db;
+ setClientTempNs(ns.c_str());
+ }
- BSONObj getDbInfoObj() {
- auto_ptr<Cursor> c = DataFileMgr::findAll(ns.c_str());
- if ( !c->ok() )
- return BSONObj();
- return c->current();
- }
+ BSONObj getDbInfoObj() {
+ auto_ptr<Cursor> c = DataFileMgr::findAll(ns.c_str());
+ if ( !c->ok() )
+ return BSONObj();
+ return c->current();
+ }
- bool haveLogged() {
- return getDbInfoObj().getBoolField("haveLogged");
- }
+ bool haveLogged() {
+ return getDbInfoObj().getBoolField("haveLogged");
+ }
- void setHaveLogged();
- void dbDropped();
-};
+ void setHaveLogged();
+ void dbDropped();
+ };
-inline void Database::setHaveLogged() {
- if ( _haveLogged ) return;
- DBInfo i(name.c_str());
- i.setHaveLogged();
- _haveLogged = true;
-}
+ inline void Database::setHaveLogged() {
+ if ( _haveLogged ) return;
+ DBInfo i(name.c_str());
+ i.setHaveLogged();
+ _haveLogged = true;
+ }
-inline void Database::finishInit() {
- DBInfo i(name.c_str());
- _haveLogged = i.haveLogged();
-}
+ inline void Database::finishInit() {
+ DBInfo i(name.c_str());
+ _haveLogged = i.haveLogged();
+ }
} // namespace mongo
diff --git a/db/dbmessage.h b/db/dbmessage.h
index 2a5f78842d9..83054acfbba 100644
--- a/db/dbmessage.h
+++ b/db/dbmessage.h
@@ -20,108 +20,108 @@
namespace mongo {
-/* For the database/server protocol, these objects and functions encapsulate
- the various messages transmitted over the connection.
-*/
-
-class DbMessage {
-public:
- DbMessage(const Message& _m) : m(_m) {
- theEnd = _m.data->_data + _m.data->dataLen();
- int *r = (int *) _m.data->_data;
- reserved = *r;
- r++;
- data = (const char *) r;
- nextjsobj = data;
- }
+ /* For the database/server protocol, these objects and functions encapsulate
+ the various messages transmitted over the connection.
+ */
+
+ class DbMessage {
+ public:
+ DbMessage(const Message& _m) : m(_m) {
+ theEnd = _m.data->_data + _m.data->dataLen();
+ int *r = (int *) _m.data->_data;
+ reserved = *r;
+ r++;
+ data = (const char *) r;
+ nextjsobj = data;
+ }
- const char * getns() {
- return data;
- }
- void getns(Namespace& ns) {
- ns = data;
- }
+ const char * getns() {
+ return data;
+ }
+ void getns(Namespace& ns) {
+ ns = data;
+ }
- int pullInt() {
- if ( nextjsobj == data )
- nextjsobj += strlen(data) + 1; // skip namespace
- int i = *((int *)nextjsobj);
- nextjsobj += 4;
- return i;
- }
- long long pullInt64() {
- if ( nextjsobj == data )
- nextjsobj += strlen(data) + 1; // skip namespace
- long long i = *((long long *)nextjsobj);
- nextjsobj += 8;
- return i;
- }
+ int pullInt() {
+ if ( nextjsobj == data )
+ nextjsobj += strlen(data) + 1; // skip namespace
+ int i = *((int *)nextjsobj);
+ nextjsobj += 4;
+ return i;
+ }
+ long long pullInt64() {
+ if ( nextjsobj == data )
+ nextjsobj += strlen(data) + 1; // skip namespace
+ long long i = *((long long *)nextjsobj);
+ nextjsobj += 8;
+ return i;
+ }
- OID* getOID() {
- return (OID *) (data + strlen(data) + 1); // skip namespace
- }
+ OID* getOID() {
+ return (OID *) (data + strlen(data) + 1); // skip namespace
+ }
- void getQueryStuff(const char *&query, int& ntoreturn) {
- int *i = (int *) (data + strlen(data) + 1);
- ntoreturn = *i;
- i++;
- query = (const char *) i;
- }
+ void getQueryStuff(const char *&query, int& ntoreturn) {
+ int *i = (int *) (data + strlen(data) + 1);
+ ntoreturn = *i;
+ i++;
+ query = (const char *) i;
+ }
- /* for insert and update msgs */
- bool moreJSObjs() {
- return nextjsobj != 0;
- }
- BSONObj nextJsObj() {
- if ( nextjsobj == data )
- nextjsobj += strlen(data) + 1; // skip namespace
- BSONObj js(nextjsobj);
- assert( js.objsize() < ( theEnd - data ) );
- if ( js.objsize() <= 0 )
- nextjsobj = null;
- else {
- nextjsobj += js.objsize();
- if ( nextjsobj >= theEnd )
- nextjsobj = 0;
+ /* for insert and update msgs */
+ bool moreJSObjs() {
+ return nextjsobj != 0;
+ }
+ BSONObj nextJsObj() {
+ if ( nextjsobj == data )
+ nextjsobj += strlen(data) + 1; // skip namespace
+ BSONObj js(nextjsobj);
+ assert( js.objsize() < ( theEnd - data ) );
+ if ( js.objsize() <= 0 )
+ nextjsobj = null;
+ else {
+ nextjsobj += js.objsize();
+ if ( nextjsobj >= theEnd )
+ nextjsobj = 0;
+ }
+ return js;
}
- return js;
- }
- const Message& msg() {
- return m;
- }
+ const Message& msg() {
+ return m;
+ }
-private:
- const Message& m;
- int reserved;
- const char *data;
- const char *nextjsobj;
- const char *theEnd;
-};
-
-/* a request to run a query, received from the database */
-class QueryMessage {
-public:
- const char *ns;
- int ntoskip;
- int ntoreturn;
- int queryOptions;
- BSONObj query;
- auto_ptr< set<string> > fields;
-
- /* parses the message into the above fields */
- QueryMessage(DbMessage& d) {
- ns = d.getns();
- ntoskip = d.pullInt();
- ntoreturn = d.pullInt();
- query = d.nextJsObj();
- if ( d.moreJSObjs() ) {
- fields = auto_ptr< set<string> >(new set<string>());
- d.nextJsObj().getFieldNames(*fields);
+ private:
+ const Message& m;
+ int reserved;
+ const char *data;
+ const char *nextjsobj;
+ const char *theEnd;
+ };
+
+ /* a request to run a query, received from the database */
+ class QueryMessage {
+ public:
+ const char *ns;
+ int ntoskip;
+ int ntoreturn;
+ int queryOptions;
+ BSONObj query;
+ auto_ptr< set<string> > fields;
+
+ /* parses the message into the above fields */
+ QueryMessage(DbMessage& d) {
+ ns = d.getns();
+ ntoskip = d.pullInt();
+ ntoreturn = d.pullInt();
+ query = d.nextJsObj();
+ if ( d.moreJSObjs() ) {
+ fields = auto_ptr< set<string> >(new set<string>());
+ d.nextJsObj().getFieldNames(*fields);
+ }
+ queryOptions = d.msg().data->dataAsInt();
}
- queryOptions = d.msg().data->dataAsInt();
- }
-};
+ };
} // namespace mongo
@@ -129,27 +129,27 @@ public:
namespace mongo {
-inline void replyToQuery(int queryResultFlags,
- MessagingPort& p, Message& requestMsg,
- void *data, int size,
- int nReturned, int startingFrom = 0,
- long long cursorId = 0
- ) {
- BufBuilder b(32768);
- b.skip(sizeof(QueryResult));
- b.append(data, size);
- QueryResult *qr = (QueryResult *) b.buf();
- qr->resultFlags() = queryResultFlags;
- qr->len = b.len();
- qr->setOperation(opReply);
- qr->cursorId = cursorId;
- qr->startingFrom = startingFrom;
- qr->nReturned = 1;
- b.decouple();
- Message *resp = new Message();
- resp->setData(qr, true); // transport will free
- p.reply(requestMsg, *resp, requestMsg.data->id);
-}
+ inline void replyToQuery(int queryResultFlags,
+ MessagingPort& p, Message& requestMsg,
+ void *data, int size,
+ int nReturned, int startingFrom = 0,
+ long long cursorId = 0
+ ) {
+ BufBuilder b(32768);
+ b.skip(sizeof(QueryResult));
+ b.append(data, size);
+ QueryResult *qr = (QueryResult *) b.buf();
+ qr->resultFlags() = queryResultFlags;
+ qr->len = b.len();
+ qr->setOperation(opReply);
+ qr->cursorId = cursorId;
+ qr->startingFrom = startingFrom;
+ qr->nReturned = 1;
+ b.decouple();
+ Message *resp = new Message();
+ resp->setData(qr, true); // transport will free
+ p.reply(requestMsg, *resp, requestMsg.data->id);
+ }
} // namespace mongo
@@ -157,13 +157,13 @@ inline void replyToQuery(int queryResultFlags,
namespace mongo {
-inline void replyToQuery(int queryResultFlags,
- MessagingPort& p, Message& requestMsg,
- BSONObj& responseObj)
-{
- replyToQuery(queryResultFlags,
- p, requestMsg,
- (void *) responseObj.objdata(), responseObj.objsize(), 1);
-}
+ inline void replyToQuery(int queryResultFlags,
+ MessagingPort& p, Message& requestMsg,
+ BSONObj& responseObj)
+ {
+ replyToQuery(queryResultFlags,
+ p, requestMsg,
+ (void *) responseObj.objdata(), responseObj.objsize(), 1);
+ }
} // namespace mongo
diff --git a/db/dbwebserver.cpp b/db/dbwebserver.cpp
index 26405150282..60e8b76f969 100644
--- a/db/dbwebserver.cpp
+++ b/db/dbwebserver.cpp
@@ -25,348 +25,348 @@
namespace mongo {
-extern int port;
-extern const char *replInfo;
-
-bool getInitialSyncCompleted();
-
-time_t started = time(0);
-
-/*
- string toString() {
- stringstream ss;
- unsigned long long dt = last - start;
- ss << dt/1000;
- ss << '\t';
- ss << timeLocked/1000 << '\t';
- if( dt )
- ss << (timeLocked*100)/dt << '%';
- return ss.str();
- }
-*/
-
-struct Timing {
- Timing() {
- start = timeLocked = 0;
- }
- unsigned long long start, timeLocked;
-};
-Timing tlast;
-const int NStats = 32;
-string lockStats[NStats];
-unsigned q = 0;
-extern bool cpu;
-
-void statsThread() {
- unsigned long long timeLastPass = 0;
- while ( 1 ) {
- {
- Timer lktm;
- dblock lk;
- q = (q+1)%NStats;
- Timing timing;
- dbMutexInfo.timingInfo(timing.start, timing.timeLocked);
- unsigned long long now = curTimeMicros64();
- if ( timeLastPass ) {
- unsigned long long dt = now - timeLastPass;
- unsigned long long dlocked = timing.timeLocked - tlast.timeLocked;
- {
- stringstream ss;
- ss << dt / 1000 << '\t';
- ss << dlocked / 1000 << '\t';
- if ( dt )
- ss << (dlocked*100)/dt << '%';
- string s = ss.str();
- if ( cpu )
- log() << "cpu: " << s << endl;
- lockStats[q] = s;
- }
- }
- timeLastPass = now;
- tlast = timing;
- }
- sleepsecs(4);
- }
-}
-
-unsigned byLocSize();
-
-bool _bold;
-string bold(bool x) {
- _bold = x;
- return x ? "<b>" : "";
-}
-string bold() {
- return _bold ? "</b>" : "";
-}
-
-class DbWebServer : public MiniWebServer {
-public:
- // caller locks
- void doLockedStuff(stringstream& ss) {
- ss << "# databases: " << databases.size() << '\n';
- if ( database ) {
- ss << "curclient: " << database->name;
- ss << '\n';
+ extern int port;
+ extern const char *replInfo;
+
+ bool getInitialSyncCompleted();
+
+ time_t started = time(0);
+
+ /*
+ string toString() {
+ stringstream ss;
+ unsigned long long dt = last - start;
+ ss << dt/1000;
+ ss << '\t';
+ ss << timeLocked/1000 << '\t';
+ if( dt )
+ ss << (timeLocked*100)/dt << '%';
+ return ss.str();
}
- ss << bold(byLocSize()>10000) << "Cursors byLoc.size(): " << byLocSize() << bold() << '\n';
- ss << "\n<b>replication</b>\n";
- ss << "master: " << master << '\n';
- ss << "slave: " << slave << '\n';
- if ( replPair ) {
- ss << "replpair:\n";
- ss << replPair->getInfo();
+ */
+
+ struct Timing {
+ Timing() {
+ start = timeLocked = 0;
}
- bool seemCaughtUp = getInitialSyncCompleted();
- if ( !seemCaughtUp ) ss << "<b>";
- ss << "initialSyncCompleted: " << seemCaughtUp;
- if ( !seemCaughtUp ) ss << "</b>";
- ss << '\n';
-
- ss << "\n<b>dt\ttlocked</b>\n";
- unsigned i = q;
+ unsigned long long start, timeLocked;
+ };
+ Timing tlast;
+ const int NStats = 32;
+ string lockStats[NStats];
+ unsigned q = 0;
+ extern bool cpu;
+
+ void statsThread() {
+ unsigned long long timeLastPass = 0;
while ( 1 ) {
- ss << lockStats[i] << '\n';
- i = (i-1)%NStats;
- if ( i == q )
- break;
+ {
+ Timer lktm;
+ dblock lk;
+ q = (q+1)%NStats;
+ Timing timing;
+ dbMutexInfo.timingInfo(timing.start, timing.timeLocked);
+ unsigned long long now = curTimeMicros64();
+ if ( timeLastPass ) {
+ unsigned long long dt = now - timeLastPass;
+ unsigned long long dlocked = timing.timeLocked - tlast.timeLocked;
+ {
+ stringstream ss;
+ ss << dt / 1000 << '\t';
+ ss << dlocked / 1000 << '\t';
+ if ( dt )
+ ss << (dlocked*100)/dt << '%';
+ string s = ss.str();
+ if ( cpu )
+ log() << "cpu: " << s << endl;
+ lockStats[q] = s;
+ }
+ }
+ timeLastPass = now;
+ tlast = timing;
+ }
+ sleepsecs(4);
}
}
- void doUnlockedStuff(stringstream& ss) {
- ss << "port: " << port << '\n';
- ss << "dblocked: " << dbMutexInfo.isLocked() << " (initial)\n";
- ss << "uptime: " << time(0)-started << " seconds\n";
- if ( allDead )
- ss << "<b>replication allDead=" << allDead << "</b>\n";
- ss << "\nassertions:\n";
- for ( int i = 0; i < 4; i++ ) {
- if ( lastAssert[i].isSet() ) {
- ss << "<b>";
- if ( i == 3 ) ss << "usererr";
- else ss << i;
- ss << "</b>" << ' ' << lastAssert[i].toString();
- }
- }
+ unsigned byLocSize();
- ss << "\nreplInfo: " << replInfo << '\n';
+ bool _bold;
+ string bold(bool x) {
+ _bold = x;
+ return x ? "<b>" : "";
+ }
+ string bold() {
+ return _bold ? "</b>" : "";
}
- virtual void doRequest(
- const char *rq, // the full request
- string url,
- // set these and return them:
- string& responseMsg,
- int& responseCode,
- vector<string>& headers // if completely empty, content-type: text/html will be added
- )
- {
- //cout << "url [" << url << "]" << endl;
-
- if ( url.size() > 1 ) {
- handleRESTRequest( rq , url , responseMsg , responseCode , headers );
- return;
+ class DbWebServer : public MiniWebServer {
+ public:
+ // caller locks
+ void doLockedStuff(stringstream& ss) {
+ ss << "# databases: " << databases.size() << '\n';
+ if ( database ) {
+ ss << "curclient: " << database->name;
+ ss << '\n';
+ }
+ ss << bold(byLocSize()>10000) << "Cursors byLoc.size(): " << byLocSize() << bold() << '\n';
+ ss << "\n<b>replication</b>\n";
+ ss << "master: " << master << '\n';
+ ss << "slave: " << slave << '\n';
+ if ( replPair ) {
+ ss << "replpair:\n";
+ ss << replPair->getInfo();
+ }
+ bool seemCaughtUp = getInitialSyncCompleted();
+ if ( !seemCaughtUp ) ss << "<b>";
+ ss << "initialSyncCompleted: " << seemCaughtUp;
+ if ( !seemCaughtUp ) ss << "</b>";
+ ss << '\n';
+
+ ss << "\n<b>dt\ttlocked</b>\n";
+ unsigned i = q;
+ while ( 1 ) {
+ ss << lockStats[i] << '\n';
+ i = (i-1)%NStats;
+ if ( i == q )
+ break;
+ }
}
+ void doUnlockedStuff(stringstream& ss) {
+ ss << "port: " << port << '\n';
+ ss << "dblocked: " << dbMutexInfo.isLocked() << " (initial)\n";
+ ss << "uptime: " << time(0)-started << " seconds\n";
+ if ( allDead )
+ ss << "<b>replication allDead=" << allDead << "</b>\n";
+ ss << "\nassertions:\n";
+ for ( int i = 0; i < 4; i++ ) {
+ if ( lastAssert[i].isSet() ) {
+ ss << "<b>";
+ if ( i == 3 ) ss << "usererr";
+ else ss << i;
+ ss << "</b>" << ' ' << lastAssert[i].toString();
+ }
+ }
- responseCode = 200;
- stringstream ss;
- ss << "<html><head><title>";
+ ss << "\nreplInfo: " << replInfo << '\n';
+ }
- string dbname;
+ virtual void doRequest(
+ const char *rq, // the full request
+ string url,
+ // set these and return them:
+ string& responseMsg,
+ int& responseCode,
+ vector<string>& headers // if completely empty, content-type: text/html will be added
+ )
{
- stringstream z;
- z << "db " << getHostName() << ':' << port << ' ';
- dbname = z.str();
- }
- ss << dbname << "</title></head><body><h2>" << dbname << "</h2><p>\n<pre>";
+ //out() << "url [" << url << "]" << endl;
- doUnlockedStuff(ss);
+ if ( url.size() > 1 ) {
+ handleRESTRequest( rq , url , responseMsg , responseCode , headers );
+ return;
+ }
- int n = 2000;
- Timer t;
- while ( 1 ) {
- if ( !dbMutexInfo.isLocked() ) {
- {
- dblock lk;
- ss << "time to get dblock: " << t.millis() << "ms\n";
- doLockedStuff(ss);
- }
- break;
+
+ responseCode = 200;
+ stringstream ss;
+ ss << "<html><head><title>";
+
+ string dbname;
+ {
+ stringstream z;
+ z << "db " << getHostName() << ':' << port << ' ';
+ dbname = z.str();
}
- sleepmillis(1);
- if ( --n < 0 ) {
- ss << "\n<b>timed out getting dblock</b>\n";
- break;
+ ss << dbname << "</title></head><body><h2>" << dbname << "</h2><p>\n<pre>";
+
+ doUnlockedStuff(ss);
+
+ int n = 2000;
+ Timer t;
+ while ( 1 ) {
+ if ( !dbMutexInfo.isLocked() ) {
+ {
+ dblock lk;
+ ss << "time to get dblock: " << t.millis() << "ms\n";
+ doLockedStuff(ss);
+ }
+ break;
+ }
+ sleepmillis(1);
+ if ( --n < 0 ) {
+ ss << "\n<b>timed out getting dblock</b>\n";
+ break;
+ }
}
+
+ ss << "</pre></body></html>";
+ responseMsg = ss.str();
}
- ss << "</pre></body></html>";
- responseMsg = ss.str();
- }
+ void handleRESTRequest( const char *rq, // the full request
+ string url,
+ string& responseMsg,
+ int& responseCode,
+ vector<string>& headers // if completely empty, content-type: text/html will be added
+ ) {
+
+ string::size_type first = url.find( "/" , 1 );
+ if ( first == string::npos ) {
+ responseCode = 400;
+ return;
+ }
- void handleRESTRequest( const char *rq, // the full request
- string url,
- string& responseMsg,
- int& responseCode,
- vector<string>& headers // if completely empty, content-type: text/html will be added
- ) {
-
- string::size_type first = url.find( "/" , 1 );
- if ( first == string::npos ) {
- responseCode = 400;
- return;
- }
+ string method = parseMethod( rq );
+ string dbname = url.substr( 1 , first - 1 );
+ string coll = url.substr( first + 1 );
+ string action = "";
- string method = parseMethod( rq );
- string dbname = url.substr( 1 , first - 1 );
- string coll = url.substr( first + 1 );
- string action = "";
+ map<string,string> params;
+ if ( coll.find( "?" ) != string::npos ) {
+ parseParams( params , coll.substr( coll.find( "?" ) + 1 ) );
+ coll = coll.substr( 0 , coll.find( "?" ) );
+ }
- map<string,string> params;
- if ( coll.find( "?" ) != string::npos ) {
- parseParams( params , coll.substr( coll.find( "?" ) + 1 ) );
- coll = coll.substr( 0 , coll.find( "?" ) );
- }
+ string::size_type last = coll.find_last_of( "/" );
+ if ( last == string::npos ) {
+ action = coll;
+ coll = "_defaultCollection";
+ }
+ else {
+ action = coll.substr( last + 1 );
+ coll = coll.substr( 0 , last );
+ }
- string::size_type last = coll.find_last_of( "/" );
- if ( last == string::npos ) {
- action = coll;
- coll = "_defaultCollection";
- }
- else {
- action = coll.substr( last + 1 );
- coll = coll.substr( 0 , last );
- }
+ for ( string::size_type i=0; i<coll.size(); i++ )
+ if ( coll[i] == '/' )
+ coll[i] = '.';
- for ( string::size_type i=0; i<coll.size(); i++ )
- if ( coll[i] == '/' )
- coll[i] = '.';
+ string fullns = dbname + "." + coll;
- string fullns = dbname + "." + coll;
+ headers.push_back( (string)"x-action: " + action );
+ headers.push_back( (string)"x-ns: " + fullns );
+ headers.push_back( "Content-Type: text/plain;charset=utf-8" );
- headers.push_back( (string)"x-action: " + action );
- headers.push_back( (string)"x-ns: " + fullns );
- headers.push_back( "Content-Type: text/plain;charset=utf-8" );
+ stringstream ss;
- stringstream ss;
+ if ( method == "GET" ) {
+ responseCode = 200;
+ handleRESTQuery( fullns , action , params , responseCode , ss );
+ }
+ else if ( method == "POST" ) {
+ responseCode = 201;
+ handlePost( fullns , body( rq ) , params , responseCode , ss );
+ }
+ else {
+ responseCode = 400;
+ headers.push_back( "X_err: bad request" );
+ ss << "don't know how to handle a [" << method << "]";
+ out() << "don't know how to handle a [" << method << "]" << endl;
+ }
- if ( method == "GET" ) {
- responseCode = 200;
- handleRESTQuery( fullns , action , params , responseCode , ss );
- }
- else if ( method == "POST" ) {
- responseCode = 201;
- handlePost( fullns , body( rq ) , params , responseCode , ss );
- }
- else {
- responseCode = 400;
- headers.push_back( "X_err: bad request" );
- ss << "don't know how to handle a [" << method << "]";
- cout << "don't know how to handle a [" << method << "]" << endl;
+ responseMsg = ss.str();
}
- responseMsg = ss.str();
- }
+ void handleRESTQuery( string ns , string action , map<string,string> & params , int & responseCode , stringstream & out ) {
+ Timer t;
- void handleRESTQuery( string ns , string action , map<string,string> & params , int & responseCode , stringstream & out ) {
- Timer t;
+ int skip = _getOption( params["skip"] , 0 );
+ int num = _getOption( params["limit"] , _getOption( params["count" ] , 1000 ) ); // count is old, limit is new
- int skip = _getOption( params["skip"] , 0 );
- int num = _getOption( params["limit"] , _getOption( params["count" ] , 1000 ) ); // count is old, limit is new
-
- int one = 0;
- if ( params["one"].size() > 0 && tolower( params["one"][0] ) == 't' ) {
- num = 1;
- one = 1;
- }
+ int one = 0;
+ if ( params["one"].size() > 0 && tolower( params["one"][0] ) == 't' ) {
+ num = 1;
+ one = 1;
+ }
- BSONObjBuilder queryBuilder;
+ BSONObjBuilder queryBuilder;
- for ( map<string,string>::iterator i = params.begin(); i != params.end(); i++ ) {
- if ( ! i->first.find( "filter_" ) == 0 )
- continue;
+ for ( map<string,string>::iterator i = params.begin(); i != params.end(); i++ ) {
+ if ( ! i->first.find( "filter_" ) == 0 )
+ continue;
- const char * field = i->first.substr( 7 ).c_str();
- const char * val = i->second.c_str();
+ const char * field = i->first.substr( 7 ).c_str();
+ const char * val = i->second.c_str();
- char * temp;
+ char * temp;
- // TODO: this is how i guess if something is a number. pretty lame right now
- double number = strtod( val , &temp );
- if ( temp != val )
- queryBuilder.append( field , number );
- else
- queryBuilder.append( field , val );
- }
+ // TODO: this is how i guess if something is a number. pretty lame right now
+ double number = strtod( val , &temp );
+ if ( temp != val )
+ queryBuilder.append( field , number );
+ else
+ queryBuilder.append( field , val );
+ }
- BSONObj query = queryBuilder.doneAndDecouple();
+ BSONObj query = queryBuilder.doneAndDecouple();
- auto_ptr<DBClientCursor> cursor = db.query( ns.c_str() , query, num , skip );
+ auto_ptr<DBClientCursor> cursor = db.query( ns.c_str() , query, num , skip );
- if ( one ) {
- if ( cursor->more() ) {
- BSONObj obj = cursor->next();
- out << obj.jsonString() << "\n";
- }
- else {
- responseCode = 404;
+ if ( one ) {
+ if ( cursor->more() ) {
+ BSONObj obj = cursor->next();
+ out << obj.jsonString() << "\n";
+ }
+ else {
+ responseCode = 404;
+ }
+ return;
}
- return;
- }
- out << "{\n";
- out << " \"offset\" : " << skip << ",\n";
- out << " \"rows\": [\n";
+ out << "{\n";
+ out << " \"offset\" : " << skip << ",\n";
+ out << " \"rows\": [\n";
+
+ int howMany = 0;
+ while ( cursor->more() ) {
+ if ( howMany++ )
+ out << " ,\n";
+ BSONObj obj = cursor->next();
+ out << " " << obj.jsonString();
- int howMany = 0;
- while ( cursor->more() ) {
- if ( howMany++ )
- out << " ,\n";
- BSONObj obj = cursor->next();
- out << " " << obj.jsonString();
+ }
+ out << "\n ]\n\n";
+ out << " \"total_rows\" : " << howMany << " ,\n";
+ out << " \"query\" : " << query.jsonString() << " ,\n";
+ out << " \"millis\" : " << t.millis() << " ,\n";
+ out << "}\n";
}
- out << "\n ]\n\n";
- out << " \"total_rows\" : " << howMany << " ,\n";
- out << " \"query\" : " << query.jsonString() << " ,\n";
- out << " \"millis\" : " << t.millis() << " ,\n";
- out << "}\n";
- }
+ // TODO Generate id and revision per couch POST spec
+ void handlePost( string ns, const char *body, map<string,string> & params, int & responseCode, stringstream & out ) {
+ try {
+ BSONObj obj = fromjson( body );
+ db.insert( ns.c_str(), obj );
+ } catch ( ... ) {
+ responseCode = 400; // Bad Request. Seems reasonable for now.
+ out << "{ \"ok\" : false }";
+ return;
+ }
- // TODO Generate id and revision per couch POST spec
- void handlePost( string ns, const char *body, map<string,string> & params, int & responseCode, stringstream & out ) {
- try {
- BSONObj obj = fromjson( body );
- db.insert( ns.c_str(), obj );
- } catch ( ... ) {
- responseCode = 400; // Bad Request. Seems reasonable for now.
- out << "{ \"ok\" : false }";
- return;
+ responseCode = 201;
+ out << "{ \"ok\" : true }";
}
- responseCode = 201;
- out << "{ \"ok\" : true }";
- }
-
- int _getOption( string val , int def ) {
- if ( val.size() == 0 )
- return def;
- return atoi( val.c_str() );
- }
+ int _getOption( string val , int def ) {
+ if ( val.size() == 0 )
+ return def;
+ return atoi( val.c_str() );
+ }
-private:
- static DBDirectClient db;
-};
+ private:
+ static DBDirectClient db;
+ };
-DBDirectClient DbWebServer::db;
+ DBDirectClient DbWebServer::db;
-void webServerThread() {
- boost::thread thr(statsThread);
- DbWebServer mini;
- if ( mini.init(port+1000) )
- mini.run();
-}
+ void webServerThread() {
+ boost::thread thr(statsThread);
+ DbWebServer mini;
+ if ( mini.init(port+1000) )
+ mini.run();
+ }
} // namespace mongo
diff --git a/db/flushtest.cpp b/db/flushtest.cpp
index 1c153df5c8e..a301e0e9195 100644
--- a/db/flushtest.cpp
+++ b/db/flushtest.cpp
@@ -5,134 +5,130 @@
namespace mongo {
-// logstream defines these, we don't want that:
-#undef cout
-#undef endl
-
#if defined(F_FULLFSYNC)
-void fullsync(int f) {
- fcntl( f, F_FULLFSYNC );
-}
+ void fullsync(int f) {
+ fcntl( f, F_FULLFSYNC );
+ }
#else
-void fullsync(int f) {
- fdatasync(f);
-}
+ void fullsync(int f) {
+ fdatasync(f);
+ }
#endif
-int main(int argc, char* argv[], char *envp[] ) {
- cout << "hello" << endl;
+ int main(int argc, char* argv[], char *envp[] ) {
+ cout << "hello" << endl;
- FILE *f = fopen("/data/db/temptest", "a");
+ FILE *f = fopen("/data/db/temptest", "a");
- if ( f == 0 ) {
- cout << "can't open file\n";
- return 1;
- }
+ if ( f == 0 ) {
+ cout << "can't open file\n";
+ return 1;
+ }
- {
- Timer t;
- for ( int i = 0; i < 50000; i++ )
- fwrite("abc", 3, 1, f);
- cout << "small writes: " << t.millis() << "ms" << endl;
- }
+ {
+ Timer t;
+ for ( int i = 0; i < 50000; i++ )
+ fwrite("abc", 3, 1, f);
+ cout << "small writes: " << t.millis() << "ms" << endl;
+ }
- {
- Timer t;
- for ( int i = 0; i < 10000; i++ ) {
- fwrite("abc", 3, 1, f);
- fflush(f);
- fsync( fileno( f ) );
+ {
+ Timer t;
+ for ( int i = 0; i < 10000; i++ ) {
+ fwrite("abc", 3, 1, f);
+ fflush(f);
+ fsync( fileno( f ) );
+ }
+ int ms = t.millis();
+ cout << "flush: " << ms << "ms, " << ms / 10000.0 << "ms/request" << endl;
+ }
+
+ {
+ Timer t;
+ for ( int i = 0; i < 500; i++ ) {
+ fwrite("abc", 3, 1, f);
+ fflush(f);
+ fsync( fileno( f ) );
+ sleepmillis(2);
+ }
+ int ms = t.millis() - 500 * 2;
+ cout << "flush with sleeps: " << ms << "ms, " << ms / 500.0 << "ms/request" << endl;
}
- int ms = t.millis();
- cout << "flush: " << ms << "ms, " << ms / 10000.0 << "ms/request" << endl;
- }
- {
- Timer t;
- for ( int i = 0; i < 500; i++ ) {
- fwrite("abc", 3, 1, f);
- fflush(f);
- fsync( fileno( f ) );
- sleepmillis(2);
+ char buf[8192];
+ for ( int pass = 0; pass < 2; pass++ ) {
+ cout << "pass " << pass << endl;
+ {
+ Timer t;
+ int n = 500;
+ for ( int i = 0; i < n; i++ ) {
+ if ( pass == 0 )
+ fwrite("abc", 3, 1, f);
+ else
+ fwrite(buf, 8192, 1, f);
+ buf[0]++;
+ fflush(f);
+ fullsync(fileno(f));
+ }
+ int ms = t.millis();
+ cout << "fullsync: " << ms << "ms, " << ms / ((double) n) << "ms/request" << endl;
+ }
+
+ {
+ Timer t;
+ for ( int i = 0; i < 500; i++ ) {
+ if ( pass == 0 )
+ fwrite("abc", 3, 1, f);
+ else
+ fwrite(buf, 8192, 1, f);
+ buf[0]++;
+ fflush(f);
+ fullsync(fileno(f));
+ sleepmillis(2);
+ }
+ int ms = t.millis() - 2 * 500;
+ cout << "fullsync with sleeps: " << ms << "ms, " << ms / 500.0 << "ms/request" << endl;
+ }
}
- int ms = t.millis() - 500 * 2;
- cout << "flush with sleeps: " << ms << "ms, " << ms / 500.0 << "ms/request" << endl;
- }
- char buf[8192];
- for ( int pass = 0; pass < 2; pass++ ) {
- cout << "pass " << pass << endl;
+ // without growing
{
+ fclose(f);
+ /* try from beginning of the file, where we aren't appending and changing the file length,
+ to see if this is faster as the directory entry then doesn't have to be flushed (if noatime in effect).
+ */
+ f = fopen("/data/db/temptest", "r+");
Timer t;
int n = 500;
for ( int i = 0; i < n; i++ ) {
- if ( pass == 0 )
- fwrite("abc", 3, 1, f);
- else
- fwrite(buf, 8192, 1, f);
- buf[0]++;
+ fwrite("xyz", 3, 1, f);
fflush(f);
fullsync(fileno(f));
}
int ms = t.millis();
- cout << "fullsync: " << ms << "ms, " << ms / ((double) n) << "ms/request" << endl;
+ cout << "fullsync without growing: " << ms << "ms, " << ms / ((double) n) << "ms/request" << endl;
}
+ // without growing, with delay
{
+ fclose(f);
+ /* try from beginning of the file, where we aren't appending and changing the file length,
+ to see if this is faster as the directory entry then doesn't have to be flushed (if noatime in effect).
+ */
+ f = fopen("/data/db/temptest", "r+");
Timer t;
- for ( int i = 0; i < 500; i++ ) {
- if ( pass == 0 )
- fwrite("abc", 3, 1, f);
- else
- fwrite(buf, 8192, 1, f);
- buf[0]++;
+ int n = 500;
+ for ( int i = 0; i < n; i++ ) {
+ fwrite("xyz", 3, 1, f);
fflush(f);
fullsync(fileno(f));
sleepmillis(2);
}
int ms = t.millis() - 2 * 500;
- cout << "fullsync with sleeps: " << ms << "ms, " << ms / 500.0 << "ms/request" << endl;
+ cout << "fullsync without growing with sleeps: " << ms << "ms, " << ms / ((double) n) << "ms/request" << endl;
}
- }
- // without growing
- {
- fclose(f);
- /* try from beginning of the file, where we aren't appending and changing the file length,
- to see if this is faster as the directory entry then doesn't have to be flushed (if noatime in effect).
- */
- f = fopen("/data/db/temptest", "r+");
- Timer t;
- int n = 500;
- for ( int i = 0; i < n; i++ ) {
- fwrite("xyz", 3, 1, f);
- fflush(f);
- fullsync(fileno(f));
- }
- int ms = t.millis();
- cout << "fullsync without growing: " << ms << "ms, " << ms / ((double) n) << "ms/request" << endl;
+ return 0;
}
- // without growing, with delay
- {
- fclose(f);
- /* try from beginning of the file, where we aren't appending and changing the file length,
- to see if this is faster as the directory entry then doesn't have to be flushed (if noatime in effect).
- */
- f = fopen("/data/db/temptest", "r+");
- Timer t;
- int n = 500;
- for ( int i = 0; i < n; i++ ) {
- fwrite("xyz", 3, 1, f);
- fflush(f);
- fullsync(fileno(f));
- sleepmillis(2);
- }
- int ms = t.millis() - 2 * 500;
- cout << "fullsync without growing with sleeps: " << ms << "ms, " << ms / ((double) n) << "ms/request" << endl;
- }
-
- return 0;
-}
-
} // namespace mongo
diff --git a/db/instance.cpp b/db/instance.cpp
index 40cb435a288..323e5bb6d29 100644
--- a/db/instance.cpp
+++ b/db/instance.cpp
@@ -28,499 +28,499 @@
namespace mongo {
-int nloggedsome = 0;
+ int nloggedsome = 0;
#define LOGSOME if( ++nloggedsome < 1000 || nloggedsome % 100 == 0 )
-bool objcheck = false;
-bool quota = false;
-bool slave = false;
-bool master = false; // true means keep an op log
-extern int curOp;
+ bool objcheck = false;
+ bool quota = false;
+ bool slave = false;
+ bool master = false; // true means keep an op log
+ extern int curOp;
-boost::mutex dbMutex;
-MutexInfo dbMutexInfo;
+ boost::mutex dbMutex;
+ MutexInfo dbMutexInfo;
//int dbLocked = 0;
-int port = DBPort;
-/* 0 = off; 1 = writes, 2 = reads, 3 = both
- 7 = log a few reads, and all writes.
-*/
-int opLogging = 0;
-int getOpLogging() {
- return opLogging;
-}
-OpLog _oplog;
+ int port = DBPort;
+ /* 0 = off; 1 = writes, 2 = reads, 3 = both
+ 7 = log a few reads, and all writes.
+ */
+ int opLogging = 0;
+ int getOpLogging() {
+ return opLogging;
+ }
+ OpLog _oplog;
//#define oplog (*(_oplog.f))
-long long oplogSize = 0;
+ long long oplogSize = 0;
-bool useCursors = true;
+ bool useCursors = true;
-void closeAllSockets();
-void flushOpLog() {
- _oplog.flush();
-}
+ void closeAllSockets();
+ void flushOpLog() {
+ _oplog.flush();
+ }
-int ctr = 0;
-bool quiet = false;
-bool cpu = false; // --cpu show cpu time periodically
-bool verbose = false;
+ int ctr = 0;
+ bool quiet = false;
+ bool cpu = false; // --cpu show cpu time periodically
+ bool verbose = false;
// Returns false when request includes 'end'
-bool assembleResponse( Message &m, DbResponse &dbresponse ) {
- dblock lk;
+ bool assembleResponse( Message &m, DbResponse &dbresponse ) {
+ dblock lk;
- stringstream ss;
- char buf[64];
- time_t_to_String(time(0), buf);
- buf[20] = 0; // don't want the year
- ss << buf;
- // ss << curTimeMillis() % 10000 << ' ';
+ stringstream ss;
+ char buf[64];
+ time_t_to_String(time(0), buf);
+ buf[20] = 0; // don't want the year
+ ss << buf;
+ // ss << curTimeMillis() % 10000 << ' ';
- Timer t;
- database = 0;
- curOp = 0;
+ Timer t;
+ database = 0;
+ curOp = 0;
- int ms;
- bool log = false;
- curOp = m.data->operation();
+ int ms;
+ bool log = false;
+ curOp = m.data->operation();
#if 0
- /* use this if you only want to process operations for a particular namespace.
- maybe add to cmd line parms or something fancier.
- */
- DbMessage ddd(m);
- if ( strncmp(ddd.getns(), "clusterstock", 12) != 0 ) {
- static int q;
- if ( ++q < 20 )
- cout << "TEMP skip " << ddd.getns() << endl;
- goto skip;
- }
+ /* use this if you only want to process operations for a particular namespace.
+ maybe add to cmd line parms or something fancier.
+ */
+ DbMessage ddd(m);
+ if ( strncmp(ddd.getns(), "clusterstock", 12) != 0 ) {
+ static int q;
+ if ( ++q < 20 )
+ out() << "TEMP skip " << ddd.getns() << endl;
+ goto skip;
+ }
#endif
- if ( m.data->operation() == dbMsg ) {
- ss << "msg ";
- char *p = m.data->_data;
- int len = strlen(p);
- if ( len > 400 )
- cout << curTimeMillis() % 10000 <<
- " long msg received, len:" << len <<
- " ends with: " << p + len - 10 << endl;
- bool end = strcmp("end", p) == 0;
- Message *resp = new Message();
- resp->setData(opReply, "i am fine");
- dbresponse.response = resp;
- dbresponse.responseTo = m.data->id;
- //dbMsgPort.reply(m, resp);
- if ( end )
- return false;
- }
- else if ( m.data->operation() == dbQuery ) {
- receivedQuery(dbresponse, m, ss, true);
- }
- else if ( m.data->operation() == dbInsert ) {
- OPWRITE;
- try {
- ss << "insert ";
- receivedInsert(m, ss);
+ if ( m.data->operation() == dbMsg ) {
+ ss << "msg ";
+ char *p = m.data->_data;
+ int len = strlen(p);
+ if ( len > 400 )
+ out() << curTimeMillis() % 10000 <<
+ " long msg received, len:" << len <<
+ " ends with: " << p + len - 10 << endl;
+ bool end = strcmp("end", p) == 0;
+ Message *resp = new Message();
+ resp->setData(opReply, "i am fine");
+ dbresponse.response = resp;
+ dbresponse.responseTo = m.data->id;
+ //dbMsgPort.reply(m, resp);
+ if ( end )
+ return false;
}
- catch ( AssertionException& e ) {
- LOGSOME problem() << " Caught Assertion insert, continuing\n";
- ss << " exception " + e.toString();
+ else if ( m.data->operation() == dbQuery ) {
+ receivedQuery(dbresponse, m, ss, true);
}
- }
- else if ( m.data->operation() == dbUpdate ) {
- OPWRITE;
- try {
- ss << "update ";
- receivedUpdate(m, ss);
+ else if ( m.data->operation() == dbInsert ) {
+ OPWRITE;
+ try {
+ ss << "insert ";
+ receivedInsert(m, ss);
+ }
+ catch ( AssertionException& e ) {
+ LOGSOME problem() << " Caught Assertion insert, continuing\n";
+ ss << " exception " + e.toString();
+ }
}
- catch ( AssertionException& e ) {
- LOGSOME problem() << " Caught Assertion update, continuing" << endl;
- ss << " exception " + e.toString();
+ else if ( m.data->operation() == dbUpdate ) {
+ OPWRITE;
+ try {
+ ss << "update ";
+ receivedUpdate(m, ss);
+ }
+ catch ( AssertionException& e ) {
+ LOGSOME problem() << " Caught Assertion update, continuing" << endl;
+ ss << " exception " + e.toString();
+ }
}
- }
- else if ( m.data->operation() == dbDelete ) {
- OPWRITE;
- try {
- ss << "remove ";
- receivedDelete(m);
+ else if ( m.data->operation() == dbDelete ) {
+ OPWRITE;
+ try {
+ ss << "remove ";
+ receivedDelete(m);
+ }
+ catch ( AssertionException& e ) {
+ LOGSOME problem() << " Caught Assertion receivedDelete, continuing" << endl;
+ ss << " exception " + e.toString();
+ }
}
- catch ( AssertionException& e ) {
- LOGSOME problem() << " Caught Assertion receivedDelete, continuing" << endl;
- ss << " exception " + e.toString();
+ else if ( m.data->operation() == dbGetMore ) {
+ OPREAD;
+ DEV log = true;
+ ss << "getmore ";
+ receivedGetMore(dbresponse, m, ss);
}
- }
- else if ( m.data->operation() == dbGetMore ) {
- OPREAD;
- DEV log = true;
- ss << "getmore ";
- receivedGetMore(dbresponse, m, ss);
- }
- else if ( m.data->operation() == dbKillCursors ) {
- OPREAD;
- try {
- log = true;
- ss << "killcursors ";
- receivedKillCursors(m);
+ else if ( m.data->operation() == dbKillCursors ) {
+ OPREAD;
+ try {
+ log = true;
+ ss << "killcursors ";
+ receivedKillCursors(m);
+ }
+ catch ( AssertionException& e ) {
+ problem() << " Caught Assertion in kill cursors, continuing" << endl;
+ ss << " exception " + e.toString();
+ }
}
- catch ( AssertionException& e ) {
- problem() << " Caught Assertion in kill cursors, continuing" << endl;
- ss << " exception " + e.toString();
+ else {
+ out() << " operation isn't supported: " << m.data->operation() << endl;
+ assert(false);
}
- }
- else {
- cout << " operation isn't supported: " << m.data->operation() << endl;
- assert(false);
- }
- ms = t.millis();
- log = log || (ctr++ % 512 == 0 && !quiet);
- DEV log = true;
- if ( log || ms > 100 ) {
- ss << ' ' << t.millis() << "ms";
- cout << ss.str().c_str() << endl;
- }
- if ( database && database->profile >= 1 ) {
- if ( database->profile >= 2 || ms >= 100 ) {
- // profile it
- profile(ss.str().c_str()+20/*skip ts*/, ms);
+ ms = t.millis();
+ log = log || (ctr++ % 512 == 0 && !quiet);
+ DEV log = true;
+ if ( log || ms > 100 ) {
+ ss << ' ' << t.millis() << "ms";
+ out() << ss.str().c_str() << endl;
+ }
+ if ( database && database->profile >= 1 ) {
+ if ( database->profile >= 2 || ms >= 100 ) {
+ // profile it
+ profile(ss.str().c_str()+20/*skip ts*/, ms);
+ }
}
- }
- return true;
-}
-
-void killCursors(int n, long long *ids);
-void receivedKillCursors(Message& m) {
- int *x = (int *) m.data->_data;
- x++; // reserved
- int n = *x++;
- assert( n >= 1 );
- if ( n > 2000 ) {
- problem() << "Assertion failure, receivedKillCursors, n=" << n << endl;
- assert( n < 30000 );
- }
- killCursors(n, (long long *) x);
-}
-
-void closeClient( const char *cl, const char *path ) {
- /* reset haveLogged in local.dbinfo */
- if ( string("local") != cl ) {
- DBInfo i(cl);
- i.dbDropped();
+ return true;
}
- /* important: kill all open cursors on the database */
- string prefix(cl);
- prefix += '.';
- ClientCursor::invalidate(prefix.c_str());
-
- eraseDatabase( cl, path );
- delete database; // closes files
- database = 0;
-}
-
-void receivedUpdate(Message& m, stringstream& ss) {
- DbMessage d(m);
- const char *ns = d.getns();
- assert(*ns);
- setClient(ns);
- //if( database->profile )
- ss << ns << ' ';
- int flags = d.pullInt();
- BSONObj query = d.nextJsObj();
-
- assert( d.moreJSObjs() );
- assert( query.objsize() < m.data->dataLen() );
- BSONObj toupdate = d.nextJsObj();
-
- assert( toupdate.objsize() < m.data->dataLen() );
- assert( query.objsize() + toupdate.objsize() < m.data->dataLen() );
- updateObjects(ns, toupdate, query, flags & 1, ss);
-}
-
-void receivedDelete(Message& m) {
- DbMessage d(m);
- const char *ns = d.getns();
- assert(*ns);
- setClient(ns);
- int flags = d.pullInt();
- bool justOne = flags & 1;
- assert( d.moreJSObjs() );
- BSONObj pattern = d.nextJsObj();
- deleteObjects(ns, pattern, justOne);
- logOp("d", ns, pattern, 0, &justOne);
-}
-
-void receivedQuery(DbResponse& dbresponse, /*AbstractMessagingPort& dbMsgPort, */Message& m, stringstream& ss, bool logit) {
- MSGID responseTo = m.data->id;
-
- DbMessage d(m);
- QueryMessage q(d);
-
- if ( opLogging && logit ) {
- if ( strstr(q.ns, ".$cmd") ) {
- /* $cmd queries are "commands" and usually best treated as write operations */
- OPWRITE;
- }
- else {
- OPREAD;
+ void killCursors(int n, long long *ids);
+ void receivedKillCursors(Message& m) {
+ int *x = (int *) m.data->_data;
+ x++; // reserved
+ int n = *x++;
+ assert( n >= 1 );
+ if ( n > 2000 ) {
+ problem() << "Assertion failure, receivedKillCursors, n=" << n << endl;
+ assert( n < 30000 );
}
+ killCursors(n, (long long *) x);
}
- setClient(q.ns);
- QueryResult* msgdata;
+ void closeClient( const char *cl, const char *path ) {
+ /* reset haveLogged in local.dbinfo */
+ if ( string("local") != cl ) {
+ DBInfo i(cl);
+ i.dbDropped();
+ }
- try {
- msgdata = runQuery(m, q.ns, q.ntoskip, q.ntoreturn, q.query, q.fields, ss, q.queryOptions);
- }
- catch ( AssertionException& e ) {
- ss << " exception ";
- LOGSOME problem() << " Caught Assertion in runQuery ns:" << q.ns << ' ' << e.toString() << '\n';
- log() << " ntoskip:" << q.ntoskip << " ntoreturn:" << q.ntoreturn << '\n';
- if ( q.query.valid() )
- log() << " query:" << q.query.toString() << endl;
- else
- log() << " query object is not valid!" << endl;
-
- BSONObjBuilder err;
- err.append("$err", e.msg.empty() ? "assertion during query" : e.msg);
- BSONObj errObj = err.done();
-
- BufBuilder b;
- b.skip(sizeof(QueryResult));
- b.append((void*) errObj.objdata(), errObj.objsize());
-
- // todo: call replyToQuery() from here instead of this. needs a little tweaking
- // though to do that.
- msgdata = (QueryResult *) b.buf();
- b.decouple();
- QueryResult *qr = msgdata;
- qr->resultFlags() = QueryResult::ResultFlag_ErrSet;
- qr->len = b.len();
- qr->setOperation(opReply);
- qr->cursorId = 0;
- qr->startingFrom = 0;
- qr->nReturned = 1;
+ /* important: kill all open cursors on the database */
+ string prefix(cl);
+ prefix += '.';
+ ClientCursor::invalidate(prefix.c_str());
+ eraseDatabase( cl, path );
+ delete database; // closes files
+ database = 0;
}
- Message *resp = new Message();
- resp->setData(msgdata, true); // transport will free
- dbresponse.response = resp;
- dbresponse.responseTo = responseTo;
- if ( database ) {
- if ( database->profile )
- ss << " bytes:" << resp->data->dataLen();
- }
- else {
- if ( strstr(q.ns, "$cmd") == 0 ) // (this condition is normal for $cmd dropDatabase)
- log() << "ERROR: receiveQuery: database is null; ns=" << q.ns << endl;
- }
- // dbMsgPort.reply(m, resp, responseTo);
-}
-
-QueryResult* emptyMoreResult(long long);
-
-void receivedGetMore(DbResponse& dbresponse, /*AbstractMessagingPort& dbMsgPort, */Message& m, stringstream& ss) {
- DbMessage d(m);
- const char *ns = d.getns();
- ss << ns;
- setClient(ns);
- int ntoreturn = d.pullInt();
- long long cursorid = d.pullInt64();
- ss << " cid:" << cursorid;
- ss << " ntoreturn:" << ntoreturn;
- QueryResult* msgdata;
- try {
- msgdata = getMore(ns, ntoreturn, cursorid);
- }
- catch ( AssertionException& e ) {
- ss << " exception " + e.toString();
- msgdata = emptyMoreResult(cursorid);
+
+ void receivedUpdate(Message& m, stringstream& ss) {
+ DbMessage d(m);
+ const char *ns = d.getns();
+ assert(*ns);
+ setClient(ns);
+ //if( database->profile )
+ ss << ns << ' ';
+ int flags = d.pullInt();
+ BSONObj query = d.nextJsObj();
+
+ assert( d.moreJSObjs() );
+ assert( query.objsize() < m.data->dataLen() );
+ BSONObj toupdate = d.nextJsObj();
+
+ assert( toupdate.objsize() < m.data->dataLen() );
+ assert( query.objsize() + toupdate.objsize() < m.data->dataLen() );
+ updateObjects(ns, toupdate, query, flags & 1, ss);
}
- Message *resp = new Message();
- resp->setData(msgdata, true);
- ss << " bytes:" << resp->data->dataLen();
- ss << " nreturned:" << msgdata->nReturned;
- dbresponse.response = resp;
- dbresponse.responseTo = m.data->id;
- //dbMsgPort.reply(m, resp);
-}
-
-void receivedInsert(Message& m, stringstream& ss) {
- DbMessage d(m);
- while ( d.moreJSObjs() ) {
- BSONObj js = d.nextJsObj();
+
+ void receivedDelete(Message& m) {
+ DbMessage d(m);
const char *ns = d.getns();
assert(*ns);
setClient(ns);
- ss << ns;
+ int flags = d.pullInt();
+ bool justOne = flags & 1;
+ assert( d.moreJSObjs() );
+ BSONObj pattern = d.nextJsObj();
+ deleteObjects(ns, pattern, justOne);
+ logOp("d", ns, pattern, 0, &justOne);
+ }
+
+ void receivedQuery(DbResponse& dbresponse, /*AbstractMessagingPort& dbMsgPort, */Message& m, stringstream& ss, bool logit) {
+ MSGID responseTo = m.data->id;
+
+ DbMessage d(m);
+ QueryMessage q(d);
- if ( objcheck && !js.valid() ) {
- problem() << "insert error ns: " << ns << '\n';
- uassert("insert: bad object from client", false);
+ if ( opLogging && logit ) {
+ if ( strstr(q.ns, ".$cmd") ) {
+ /* $cmd queries are "commands" and usually best treated as write operations */
+ OPWRITE;
+ }
+ else {
+ OPREAD;
+ }
}
- theDataFileMgr.insert(ns, (void*) js.objdata(), js.objsize());
- logOp("i", ns, js);
+ setClient(q.ns);
+ QueryResult* msgdata;
+
+ try {
+ msgdata = runQuery(m, q.ns, q.ntoskip, q.ntoreturn, q.query, q.fields, ss, q.queryOptions);
+ }
+ catch ( AssertionException& e ) {
+ ss << " exception ";
+ LOGSOME problem() << " Caught Assertion in runQuery ns:" << q.ns << ' ' << e.toString() << '\n';
+ log() << " ntoskip:" << q.ntoskip << " ntoreturn:" << q.ntoreturn << '\n';
+ if ( q.query.valid() )
+ log() << " query:" << q.query.toString() << endl;
+ else
+ log() << " query object is not valid!" << endl;
+
+ BSONObjBuilder err;
+ err.append("$err", e.msg.empty() ? "assertion during query" : e.msg);
+ BSONObj errObj = err.done();
+
+ BufBuilder b;
+ b.skip(sizeof(QueryResult));
+ b.append((void*) errObj.objdata(), errObj.objsize());
+
+ // todo: call replyToQuery() from here instead of this. needs a little tweaking
+ // though to do that.
+ msgdata = (QueryResult *) b.buf();
+ b.decouple();
+ QueryResult *qr = msgdata;
+ qr->resultFlags() = QueryResult::ResultFlag_ErrSet;
+ qr->len = b.len();
+ qr->setOperation(opReply);
+ qr->cursorId = 0;
+ qr->startingFrom = 0;
+ qr->nReturned = 1;
+
+ }
+ Message *resp = new Message();
+ resp->setData(msgdata, true); // transport will free
+ dbresponse.response = resp;
+ dbresponse.responseTo = responseTo;
+ if ( database ) {
+ if ( database->profile )
+ ss << " bytes:" << resp->data->dataLen();
+ }
+ else {
+ if ( strstr(q.ns, "$cmd") == 0 ) // (this condition is normal for $cmd dropDatabase)
+ log() << "ERROR: receiveQuery: database is null; ns=" << q.ns << endl;
+ }
+ // dbMsgPort.reply(m, resp, responseTo);
}
-}
-extern int callDepth;
+ QueryResult* emptyMoreResult(long long);
-class JniMessagingPort : public AbstractMessagingPort {
-public:
- JniMessagingPort(Message& _container) : container(_container) { }
- void reply(Message& received, Message& response, MSGID) {
- container = response;
+ void receivedGetMore(DbResponse& dbresponse, /*AbstractMessagingPort& dbMsgPort, */Message& m, stringstream& ss) {
+ DbMessage d(m);
+ const char *ns = d.getns();
+ ss << ns;
+ setClient(ns);
+ int ntoreturn = d.pullInt();
+ long long cursorid = d.pullInt64();
+ ss << " cid:" << cursorid;
+ ss << " ntoreturn:" << ntoreturn;
+ QueryResult* msgdata;
+ try {
+ msgdata = getMore(ns, ntoreturn, cursorid);
+ }
+ catch ( AssertionException& e ) {
+ ss << " exception " + e.toString();
+ msgdata = emptyMoreResult(cursorid);
+ }
+ Message *resp = new Message();
+ resp->setData(msgdata, true);
+ ss << " bytes:" << resp->data->dataLen();
+ ss << " nreturned:" << msgdata->nReturned;
+ dbresponse.response = resp;
+ dbresponse.responseTo = m.data->id;
+ //dbMsgPort.reply(m, resp);
}
- void reply(Message& received, Message& response) {
- container = response;
+
+ void receivedInsert(Message& m, stringstream& ss) {
+ DbMessage d(m);
+ while ( d.moreJSObjs() ) {
+ BSONObj js = d.nextJsObj();
+ const char *ns = d.getns();
+ assert(*ns);
+ setClient(ns);
+ ss << ns;
+
+ if ( objcheck && !js.valid() ) {
+ problem() << "insert error ns: " << ns << '\n';
+ uassert("insert: bad object from client", false);
+ }
+
+ theDataFileMgr.insert(ns, (void*) js.objdata(), js.objsize());
+ logOp("i", ns, js);
+ }
}
- Message & container;
-};
-/* a call from java/js to the database locally.
+ extern int callDepth;
- m - inbound message
- out - outbound message, if there is any, will be set here.
- if there is one, out.data will be non-null on return.
- The out.data buffer will automatically clean up when out
- goes out of scope (out.freeIt==true)
+ class JniMessagingPort : public AbstractMessagingPort {
+ public:
+ JniMessagingPort(Message& _container) : container(_container) { }
+ void reply(Message& received, Message& response, MSGID) {
+ container = response;
+ }
+ void reply(Message& received, Message& response) {
+ container = response;
+ }
+ Message & container;
+ };
- note we should already be in the mutex lock from connThread() at this point.
-*/
-void jniCallback(Message& m, Message& out)
-{
- Database *clientOld = database;
+ /* a call from java/js to the database locally.
- JniMessagingPort jmp(out);
- callDepth++;
- int curOpOld = curOp;
+ m - inbound message
+ out - outbound message, if there is any, will be set here.
+ if there is one, out.data will be non-null on return.
+ The out.data buffer will automatically clean up when out
+ goes out of scope (out.freeIt==true)
- try {
+ note we should already be in the mutex lock from connThread() at this point.
+ */
+ void jniCallback(Message& m, Message& out)
+ {
+ Database *clientOld = database;
- stringstream ss;
- char buf[64];
- time_t_to_String(time(0), buf);
- buf[20] = 0; // don't want the year
- ss << buf << " dbjs ";
-
- {
- Timer t;
-
- bool log = false;
- curOp = m.data->operation();
-
- if ( m.data->operation() == dbQuery ) {
- // on a query, the Message must have m.freeIt true so that the buffer data can be
- // retained by cursors. As freeIt is false, we make a copy here.
- assert( m.data->len > 0 && m.data->len < 32000000 );
- Message copy(malloc(m.data->len), true);
- memcpy(copy.data, m.data, m.data->len);
- DbResponse dbr;
- receivedQuery(dbr, copy, ss, false);
- jmp.reply(m, *dbr.response, dbr.responseTo);
- }
- else if ( m.data->operation() == dbInsert ) {
- ss << "insert ";
- receivedInsert(m, ss);
- }
- else if ( m.data->operation() == dbUpdate ) {
- ss << "update ";
- receivedUpdate(m, ss);
- }
- else if ( m.data->operation() == dbDelete ) {
- ss << "remove ";
- receivedDelete(m);
- }
- else if ( m.data->operation() == dbGetMore ) {
- DEV log = true;
- ss << "getmore ";
- DbResponse dbr;
- receivedGetMore(dbr, m, ss);
- jmp.reply(m, *dbr.response, dbr.responseTo);
- }
- else if ( m.data->operation() == dbKillCursors ) {
- try {
- log = true;
- ss << "killcursors ";
- receivedKillCursors(m);
+ JniMessagingPort jmp(out);
+ callDepth++;
+ int curOpOld = curOp;
+
+ try {
+
+ stringstream ss;
+ char buf[64];
+ time_t_to_String(time(0), buf);
+ buf[20] = 0; // don't want the year
+ ss << buf << " dbjs ";
+
+ {
+ Timer t;
+
+ bool log = false;
+ curOp = m.data->operation();
+
+ if ( m.data->operation() == dbQuery ) {
+ // on a query, the Message must have m.freeIt true so that the buffer data can be
+ // retained by cursors. As freeIt is false, we make a copy here.
+ assert( m.data->len > 0 && m.data->len < 32000000 );
+ Message copy(malloc(m.data->len), true);
+ memcpy(copy.data, m.data, m.data->len);
+ DbResponse dbr;
+ receivedQuery(dbr, copy, ss, false);
+ jmp.reply(m, *dbr.response, dbr.responseTo);
}
- catch ( AssertionException& ) {
- problem() << "Caught Assertion in kill cursors, continuing" << endl;
- ss << " exception ";
+ else if ( m.data->operation() == dbInsert ) {
+ ss << "insert ";
+ receivedInsert(m, ss);
+ }
+ else if ( m.data->operation() == dbUpdate ) {
+ ss << "update ";
+ receivedUpdate(m, ss);
+ }
+ else if ( m.data->operation() == dbDelete ) {
+ ss << "remove ";
+ receivedDelete(m);
+ }
+ else if ( m.data->operation() == dbGetMore ) {
+ DEV log = true;
+ ss << "getmore ";
+ DbResponse dbr;
+ receivedGetMore(dbr, m, ss);
+ jmp.reply(m, *dbr.response, dbr.responseTo);
+ }
+ else if ( m.data->operation() == dbKillCursors ) {
+ try {
+ log = true;
+ ss << "killcursors ";
+ receivedKillCursors(m);
+ }
+ catch ( AssertionException& ) {
+ problem() << "Caught Assertion in kill cursors, continuing" << endl;
+ ss << " exception ";
+ }
+ }
+ else {
+ mongo::out() << " jnicall: operation isn't supported: " << m.data->operation() << endl;
+ assert(false);
}
- }
- else {
- cout << " jnicall: operation isn't supported: " << m.data->operation() << endl;
- assert(false);
- }
- int ms = t.millis();
- log = log || ctr++ % 128 == 0;
- if ( log || ms > 100 ) {
- ss << ' ' << t.millis() << "ms";
- cout << ss.str().c_str() << endl;
- }
- if ( database && database->profile >= 1 ) {
- if ( database->profile >= 2 || ms >= 100 ) {
- // profile it
- profile(ss.str().c_str()+20/*skip ts*/, ms);
+ int ms = t.millis();
+ log = log || ctr++ % 128 == 0;
+ if ( log || ms > 100 ) {
+ ss << ' ' << t.millis() << "ms";
+ mongo::out() << ss.str().c_str() << endl;
+ }
+ if ( database && database->profile >= 1 ) {
+ if ( database->profile >= 2 || ms >= 100 ) {
+ // profile it
+ profile(ss.str().c_str()+20/*skip ts*/, ms);
+ }
}
}
+
+ }
+ catch ( AssertionException& ) {
+ problem() << "Caught AssertionException in jniCall()" << endl;
}
- }
- catch ( AssertionException& ) {
- problem() << "Caught AssertionException in jniCall()" << endl;
+ curOp = curOpOld;
+ callDepth--;
+
+ if ( database != clientOld ) {
+ database = clientOld;
+ wassert(false);
+ }
}
- curOp = curOpOld;
- callDepth--;
+ void getDatabaseNames( vector< string > &names ) {
+ boost::filesystem::path path( dbpath );
+ for ( boost::filesystem::directory_iterator i( path );
+ i != boost::filesystem::directory_iterator(); ++i ) {
+ string fileName = i->leaf();
+ if ( fileName.length() > 3 && fileName.substr( fileName.length() - 3, 3 ) == ".ns" )
+ names.push_back( fileName.substr( 0, fileName.length() - 3 ) );
+ }
+ }
- if ( database != clientOld ) {
- database = clientOld;
- wassert(false);
+ bool DBDirectClient::call( Message &toSend, Message &response, bool assertOk ) {
+ DbResponse dbResponse;
+ assembleResponse( toSend, dbResponse );
+ assert( dbResponse.response );
+ response = *dbResponse.response;
+ return true;
}
-}
-
-void getDatabaseNames( vector< string > &names ) {
- boost::filesystem::path path( dbpath );
- for ( boost::filesystem::directory_iterator i( path );
- i != boost::filesystem::directory_iterator(); ++i ) {
- string fileName = i->leaf();
- if ( fileName.length() > 3 && fileName.substr( fileName.length() - 3, 3 ) == ".ns" )
- names.push_back( fileName.substr( 0, fileName.length() - 3 ) );
+
+ void DBDirectClient::say( Message &toSend ) {
+ DbResponse dbResponse;
+ assembleResponse( toSend, dbResponse );
}
-}
-
-bool DBDirectClient::call( Message &toSend, Message &response, bool assertOk ) {
- DbResponse dbResponse;
- assembleResponse( toSend, dbResponse );
- assert( dbResponse.response );
- response = *dbResponse.response;
- return true;
-}
-
-void DBDirectClient::say( Message &toSend ) {
- DbResponse dbResponse;
- assembleResponse( toSend, dbResponse );
-}
-
-/* not using log() herein in case we are called from segvhandler and we were already locked */
+
+ /* not using log() herein in case we are called from segvhandler and we were already locked */
#undef exit
-void dbexit(int rc, const char *why) {
- cout << "dbexit: " << why << "; flushing op log and files" << endl;
- flushOpLog();
+ void dbexit(int rc, const char *why) {
+ out() << "dbexit: " << why << "; flushing op log and files" << endl;
+ flushOpLog();
- /* must do this before unmapping mem or you may get a seg fault */
- closeAllSockets();
+ /* must do this before unmapping mem or you may get a seg fault */
+ closeAllSockets();
- MemoryMappedFile::closeAllFiles();
- cout << "dbexit: really exiting now" << endl;
- exit(rc);
-}
+ MemoryMappedFile::closeAllFiles();
+ out() << "dbexit: really exiting now" << endl;
+ exit(rc);
+ }
} // namespace mongo
diff --git a/db/instance.h b/db/instance.h
index 52bba249637..71348620204 100644
--- a/db/instance.h
+++ b/db/instance.h
@@ -27,81 +27,81 @@ namespace mongo {
//
#define OPLOG if( 0 )
-int getOpLogging();
+ int getOpLogging();
#define OPWRITE if( getOpLogging() & 1 ) _oplog.write((char *) m.data, m.data->len);
#define OPREAD if( getOpLogging() & 2 ) _oplog.readop((char *) m.data, m.data->len);
-struct OpLog {
- ofstream *f;
- OpLog() : f(0) { }
- void init() {
- OPLOG {
- stringstream ss;
- ss << "oplog." << hex << time(0);
- string name = ss.str();
- f = new ofstream(name.c_str(), ios::out | ios::binary);
- if ( ! f->good() ) {
- problem() << "couldn't open log stream" << endl;
- throw 1717;
+ struct OpLog {
+ ofstream *f;
+ OpLog() : f(0) { }
+ void init() {
+ OPLOG {
+ stringstream ss;
+ ss << "oplog." << hex << time(0);
+ string name = ss.str();
+ f = new ofstream(name.c_str(), ios::out | ios::binary);
+ if ( ! f->good() ) {
+ problem() << "couldn't open log stream" << endl;
+ throw 1717;
+ }
}
}
- }
- void flush() {
- OPLOG f->flush();
- }
- void write(char *data,int len) {
- OPLOG f->write(data,len);
- }
- void readop(char *data, int len) {
- OPLOG {
- bool log = (getOpLogging() & 4) == 0;
- OCCASIONALLY log = true;
- if ( log )
- f->write(data,len);
+ void flush() {
+ OPLOG f->flush();
}
- }
-};
+ void write(char *data,int len) {
+ OPLOG f->write(data,len);
+ }
+ void readop(char *data, int len) {
+ OPLOG {
+ bool log = (getOpLogging() & 4) == 0;
+ OCCASIONALLY log = true;
+ if ( log )
+ f->write(data,len);
+ }
+ }
+ };
-/* we defer response until we unlock. don't want a blocked socket to
- keep things locked.
-*/
-struct DbResponse {
- Message *response;
- MSGID responseTo;
- DbResponse(Message *r, MSGID rt) : response(r), responseTo(rt) {
- }
- DbResponse() {
- response = 0;
- }
- ~DbResponse() {
- delete response;
- }
-};
+ /* we defer response until we unlock. don't want a blocked socket to
+ keep things locked.
+ */
+ struct DbResponse {
+ Message *response;
+ MSGID responseTo;
+ DbResponse(Message *r, MSGID rt) : response(r), responseTo(rt) {
+ }
+ DbResponse() {
+ response = 0;
+ }
+ ~DbResponse() {
+ delete response;
+ }
+ };
-bool assembleResponse( Message &m, DbResponse &dbresponse );
+ bool assembleResponse( Message &m, DbResponse &dbresponse );
-void receivedKillCursors(Message& m);
-void receivedUpdate(Message& m, stringstream& ss);
-void receivedDelete(Message& m);
-void receivedInsert(Message& m, stringstream& ss);
-void receivedGetMore(DbResponse& dbresponse, /*AbstractMessagingPort& dbMsgPort, */Message& m, stringstream& ss);
-void receivedQuery(DbResponse& dbresponse, /*AbstractMessagingPort& dbMsgPort, */Message& m, stringstream& ss, bool logit);
-void getDatabaseNames( vector< string > &names );
+ void receivedKillCursors(Message& m);
+ void receivedUpdate(Message& m, stringstream& ss);
+ void receivedDelete(Message& m);
+ void receivedInsert(Message& m, stringstream& ss);
+ void receivedGetMore(DbResponse& dbresponse, /*AbstractMessagingPort& dbMsgPort, */Message& m, stringstream& ss);
+ void receivedQuery(DbResponse& dbresponse, /*AbstractMessagingPort& dbMsgPort, */Message& m, stringstream& ss, bool logit);
+ void getDatabaseNames( vector< string > &names );
// --- local client ---
-class DBDirectClient : public DBClientBase {
- virtual string toString() {
- return "DBDirectClient";
- }
- virtual bool call( Message &toSend, Message &response, bool assertOk=true );
- virtual void say( Message &toSend );
- virtual void sayPiggyBack( Message &toSend ) {
- // don't need to piggy back when connected locally
- return say( toSend );
- }
-};
+ class DBDirectClient : public DBClientBase {
+ virtual string toString() {
+ return "DBDirectClient";
+ }
+ virtual bool call( Message &toSend, Message &response, bool assertOk=true );
+ virtual void say( Message &toSend );
+ virtual void sayPiggyBack( Message &toSend ) {
+ // don't need to piggy back when connected locally
+ return say( toSend );
+ }
+ };
} // namespace mongo
diff --git a/db/introspect.cpp b/db/introspect.cpp
index d78bb68e59c..998e46ca8fe 100644
--- a/db/introspect.cpp
+++ b/db/introspect.cpp
@@ -26,34 +26,34 @@
namespace mongo {
-typedef map<string,Cursor*> StringToCursor;
-StringToCursor *specialNamespaces;
-
-auto_ptr<Cursor> getSpecialCursor(const char *ns) {
- StringToCursor::iterator it = specialNamespaces->find(ns);
- return auto_ptr<Cursor>
- (it == specialNamespaces->end() ?
- 0 : it->second->clone());
-}
-
-void SingleResultObjCursor::reg(const char *as) {
- if ( specialNamespaces == 0 )
- specialNamespaces = new StringToCursor();
- if ( specialNamespaces->count(as) == 0 ) {
- (*specialNamespaces)[as] = this;
+ typedef map<string,Cursor*> StringToCursor;
+ StringToCursor *specialNamespaces;
+
+ auto_ptr<Cursor> getSpecialCursor(const char *ns) {
+ StringToCursor::iterator it = specialNamespaces->find(ns);
+ return auto_ptr<Cursor>
+ (it == specialNamespaces->end() ?
+ 0 : it->second->clone());
+ }
+
+ void SingleResultObjCursor::reg(const char *as) {
+ if ( specialNamespaces == 0 )
+ specialNamespaces = new StringToCursor();
+ if ( specialNamespaces->count(as) == 0 ) {
+ (*specialNamespaces)[as] = this;
+ }
+ }
+
+ void profile(const char *str,
+ int millis)
+ {
+ BSONObjBuilder b;
+ b.appendDate("ts", jsTime());
+ b.append("info", str);
+ b.append("millis", (double) millis);
+ BSONObj p = b.done();
+ theDataFileMgr.insert(database->profileName.c_str(),
+ p.objdata(), p.objsize(), true);
}
-}
-
-void profile(const char *str,
- int millis)
-{
- BSONObjBuilder b;
- b.appendDate("ts", jsTime());
- b.append("info", str);
- b.append("millis", (double) millis);
- BSONObj p = b.done();
- theDataFileMgr.insert(database->profileName.c_str(),
- p.objdata(), p.objsize(), true);
-}
} // namespace mongo
diff --git a/db/introspect.h b/db/introspect.h
index 4046243fe0e..a4d1a43bce0 100644
--- a/db/introspect.h
+++ b/db/introspect.h
@@ -25,53 +25,53 @@
namespace mongo {
-auto_ptr<Cursor> getSpecialCursor(const char *ns);
+ auto_ptr<Cursor> getSpecialCursor(const char *ns);
-class SingleResultObjCursor : public Cursor {
- int i;
-protected:
- BSONObjBuilder b;
- void reg(const char *as); /* register as a certain namespace */
-public:
- SingleResultObjCursor() {
- i = 0;
- }
- virtual bool ok() {
- return i == 0;
- }
- virtual Record* _current() {
- assert(false);
- return 0;
- }
- virtual DiskLoc currLoc() {
- assert(false);
- return DiskLoc();
- }
+ class SingleResultObjCursor : public Cursor {
+ int i;
+ protected:
+ BSONObjBuilder b;
+ void reg(const char *as); /* register as a certain namespace */
+ public:
+ SingleResultObjCursor() {
+ i = 0;
+ }
+ virtual bool ok() {
+ return i == 0;
+ }
+ virtual Record* _current() {
+ assert(false);
+ return 0;
+ }
+ virtual DiskLoc currLoc() {
+ assert(false);
+ return DiskLoc();
+ }
- virtual void fill() = 0;
+ virtual void fill() = 0;
- virtual BSONObj current() {
- assert(i == 0);
- fill();
- return b.done();
- }
+ virtual BSONObj current() {
+ assert(i == 0);
+ fill();
+ return b.done();
+ }
- virtual bool advance() {
- i++;
- return false;
- }
+ virtual bool advance() {
+ i++;
+ return false;
+ }
- virtual string toString() {
- return "SingleResultObjCursor";
- }
+ virtual string toString() {
+ return "SingleResultObjCursor";
+ }
-};
+ };
-/* --- profiling --------------------------------------------
- do when database->profile is set
-*/
+ /* --- profiling --------------------------------------------
+ do when database->profile is set
+ */
-void profile(const char *str,
- int millis);
+ void profile(const char *str,
+ int millis);
} // namespace mongo
diff --git a/db/javajs.cpp b/db/javajs.cpp
index 97194359db2..960484c10c4 100644
--- a/db/javajs.cpp
+++ b/db/javajs.cpp
@@ -49,678 +49,678 @@ using namespace std;
namespace mongo {
#if defined(_WIN32)
-/* [dm] this being undefined without us adding it here means there is
- no tss cleanup on windows for boost lib?
- we don't care for now esp on windows only
-
- the boost source says:
-
- This function's sole purpose is to cause a link error in cases where
- automatic tss cleanup is not implemented by Boost.Threads as a
- reminder that user code is responsible for calling the necessary
- functions at the appropriate times (and for implementing an a
- tss_cleanup_implemented() function to eliminate the linker's
- missing symbol error).
-
- If Boost.Threads later implements automatic tss cleanup in cases
- where it currently doesn't (which is the plan), the duplicate
- symbol error will warn the user that their custom solution is no
- longer needed and can be removed.
-*/
-extern "C" void tss_cleanup_implemented(void) {
- //cout << "tss_cleanup_implemented called" << endl;
-}
+ /* [dm] this being undefined without us adding it here means there is
+ no tss cleanup on windows for boost lib?
+ we don't care for now esp on windows only
+
+ the boost source says:
+
+ This function's sole purpose is to cause a link error in cases where
+ automatic tss cleanup is not implemented by Boost.Threads as a
+ reminder that user code is responsible for calling the necessary
+ functions at the appropriate times (and for implementing an a
+ tss_cleanup_implemented() function to eliminate the linker's
+ missing symbol error).
+
+ If Boost.Threads later implements automatic tss cleanup in cases
+ where it currently doesn't (which is the plan), the duplicate
+ symbol error will warn the user that their custom solution is no
+ longer needed and can be removed.
+ */
+ extern "C" void tss_cleanup_implemented(void) {
+ //out() << "tss_cleanup_implemented called" << endl;
+ }
#endif
-JavaJSImpl * JavaJS = 0;
+ JavaJSImpl * JavaJS = 0;
#if !defined(NOJNI)
-void myJNIClean( JNIEnv * env ) {
- JavaJS->detach( env );
-}
+ void myJNIClean( JNIEnv * env ) {
+ JavaJS->detach( env );
+ }
-JavaJSImpl::JavaJSImpl() {
- JavaJSImpl(null);
-}
+ JavaJSImpl::JavaJSImpl() {
+ JavaJSImpl(null);
+ }
#if defined(_WIN32)
-const char SYSTEM_COLON = ';';
+ const char SYSTEM_COLON = ';';
#else
-const char SYSTEM_COLON = ':';
+ const char SYSTEM_COLON = ':';
#endif
-void _addClassPath( const char * ed , stringstream & ss , const char * subdir ) {
- path includeDir(ed);
- includeDir /= subdir;
- directory_iterator end;
- try {
- directory_iterator i(includeDir);
- while ( i != end ) {
- path p = *i;
- ss << SYSTEM_COLON << p.string();
- i++;
+ void _addClassPath( const char * ed , stringstream & ss , const char * subdir ) {
+ path includeDir(ed);
+ includeDir /= subdir;
+ directory_iterator end;
+ try {
+ directory_iterator i(includeDir);
+ while ( i != end ) {
+ path p = *i;
+ ss << SYSTEM_COLON << p.string();
+ i++;
+ }
+ }
+ catch (...) {
+ problem() << "exception looking for ed class path includeDir: " << includeDir.string() << endl;
+ sleepsecs(3);
+ dbexit(116);
}
}
- catch (...) {
- problem() << "exception looking for ed class path includeDir: " << includeDir.string() << endl;
- sleepsecs(3);
- dbexit(116);
- }
-}
-JavaJSImpl::JavaJSImpl(const char *appserverPath) {
- _jvm = 0;
- _mainEnv = 0;
- _dbhook = 0;
+ JavaJSImpl::JavaJSImpl(const char *appserverPath) {
+ _jvm = 0;
+ _mainEnv = 0;
+ _dbhook = 0;
- stringstream ss;
- string edTemp;
+ stringstream ss;
+ string edTemp;
- const char * ed = 0;
- ss << "-Djava.class.path=.";
+ const char * ed = 0;
+ ss << "-Djava.class.path=.";
- if ( appserverPath ) {
- ed = findEd(appserverPath);
- assert( ed );
+ if ( appserverPath ) {
+ ed = findEd(appserverPath);
+ assert( ed );
- ss << SYSTEM_COLON << ed << "/build/";
+ ss << SYSTEM_COLON << ed << "/build/";
- _addClassPath( ed , ss , "include" );
- _addClassPath( ed , ss , "include/jython/" );
- _addClassPath( ed , ss , "include/jython/javalib" );
- }
- else {
+ _addClassPath( ed , ss , "include" );
+ _addClassPath( ed , ss , "include/jython/" );
+ _addClassPath( ed , ss , "include/jython/javalib" );
+ }
+ else {
- const char * jars = findJars();
- _addClassPath( jars , ss , "jars" );
+ const char * jars = findJars();
+ _addClassPath( jars , ss , "jars" );
- edTemp += (string)jars + "/jars/babble.jar";
- ed = edTemp.c_str();
+ edTemp += (string)jars + "/jars/babble.jar";
+ ed = edTemp.c_str();
#if !defined(_WIN32)
- const char * otherEd = findEd();
- if ( otherEd ) {
- log() << "found ed as well" << endl;
- ed = otherEd;
+ const char * otherEd = findEd();
+ if ( otherEd ) {
+ log() << "found ed as well" << endl;
+ ed = otherEd;
- ss << SYSTEM_COLON << ed << "/build/";
+ ss << SYSTEM_COLON << ed << "/build/";
- _addClassPath( ed , ss , "include" );
- _addClassPath( ed , ss , "include/jython/" );
- _addClassPath( ed , ss , "include/jython/javalib" );
- }
+ _addClassPath( ed , ss , "include" );
+ _addClassPath( ed , ss , "include/jython/" );
+ _addClassPath( ed , ss , "include/jython/javalib" );
+ }
#endif
- }
+ }
#if defined(_WIN32)
- ss << SYSTEM_COLON << "C:\\Program Files\\Java\\jdk\\lib\\tools.jar";
+ ss << SYSTEM_COLON << "C:\\Program Files\\Java\\jdk\\lib\\tools.jar";
#else
- ss << SYSTEM_COLON << "/opt/java/lib/tools.jar";
+ ss << SYSTEM_COLON << "/opt/java/lib/tools.jar";
#endif
- if ( getenv( "CLASSPATH" ) )
- ss << SYSTEM_COLON << getenv( "CLASSPATH" );
+ if ( getenv( "CLASSPATH" ) )
+ ss << SYSTEM_COLON << getenv( "CLASSPATH" );
- string s = ss.str();
- char * p = (char *)malloc( s.size() * 4 );
- strcpy( p , s.c_str() );
- char *q = p;
+ string s = ss.str();
+ char * p = (char *)malloc( s.size() * 4 );
+ strcpy( p , s.c_str() );
+ char *q = p;
#if defined(_WIN32)
- while ( *p ) {
- if ( *p == '/' ) *p = '\\';
- p++;
- }
+ while ( *p ) {
+ if ( *p == '/' ) *p = '\\';
+ p++;
+ }
#endif
- JavaVMOption * options = new JavaVMOption[3];
- options[0].optionString = q;
- options[1].optionString = (char*)"-Djava.awt.headless=true";
- options[2].optionString = (char*)"-Xmx300m";
+ JavaVMOption * options = new JavaVMOption[3];
+ options[0].optionString = q;
+ options[1].optionString = (char*)"-Djava.awt.headless=true";
+ options[2].optionString = (char*)"-Xmx300m";
// -Xcheck:jni
- _vmArgs = new JavaVMInitArgs();
- _vmArgs->version = JNI_VERSION_1_4;
- _vmArgs->options = options;
- _vmArgs->nOptions = 3;
- _vmArgs->ignoreUnrecognized = JNI_FALSE;
-
- log() << "loading JVM" << endl;
- jint res = JNI_CreateJavaVM( &_jvm, (void**)&_mainEnv, _vmArgs );
-
- if ( res ) {
- log() << "using classpath: " << q << endl;
- log()
- << " res : " << (unsigned) res << " "
- << "_jvm : " << _jvm << " "
- << "_env : " << _mainEnv << " "
- << endl;
- }
+ _vmArgs = new JavaVMInitArgs();
+ _vmArgs->version = JNI_VERSION_1_4;
+ _vmArgs->options = options;
+ _vmArgs->nOptions = 3;
+ _vmArgs->ignoreUnrecognized = JNI_FALSE;
+
+ log() << "loading JVM" << endl;
+ jint res = JNI_CreateJavaVM( &_jvm, (void**)&_mainEnv, _vmArgs );
+
+ if ( res ) {
+ log() << "using classpath: " << q << endl;
+ log()
+ << " res : " << (unsigned) res << " "
+ << "_jvm : " << _jvm << " "
+ << "_env : " << _mainEnv << " "
+ << endl;
+ }
- if ( res ) {
- problem() << "Couldn't create JVM res:" << (int) res << " terminating" << endl;
- log() << "(try --nojni if you do not require that functionality)" << endl;
- exit(22);
- }
- jassert( res == 0 );
- jassert( _jvm > 0 );
- jassert( _mainEnv > 0 );
-
- _envs = new boost::thread_specific_ptr<JNIEnv>( myJNIClean );
- assert( ! _envs->get() );
- _envs->reset( _mainEnv );
-
- _dbhook = findClass( "ed/db/JSHook" );
- if ( _dbhook == 0 ) {
- log() << "using classpath: " << q << endl;
- printException();
- }
- jassert( _dbhook );
+ if ( res ) {
+ problem() << "Couldn't create JVM res:" << (int) res << " terminating" << endl;
+ log() << "(try --nojni if you do not require that functionality)" << endl;
+ exit(22);
+ }
+ jassert( res == 0 );
+ jassert( _jvm > 0 );
+ jassert( _mainEnv > 0 );
+
+ _envs = new boost::thread_specific_ptr<JNIEnv>( myJNIClean );
+ assert( ! _envs->get() );
+ _envs->reset( _mainEnv );
+
+ _dbhook = findClass( "ed/db/JSHook" );
+ if ( _dbhook == 0 ) {
+ log() << "using classpath: " << q << endl;
+ printException();
+ }
+ jassert( _dbhook );
- if ( ed ) {
- jmethodID init = _mainEnv->GetStaticMethodID( _dbhook , "init" , "(Ljava/lang/String;)V" );
- jassert( init );
- _mainEnv->CallStaticVoidMethod( _dbhook , init , _getEnv()->NewStringUTF( ed ) );
- }
+ if ( ed ) {
+ jmethodID init = _mainEnv->GetStaticMethodID( _dbhook , "init" , "(Ljava/lang/String;)V" );
+ jassert( init );
+ _mainEnv->CallStaticVoidMethod( _dbhook , init , _getEnv()->NewStringUTF( ed ) );
+ }
- _dbjni = findClass( "ed/db/DBJni" );
- jassert( _dbjni );
-
- _scopeCreate = _mainEnv->GetStaticMethodID( _dbhook , "scopeCreate" , "()J" );
- _scopeInit = _mainEnv->GetStaticMethodID( _dbhook , "scopeInit" , "(JLjava/nio/ByteBuffer;)Z" );
- _scopeSetThis = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetThis" , "(JLjava/nio/ByteBuffer;)Z" );
- _scopeReset = _mainEnv->GetStaticMethodID( _dbhook , "scopeReset" , "(J)Z" );
- _scopeFree = _mainEnv->GetStaticMethodID( _dbhook , "scopeFree" , "(J)V" );
-
- _scopeGetNumber = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetNumber" , "(JLjava/lang/String;)D" );
- _scopeGetString = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetString" , "(JLjava/lang/String;)Ljava/lang/String;" );
- _scopeGetBoolean = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetBoolean" , "(JLjava/lang/String;)Z" );
- _scopeGetType = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetType" , "(JLjava/lang/String;)B" );
- _scopeGetObject = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetObject" , "(JLjava/lang/String;Ljava/nio/ByteBuffer;)I" );
- _scopeGuessObjectSize = _mainEnv->GetStaticMethodID( _dbhook , "scopeGuessObjectSize" , "(JLjava/lang/String;)J" );
-
- _scopeSetNumber = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetNumber" , "(JLjava/lang/String;D)Z" );
- _scopeSetBoolean = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetBoolean" , "(JLjava/lang/String;Z)Z" );
- _scopeSetString = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetString" , "(JLjava/lang/String;Ljava/lang/String;)Z" );
- _scopeSetObject = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetObject" , "(JLjava/lang/String;Ljava/nio/ByteBuffer;)Z" );
-
- _functionCreate = _mainEnv->GetStaticMethodID( _dbhook , "functionCreate" , "(Ljava/lang/String;)J" );
- _invoke = _mainEnv->GetStaticMethodID( _dbhook , "invoke" , "(JJ)I" );
-
- jassert( _scopeCreate );
- jassert( _scopeInit );
- jassert( _scopeSetThis );
- jassert( _scopeReset );
- jassert( _scopeFree );
-
- jassert( _scopeGetNumber );
- jassert( _scopeGetString );
- jassert( _scopeGetObject );
- jassert( _scopeGetBoolean );
- jassert( _scopeGetType );
- jassert( _scopeGuessObjectSize );
-
- jassert( _scopeSetNumber );
- jassert( _scopeSetBoolean );
- jassert( _scopeSetString );
- jassert( _scopeSetObject );
-
- jassert( _functionCreate );
- jassert( _invoke );
-
- JNINativeMethod * nativeSay = new JNINativeMethod();
- nativeSay->name = (char*)"native_say";
- nativeSay->signature = (char*)"(Ljava/nio/ByteBuffer;)V";
- nativeSay->fnPtr = (void*)java_native_say;
- _mainEnv->RegisterNatives( _dbjni , nativeSay , 1 );
-
-
- JNINativeMethod * nativeCall = new JNINativeMethod();
- nativeCall->name = (char*)"native_call";
- nativeCall->signature = (char*)"(Ljava/nio/ByteBuffer;Ljava/nio/ByteBuffer;)I";
- nativeCall->fnPtr = (void*)java_native_call;
- _mainEnv->RegisterNatives( _dbjni , nativeCall , 1 );
+ _dbjni = findClass( "ed/db/DBJni" );
+ jassert( _dbjni );
+
+ _scopeCreate = _mainEnv->GetStaticMethodID( _dbhook , "scopeCreate" , "()J" );
+ _scopeInit = _mainEnv->GetStaticMethodID( _dbhook , "scopeInit" , "(JLjava/nio/ByteBuffer;)Z" );
+ _scopeSetThis = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetThis" , "(JLjava/nio/ByteBuffer;)Z" );
+ _scopeReset = _mainEnv->GetStaticMethodID( _dbhook , "scopeReset" , "(J)Z" );
+ _scopeFree = _mainEnv->GetStaticMethodID( _dbhook , "scopeFree" , "(J)V" );
+
+ _scopeGetNumber = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetNumber" , "(JLjava/lang/String;)D" );
+ _scopeGetString = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetString" , "(JLjava/lang/String;)Ljava/lang/String;" );
+ _scopeGetBoolean = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetBoolean" , "(JLjava/lang/String;)Z" );
+ _scopeGetType = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetType" , "(JLjava/lang/String;)B" );
+ _scopeGetObject = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetObject" , "(JLjava/lang/String;Ljava/nio/ByteBuffer;)I" );
+ _scopeGuessObjectSize = _mainEnv->GetStaticMethodID( _dbhook , "scopeGuessObjectSize" , "(JLjava/lang/String;)J" );
+
+ _scopeSetNumber = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetNumber" , "(JLjava/lang/String;D)Z" );
+ _scopeSetBoolean = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetBoolean" , "(JLjava/lang/String;Z)Z" );
+ _scopeSetString = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetString" , "(JLjava/lang/String;Ljava/lang/String;)Z" );
+ _scopeSetObject = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetObject" , "(JLjava/lang/String;Ljava/nio/ByteBuffer;)Z" );
+
+ _functionCreate = _mainEnv->GetStaticMethodID( _dbhook , "functionCreate" , "(Ljava/lang/String;)J" );
+ _invoke = _mainEnv->GetStaticMethodID( _dbhook , "invoke" , "(JJ)I" );
+
+ jassert( _scopeCreate );
+ jassert( _scopeInit );
+ jassert( _scopeSetThis );
+ jassert( _scopeReset );
+ jassert( _scopeFree );
+
+ jassert( _scopeGetNumber );
+ jassert( _scopeGetString );
+ jassert( _scopeGetObject );
+ jassert( _scopeGetBoolean );
+ jassert( _scopeGetType );
+ jassert( _scopeGuessObjectSize );
+
+ jassert( _scopeSetNumber );
+ jassert( _scopeSetBoolean );
+ jassert( _scopeSetString );
+ jassert( _scopeSetObject );
+
+ jassert( _functionCreate );
+ jassert( _invoke );
+
+ JNINativeMethod * nativeSay = new JNINativeMethod();
+ nativeSay->name = (char*)"native_say";
+ nativeSay->signature = (char*)"(Ljava/nio/ByteBuffer;)V";
+ nativeSay->fnPtr = (void*)java_native_say;
+ _mainEnv->RegisterNatives( _dbjni , nativeSay , 1 );
+
+
+ JNINativeMethod * nativeCall = new JNINativeMethod();
+ nativeCall->name = (char*)"native_call";
+ nativeCall->signature = (char*)"(Ljava/nio/ByteBuffer;Ljava/nio/ByteBuffer;)I";
+ nativeCall->fnPtr = (void*)java_native_call;
+ _mainEnv->RegisterNatives( _dbjni , nativeCall , 1 );
-}
+ }
-JavaJSImpl::~JavaJSImpl() {
- if ( _jvm ) {
- _jvm->DestroyJavaVM();
- cerr << "Destroying JVM" << endl;
+ JavaJSImpl::~JavaJSImpl() {
+ if ( _jvm ) {
+ _jvm->DestroyJavaVM();
+ cerr << "Destroying JVM" << endl;
+ }
}
-}
// scope
-jlong JavaJSImpl::scopeCreate() {
- return _getEnv()->CallStaticLongMethod( _dbhook , _scopeCreate );
-}
+ jlong JavaJSImpl::scopeCreate() {
+ return _getEnv()->CallStaticLongMethod( _dbhook , _scopeCreate );
+ }
-jboolean JavaJSImpl::scopeReset( jlong id ) {
- return _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeReset );
-}
+ jboolean JavaJSImpl::scopeReset( jlong id ) {
+ return _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeReset );
+ }
-void JavaJSImpl::scopeFree( jlong id ) {
- _getEnv()->CallStaticVoidMethod( _dbhook , _scopeFree , id );
-}
+ void JavaJSImpl::scopeFree( jlong id ) {
+ _getEnv()->CallStaticVoidMethod( _dbhook , _scopeFree , id );
+ }
// scope setters
-int JavaJSImpl::scopeSetBoolean( jlong id , const char * field , jboolean val ) {
- jstring fieldString = _getEnv()->NewStringUTF( field );
- int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetNumber , id , fieldString , val );
- _getEnv()->DeleteLocalRef( fieldString );
- return res;
-}
-
-int JavaJSImpl::scopeSetNumber( jlong id , const char * field , double val ) {
- jstring fieldString = _getEnv()->NewStringUTF( field );
- int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetNumber , id , fieldString , val );
- _getEnv()->DeleteLocalRef( fieldString );
- return res;
-}
+ int JavaJSImpl::scopeSetBoolean( jlong id , const char * field , jboolean val ) {
+ jstring fieldString = _getEnv()->NewStringUTF( field );
+ int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetNumber , id , fieldString , val );
+ _getEnv()->DeleteLocalRef( fieldString );
+ return res;
+ }
-int JavaJSImpl::scopeSetString( jlong id , const char * field , const char * val ) {
- jstring s1 = _getEnv()->NewStringUTF( field );
- jstring s2 = _getEnv()->NewStringUTF( val );
- int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetString , id , s1 , s2 );
- _getEnv()->DeleteLocalRef( s1 );
- _getEnv()->DeleteLocalRef( s2 );
- return res;
-}
+ int JavaJSImpl::scopeSetNumber( jlong id , const char * field , double val ) {
+ jstring fieldString = _getEnv()->NewStringUTF( field );
+ int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetNumber , id , fieldString , val );
+ _getEnv()->DeleteLocalRef( fieldString );
+ return res;
+ }
-int JavaJSImpl::scopeSetObject( jlong id , const char * field , BSONObj * obj ) {
- jobject bb = 0;
- if ( obj ) {
- bb = _getEnv()->NewDirectByteBuffer( (void*)(obj->objdata()) , (jlong)(obj->objsize()) );
- jassert( bb );
+ int JavaJSImpl::scopeSetString( jlong id , const char * field , const char * val ) {
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ jstring s2 = _getEnv()->NewStringUTF( val );
+ int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetString , id , s1 , s2 );
+ _getEnv()->DeleteLocalRef( s1 );
+ _getEnv()->DeleteLocalRef( s2 );
+ return res;
}
- jstring s1 = _getEnv()->NewStringUTF( field );
- int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetObject , id , s1 , bb );
- _getEnv()->DeleteLocalRef( s1 );
- if ( bb )
- _getEnv()->DeleteLocalRef( bb );
+ int JavaJSImpl::scopeSetObject( jlong id , const char * field , BSONObj * obj ) {
+ jobject bb = 0;
+ if ( obj ) {
+ bb = _getEnv()->NewDirectByteBuffer( (void*)(obj->objdata()) , (jlong)(obj->objsize()) );
+ jassert( bb );
+ }
- return res;
-}
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetObject , id , s1 , bb );
+ _getEnv()->DeleteLocalRef( s1 );
+ if ( bb )
+ _getEnv()->DeleteLocalRef( bb );
-int JavaJSImpl::scopeInit( jlong id , BSONObj * obj ) {
- if ( ! obj )
- return 0;
+ return res;
+ }
- jobject bb = _getEnv()->NewDirectByteBuffer( (void*)(obj->objdata()) , (jlong)(obj->objsize()) );
- jassert( bb );
+ int JavaJSImpl::scopeInit( jlong id , BSONObj * obj ) {
+ if ( ! obj )
+ return 0;
- int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeInit , id , bb );
- _getEnv()->DeleteLocalRef( bb );
- return res;
-}
+ jobject bb = _getEnv()->NewDirectByteBuffer( (void*)(obj->objdata()) , (jlong)(obj->objsize()) );
+ jassert( bb );
-int JavaJSImpl::scopeSetThis( jlong id , BSONObj * obj ) {
- if ( ! obj )
- return 0;
+ int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeInit , id , bb );
+ _getEnv()->DeleteLocalRef( bb );
+ return res;
+ }
- jobject bb = _getEnv()->NewDirectByteBuffer( (void*)(obj->objdata()) , (jlong)(obj->objsize()) );
- jassert( bb );
+ int JavaJSImpl::scopeSetThis( jlong id , BSONObj * obj ) {
+ if ( ! obj )
+ return 0;
- int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetThis , id , bb );
- _getEnv()->DeleteLocalRef( bb );
- return res;
-}
+ jobject bb = _getEnv()->NewDirectByteBuffer( (void*)(obj->objdata()) , (jlong)(obj->objsize()) );
+ jassert( bb );
+
+ int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetThis , id , bb );
+ _getEnv()->DeleteLocalRef( bb );
+ return res;
+ }
// scope getters
-char JavaJSImpl::scopeGetType( jlong id , const char * field ) {
- jstring s1 = _getEnv()->NewStringUTF( field );
- int res =_getEnv()->CallStaticByteMethod( _dbhook , _scopeGetType , id , s1 );
- _getEnv()->DeleteLocalRef( s1 );
- return res;
-}
+ char JavaJSImpl::scopeGetType( jlong id , const char * field ) {
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ int res =_getEnv()->CallStaticByteMethod( _dbhook , _scopeGetType , id , s1 );
+ _getEnv()->DeleteLocalRef( s1 );
+ return res;
+ }
-double JavaJSImpl::scopeGetNumber( jlong id , const char * field ) {
- jstring s1 = _getEnv()->NewStringUTF( field );
- double res = _getEnv()->CallStaticDoubleMethod( _dbhook , _scopeGetNumber , id , s1 );
- _getEnv()->DeleteLocalRef( s1 );
- return res;
-}
+ double JavaJSImpl::scopeGetNumber( jlong id , const char * field ) {
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ double res = _getEnv()->CallStaticDoubleMethod( _dbhook , _scopeGetNumber , id , s1 );
+ _getEnv()->DeleteLocalRef( s1 );
+ return res;
+ }
-jboolean JavaJSImpl::scopeGetBoolean( jlong id , const char * field ) {
- jstring s1 = _getEnv()->NewStringUTF( field );
- jboolean res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeGetBoolean , id , s1 );
- _getEnv()->DeleteLocalRef( s1 );
- return res;
-}
+ jboolean JavaJSImpl::scopeGetBoolean( jlong id , const char * field ) {
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ jboolean res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeGetBoolean , id , s1 );
+ _getEnv()->DeleteLocalRef( s1 );
+ return res;
+ }
-string JavaJSImpl::scopeGetString( jlong id , const char * field ) {
- jstring s1 = _getEnv()->NewStringUTF( field );
- jstring s = (jstring)_getEnv()->CallStaticObjectMethod( _dbhook , _scopeGetString , id , s1 );
- _getEnv()->DeleteLocalRef( s1 );
+ string JavaJSImpl::scopeGetString( jlong id , const char * field ) {
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ jstring s = (jstring)_getEnv()->CallStaticObjectMethod( _dbhook , _scopeGetString , id , s1 );
+ _getEnv()->DeleteLocalRef( s1 );
- if ( ! s )
- return "";
+ if ( ! s )
+ return "";
- const char * c = _getEnv()->GetStringUTFChars( s , 0 );
- string retStr(c);
- _getEnv()->ReleaseStringUTFChars( s , c );
- return retStr;
-}
+ const char * c = _getEnv()->GetStringUTFChars( s , 0 );
+ string retStr(c);
+ _getEnv()->ReleaseStringUTFChars( s , c );
+ return retStr;
+ }
#ifdef J_USE_OBJ
-BSONObj JavaJSImpl::scopeGetObject( jlong id , const char * field )
-{
- jstring s1 = _getEnv()->NewStringUTF( field );
- int guess = _getEnv()->CallStaticIntMethod( _dbhook , _scopeGuessObjectSize , id , _getEnv()->NewStringUTF( field ) );
- _getEnv()->DeleteLocalRef( s1 );
-
- char * buf = (char *) malloc(guess);
- jobject bb = _getEnv()->NewDirectByteBuffer( (void*)buf , guess );
- jassert( bb );
-
- int len = _getEnv()->CallStaticIntMethod( _dbhook , _scopeGetObject , id , _getEnv()->NewStringUTF( field ) , bb );
- _getEnv()->DeleteLocalRef( bb );
- //cout << "len : " << len << endl;
- jassert( len > 0 && len < guess );
-
- BSONObj obj(buf, true);
- assert( obj.objsize() <= guess );
- return obj;
-}
+ BSONObj JavaJSImpl::scopeGetObject( jlong id , const char * field )
+ {
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ int guess = _getEnv()->CallStaticIntMethod( _dbhook , _scopeGuessObjectSize , id , _getEnv()->NewStringUTF( field ) );
+ _getEnv()->DeleteLocalRef( s1 );
+
+ char * buf = (char *) malloc(guess);
+ jobject bb = _getEnv()->NewDirectByteBuffer( (void*)buf , guess );
+ jassert( bb );
+
+ int len = _getEnv()->CallStaticIntMethod( _dbhook , _scopeGetObject , id , _getEnv()->NewStringUTF( field ) , bb );
+ _getEnv()->DeleteLocalRef( bb );
+ //out() << "len : " << len << endl;
+ jassert( len > 0 && len < guess );
+
+ BSONObj obj(buf, true);
+ assert( obj.objsize() <= guess );
+ return obj;
+ }
#endif
// other
-jlong JavaJSImpl::functionCreate( const char * code ) {
- jstring s = _getEnv()->NewStringUTF( code );
- jassert( s );
- jlong id = _getEnv()->CallStaticLongMethod( _dbhook , _functionCreate , s );
- _getEnv()->DeleteLocalRef( s );
- return id;
-}
+ jlong JavaJSImpl::functionCreate( const char * code ) {
+ jstring s = _getEnv()->NewStringUTF( code );
+ jassert( s );
+ jlong id = _getEnv()->CallStaticLongMethod( _dbhook , _functionCreate , s );
+ _getEnv()->DeleteLocalRef( s );
+ return id;
+ }
-int JavaJSImpl::invoke( jlong scope , jlong function ) {
- return _getEnv()->CallStaticIntMethod( _dbhook , _invoke , scope , function );
-}
+ int JavaJSImpl::invoke( jlong scope , jlong function ) {
+ return _getEnv()->CallStaticIntMethod( _dbhook , _invoke , scope , function );
+ }
// --- fun run method
-void JavaJSImpl::run( const char * js ) {
- jclass c = findClass( "ed/js/JS" );
- jassert( c );
+ void JavaJSImpl::run( const char * js ) {
+ jclass c = findClass( "ed/js/JS" );
+ jassert( c );
- jmethodID m = _getEnv()->GetStaticMethodID( c , "eval" , "(Ljava/lang/String;)Ljava/lang/Object;" );
- jassert( m );
+ jmethodID m = _getEnv()->GetStaticMethodID( c , "eval" , "(Ljava/lang/String;)Ljava/lang/Object;" );
+ jassert( m );
- jstring s = _getEnv()->NewStringUTF( js );
- log() << _getEnv()->CallStaticObjectMethod( c , m , s ) << endl;
- _getEnv()->DeleteLocalRef( s );
-}
+ jstring s = _getEnv()->NewStringUTF( js );
+ log() << _getEnv()->CallStaticObjectMethod( c , m , s ) << endl;
+ _getEnv()->DeleteLocalRef( s );
+ }
+
+ void JavaJSImpl::printException() {
+ jthrowable exc = _getEnv()->ExceptionOccurred();
+ if ( exc ) {
+ _getEnv()->ExceptionDescribe();
+ _getEnv()->ExceptionClear();
+ }
-void JavaJSImpl::printException() {
- jthrowable exc = _getEnv()->ExceptionOccurred();
- if ( exc ) {
- _getEnv()->ExceptionDescribe();
- _getEnv()->ExceptionClear();
}
-}
+ JNIEnv * JavaJSImpl::_getEnv() {
+ JNIEnv * env = _envs->get();
+ if ( env )
+ return env;
-JNIEnv * JavaJSImpl::_getEnv() {
- JNIEnv * env = _envs->get();
- if ( env )
- return env;
+ int res = _jvm->AttachCurrentThread( (void**)&env , (void*)&_vmArgs );
+ if ( res ) {
+ out() << "ERROR javajs attachcurrentthread fails res:" << res << '\n';
+ assert(false);
+ }
- int res = _jvm->AttachCurrentThread( (void**)&env , (void*)&_vmArgs );
- if ( res ) {
- cout << "ERROR javajs attachcurrentthread fails res:" << res << '\n';
- assert(false);
+ _envs->reset( env );
+ return env;
}
- _envs->reset( env );
- return env;
-}
-
-void jasserted(const char *msg, const char *file, unsigned line) {
- log() << "jassert failed " << msg << " " << file << " " << line << endl;
- if ( JavaJS ) JavaJS->printException();
- throw AssertionException();
-}
+ void jasserted(const char *msg, const char *file, unsigned line) {
+ log() << "jassert failed " << msg << " " << file << " " << line << endl;
+ if ( JavaJS ) JavaJS->printException();
+ throw AssertionException();
+ }
-const char* findEd(const char *path) {
+ const char* findEd(const char *path) {
#if defined(_WIN32)
- if (!path) {
- path = findEd();
- }
+ if (!path) {
+ path = findEd();
+ }
- // @TODO check validity
+ // @TODO check validity
- return path;
+ return path;
#else
- if (!path) {
- return findEd();
- }
+ if (!path) {
+ return findEd();
+ }
- log() << "Appserver location specified : " << path << endl;
+ log() << "Appserver location specified : " << path << endl;
- if (!path) {
- log() << " invalid appserver location : " << path << " : terminating - prepare for bus error" << endl;
- return 0;
- }
+ if (!path) {
+ log() << " invalid appserver location : " << path << " : terminating - prepare for bus error" << endl;
+ return 0;
+ }
- DIR *testDir = opendir(path);
+ DIR *testDir = opendir(path);
- if (testDir) {
- log() << " found directory for appserver : " << path << endl;
- closedir(testDir);
- return path;
- }
- else {
- log() << " ERROR : not a directory for specified appserver location : " << path << " - prepare for bus error" << endl;
- return null;
- }
+ if (testDir) {
+ log() << " found directory for appserver : " << path << endl;
+ closedir(testDir);
+ return path;
+ }
+ else {
+ log() << " ERROR : not a directory for specified appserver location : " << path << " - prepare for bus error" << endl;
+ return null;
+ }
#endif
-}
+ }
-const char * findEd() {
+ const char * findEd() {
#if defined(_WIN32)
- log() << "Appserver location will be WIN32 default : c:/l/ed/" << endl;
- return "c:/l/ed";
+ log() << "Appserver location will be WIN32 default : c:/l/ed/" << endl;
+ return "c:/l/ed";
#else
- static list<const char*> possibleEdDirs;
- if ( ! possibleEdDirs.size() ) {
- possibleEdDirs.push_back( "../../ed/ed/" ); // this one for dwight dev box
- possibleEdDirs.push_back( "../ed/" );
- possibleEdDirs.push_back( "../../ed/" );
- possibleEdDirs.push_back( "../babble/" );
- possibleEdDirs.push_back( "../../babble/" );
- }
+ static list<const char*> possibleEdDirs;
+ if ( ! possibleEdDirs.size() ) {
+ possibleEdDirs.push_back( "../../ed/ed/" ); // this one for dwight dev box
+ possibleEdDirs.push_back( "../ed/" );
+ possibleEdDirs.push_back( "../../ed/" );
+ possibleEdDirs.push_back( "../babble/" );
+ possibleEdDirs.push_back( "../../babble/" );
+ }
- for ( list<const char*>::iterator i = possibleEdDirs.begin() ; i != possibleEdDirs.end(); i++ ) {
- const char * temp = *i;
- DIR * test = opendir( temp );
- if ( ! test )
- continue;
+ for ( list<const char*>::iterator i = possibleEdDirs.begin() ; i != possibleEdDirs.end(); i++ ) {
+ const char * temp = *i;
+ DIR * test = opendir( temp );
+ if ( ! test )
+ continue;
- closedir( test );
- log() << "found directory for appserver : " << temp << endl;
- return temp;
- }
+ closedir( test );
+ log() << "found directory for appserver : " << temp << endl;
+ return temp;
+ }
- return 0;
+ return 0;
#endif
-};
+ };
-const char * findJars() {
+ const char * findJars() {
- static list<const char*> possible;
- if ( ! possible.size() ) {
- possible.push_back( "./" );
- possible.push_back( "../" );
- }
+ static list<const char*> possible;
+ if ( ! possible.size() ) {
+ possible.push_back( "./" );
+ possible.push_back( "../" );
+ }
- for ( list<const char*>::iterator i = possible.begin() ; i != possible.end(); i++ ) {
- const char * temp = *i;
- const string jarDir = ((string)temp) + "jars/";
+ for ( list<const char*>::iterator i = possible.begin() ; i != possible.end(); i++ ) {
+ const char * temp = *i;
+ const string jarDir = ((string)temp) + "jars/";
- path p(jarDir );
- if ( ! boost::filesystem::exists( p) )
- continue;
+ path p(jarDir );
+ if ( ! boost::filesystem::exists( p) )
+ continue;
- log() << "found directory for jars : " << jarDir << endl;
- return temp;
- }
+ log() << "found directory for jars : " << jarDir << endl;
+ return temp;
+ }
- problem() << "ERROR : can't find directory for jars - terminating" << endl;
- exit(44);
- return 0;
+ problem() << "ERROR : can't find directory for jars - terminating" << endl;
+ exit(44);
+ return 0;
-};
+ };
// ---
-JNIEXPORT void JNICALL java_native_say(JNIEnv * env , jclass, jobject outBuffer ) {
- JNI_DEBUG( "native say called!" );
+ JNIEXPORT void JNICALL java_native_say(JNIEnv * env , jclass, jobject outBuffer ) {
+ JNI_DEBUG( "native say called!" );
- Message out( env->GetDirectBufferAddress( outBuffer ) , false );
- Message in;
+ Message out( env->GetDirectBufferAddress( outBuffer ) , false );
+ Message in;
- jniCallback( out , in );
- assert( ! out.doIFreeIt() );
- curNs = 0;
-}
+ jniCallback( out , in );
+ assert( ! out.doIFreeIt() );
+ curNs = 0;
+ }
-JNIEXPORT jint JNICALL java_native_call(JNIEnv * env , jclass, jobject outBuffer , jobject inBuffer ) {
- JNI_DEBUG( "native call called!" );
+ JNIEXPORT jint JNICALL java_native_call(JNIEnv * env , jclass, jobject outBuffer , jobject inBuffer ) {
+ JNI_DEBUG( "native call called!" );
- Message out( env->GetDirectBufferAddress( outBuffer ) , false );
- Message in;
+ Message out( env->GetDirectBufferAddress( outBuffer ) , false );
+ Message in;
- jniCallback( out , in );
- curNs = 0;
+ jniCallback( out , in );
+ curNs = 0;
- JNI_DEBUG( "in.data : " << in.data );
- if ( in.data && in.data->len > 0 ) {
- JNI_DEBUG( "copying data of len :" << in.data->len );
- assert( env->GetDirectBufferCapacity( inBuffer ) >= in.data->len );
- memcpy( env->GetDirectBufferAddress( inBuffer ) , in.data , in.data->len );
+ JNI_DEBUG( "in.data : " << in.data );
+ if ( in.data && in.data->len > 0 ) {
+ JNI_DEBUG( "copying data of len :" << in.data->len );
+ assert( env->GetDirectBufferCapacity( inBuffer ) >= in.data->len );
+ memcpy( env->GetDirectBufferAddress( inBuffer ) , in.data , in.data->len );
- assert( ! out.doIFreeIt() );
- assert( in.doIFreeIt() );
- return in.data->len;
- }
+ assert( ! out.doIFreeIt() );
+ assert( in.doIFreeIt() );
+ return in.data->len;
+ }
- return 0;
-}
+ return 0;
+ }
// ----
-int javajstest() {
+ int javajstest() {
- const int debug = 0;
+ const int debug = 0;
- JavaJSImpl& JavaJS = *mongo::JavaJS;
+ JavaJSImpl& JavaJS = *mongo::JavaJS;
- if ( debug ) log() << "about to create scope" << endl;
- jlong scope = JavaJS.scopeCreate();
- jassert( scope );
- if ( debug ) cout << "got scope" << endl;
+ if ( debug ) log() << "about to create scope" << endl;
+ jlong scope = JavaJS.scopeCreate();
+ jassert( scope );
+ if ( debug ) out() << "got scope" << endl;
- jlong func1 = JavaJS.functionCreate( "foo = 5.6; bar = \"eliot\"; abc = { foo : 517 }; " );
- jassert( ! JavaJS.invoke( scope , func1 ) );
+ jlong func1 = JavaJS.functionCreate( "foo = 5.6; bar = \"eliot\"; abc = { foo : 517 }; " );
+ jassert( ! JavaJS.invoke( scope , func1 ) );
- jassert( 5.6 == JavaJS.scopeGetNumber( scope , "foo" ) );
- jassert( ((string)"eliot") == JavaJS.scopeGetString( scope , "bar" ) );
+ jassert( 5.6 == JavaJS.scopeGetNumber( scope , "foo" ) );
+ jassert( ((string)"eliot") == JavaJS.scopeGetString( scope , "bar" ) );
- if ( debug ) cout << "func2 start" << endl;
- jassert( JavaJS.scopeSetNumber( scope , "a" , 5.17 ) );
- jassert( JavaJS.scopeSetString( scope , "b" , "eliot" ) );
- jlong func2 = JavaJS.functionCreate( "assert( 5.17 == a ); assert( \"eliot\" == b );" );
- jassert( ! JavaJS.invoke( scope , func2 ) );
- if ( debug ) cout << "func2 end" << endl;
+ if ( debug ) out() << "func2 start" << endl;
+ jassert( JavaJS.scopeSetNumber( scope , "a" , 5.17 ) );
+ jassert( JavaJS.scopeSetString( scope , "b" , "eliot" ) );
+ jlong func2 = JavaJS.functionCreate( "assert( 5.17 == a ); assert( \"eliot\" == b );" );
+ jassert( ! JavaJS.invoke( scope , func2 ) );
+ if ( debug ) out() << "func2 end" << endl;
- if ( debug ) cout << "func3 start" << endl;
- jlong func3 = JavaJS.functionCreate( "function(){ z = true; } " );
- jassert( func3 );
- jassert( ! JavaJS.invoke( scope , func3 ) );
- jassert( JavaJS.scopeGetBoolean( scope , "z" ) );
- if ( debug ) cout << "func3 done" << endl;
+ if ( debug ) out() << "func3 start" << endl;
+ jlong func3 = JavaJS.functionCreate( "function(){ z = true; } " );
+ jassert( func3 );
+ jassert( ! JavaJS.invoke( scope , func3 ) );
+ jassert( JavaJS.scopeGetBoolean( scope , "z" ) );
+ if ( debug ) out() << "func3 done" << endl;
#ifdef J_USE_OBJ
- if ( debug ) cout << "going to get object" << endl;
- BSONObj obj = JavaJS.scopeGetObject( scope , "abc" );
- if ( debug ) cout << "done getting object" << endl;
-
- if ( debug ) {
- cout << "obj : " << obj.toString() << endl;
- }
+ if ( debug ) out() << "going to get object" << endl;
+ BSONObj obj = JavaJS.scopeGetObject( scope , "abc" );
+ if ( debug ) out() << "done getting object" << endl;
- {
- time_t start = time(0);
- for ( int i=0; i<5000; i++ ) {
- JavaJS.scopeSetObject( scope , "obj" , &obj );
+ if ( debug ) {
+ out() << "obj : " << obj.toString() << endl;
}
- time_t end = time(0);
- if ( debug )
- cout << "time : " << (unsigned) ( end - start ) << endl;
- }
+ {
+ time_t start = time(0);
+ for ( int i=0; i<5000; i++ ) {
+ JavaJS.scopeSetObject( scope , "obj" , &obj );
+ }
+ time_t end = time(0);
+
+ if ( debug )
+ out() << "time : " << (unsigned) ( end - start ) << endl;
+ }
- if ( debug ) cout << "func4 start" << endl;
- JavaJS.scopeSetObject( scope , "obj" , &obj );
- if ( debug ) cout << "\t here 1" << endl;
- jlong func4 = JavaJS.functionCreate( "tojson( obj );" );
- if ( debug ) cout << "\t here 2" << endl;
- jassert( ! JavaJS.invoke( scope , func4 ) );
- if ( debug ) cout << "func4 end" << endl;
-
- if ( debug ) cout << "func5 start" << endl;
- jassert( JavaJS.scopeSetObject( scope , "c" , &obj ) );
- jlong func5 = JavaJS.functionCreate( "assert.eq( 517 , c.foo );" );
- jassert( func5 );
- jassert( ! JavaJS.invoke( scope , func5 ) );
- if ( debug ) cout << "func5 done" << endl;
+ if ( debug ) out() << "func4 start" << endl;
+ JavaJS.scopeSetObject( scope , "obj" , &obj );
+ if ( debug ) out() << "\t here 1" << endl;
+ jlong func4 = JavaJS.functionCreate( "tojson( obj );" );
+ if ( debug ) out() << "\t here 2" << endl;
+ jassert( ! JavaJS.invoke( scope , func4 ) );
+ if ( debug ) out() << "func4 end" << endl;
+
+ if ( debug ) out() << "func5 start" << endl;
+ jassert( JavaJS.scopeSetObject( scope , "c" , &obj ) );
+ jlong func5 = JavaJS.functionCreate( "assert.eq( 517 , c.foo );" );
+ jassert( func5 );
+ jassert( ! JavaJS.invoke( scope , func5 ) );
+ if ( debug ) out() << "func5 done" << endl;
#endif
- if ( debug ) cout << "func6 start" << endl;
- for ( int i=0; i<100; i++ ) {
- double val = i + 5;
- JavaJS.scopeSetNumber( scope , "zzz" , val );
- jlong func6 = JavaJS.functionCreate( " xxx = zzz; " );
- jassert( ! JavaJS.invoke( scope , func6 ) );
- double n = JavaJS.scopeGetNumber( scope , "xxx" );
- jassert( val == n );
- }
- if ( debug ) cout << "func6 done" << endl;
+ if ( debug ) out() << "func6 start" << endl;
+ for ( int i=0; i<100; i++ ) {
+ double val = i + 5;
+ JavaJS.scopeSetNumber( scope , "zzz" , val );
+ jlong func6 = JavaJS.functionCreate( " xxx = zzz; " );
+ jassert( ! JavaJS.invoke( scope , func6 ) );
+ double n = JavaJS.scopeGetNumber( scope , "xxx" );
+ jassert( val == n );
+ }
+ if ( debug ) out() << "func6 done" << endl;
- jlong func7 = JavaJS.functionCreate( "return 11;" );
- jassert( ! JavaJS.invoke( scope , func7 ) );
- assert( 11 == JavaJS.scopeGetNumber( scope , "return" ) );
+ jlong func7 = JavaJS.functionCreate( "return 11;" );
+ jassert( ! JavaJS.invoke( scope , func7 ) );
+ assert( 11 == JavaJS.scopeGetNumber( scope , "return" ) );
- scope = JavaJS.scopeCreate();
- jlong func8 = JavaJS.functionCreate( "function(){ return 12; }" );
- jassert( ! JavaJS.invoke( scope , func8 ) );
- assert( 12 == JavaJS.scopeGetNumber( scope , "return" ) );
+ scope = JavaJS.scopeCreate();
+ jlong func8 = JavaJS.functionCreate( "function(){ return 12; }" );
+ jassert( ! JavaJS.invoke( scope , func8 ) );
+ assert( 12 == JavaJS.scopeGetNumber( scope , "return" ) );
- return 0;
+ return 0;
-}
+ }
} // namespace mongo
diff --git a/db/javajs.h b/db/javajs.h
index 6f36475ecbb..b5a0ab95feb 100644
--- a/db/javajs.h
+++ b/db/javajs.h
@@ -37,172 +37,172 @@
namespace mongo {
-void jasserted(const char *msg, const char *file, unsigned line);
+ void jasserted(const char *msg, const char *file, unsigned line);
#define jassert(_Expression) if ( ! ( _Expression ) ){ jasserted(#_Expression, __FILE__, __LINE__); }
-int javajstest();
+ int javajstest();
-const char * findEd();
-const char * findEd(const char *);
-const char * findJars();
+ const char * findEd();
+ const char * findEd(const char *);
+ const char * findJars();
-class BSONObj;
+ class BSONObj;
-class JavaJSImpl {
-public:
- JavaJSImpl();
- JavaJSImpl(const char *);
- ~JavaJSImpl();
+ class JavaJSImpl {
+ public:
+ JavaJSImpl();
+ JavaJSImpl(const char *);
+ ~JavaJSImpl();
- jlong scopeCreate();
- int scopeInit( jlong id , BSONObj * obj );
- int scopeSetThis( jlong id , BSONObj * obj );
- jboolean scopeReset( jlong id );
- void scopeFree( jlong id );
+ jlong scopeCreate();
+ int scopeInit( jlong id , BSONObj * obj );
+ int scopeSetThis( jlong id , BSONObj * obj );
+ jboolean scopeReset( jlong id );
+ void scopeFree( jlong id );
- double scopeGetNumber( jlong id , const char * field );
- string scopeGetString( jlong id , const char * field );
- jboolean scopeGetBoolean( jlong id , const char * field );
- BSONObj scopeGetObject( jlong id , const char * field );
- char scopeGetType( jlong id , const char * field );
+ double scopeGetNumber( jlong id , const char * field );
+ string scopeGetString( jlong id , const char * field );
+ jboolean scopeGetBoolean( jlong id , const char * field );
+ BSONObj scopeGetObject( jlong id , const char * field );
+ char scopeGetType( jlong id , const char * field );
- int scopeSetNumber( jlong id , const char * field , double val );
- int scopeSetString( jlong id , const char * field , const char * val );
- int scopeSetObject( jlong id , const char * field , BSONObj * obj );
- int scopeSetBoolean( jlong id , const char * field , jboolean val );
+ int scopeSetNumber( jlong id , const char * field , double val );
+ int scopeSetString( jlong id , const char * field , const char * val );
+ int scopeSetObject( jlong id , const char * field , BSONObj * obj );
+ int scopeSetBoolean( jlong id , const char * field , jboolean val );
- jlong functionCreate( const char * code );
+ jlong functionCreate( const char * code );
- /* return values:
- public static final int NO_SCOPE = -1;
- public static final int NO_FUNCTION = -2;
- public static final int INVOKE_ERROR = -3;
- public static final int INVOKE_SUCCESS = 0;
- */
- int invoke( jlong scope , jlong function );
+ /* return values:
+ public static final int NO_SCOPE = -1;
+ public static final int NO_FUNCTION = -2;
+ public static final int INVOKE_ERROR = -3;
+ public static final int INVOKE_SUCCESS = 0;
+ */
+ int invoke( jlong scope , jlong function );
- void printException();
+ void printException();
- void run( const char * js );
+ void run( const char * js );
- void detach( JNIEnv * env ) {
- _jvm->DetachCurrentThread();
- }
+ void detach( JNIEnv * env ) {
+ _jvm->DetachCurrentThread();
+ }
-private:
+ private:
- jobject create( const char * name ) {
- jclass c = findClass( name );
- if ( ! c )
- return 0;
+ jobject create( const char * name ) {
+ jclass c = findClass( name );
+ if ( ! c )
+ return 0;
- jmethodID cons = _getEnv()->GetMethodID( c , "<init>" , "()V" );
- if ( ! cons )
- return 0;
+ jmethodID cons = _getEnv()->GetMethodID( c , "<init>" , "()V" );
+ if ( ! cons )
+ return 0;
- return _getEnv()->NewObject( c , cons );
- }
+ return _getEnv()->NewObject( c , cons );
+ }
- jclass findClass( const char * name ) {
- return _getEnv()->FindClass( name );
- }
+ jclass findClass( const char * name ) {
+ return _getEnv()->FindClass( name );
+ }
-private:
+ private:
- JNIEnv * _getEnv();
+ JNIEnv * _getEnv();
- JavaVM * _jvm;
- JNIEnv * _mainEnv;
- JavaVMInitArgs * _vmArgs;
+ JavaVM * _jvm;
+ JNIEnv * _mainEnv;
+ JavaVMInitArgs * _vmArgs;
- boost::thread_specific_ptr<JNIEnv> * _envs;
+ boost::thread_specific_ptr<JNIEnv> * _envs;
- jclass _dbhook;
- jclass _dbjni;
+ jclass _dbhook;
+ jclass _dbjni;
- jmethodID _scopeCreate;
- jmethodID _scopeInit;
- jmethodID _scopeSetThis;
- jmethodID _scopeReset;
- jmethodID _scopeFree;
+ jmethodID _scopeCreate;
+ jmethodID _scopeInit;
+ jmethodID _scopeSetThis;
+ jmethodID _scopeReset;
+ jmethodID _scopeFree;
- jmethodID _scopeGetNumber;
- jmethodID _scopeGetString;
- jmethodID _scopeGetObject;
- jmethodID _scopeGetBoolean;
- jmethodID _scopeGuessObjectSize;
- jmethodID _scopeGetType;
+ jmethodID _scopeGetNumber;
+ jmethodID _scopeGetString;
+ jmethodID _scopeGetObject;
+ jmethodID _scopeGetBoolean;
+ jmethodID _scopeGuessObjectSize;
+ jmethodID _scopeGetType;
- jmethodID _scopeSetNumber;
- jmethodID _scopeSetString;
- jmethodID _scopeSetObject;
- jmethodID _scopeSetBoolean;
+ jmethodID _scopeSetNumber;
+ jmethodID _scopeSetString;
+ jmethodID _scopeSetObject;
+ jmethodID _scopeSetBoolean;
- jmethodID _functionCreate;
+ jmethodID _functionCreate;
- jmethodID _invoke;
+ jmethodID _invoke;
-};
+ };
-extern JavaJSImpl *JavaJS;
+ extern JavaJSImpl *JavaJS;
// a javascript "scope"
-class Scope {
-public:
- Scope() {
- s = JavaJS->scopeCreate();
- }
- ~Scope() {
- JavaJS->scopeFree(s);
- s = 0;
- }
- void reset() {
- JavaJS->scopeReset(s);
- }
-
- void init( const char * data ) {
- BSONObj o( data , 0 );
- JavaJS->scopeInit( s , & o );
- }
-
- double getNumber(const char *field) {
- return JavaJS->scopeGetNumber(s,field);
- }
- string getString(const char *field) {
- return JavaJS->scopeGetString(s,field);
- }
- jboolean getBoolean(const char *field) {
- return JavaJS->scopeGetBoolean(s,field);
- }
- BSONObj getObject(const char *field ) {
- return JavaJS->scopeGetObject(s,field);
- }
- int type(const char *field ) {
- return JavaJS->scopeGetType(s,field);
- }
-
- void setNumber(const char *field, double val ) {
- JavaJS->scopeSetNumber(s,field,val);
- }
- void setString(const char *field, const char * val ) {
- JavaJS->scopeSetString(s,field,val);
- }
- void setObject(const char *field, BSONObj& obj ) {
- JavaJS->scopeSetObject(s,field,&obj);
- }
- void setBoolean(const char *field, jboolean val ) {
- JavaJS->scopeSetBoolean(s,field,val);
- }
-
- int invoke(jlong function) {
- return JavaJS->invoke(s,function);
- }
-
- jlong s;
-};
-
-JNIEXPORT void JNICALL java_native_say(JNIEnv *, jclass, jobject outBuffer );
-JNIEXPORT jint JNICALL java_native_call(JNIEnv *, jclass, jobject outBuffer , jobject inBuffer );
+ class Scope {
+ public:
+ Scope() {
+ s = JavaJS->scopeCreate();
+ }
+ ~Scope() {
+ JavaJS->scopeFree(s);
+ s = 0;
+ }
+ void reset() {
+ JavaJS->scopeReset(s);
+ }
+
+ void init( const char * data ) {
+ BSONObj o( data , 0 );
+ JavaJS->scopeInit( s , & o );
+ }
+
+ double getNumber(const char *field) {
+ return JavaJS->scopeGetNumber(s,field);
+ }
+ string getString(const char *field) {
+ return JavaJS->scopeGetString(s,field);
+ }
+ jboolean getBoolean(const char *field) {
+ return JavaJS->scopeGetBoolean(s,field);
+ }
+ BSONObj getObject(const char *field ) {
+ return JavaJS->scopeGetObject(s,field);
+ }
+ int type(const char *field ) {
+ return JavaJS->scopeGetType(s,field);
+ }
+
+ void setNumber(const char *field, double val ) {
+ JavaJS->scopeSetNumber(s,field,val);
+ }
+ void setString(const char *field, const char * val ) {
+ JavaJS->scopeSetString(s,field,val);
+ }
+ void setObject(const char *field, BSONObj& obj ) {
+ JavaJS->scopeSetObject(s,field,&obj);
+ }
+ void setBoolean(const char *field, jboolean val ) {
+ JavaJS->scopeSetBoolean(s,field,val);
+ }
+
+ int invoke(jlong function) {
+ return JavaJS->invoke(s,function);
+ }
+
+ jlong s;
+ };
+
+ JNIEXPORT void JNICALL java_native_say(JNIEnv *, jclass, jobject outBuffer );
+ JNIEXPORT jint JNICALL java_native_call(JNIEnv *, jclass, jobject outBuffer , jobject inBuffer );
} // namespace mongo
diff --git a/db/jsobj.cpp b/db/jsobj.cpp
index c97f316b13b..e280fdb4efb 100644
--- a/db/jsobj.cpp
+++ b/db/jsobj.cpp
@@ -24,449 +24,449 @@
namespace mongo {
-BSONElement nullElement;
-
-ostream& operator<<( ostream &s, const OID &o ) {
- s << o.str();
- return s;
-}
-
-string BSONElement::toString() const {
- stringstream s;
- switch ( type() ) {
- case EOO:
- return "EOO";
- case Date:
- s << fieldName() << ": Date(" << hex << date() << ')';
- break;
- case RegEx:
- {
- s << fieldName() << ": /" << regex() << '/';
- const char *p = regexFlags();
- if ( p ) s << p;
- }
- break;
- case NumberDouble:
- case NumberInt:
- s.precision( 16 );
- s << fieldName() << ": " << number();
- break;
- case Bool:
- s << fieldName() << ": " << ( boolean() ? "true" : "false" );
- break;
- case Object:
- case Array:
- s << fieldName() << ": " << embeddedObject().toString();
- break;
- case Undefined:
- s << fieldName() << ": undefined";
- break;
- case jstNULL:
- s << fieldName() << ": null";
- break;
- case MaxKey:
- s << fieldName() << ": MaxKey";
- break;
- case MinKey:
- s << fieldName() << ": MinKey";
- break;
- case CodeWScope:
- s << fieldName() << ": codewscope";
- break;
- case Code:
- s << fieldName() << ": ";
- if ( valuestrsize() > 80 )
- s << string(valuestr()).substr(0, 70) << "...";
- else {
- s << valuestr();
- }
- break;
- case Symbol:
- case String:
- s << fieldName() << ": ";
- if ( valuestrsize() > 80 )
- s << '"' << string(valuestr()).substr(0, 70) << "...\"";
- else {
- s << '"' << valuestr() << '"';
- }
- break;
- case DBRef:
- s << fieldName();
- s << " : DBRef('" << valuestr() << "',";
+ BSONElement nullElement;
+
+ ostream& operator<<( ostream &s, const OID &o ) {
+ s << o.str();
+ return s;
+ }
+
+ string BSONElement::toString() const {
+ stringstream s;
+ switch ( type() ) {
+ case EOO:
+ return "EOO";
+ case Date:
+ s << fieldName() << ": Date(" << hex << date() << ')';
+ break;
+ case RegEx:
{
- OID *x = (OID *) (valuestr() + valuestrsize());
- s << *x << ')';
+ s << fieldName() << ": /" << regex() << '/';
+ const char *p = regexFlags();
+ if ( p ) s << p;
}
break;
- case jstOID:
- s << fieldName() << " : ObjId(";
- s << oid() << ')';
- break;
- case BinData:
- s << fieldName() << " : BinData";
- break;
- default:
- s << fieldName() << ": ?type=" << type();
- break;
- }
- return s.str();
-}
-
-string escape( string s ) {
- stringstream ret;
- for ( string::iterator i = s.begin(); i != s.end(); ++i ) {
- switch ( *i ) {
- case '"':
- ret << "\\\"";
+ case NumberDouble:
+ case NumberInt:
+ s.precision( 16 );
+ s << fieldName() << ": " << number();
break;
- case '\\':
- ret << "\\\\";
+ case Bool:
+ s << fieldName() << ": " << ( boolean() ? "true" : "false" );
break;
- case '/':
- ret << "\\/";
+ case Object:
+ case Array:
+ s << fieldName() << ": " << embeddedObject().toString();
break;
- case '\b':
- ret << "\\b";
+ case Undefined:
+ s << fieldName() << ": undefined";
break;
- case '\f':
- ret << "\\f";
+ case jstNULL:
+ s << fieldName() << ": null";
break;
- case '\n':
- ret << "\\n";
+ case MaxKey:
+ s << fieldName() << ": MaxKey";
break;
- case '\r':
- ret << "\\r";
+ case MinKey:
+ s << fieldName() << ": MinKey";
break;
- case '\t':
- ret << "\\t";
+ case CodeWScope:
+ s << fieldName() << ": codewscope";
break;
- default:
- if ( *i >= 0 && *i <= 0x1f ) {
- ret << "\\u";
- ret << hex;
- ret.width( 4 );
- ret.fill( '0' );
- ret << int( *i );
- } else {
- ret << *i;
+ case Code:
+ s << fieldName() << ": ";
+ if ( valuestrsize() > 80 )
+ s << string(valuestr()).substr(0, 70) << "...";
+ else {
+ s << valuestr();
+ }
+ break;
+ case Symbol:
+ case String:
+ s << fieldName() << ": ";
+ if ( valuestrsize() > 80 )
+ s << '"' << string(valuestr()).substr(0, 70) << "...\"";
+ else {
+ s << '"' << valuestr() << '"';
}
+ break;
+ case DBRef:
+ s << fieldName();
+ s << " : DBRef('" << valuestr() << "',";
+ {
+ OID *x = (OID *) (valuestr() + valuestrsize());
+ s << *x << ')';
+ }
+ break;
+ case jstOID:
+ s << fieldName() << " : ObjId(";
+ s << oid() << ')';
+ break;
+ case BinData:
+ s << fieldName() << " : BinData";
+ break;
+ default:
+ s << fieldName() << ": ?type=" << type();
+ break;
}
+ return s.str();
}
- return ret.str();
-}
-typedef boost::archive::iterators::base64_from_binary
-< boost::archive::iterators::transform_width
-< string::const_iterator, 6, 8 >
-> base64_t;
-
-string BSONElement::jsonString( JsonStringFormat format, bool includeFieldNames ) const {
- stringstream s;
- if ( includeFieldNames )
- s << '"' << escape( fieldName() ) << "\" : ";
- switch ( type() ) {
- case String:
- case Symbol:
- s << '"' << escape( valuestr() ) << '"';
- break;
- case NumberInt:
- case NumberDouble:
- if ( number() >= -numeric_limits< double >::max() &&
- number() <= numeric_limits< double >::max() ) {
- s.precision( 16 );
- s << number();
- } else {
- stringstream ss;
- ss << "Number " << number() << " cannot be represented in JSON";
- string message = ss.str();
- massert( message.c_str(), false );
+ string escape( string s ) {
+ stringstream ret;
+ for ( string::iterator i = s.begin(); i != s.end(); ++i ) {
+ switch ( *i ) {
+ case '"':
+ ret << "\\\"";
+ break;
+ case '\\':
+ ret << "\\\\";
+ break;
+ case '/':
+ ret << "\\/";
+ break;
+ case '\b':
+ ret << "\\b";
+ break;
+ case '\f':
+ ret << "\\f";
+ break;
+ case '\n':
+ ret << "\\n";
+ break;
+ case '\r':
+ ret << "\\r";
+ break;
+ case '\t':
+ ret << "\\t";
+ break;
+ default:
+ if ( *i >= 0 && *i <= 0x1f ) {
+ ret << "\\u";
+ ret << hex;
+ ret.width( 4 );
+ ret.fill( '0' );
+ ret << int( *i );
+ } else {
+ ret << *i;
+ }
+ }
}
- break;
- case Bool:
- s << ( boolean() ? "true" : "false" );
- break;
- case jstNULL:
- s << "null";
- break;
- case Object:
- s << embeddedObject().jsonString( format );
- break;
- case Array: {
- if ( embeddedObject().isEmpty() ) {
- s << "[]";
+ return ret.str();
+ }
+
+ typedef boost::archive::iterators::base64_from_binary
+ < boost::archive::iterators::transform_width
+ < string::const_iterator, 6, 8 >
+ > base64_t;
+
+ string BSONElement::jsonString( JsonStringFormat format, bool includeFieldNames ) const {
+ stringstream s;
+ if ( includeFieldNames )
+ s << '"' << escape( fieldName() ) << "\" : ";
+ switch ( type() ) {
+ case String:
+ case Symbol:
+ s << '"' << escape( valuestr() ) << '"';
break;
- }
- s << "[ ";
- BSONObjIterator i( embeddedObject() );
- BSONElement e = i.next();
- if ( !e.eoo() )
- while ( 1 ) {
- s << e.jsonString( format, false );
- e = i.next();
- if ( e.eoo() )
- break;
- s << ", ";
+ case NumberInt:
+ case NumberDouble:
+ if ( number() >= -numeric_limits< double >::max() &&
+ number() <= numeric_limits< double >::max() ) {
+ s.precision( 16 );
+ s << number();
+ } else {
+ stringstream ss;
+ ss << "Number " << number() << " cannot be represented in JSON";
+ string message = ss.str();
+ massert( message.c_str(), false );
}
- s << " ]";
- break;
- }
- case DBRef: {
- OID *x = (OID *) (valuestr() + valuestrsize());
- if ( format == TenGen )
- s << "Dbref( ";
- else
- s << "{ \"$ns\" : ";
- s << '"' << valuestr() << "\", ";
- if ( format != TenGen )
- s << "\"$id\" : ";
- s << '"' << *x << "\" ";
- if ( format == TenGen )
- s << ')';
- else
- s << '}';
- break;
- }
- case jstOID:
- if ( format == TenGen )
- s << "ObjectId( ";
- s << '"' << oid() << '"';
- if ( format == TenGen )
- s << " )";
- break;
- case BinData: {
- int len = *(int *)( value() );
- BinDataType type = BinDataType( *(char *)( (int *)( value() ) + 1 ) );
- s << "{ \"$binary\" : \"";
- char *start = ( char * )( value() ) + sizeof( int ) + 1;
- string temp(start, len);
- string base64 = string( base64_t( temp.begin() ), base64_t( temp.end() ) );
- s << base64;
- int padding = ( 4 - ( base64.length() % 4 ) ) % 4;
- for ( int i = 0; i < padding; ++i )
- s << '=';
- s << "\", \"$type\" : \"" << hex;
- s.width( 2 );
- s.fill( '0' );
- s << type << dec;
- s << "\" }";
- break;
- }
- case Date:
- if ( format == Strict )
- s << "{ \"$date\" : ";
- else
- s << "Date( ";
- s << date();
- if ( format == Strict )
- s << " }";
- else
- s << " )";
- break;
- case RegEx:
- if ( format == Strict )
- s << "{ \"$regex\" : \"";
- else
- s << "/";
- s << escape( regex() );
- if ( format == Strict )
- s << "\", \"$options\" : \"" << regexFlags() << "\" }";
- else {
- s << "/";
- // FIXME Worry about alpha order?
- for ( const char *f = regexFlags(); *f; ++f )
- switch ( *f ) {
- case 'g':
- case 'i':
- case 'm':
- s << *f;
- default:
- break;
+ break;
+ case Bool:
+ s << ( boolean() ? "true" : "false" );
+ break;
+ case jstNULL:
+ s << "null";
+ break;
+ case Object:
+ s << embeddedObject().jsonString( format );
+ break;
+ case Array: {
+ if ( embeddedObject().isEmpty() ) {
+ s << "[]";
+ break;
+ }
+ s << "[ ";
+ BSONObjIterator i( embeddedObject() );
+ BSONElement e = i.next();
+ if ( !e.eoo() )
+ while ( 1 ) {
+ s << e.jsonString( format, false );
+ e = i.next();
+ if ( e.eoo() )
+ break;
+ s << ", ";
}
+ s << " ]";
+ break;
}
- break;
- default:
- stringstream ss;
- ss << "Cannot create a properly formatted JSON string with "
- << "element: " << toString() << " of type: " << type();
- string message = ss.str();
- massert( message.c_str(), false );
- }
- return s.str();
-}
-
-int BSONElement::size() const {
- if ( totalSize >= 0 )
- return totalSize;
+ case DBRef: {
+ OID *x = (OID *) (valuestr() + valuestrsize());
+ if ( format == TenGen )
+ s << "Dbref( ";
+ else
+ s << "{ \"$ns\" : ";
+ s << '"' << valuestr() << "\", ";
+ if ( format != TenGen )
+ s << "\"$id\" : ";
+ s << '"' << *x << "\" ";
+ if ( format == TenGen )
+ s << ')';
+ else
+ s << '}';
+ break;
+ }
+ case jstOID:
+ if ( format == TenGen )
+ s << "ObjectId( ";
+ s << '"' << oid() << '"';
+ if ( format == TenGen )
+ s << " )";
+ break;
+ case BinData: {
+ int len = *(int *)( value() );
+ BinDataType type = BinDataType( *(char *)( (int *)( value() ) + 1 ) );
+ s << "{ \"$binary\" : \"";
+ char *start = ( char * )( value() ) + sizeof( int ) + 1;
+ string temp(start, len);
+ string base64 = string( base64_t( temp.begin() ), base64_t( temp.end() ) );
+ s << base64;
+ int padding = ( 4 - ( base64.length() % 4 ) ) % 4;
+ for ( int i = 0; i < padding; ++i )
+ s << '=';
+ s << "\", \"$type\" : \"" << hex;
+ s.width( 2 );
+ s.fill( '0' );
+ s << type << dec;
+ s << "\" }";
+ break;
+ }
+ case Date:
+ if ( format == Strict )
+ s << "{ \"$date\" : ";
+ else
+ s << "Date( ";
+ s << date();
+ if ( format == Strict )
+ s << " }";
+ else
+ s << " )";
+ break;
+ case RegEx:
+ if ( format == Strict )
+ s << "{ \"$regex\" : \"";
+ else
+ s << "/";
+ s << escape( regex() );
+ if ( format == Strict )
+ s << "\", \"$options\" : \"" << regexFlags() << "\" }";
+ else {
+ s << "/";
+ // FIXME Worry about alpha order?
+ for ( const char *f = regexFlags(); *f; ++f )
+ switch ( *f ) {
+ case 'g':
+ case 'i':
+ case 'm':
+ s << *f;
+ default:
+ break;
+ }
+ }
+ break;
+ default:
+ stringstream ss;
+ ss << "Cannot create a properly formatted JSON string with "
+ << "element: " << toString() << " of type: " << type();
+ string message = ss.str();
+ massert( message.c_str(), false );
+ }
+ return s.str();
+ }
- int x = 1;
- switch ( type() ) {
- case EOO:
- case Undefined:
- case jstNULL:
- case MaxKey:
- case MinKey:
- break;
- case Bool:
- x = 2;
- break;
- case NumberInt:
- x = 5;
- break;
- case Date:
- case NumberDouble:
- x = 9;
- break;
- case jstOID:
- x = 13;
- break;
- case Symbol:
- case Code:
- case String:
- x = valuestrsize() + 4 + 1;
- break;
- case CodeWScope:
- x = objsize() + 1;
- break;
+ int BSONElement::size() const {
+ if ( totalSize >= 0 )
+ return totalSize;
- case DBRef:
- x = valuestrsize() + 4 + 12 + 1;
- break;
- case Object:
- case Array:
- x = objsize() + 1;
- break;
- case BinData:
- x = valuestrsize() + 4 + 1 + 1/*subtype*/;
+ int x = 1;
+ switch ( type() ) {
+ case EOO:
+ case Undefined:
+ case jstNULL:
+ case MaxKey:
+ case MinKey:
+ break;
+ case Bool:
+ x = 2;
+ break;
+ case NumberInt:
+ x = 5;
+ break;
+ case Date:
+ case NumberDouble:
+ x = 9;
+ break;
+ case jstOID:
+ x = 13;
+ break;
+ case Symbol:
+ case Code:
+ case String:
+ x = valuestrsize() + 4 + 1;
+ break;
+ case CodeWScope:
+ x = objsize() + 1;
+ break;
+
+ case DBRef:
+ x = valuestrsize() + 4 + 12 + 1;
+ break;
+ case Object:
+ case Array:
+ x = objsize() + 1;
+ break;
+ case BinData:
+ x = valuestrsize() + 4 + 1 + 1/*subtype*/;
+ break;
+ case RegEx:
+ {
+ const char *p = value();
+ int len1 = strlen(p);
+ p = p + len1 + 1;
+ x = 1 + len1 + strlen(p) + 2;
+ }
break;
- case RegEx:
- {
- const char *p = value();
- int len1 = strlen(p);
- p = p + len1 + 1;
- x = 1 + len1 + strlen(p) + 2;
- }
- break;
- default:
- cout << "BSONElement: bad type " << (int) type() << endl;
- assert(false);
- }
- ((BSONElement *) this)->totalSize = x + fieldNameSize;
-
- if ( !eoo() ) {
- const char *next = data + totalSize;
- if ( *next < MinKey || ( *next > JSTypeMax && *next != MaxKey ) ) {
- // bad type.
- cout << "***\n";
- cout << "Bad data or size in BSONElement::size()\n";
- cout << "bad type:" << (int) *next << '\n';
- cout << "totalsize:" << totalSize << " fieldnamesize:" << fieldNameSize << '\n';
- cout << "lastrec:" << endl;
- //dumpmemory(data, totalSize + 15);
+ default:
+ out() << "BSONElement: bad type " << (int) type() << endl;
assert(false);
}
- }
+ ((BSONElement *) this)->totalSize = x + fieldNameSize;
+
+ if ( !eoo() ) {
+ const char *next = data + totalSize;
+ if ( *next < MinKey || ( *next > JSTypeMax && *next != MaxKey ) ) {
+ // bad type.
+ out() << "***\n";
+ out() << "Bad data or size in BSONElement::size()\n";
+ out() << "bad type:" << (int) *next << '\n';
+ out() << "totalsize:" << totalSize << " fieldnamesize:" << fieldNameSize << '\n';
+ out() << "lastrec:" << endl;
+ //dumpmemory(data, totalSize + 15);
+ assert(false);
+ }
+ }
- return totalSize;
-}
+ return totalSize;
+ }
-int BSONElement::getGtLtOp() const {
- const char *fn = fieldName();
- if ( fn[0] == '$' && fn[1] ) {
- if ( fn[2] == 't' ) {
- if ( fn[1] == 'g' ) {
- if ( fn[3] == 0 ) return JSMatcher::GT;
- else if ( fn[3] == 'e' && fn[4] == 0 ) return JSMatcher::GTE;
+ int BSONElement::getGtLtOp() const {
+ const char *fn = fieldName();
+ if ( fn[0] == '$' && fn[1] ) {
+ if ( fn[2] == 't' ) {
+ if ( fn[1] == 'g' ) {
+ if ( fn[3] == 0 ) return JSMatcher::GT;
+ else if ( fn[3] == 'e' && fn[4] == 0 ) return JSMatcher::GTE;
+ }
+ else if ( fn[1] == 'l' ) {
+ if ( fn[3] == 0 ) return JSMatcher::LT;
+ else if ( fn[3] == 'e' && fn[4] == 0 ) return JSMatcher::LTE;
+ }
}
- else if ( fn[1] == 'l' ) {
- if ( fn[3] == 0 ) return JSMatcher::LT;
- else if ( fn[3] == 'e' && fn[4] == 0 ) return JSMatcher::LTE;
+ else if ( fn[2] == 'e' ) {
+ if ( fn[1] == 'n' && fn[3] == 0 )
+ return JSMatcher::NE;
}
+ else if ( fn[1] == 'i' && fn[2] == 'n' && fn[3] == 0 )
+ return JSMatcher::opIN;
}
- else if ( fn[2] == 'e' ) {
- if ( fn[1] == 'n' && fn[3] == 0 )
- return JSMatcher::NE;
- }
- else if ( fn[1] == 'i' && fn[2] == 'n' && fn[3] == 0 )
- return JSMatcher::opIN;
+ return JSMatcher::Equality;
}
- return JSMatcher::Equality;
-}
-int BSONElement::woCompare( const BSONElement &e,
- bool considerFieldName ) const {
- int lt = (int) type();
- if ( lt == NumberInt ) lt = NumberDouble;
- int rt = (int) e.type();
- if ( rt == NumberInt ) rt = NumberDouble;
+ int BSONElement::woCompare( const BSONElement &e,
+ bool considerFieldName ) const {
+ int lt = (int) type();
+ if ( lt == NumberInt ) lt = NumberDouble;
+ int rt = (int) e.type();
+ if ( rt == NumberInt ) rt = NumberDouble;
- int x = lt - rt;
- if ( x != 0 )
- return x;
- if ( considerFieldName ) {
- x = strcmp(fieldName(), e.fieldName());
+ int x = lt - rt;
if ( x != 0 )
return x;
+ if ( considerFieldName ) {
+ x = strcmp(fieldName(), e.fieldName());
+ if ( x != 0 )
+ return x;
+ }
+ x = compareElementValues(*this, e);
+ return x;
}
- x = compareElementValues(*this, e);
- return x;
-}
-
-/* must be same type! */
-int compareElementValues(const BSONElement& l, const BSONElement& r) {
- int f;
- double x;
- switch ( l.type() ) {
- case EOO:
- case Undefined:
- case jstNULL:
- case MaxKey:
- case MinKey:
- f = l.type() - r.type();
- if ( f<0 ) return -1;
- return f==0 ? 0 : 1;
- case Bool:
- return *l.value() - *r.value();
- case Date:
- if ( l.date() < r.date() )
- return -1;
- return l.date() == r.date() ? 0 : 1;
- case NumberInt:
- case NumberDouble:
- x = l.number() - r.number();
- if ( x < 0 ) return -1;
- return x == 0 ? 0 : 1;
- case jstOID:
- return memcmp(l.value(), r.value(), 12);
- case Code:
- case Symbol:
- case String:
- /* todo: utf version */
- return strcmp(l.valuestr(), r.valuestr());
- case Object:
- case Array:
- return l.embeddedObject().woCompare( r.embeddedObject() );
- case DBRef:
- case BinData: {
- int lsz = l.valuesize();
- int rsz = r.valuesize();
- if ( lsz - rsz != 0 ) return lsz - rsz;
- return memcmp(l.value(), r.value(), lsz);
- }
- case RegEx:
- {
- int c = strcmp(l.regex(), r.regex());
- if ( c )
- return c;
- return strcmp(l.regexFlags(), r.regexFlags());
- }
- default:
- cout << "compareElementValues: bad type " << (int) l.type() << endl;
- assert(false);
- }
- return -1;
-}
-
-/* JSMatcher --------------------------------------*/
+
+ /* must be same type! */
+ int compareElementValues(const BSONElement& l, const BSONElement& r) {
+ int f;
+ double x;
+ switch ( l.type() ) {
+ case EOO:
+ case Undefined:
+ case jstNULL:
+ case MaxKey:
+ case MinKey:
+ f = l.type() - r.type();
+ if ( f<0 ) return -1;
+ return f==0 ? 0 : 1;
+ case Bool:
+ return *l.value() - *r.value();
+ case Date:
+ if ( l.date() < r.date() )
+ return -1;
+ return l.date() == r.date() ? 0 : 1;
+ case NumberInt:
+ case NumberDouble:
+ x = l.number() - r.number();
+ if ( x < 0 ) return -1;
+ return x == 0 ? 0 : 1;
+ case jstOID:
+ return memcmp(l.value(), r.value(), 12);
+ case Code:
+ case Symbol:
+ case String:
+ /* todo: utf version */
+ return strcmp(l.valuestr(), r.valuestr());
+ case Object:
+ case Array:
+ return l.embeddedObject().woCompare( r.embeddedObject() );
+ case DBRef:
+ case BinData: {
+ int lsz = l.valuesize();
+ int rsz = r.valuesize();
+ if ( lsz - rsz != 0 ) return lsz - rsz;
+ return memcmp(l.value(), r.value(), lsz);
+ }
+ case RegEx:
+ {
+ int c = strcmp(l.regex(), r.regex());
+ if ( c )
+ return c;
+ return strcmp(l.regexFlags(), r.regexFlags());
+ }
+ default:
+ out() << "compareElementValues: bad type " << (int) l.type() << endl;
+ assert(false);
+ }
+ return -1;
+ }
+
+ /* JSMatcher --------------------------------------*/
// If the element is something like:
// a : { $gt : 3 }
@@ -474,429 +474,429 @@ int compareElementValues(const BSONElement& l, const BSONElement& r) {
// a : 3
// else we just append the element.
//
-void appendElementHandlingGtLt(BSONObjBuilder& b, BSONElement& e) {
- if ( e.type() == Object ) {
- BSONElement fe = e.embeddedObject().firstElement();
- const char *fn = fe.fieldName();
- if ( fn[0] == '$' && fn[1] && fn[2] == 't' ) {
- b.appendAs(fe, e.fieldName());
- return;
+ void appendElementHandlingGtLt(BSONObjBuilder& b, BSONElement& e) {
+ if ( e.type() == Object ) {
+ BSONElement fe = e.embeddedObject().firstElement();
+ const char *fn = fe.fieldName();
+ if ( fn[0] == '$' && fn[1] && fn[2] == 't' ) {
+ b.appendAs(fe, e.fieldName());
+ return;
+ }
}
+ b.append(e);
}
- b.append(e);
-}
-int getGtLtOp(BSONElement& e) {
- if ( e.type() != Object )
- return JSMatcher::Equality;
+ int getGtLtOp(BSONElement& e) {
+ if ( e.type() != Object )
+ return JSMatcher::Equality;
- BSONElement fe = e.embeddedObject().firstElement();
- return fe.getGtLtOp();
-}
+ BSONElement fe = e.embeddedObject().firstElement();
+ return fe.getGtLtOp();
+ }
-/* BSONObj ------------------------------------------------------------*/
+ /* BSONObj ------------------------------------------------------------*/
-string BSONObj::toString() const {
- if ( isEmpty() ) return "{}";
+ string BSONObj::toString() const {
+ if ( isEmpty() ) return "{}";
- stringstream s;
- s << "{ ";
- BSONObjIterator i(*this);
- BSONElement e = i.next();
- if ( !e.eoo() )
- while ( 1 ) {
- s << e.toString();
- e = i.next();
- if ( e.eoo() )
- break;
- s << ", ";
+ stringstream s;
+ s << "{ ";
+ BSONObjIterator i(*this);
+ BSONElement e = i.next();
+ if ( !e.eoo() )
+ while ( 1 ) {
+ s << e.toString();
+ e = i.next();
+ if ( e.eoo() )
+ break;
+ s << ", ";
+ }
+ s << " }";
+ return s.str();
+ }
+
+ string BSONObj::jsonString( JsonStringFormat format ) const {
+ if ( isEmpty() ) return "{}";
+
+ stringstream s;
+ s << "{ ";
+ BSONObjIterator i(*this);
+ BSONElement e = i.next();
+ if ( !e.eoo() )
+ while ( 1 ) {
+ s << e.jsonString( format );
+ e = i.next();
+ if ( e.eoo() )
+ break;
+ s << ", ";
+ }
+ s << " }";
+ return s.str();
+ }
+
+// todo: can be a little faster if we don't use toString() here.
+ bool BSONObj::valid() const {
+ try {
+ toString();
+ }
+ catch (...) {
+ return false;
}
- s << " }";
- return s.str();
-}
+ return true;
+ }
+
+ /* well ordered compare */
+ int BSONObj::woCompare(const BSONObj &r, const BSONObj &idxKey,
+ bool considerFieldName) const {
+ if ( isEmpty() )
+ return r.isEmpty() ? 0 : -1;
+ if ( r.isEmpty() )
+ return 1;
-string BSONObj::jsonString( JsonStringFormat format ) const {
- if ( isEmpty() ) return "{}";
+ bool ordered = !idxKey.isEmpty();
- stringstream s;
- s << "{ ";
- BSONObjIterator i(*this);
- BSONElement e = i.next();
- if ( !e.eoo() )
+ BSONObjIterator i(*this);
+ BSONObjIterator j(r);
+ BSONObjIterator k(idxKey);
while ( 1 ) {
- s << e.jsonString( format );
- e = i.next();
- if ( e.eoo() )
- break;
- s << ", ";
+ // so far, equal...
+
+ BSONElement l = i.next();
+ BSONElement r = j.next();
+ BSONElement o;
+ if ( ordered )
+ o = k.next();
+ if ( l.eoo() )
+ return 0;
+
+ int x = l.woCompare( r, considerFieldName );
+ if ( ordered && o.number() < 0 )
+ x = -x;
+ if ( x != 0 )
+ return x;
}
- s << " }";
- return s.str();
-}
+ return -1;
+ }
-// todo: can be a little faster if we don't use toString() here.
-bool BSONObj::valid() const {
- try {
- toString();
- }
- catch (...) {
- return false;
- }
- return true;
-}
-
-/* well ordered compare */
-int BSONObj::woCompare(const BSONObj &r, const BSONObj &idxKey,
- bool considerFieldName) const {
- if ( isEmpty() )
- return r.isEmpty() ? 0 : -1;
- if ( r.isEmpty() )
- return 1;
-
- bool ordered = !idxKey.isEmpty();
-
- BSONObjIterator i(*this);
- BSONObjIterator j(r);
- BSONObjIterator k(idxKey);
- while ( 1 ) {
- // so far, equal...
-
- BSONElement l = i.next();
- BSONElement r = j.next();
- BSONElement o;
- if ( ordered )
- o = k.next();
- if ( l.eoo() )
- return 0;
-
- int x = l.woCompare( r, considerFieldName );
- if ( ordered && o.number() < 0 )
- x = -x;
- if ( x != 0 )
- return x;
+ BSONElement BSONObj::getField(const char *name) const {
+ if ( details ) {
+ BSONObjIterator i(*this);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ if ( strcmp(e.fieldName(), name) == 0 )
+ return e;
+ }
+ }
+ return nullElement;
}
- return -1;
-}
-BSONElement BSONObj::getField(const char *name) const {
- if ( details ) {
- BSONObjIterator i(*this);
+ /* return has eoo() true if no match
+ supports "." notation to reach into embedded objects
+ */
+ BSONElement BSONObj::getFieldDotted(const char *name) const {
+ BSONElement e = getField( name );
+ if ( e.eoo() ) {
+ const char *p = strchr(name, '.');
+ if ( p ) {
+ string left(name, p-name);
+ BSONObj sub = getObjectField(left.c_str());
+ return sub.isEmpty() ? nullElement : sub.getFieldDotted(p+1);
+ }
+ }
+
+ return e;
+ /*
+ BSONObjIterator i(*this);
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if( e.eoo() )
+ break;
+ if( strcmp(e.fieldName(), name) == 0 )
+ return e;
+ }
+ return nullElement;
+ */
+ }
+
+ BSONElement BSONObj::getFieldDottedOrArray(const char *&name) const {
+ const char *p = strchr(name, '.');
+ string left;
+ if ( p ) {
+ left = string(name, p-name);
+ name = p + 1;
+ } else {
+ left = string(name);
+ name = name + strlen(name);
+ }
+ BSONElement sub = getField(left.c_str());
+ if ( sub.eoo() )
+ return nullElement;
+ else if ( sub.type() == Array || strlen( name ) == 0 )
+ return sub;
+ else
+ return sub.embeddedObject().getFieldDottedOrArray( name );
+ }
+
+ /* makes a new BSONObj with the fields specified in pattern.
+ fields returned in the order they appear in pattern.
+ if any field missing, you get back an empty object overall.
+
+ n^2 implementation bad if pattern and object have lots
+ of fields - normally pattern doesn't so should be fine.
+ */
+ BSONObj BSONObj::extractFieldsDotted(BSONObj pattern, BSONObjBuilder& b, const char *&nameWithinArray) const {
+ nameWithinArray = "";
+ BSONObjIterator i(pattern);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ const char *name = e.fieldName();
+ BSONElement x = getFieldDottedOrArray( name );
+ if ( x.eoo() ) {
+ nameWithinArray = "";
+ return BSONObj();
+ } else if ( x.type() == Array ) {
+ // NOTE: Currently set based on last array discovered.
+ nameWithinArray = name;
+ }
+ b.appendAs(x, "");
+ }
+ return b.done();
+ }
+ BSONObj BSONObj::extractFieldsUnDotted(BSONObj pattern) {
+ BSONObjBuilder b;
+ BSONObjIterator i(pattern);
while ( i.more() ) {
BSONElement e = i.next();
if ( e.eoo() )
break;
- if ( strcmp(e.fieldName(), name) == 0 )
- return e;
+ BSONElement x = getField(e.fieldName());
+ if ( x.eoo() )
+ return BSONObj();
+ b.appendAs(x, "");
}
+ return b.doneAndDecouple();
}
- return nullElement;
-}
-/* return has eoo() true if no match
- supports "." notation to reach into embedded objects
-*/
-BSONElement BSONObj::getFieldDotted(const char *name) const {
- BSONElement e = getField( name );
- if ( e.eoo() ) {
- const char *p = strchr(name, '.');
- if ( p ) {
- string left(name, p-name);
- BSONObj sub = getObjectField(left.c_str());
- return sub.isEmpty() ? nullElement : sub.getFieldDotted(p+1);
- }
- }
-
- return e;
- /*
- BSONObjIterator i(*this);
- while( i.more() ) {
- BSONElement e = i.next();
- if( e.eoo() )
- break;
- if( strcmp(e.fieldName(), name) == 0 )
- return e;
- }
- return nullElement;
- */
-}
-
-BSONElement BSONObj::getFieldDottedOrArray(const char *&name) const {
- const char *p = strchr(name, '.');
- string left;
- if ( p ) {
- left = string(name, p-name);
- name = p + 1;
- } else {
- left = string(name);
- name = name + strlen(name);
- }
- BSONElement sub = getField(left.c_str());
- if ( sub.eoo() )
- return nullElement;
- else if ( sub.type() == Array || strlen( name ) == 0 )
- return sub;
- else
- return sub.embeddedObject().getFieldDottedOrArray( name );
-}
-
-/* makes a new BSONObj with the fields specified in pattern.
- fields returned in the order they appear in pattern.
- if any field missing, you get back an empty object overall.
-
- n^2 implementation bad if pattern and object have lots
- of fields - normally pattern doesn't so should be fine.
-*/
-BSONObj BSONObj::extractFieldsDotted(BSONObj pattern, BSONObjBuilder& b, const char *&nameWithinArray) const {
- nameWithinArray = "";
- BSONObjIterator i(pattern);
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( e.eoo() )
- break;
- const char *name = e.fieldName();
- BSONElement x = getFieldDottedOrArray( name );
- if ( x.eoo() ) {
- nameWithinArray = "";
- return BSONObj();
- } else if ( x.type() == Array ) {
- // NOTE: Currently set based on last array discovered.
- nameWithinArray = name;
- }
- b.appendAs(x, "");
- }
- return b.done();
-}
-BSONObj BSONObj::extractFieldsUnDotted(BSONObj pattern) {
- BSONObjBuilder b;
- BSONObjIterator i(pattern);
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( e.eoo() )
- break;
- BSONElement x = getField(e.fieldName());
- if ( x.eoo() )
- return BSONObj();
- b.appendAs(x, "");
+ BSONObj BSONObj::extractFields(BSONObj& pattern) {
+ BSONObjBuilder b(32); // scanandorder.h can make a zillion of these, so we start the allocation very small
+ BSONObjIterator i(pattern);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ BSONElement x = getFieldDotted(e.fieldName());
+ if ( x.eoo() )
+ return BSONObj();
+ b.append(x);
+ }
+ return b.doneAndDecouple();
}
- return b.doneAndDecouple();
-}
-BSONObj BSONObj::extractFields(BSONObj& pattern) {
- BSONObjBuilder b(32); // scanandorder.h can make a zillion of these, so we start the allocation very small
- BSONObjIterator i(pattern);
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( e.eoo() )
- break;
- BSONElement x = getFieldDotted(e.fieldName());
- if ( x.eoo() )
- return BSONObj();
- b.append(x);
- }
- return b.doneAndDecouple();
-}
-
-int BSONObj::getIntField(const char *name) const {
- BSONElement e = getField(name);
- return e.isNumber() ? (int) e.number() : INT_MIN;
-}
-
-bool BSONObj::getBoolField(const char *name) {
- BSONElement e = getField(name);
- return e.type() == Bool ? e.boolean() : false;
-}
-
-const char * BSONObj::getStringField(const char *name) {
- BSONElement e = getField(name);
- return e.type() == String ? e.valuestr() : "";
-}
-
-BSONObj BSONObj::getObjectField(const char *name) const {
- BSONElement e = getField(name);
- BSONType t = e.type();
- return t == Object || t == Array ? e.embeddedObject() : BSONObj();
-}
-
-int BSONObj::nFields() {
- int n = 0;
- BSONObjIterator i(*this);
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( e.eoo() )
- break;
- n++;
+ int BSONObj::getIntField(const char *name) const {
+ BSONElement e = getField(name);
+ return e.isNumber() ? (int) e.number() : INT_MIN;
}
- return n;
-}
-/* grab names of all the fields in this object */
-int BSONObj::getFieldNames(set<string>& fields) {
- int n = 0;
- BSONObjIterator i(*this);
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( e.eoo() )
- break;
- fields.insert(e.fieldName());
- n++;
+ bool BSONObj::getBoolField(const char *name) {
+ BSONElement e = getField(name);
+ return e.type() == Bool ? e.boolean() : false;
}
- return n;
-}
-/* note: addFields always adds _id even if not specified
- returns n added not counting _id unless requested.
-*/
-int BSONObj::addFields(BSONObj& from, set<string>& fields) {
- assert( details == 0 ); /* partial implementation for now... */
+ const char * BSONObj::getStringField(const char *name) {
+ BSONElement e = getField(name);
+ return e.type() == String ? e.valuestr() : "";
+ }
- BSONObjBuilder b;
+ BSONObj BSONObj::getObjectField(const char *name) const {
+ BSONElement e = getField(name);
+ BSONType t = e.type();
+ return t == Object || t == Array ? e.embeddedObject() : BSONObj();
+ }
- int N = fields.size();
- int n = 0;
- BSONObjIterator i(from);
- bool gotId = false;
- while ( i.more() ) {
- BSONElement e = i.next();
- const char *fname = e.fieldName();
- if ( fields.count(fname) ) {
- b.append(e);
- ++n;
- gotId = gotId || strcmp(fname, "_id")==0;
- if ( n == N && gotId )
- break;
- } else if ( strcmp(fname, "_id")==0 ) {
- b.append(e);
- gotId = true;
- if ( n == N && gotId )
+ int BSONObj::nFields() {
+ int n = 0;
+ BSONObjIterator i(*this);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
break;
+ n++;
}
+ return n;
}
- if ( n ) {
- int len;
- init( b.decouple(len), true );
+ /* grab names of all the fields in this object */
+ int BSONObj::getFieldNames(set<string>& fields) {
+ int n = 0;
+ BSONObjIterator i(*this);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ fields.insert(e.fieldName());
+ n++;
+ }
+ return n;
}
- return n;
-}
+ /* note: addFields always adds _id even if not specified
+ returns n added not counting _id unless requested.
+ */
+ int BSONObj::addFields(BSONObj& from, set<string>& fields) {
+ assert( details == 0 ); /* partial implementation for now... */
+
+ BSONObjBuilder b;
+
+ int N = fields.size();
+ int n = 0;
+ BSONObjIterator i(from);
+ bool gotId = false;
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ const char *fname = e.fieldName();
+ if ( fields.count(fname) ) {
+ b.append(e);
+ ++n;
+ gotId = gotId || strcmp(fname, "_id")==0;
+ if ( n == N && gotId )
+ break;
+ } else if ( strcmp(fname, "_id")==0 ) {
+ b.append(e);
+ gotId = true;
+ if ( n == N && gotId )
+ break;
+ }
+ }
+
+ if ( n ) {
+ int len;
+ init( b.decouple(len), true );
+ }
-ostream& operator<<( ostream &s, const BSONObj &o ) {
- return s << o.toString();
-}
+ return n;
+ }
+
+ ostream& operator<<( ostream &s, const BSONObj &o ) {
+ return s << o.toString();
+ }
-/*-- test things ----------------------------------------------------*/
+ /*-- test things ----------------------------------------------------*/
#pragma pack(push,1)
-struct MaxKeyData {
- MaxKeyData() {
- totsize=7;
- maxkey=MaxKey;
- name=0;
- eoo=EOO;
- }
- int totsize;
- char maxkey;
- char name;
- char eoo;
-} maxkeydata;
-BSONObj maxKey((const char *) &maxkeydata);
-
-struct MinKeyData {
- MinKeyData() {
- totsize=7;
- minkey=MinKey;
- name=0;
- eoo=EOO;
- }
- int totsize;
- char minkey;
- char name;
- char eoo;
-} minkeydata;
-BSONObj minKey((const char *) &minkeydata);
-
-struct JSObj0 {
- JSObj0() {
- totsize = 5;
- eoo = EOO;
- }
- int totsize;
- char eoo;
-} js0;
+ struct MaxKeyData {
+ MaxKeyData() {
+ totsize=7;
+ maxkey=MaxKey;
+ name=0;
+ eoo=EOO;
+ }
+ int totsize;
+ char maxkey;
+ char name;
+ char eoo;
+ } maxkeydata;
+ BSONObj maxKey((const char *) &maxkeydata);
+
+ struct MinKeyData {
+ MinKeyData() {
+ totsize=7;
+ minkey=MinKey;
+ name=0;
+ eoo=EOO;
+ }
+ int totsize;
+ char minkey;
+ char name;
+ char eoo;
+ } minkeydata;
+ BSONObj minKey((const char *) &minkeydata);
+
+ struct JSObj0 {
+ JSObj0() {
+ totsize = 5;
+ eoo = EOO;
+ }
+ int totsize;
+ char eoo;
+ } js0;
#pragma pack(pop)
-BSONElement::BSONElement() {
- data = &js0.eoo;
- fieldNameSize = 0;
- totalSize = -1;
-}
+ BSONElement::BSONElement() {
+ data = &js0.eoo;
+ fieldNameSize = 0;
+ totalSize = -1;
+ }
#pragma pack(push,1)
-struct EmptyObject {
- EmptyObject() {
- len = 5;
- jstype = EOO;
- }
- int len;
- char jstype;
-} emptyObject;
+ struct EmptyObject {
+ EmptyObject() {
+ len = 5;
+ jstype = EOO;
+ }
+ int len;
+ char jstype;
+ } emptyObject;
#pragma pack(pop)
-BSONObj emptyObj((char *) &emptyObject);
+ BSONObj emptyObj((char *) &emptyObject);
-struct BsonUnitTest : public UnitTest {
- void testRegex() {
- BSONObjBuilder b;
- b.appendRegex("x", "foo");
- BSONObj o = b.done();
-
- BSONObjBuilder c;
- c.appendRegex("x", "goo");
- BSONObj p = c.done();
-
- assert( !o.woEqual( p ) );
- assert( o.woCompare( p ) < 0 );
- }
- void run() {
- testRegex();
- BSONObjBuilder A,B,C;
- A.appendInt("x", 2);
- B.append("x", 2.0);
- C.append("x", 2.1);
- BSONObj a = A.done();
- BSONObj b = B.done();
- BSONObj c = C.done();
- assert( !a.woEqual( b ) ); // comments on operator==
- int cmp = a.woCompare(b);
- assert( cmp == 0 );
- cmp = a.woCompare(c);
- assert( cmp < 0 );
- }
-} bson_unittest;
-
-
-
-
-BSONObjBuilderValueStream::BSONObjBuilderValueStream( const char * fieldName , BSONObjBuilder * builder ) {
- _fieldName = fieldName;
- _builder = builder;
-}
-
-BSONObjBuilder& BSONObjBuilderValueStream::operator<<( const char * value ) {
- _builder->append( _fieldName , value );
- return *_builder;
-}
-
-BSONObjBuilder& BSONObjBuilderValueStream::operator<<( const int value ) {
- _builder->appendInt( _fieldName , value );
- return *_builder;
-}
-
-BSONObjBuilder& BSONObjBuilderValueStream::operator<<( const double value ) {
- _builder->append( _fieldName , value );
- return *_builder;
-}
+ struct BsonUnitTest : public UnitTest {
+ void testRegex() {
+ BSONObjBuilder b;
+ b.appendRegex("x", "foo");
+ BSONObj o = b.done();
+
+ BSONObjBuilder c;
+ c.appendRegex("x", "goo");
+ BSONObj p = c.done();
+
+ assert( !o.woEqual( p ) );
+ assert( o.woCompare( p ) < 0 );
+ }
+ void run() {
+ testRegex();
+ BSONObjBuilder A,B,C;
+ A.appendInt("x", 2);
+ B.append("x", 2.0);
+ C.append("x", 2.1);
+ BSONObj a = A.done();
+ BSONObj b = B.done();
+ BSONObj c = C.done();
+ assert( !a.woEqual( b ) ); // comments on operator==
+ int cmp = a.woCompare(b);
+ assert( cmp == 0 );
+ cmp = a.woCompare(c);
+ assert( cmp < 0 );
+ }
+ } bson_unittest;
+
+
+
+
+ BSONObjBuilderValueStream::BSONObjBuilderValueStream( const char * fieldName , BSONObjBuilder * builder ) {
+ _fieldName = fieldName;
+ _builder = builder;
+ }
+
+ BSONObjBuilder& BSONObjBuilderValueStream::operator<<( const char * value ) {
+ _builder->append( _fieldName , value );
+ return *_builder;
+ }
+
+ BSONObjBuilder& BSONObjBuilderValueStream::operator<<( const int value ) {
+ _builder->appendInt( _fieldName , value );
+ return *_builder;
+ }
+
+ BSONObjBuilder& BSONObjBuilderValueStream::operator<<( const double value ) {
+ _builder->append( _fieldName , value );
+ return *_builder;
+ }
} // namespace mongo
diff --git a/db/jsobj.h b/db/jsobj.h
index f8af23835d2..ed46d5faff8 100644
--- a/db/jsobj.h
+++ b/db/jsobj.h
@@ -32,740 +32,740 @@
namespace mongo {
-class BSONObj;
-class Record;
-class BSONObjBuilder;
+ class BSONObj;
+ class Record;
+ class BSONObjBuilder;
#pragma pack(push,1)
-/* BinData = binary data types.
- EOO = end of object
-*/
-enum BSONType {MinKey=-1, EOO=0, NumberDouble=1, String=2, Object=3, Array=4, BinData=5,
- Undefined=6, jstOID=7, Bool=8, Date=9 , jstNULL=10, RegEx=11 ,
- DBRef=12, Code=13, Symbol=14, CodeWScope=15 ,
- NumberInt = 16,
- JSTypeMax=16,
- MaxKey=127
- };
-
-/* subtypes of BinData.
- bdtCustom and above are ones that the JS compiler understands, but are
- opaque to the database.
-*/
-enum BinDataType { Function=1, ByteArray=2, bdtCustom=128 };
+ /* BinData = binary data types.
+ EOO = end of object
+ */
+ enum BSONType {MinKey=-1, EOO=0, NumberDouble=1, String=2, Object=3, Array=4, BinData=5,
+ Undefined=6, jstOID=7, Bool=8, Date=9 , jstNULL=10, RegEx=11 ,
+ DBRef=12, Code=13, Symbol=14, CodeWScope=15 ,
+ NumberInt = 16,
+ JSTypeMax=16,
+ MaxKey=127
+ };
+
+ /* subtypes of BinData.
+ bdtCustom and above are ones that the JS compiler understands, but are
+ opaque to the database.
+ */
+ enum BinDataType { Function=1, ByteArray=2, bdtCustom=128 };
-/* Object id's are optional for BSONObjects.
- When present they should be the first object member added.
- The app server serializes OIDs as <8-byte-int><4-byte-int>) using the machine's
- native endianness. We deserialize by casting as an OID object, assuming
- the db server has the same endianness.
-*/
-class OID {
- long long a;
- unsigned b;
-public:
- bool operator==(const OID& r) {
- return a==r.a&&b==r.b;
- }
- string str() const {
- stringstream s;
- s << hex;
- s.fill( '0' );
- s.width( 16 );
- s << a;
- s.width( 8 );
- s << b;
- s << dec;
- return s.str();
- }
-};
-ostream& operator<<( ostream &s, const OID &o );
-
-/* marshalled js object format:
-
- <unsigned totalSize> {<byte BSONType><cstring FieldName><Data>}* EOO
- totalSize includes itself.
-
- Data:
- Bool: <byte>
- EOO: nothing follows
- Undefined: nothing follows
- OID: an OID object
- NumberDouble: <double>
- NumberInt: <int32>
- String: <unsigned32 strsizewithnull><cstring>
- Date: <8bytes>
- Regex: <cstring regex><cstring options>
- Object: a nested object, leading with its entire size, which terminates with EOO.
- Array: same as object
- DBRef: <strlen> <cstring ns> <oid>
- DBRef is a database reference: basically a collection name plus an Object ID
- BinData: <int len> <byte subtype> <byte[len] data>
- Code: a function (not a closure): same format as String.
- Symbol: a language symbol (say a python symbol). same format as String.
- Code With Scope: <total size><String><Object>
-*/
+ /* Object id's are optional for BSONObjects.
+ When present they should be the first object member added.
+ The app server serializes OIDs as <8-byte-int><4-byte-int>) using the machine's
+ native endianness. We deserialize by casting as an OID object, assuming
+ the db server has the same endianness.
+ */
+ class OID {
+ long long a;
+ unsigned b;
+ public:
+ bool operator==(const OID& r) {
+ return a==r.a&&b==r.b;
+ }
+ string str() const {
+ stringstream s;
+ s << hex;
+ s.fill( '0' );
+ s.width( 16 );
+ s << a;
+ s.width( 8 );
+ s << b;
+ s << dec;
+ return s.str();
+ }
+ };
+ ostream& operator<<( ostream &s, const OID &o );
+
+ /* marshalled js object format:
+
+ <unsigned totalSize> {<byte BSONType><cstring FieldName><Data>}* EOO
+ totalSize includes itself.
+
+ Data:
+ Bool: <byte>
+ EOO: nothing follows
+ Undefined: nothing follows
+ OID: an OID object
+ NumberDouble: <double>
+ NumberInt: <int32>
+ String: <unsigned32 strsizewithnull><cstring>
+ Date: <8bytes>
+ Regex: <cstring regex><cstring options>
+ Object: a nested object, leading with its entire size, which terminates with EOO.
+ Array: same as object
+ DBRef: <strlen> <cstring ns> <oid>
+ DBRef is a database reference: basically a collection name plus an Object ID
+ BinData: <int len> <byte subtype> <byte[len] data>
+ Code: a function (not a closure): same format as String.
+ Symbol: a language symbol (say a python symbol). same format as String.
+ Code With Scope: <total size><String><Object>
+ */
-/* Formatting mode for generating a JSON from the 10gen representation.
- Strict - strict RFC format
- TenGen - 10gen format, which is close to JS format. This form is understandable by
- javascript running inside the Mongo server via eval()
- JS - Javascript JSON compatible
- */
-enum JsonStringFormat { Strict, TenGen, JS };
+ /* Formatting mode for generating a JSON from the 10gen representation.
+ Strict - strict RFC format
+ TenGen - 10gen format, which is close to JS format. This form is understandable by
+ javascript running inside the Mongo server via eval()
+ JS - Javascript JSON compatible
+ */
+ enum JsonStringFormat { Strict, TenGen, JS };
#pragma pack(pop)
-/* BSONElement represents an "element" in a BSONObj. So for the object { a : 3, b : "abc" },
- 'a : 3' is the first element (key+value).
+ /* BSONElement represents an "element" in a BSONObj. So for the object { a : 3, b : "abc" },
+ 'a : 3' is the first element (key+value).
- The BSONElement object points into the BSONObj's data. Thus the BSONObj must stay in scope
- for the life of the BSONElement.
+ The BSONElement object points into the BSONObj's data. Thus the BSONObj must stay in scope
+ for the life of the BSONElement.
- <type><fieldName ><value>
- -------- size() ------------
- -fieldNameSize-
- value()
- type()
-*/
-class BSONElement {
- friend class BSONObjIterator;
- friend class BSONObj;
-public:
- string toString() const;
- string jsonString( JsonStringFormat format, bool includeFieldNames = true ) const;
- BSONType type() const {
- return (BSONType) *data;
- }
- bool eoo() const {
- return type() == EOO;
- }
- int size() const;
+ <type><fieldName ><value>
+ -------- size() ------------
+ -fieldNameSize-
+ value()
+ type()
+ */
+ class BSONElement {
+ friend class BSONObjIterator;
+ friend class BSONObj;
+ public:
+ string toString() const;
+ string jsonString( JsonStringFormat format, bool includeFieldNames = true ) const;
+ BSONType type() const {
+ return (BSONType) *data;
+ }
+ bool eoo() const {
+ return type() == EOO;
+ }
+ int size() const;
- // wrap this element up as a singleton object.
- BSONObj wrap();
+ // wrap this element up as a singleton object.
+ BSONObj wrap();
- const char * fieldName() const {
- if ( eoo() ) return ""; // no fieldname for it.
- return data + 1;
- }
+ const char * fieldName() const {
+ if ( eoo() ) return ""; // no fieldname for it.
+ return data + 1;
+ }
- // raw data be careful:
- const char * value() const {
- return (data + fieldNameSize + 1);
- }
- int valuesize() const {
- return size() - fieldNameSize - 1;
- }
+ // raw data be careful:
+ const char * value() const {
+ return (data + fieldNameSize + 1);
+ }
+ int valuesize() const {
+ return size() - fieldNameSize - 1;
+ }
- bool isBoolean() const {
- return type() == Bool;
- }
- bool boolean() const {
- return *value() ? true : false;
- }
+ bool isBoolean() const {
+ return type() == Bool;
+ }
+ bool boolean() const {
+ return *value() ? true : false;
+ }
- unsigned long long date() const {
- return *((unsigned long long*) value());
- }
- //double& number() { return *((double *) value()); }
+ unsigned long long date() const {
+ return *((unsigned long long*) value());
+ }
+ //double& number() { return *((double *) value()); }
- bool isNumber() const {
- return type() == NumberDouble || type() == NumberInt;
- }
- void setNumber(double d) {
- if ( type() == NumberDouble ) *((double *) value()) = d;
- else if ( type() == NumberInt ) *((int *) value()) = (int) d;
- }
- double number() const {
- if ( type() == NumberDouble ) return *((double *) value());
- if ( type() == NumberInt ) return *((int *) value());
- return 0;
- }
- OID& oid() const {
- return *((OID*) value());
- }
+ bool isNumber() const {
+ return type() == NumberDouble || type() == NumberInt;
+ }
+ void setNumber(double d) {
+ if ( type() == NumberDouble ) *((double *) value()) = d;
+ else if ( type() == NumberInt ) *((int *) value()) = (int) d;
+ }
+ double number() const {
+ if ( type() == NumberDouble ) return *((double *) value());
+ if ( type() == NumberInt ) return *((int *) value());
+ return 0;
+ }
+ OID& oid() const {
+ return *((OID*) value());
+ }
- // for strings
- int valuestrsize() const {
- return *((int *) value());
- }
+ // for strings
+ int valuestrsize() const {
+ return *((int *) value());
+ }
- // for objects the size *includes* the size of the size field
- int objsize() const {
- return *((int *) value());
- }
+ // for objects the size *includes* the size of the size field
+ int objsize() const {
+ return *((int *) value());
+ }
- // for strings. also gives you start of the real data for an embedded object
- const char * valuestr() const {
- return value() + 4;
- }
+ // for strings. also gives you start of the real data for an embedded object
+ const char * valuestr() const {
+ return value() + 4;
+ }
- const char *valuestrsafe() const {
- return type() == String ? valuestr() : "";
- }
+ const char *valuestrsafe() const {
+ return type() == String ? valuestr() : "";
+ }
- const char * codeWScopeCode() const {
- return value() + 8;
- }
- const char * codeWScopeScopeData() const {
- // TODO fix
- return codeWScopeCode() + strlen( codeWScopeCode() ) + 1;
- }
+ const char * codeWScopeCode() const {
+ return value() + 8;
+ }
+ const char * codeWScopeScopeData() const {
+ // TODO fix
+ return codeWScopeCode() + strlen( codeWScopeCode() ) + 1;
+ }
- BSONObj embeddedObject() const;
+ BSONObj embeddedObject() const;
- /* uassert if not an object */
- BSONObj embeddedObjectUserCheck();
+ /* uassert if not an object */
+ BSONObj embeddedObjectUserCheck();
- const char *regex() const {
- assert(type() == RegEx);
- return value();
- }
- const char *regexFlags() const {
- const char *p = regex();
- return p + strlen(p) + 1;
- }
+ const char *regex() const {
+ assert(type() == RegEx);
+ return value();
+ }
+ const char *regexFlags() const {
+ const char *p = regex();
+ return p + strlen(p) + 1;
+ }
- /* like operator== but doesn't check the fieldname,
- just the value.
- */
- bool valuesEqual(const BSONElement& r) const {
- if ( isNumber() )
- return number() == r.number() && r.isNumber();
- bool match= valuesize() == r.valuesize() &&
- memcmp(value(),r.value(),valuesize()) == 0;
- return match;
- // todo: make "0" == 0.0, undefined==null
- }
+ /* like operator== but doesn't check the fieldname,
+ just the value.
+ */
+ bool valuesEqual(const BSONElement& r) const {
+ if ( isNumber() )
+ return number() == r.number() && r.isNumber();
+ bool match= valuesize() == r.valuesize() &&
+ memcmp(value(),r.value(),valuesize()) == 0;
+ return match;
+ // todo: make "0" == 0.0, undefined==null
+ }
- bool operator==(const BSONElement& r) const {
- if ( strcmp(fieldName(), r.fieldName()) != 0 )
- return false;
- return valuesEqual(r);
- /*
- int sz = size();
- return sz == r.size() &&
- memcmp(data, r.data, sz) == 0;
- */
- }
+ bool operator==(const BSONElement& r) const {
+ if ( strcmp(fieldName(), r.fieldName()) != 0 )
+ return false;
+ return valuesEqual(r);
+ /*
+ int sz = size();
+ return sz == r.size() &&
+ memcmp(data, r.data, sz) == 0;
+ */
+ }
- /* <0: l<r. 0:l==r. >0:l>r
- order by type, field name, and field value.
- If considerFieldName is true, pay attention to the field name.
- */
- int woCompare( const BSONElement &e, bool considerFieldName = true ) const;
+ /* <0: l<r. 0:l==r. >0:l>r
+ order by type, field name, and field value.
+ If considerFieldName is true, pay attention to the field name.
+ */
+ int woCompare( const BSONElement &e, bool considerFieldName = true ) const;
- const char * rawdata() {
- return data;
- }
+ const char * rawdata() {
+ return data;
+ }
- int getGtLtOp() const;
+ int getGtLtOp() const;
- BSONElement();
+ BSONElement();
-private:
- BSONElement(const char *d) : data(d) {
- fieldNameSize = eoo() ? 0 : strlen(fieldName()) + 1;
- totalSize = -1;
- }
- const char *data;
- int fieldNameSize;
- int totalSize; /* caches the computed size */
-};
-
-/* l and r MUST have same type when called: check that first. */
-int compareElementValues(const BSONElement& l, const BSONElement& r);
-int getGtLtOp(BSONElement& e);
-
-class BSONObj {
- friend class BSONObjIterator;
- class Details {
+ private:
+ BSONElement(const char *d) : data(d) {
+ fieldNameSize = eoo() ? 0 : strlen(fieldName()) + 1;
+ totalSize = -1;
+ }
+ const char *data;
+ int fieldNameSize;
+ int totalSize; /* caches the computed size */
+ };
+
+ /* l and r MUST have same type when called: check that first. */
+ int compareElementValues(const BSONElement& l, const BSONElement& r);
+ int getGtLtOp(BSONElement& e);
+
+ class BSONObj {
+ friend class BSONObjIterator;
+ class Details {
+ public:
+ ~Details() {
+ // note refCount means two different things (thus the assert here)
+ assert(refCount <= 0);
+ if (owned()) {
+ free((void *)_objdata);
+ }
+ _objdata = 0;
+ }
+ const char *_objdata;
+ int _objsize;
+ int refCount; // -1 == don't free (we don't "own" the buffer)
+ bool owned() {
+ return refCount >= 0;
+ }
+ } *details;
+ void init(const char *data, bool ifree) {
+ details = new Details();
+ details->_objdata = data;
+ details->_objsize = *((int*) data);
+ assert( details->_objsize > 0 );
+ assert( details->_objsize <= 1024 * 1024 * 16 );
+ details->refCount = ifree ? 1 : -1;
+ }
public:
- ~Details() {
- // note refCount means two different things (thus the assert here)
- assert(refCount <= 0);
- if (owned()) {
- free((void *)_objdata);
+ explicit BSONObj(const char *msgdata, bool ifree = false) {
+ init(msgdata, ifree);
+ }
+ BSONObj(Record *r);
+ BSONObj() : details(0) { }
+ ~BSONObj() {
+ if ( details ) {
+ if ( --details->refCount <= 0 )
+ delete details;
+ details = 0;
}
- _objdata = 0;
- }
- const char *_objdata;
- int _objsize;
- int refCount; // -1 == don't free (we don't "own" the buffer)
- bool owned() {
- return refCount >= 0;
- }
- } *details;
- void init(const char *data, bool ifree) {
- details = new Details();
- details->_objdata = data;
- details->_objsize = *((int*) data);
- assert( details->_objsize > 0 );
- assert( details->_objsize <= 1024 * 1024 * 16 );
- details->refCount = ifree ? 1 : -1;
- }
-public:
- explicit BSONObj(const char *msgdata, bool ifree = false) {
- init(msgdata, ifree);
- }
- BSONObj(Record *r);
- BSONObj() : details(0) { }
- ~BSONObj() {
- if ( details ) {
- if ( --details->refCount <= 0 )
- delete details;
- details = 0;
}
- }
-
- void appendSelfToBufBuilder(BufBuilder& b) {
- assert( objsize() );
- b.append((void *) objdata(), objsize());
- }
-
- // Readable representation of a 10gen object.
- string toString() const;
-
- // Properly formatted JSON string.
- string jsonString( JsonStringFormat format = Strict ) const;
- /* note: addFields always adds _id even if not specified */
- int addFields(BSONObj& from, set<string>& fields); /* returns n added */
+ void appendSelfToBufBuilder(BufBuilder& b) {
+ assert( objsize() );
+ b.append((void *) objdata(), objsize());
+ }
- /* returns # of top level fields in the object
- note: iterates to count the fields
- */
- int nFields();
+ // Readable representation of a 10gen object.
+ string toString() const;
- /* adds the field names to the fields set. does NOT clear it (appends). */
- int getFieldNames(set<string>& fields);
+ // Properly formatted JSON string.
+ string jsonString( JsonStringFormat format = Strict ) const;
- /* return has eoo() true if no match
- supports "." notation to reach into embedded objects
- */
- BSONElement getFieldDotted(const char *name) const;
- // Like above, but returns first array encountered while traversing the
- // dotted fields of name. The name variable is updated to represent field
- // names with respect to the returned element.
- BSONElement getFieldDottedOrArray(const char *&name) const;
-
- BSONElement getField(const string name) const {
- return getField( name.c_str() );
- };
- BSONElement getField(const char *name) const; /* return has eoo() true if no match */
+ /* note: addFields always adds _id even if not specified */
+ int addFields(BSONObj& from, set<string>& fields); /* returns n added */
- bool hasField( const char * name )const {
- return ! getField( name ).eoo();
- }
+ /* returns # of top level fields in the object
+ note: iterates to count the fields
+ */
+ int nFields();
- // returns "" if DNE or wrong type
- const char * getStringField(const char *name);
+ /* adds the field names to the fields set. does NOT clear it (appends). */
+ int getFieldNames(set<string>& fields);
- BSONObj getObjectField(const char *name) const;
+ /* return has eoo() true if no match
+ supports "." notation to reach into embedded objects
+ */
+ BSONElement getFieldDotted(const char *name) const;
+ // Like above, but returns first array encountered while traversing the
+ // dotted fields of name. The name variable is updated to represent field
+ // names with respect to the returned element.
+ BSONElement getFieldDottedOrArray(const char *&name) const;
+
+ BSONElement getField(const string name) const {
+ return getField( name.c_str() );
+ };
+ BSONElement getField(const char *name) const; /* return has eoo() true if no match */
+
+ bool hasField( const char * name )const {
+ return ! getField( name ).eoo();
+ }
- int getIntField(const char *name) const; // INT_MIN if not present
+ // returns "" if DNE or wrong type
+ const char * getStringField(const char *name);
- bool getBoolField(const char *name);
+ BSONObj getObjectField(const char *name) const;
- /* makes a new BSONObj with the fields specified in pattern.
- fields returned in the order they appear in pattern.
- if any field missing, you get back an empty object overall.
- */
- // sets element field names to empty string
- // If an array is encountered while scanning the dotted names in pattern,
- // that array is added to the returned obj, rather than any subobjects
- // referenced within the array. The variable nameWithinArray is set to the
- // name of the requested field within the returned array.
- BSONObj extractFieldsDotted(BSONObj pattern, BSONObjBuilder& b, const char *&nameWithinArray) const; // this version, builder owns the returned obj buffer
- // sets element field names to empty string
- BSONObj extractFieldsUnDotted(BSONObj pattern);
- // returns elements with original field names
- BSONObj extractFields(BSONObj &pattern);
+ int getIntField(const char *name) const; // INT_MIN if not present
- const char *objdata() const {
- return details->_objdata;
- }
- int objsize() const {
- return details ? details->_objsize : 0; // includes the embedded size field
- }
- bool isEmpty() const {
- return objsize() <= 5;
- }
+ bool getBoolField(const char *name);
- /* sigh...details == 0 is such a pain we have to eliminate that possibility */
- void validateEmpty();
+ /* makes a new BSONObj with the fields specified in pattern.
+ fields returned in the order they appear in pattern.
+ if any field missing, you get back an empty object overall.
+ */
+ // sets element field names to empty string
+ // If an array is encountered while scanning the dotted names in pattern,
+ // that array is added to the returned obj, rather than any subobjects
+ // referenced within the array. The variable nameWithinArray is set to the
+ // name of the requested field within the returned array.
+ BSONObj extractFieldsDotted(BSONObj pattern, BSONObjBuilder& b, const char *&nameWithinArray) const; // this version, builder owns the returned obj buffer
+ // sets element field names to empty string
+ BSONObj extractFieldsUnDotted(BSONObj pattern);
+ // returns elements with original field names
+ BSONObj extractFields(BSONObj &pattern);
- void dump() {
- cout << hex;
- const char *p = objdata();
- for ( int i = 0; i < objsize(); i++ ) {
- cout << i << '\t' << (unsigned) *p;
- if ( *p >= 'A' && *p <= 'z' )
- cout << '\t' << *p;
- cout << endl;
- p++;
+ const char *objdata() const {
+ return details->_objdata;
+ }
+ int objsize() const {
+ return details ? details->_objsize : 0; // includes the embedded size field
+ }
+ bool isEmpty() const {
+ return objsize() <= 5;
}
- }
-
- /* <0: l<r. 0:l==r. >0:l>r
- wo='well ordered'. fields must be in same order in each object.
- Ordering is with respect to the signs of the elements in idxKey.
- */
- int woCompare(const BSONObj& r, const BSONObj &idxKey = BSONObj(),
- bool considerFieldName=true) const;
- /* note this is "shallow equality" -- ints and doubles won't match. for a
- deep equality test use woCompare (which is slower).
- */
- bool woEqual(const BSONObj& r) const {
- int os = objsize();
- if ( os == r.objsize() ) {
- return (os == 0 || memcmp(objdata(),r.objdata(),os)==0);
+ /* sigh...details == 0 is such a pain we have to eliminate that possibility */
+ void validateEmpty();
+
+ void dump() {
+ out() << hex;
+ const char *p = objdata();
+ for ( int i = 0; i < objsize(); i++ ) {
+ out() << i << '\t' << (unsigned) *p;
+ if ( *p >= 'A' && *p <= 'z' )
+ out() << '\t' << *p;
+ out() << endl;
+ p++;
+ }
}
- return false;
- }
- BSONElement firstElement() const {
- return BSONElement(objdata() + 4);
- }
- BSONElement findElement(const char *name) const;
- BSONElement findElement(string name) const {
- return findElement(name.c_str());
- }
- bool hasElement(const char *name);
+ /* <0: l<r. 0:l==r. >0:l>r
+ wo='well ordered'. fields must be in same order in each object.
+ Ordering is with respect to the signs of the elements in idxKey.
+ */
+ int woCompare(const BSONObj& r, const BSONObj &idxKey = BSONObj(),
+ bool considerFieldName=true) const;
- OID* getOID() {
- BSONElement e = firstElement();
- if ( e.type() != jstOID )
- return 0;
- return &e.oid();
- }
+ /* note this is "shallow equality" -- ints and doubles won't match. for a
+ deep equality test use woCompare (which is slower).
+ */
+ bool woEqual(const BSONObj& r) const {
+ int os = objsize();
+ if ( os == r.objsize() ) {
+ return (os == 0 || memcmp(objdata(),r.objdata(),os)==0);
+ }
+ return false;
+ }
- BSONObj(const BSONObj& r) {
- if ( r.details == 0 )
- details = 0;
- else if ( r.details->owned() ) {
- details = r.details;
- details->refCount++;
+ BSONElement firstElement() const {
+ return BSONElement(objdata() + 4);
}
- else {
- details = new Details(*r.details);
+ BSONElement findElement(const char *name) const;
+ BSONElement findElement(string name) const {
+ return findElement(name.c_str());
}
- }
- BSONObj& operator=(const BSONObj& r) {
- if ( details && details->owned() ) {
- if ( --details->refCount == 0 )
- delete details;
+ bool hasElement(const char *name);
+
+ OID* getOID() {
+ BSONElement e = firstElement();
+ if ( e.type() != jstOID )
+ return 0;
+ return &e.oid();
}
- if ( r.details == 0 )
- details = 0;
- else if ( r.details->owned() ) {
- details = r.details;
- details->refCount++;
+ BSONObj(const BSONObj& r) {
+ if ( r.details == 0 )
+ details = 0;
+ else if ( r.details->owned() ) {
+ details = r.details;
+ details->refCount++;
+ }
+ else {
+ details = new Details(*r.details);
+ }
}
- else {
- details = new Details(*r.details);
+ BSONObj& operator=(const BSONObj& r) {
+ if ( details && details->owned() ) {
+ if ( --details->refCount == 0 )
+ delete details;
+ }
+
+ if ( r.details == 0 )
+ details = 0;
+ else if ( r.details->owned() ) {
+ details = r.details;
+ details->refCount++;
+ }
+ else {
+ details = new Details(*r.details);
+ }
+ return *this;
}
- return *this;
- }
- /* makes a copy of the object. Normally, a jsobj points to data "owned"
- by something else. this is a useful way to get your own copy of the buffer
- data (which is freed when the new jsobj destructs).
- */
- BSONObj copy();
-
- int hash() const {
- unsigned x = 0;
- const char *p = objdata();
- for ( int i = 0; i < objsize(); i++ )
- x = x * 131 + p[i];
- return (x & 0x7fffffff) | 0x8000000; // must be > 0
- }
+ /* makes a copy of the object. Normally, a jsobj points to data "owned"
+ by something else. this is a useful way to get your own copy of the buffer
+ data (which is freed when the new jsobj destructs).
+ */
+ BSONObj copy();
+
+ int hash() const {
+ unsigned x = 0;
+ const char *p = objdata();
+ for ( int i = 0; i < objsize(); i++ )
+ x = x * 131 + p[i];
+ return (x & 0x7fffffff) | 0x8000000; // must be > 0
+ }
- // true unless corrupt
- bool valid() const;
-};
-ostream& operator<<( ostream &s, const BSONObj &o );
+ // true unless corrupt
+ bool valid() const;
+ };
+ ostream& operator<<( ostream &s, const BSONObj &o );
-class BSONObjCmp {
-public:
- BSONObjCmp( const BSONObj &_order ) : order( _order ) {}
- bool operator()( const BSONObj &l, const BSONObj &r ) const {
- return l.woCompare( r, order ) < 0;
- }
-private:
- BSONObj order;
-};
+ class BSONObjCmp {
+ public:
+ BSONObjCmp( const BSONObj &_order ) : order( _order ) {}
+ bool operator()( const BSONObj &l, const BSONObj &r ) const {
+ return l.woCompare( r, order ) < 0;
+ }
+ private:
+ BSONObj order;
+ };
-class BSONObjCmpDefaultOrder : public BSONObjCmp {
-public:
- BSONObjCmpDefaultOrder() : BSONObjCmp( BSONObj() ) {}
-};
+ class BSONObjCmpDefaultOrder : public BSONObjCmp {
+ public:
+ BSONObjCmpDefaultOrder() : BSONObjCmp( BSONObj() ) {}
+ };
-typedef set< BSONObj, BSONObjCmpDefaultOrder > BSONObjSetDefaultOrder;
+ typedef set< BSONObj, BSONObjCmpDefaultOrder > BSONObjSetDefaultOrder;
#define BUILDOBJ(x) ( BSONObjBuilder() << x ).doneAndDecouple()
-class BSONObjBuilderValueStream {
-public:
- BSONObjBuilderValueStream( const char * fieldName , BSONObjBuilder * builder );
+ class BSONObjBuilderValueStream {
+ public:
+ BSONObjBuilderValueStream( const char * fieldName , BSONObjBuilder * builder );
- BSONObjBuilder& operator<<( const char * value );
- BSONObjBuilder& operator<<( const int value );
- BSONObjBuilder& operator<<( const double value );
+ BSONObjBuilder& operator<<( const char * value );
+ BSONObjBuilder& operator<<( const int value );
+ BSONObjBuilder& operator<<( const double value );
-private:
- const char * _fieldName;
- BSONObjBuilder * _builder;
-};
+ private:
+ const char * _fieldName;
+ BSONObjBuilder * _builder;
+ };
-class BSONObjBuilder {
-public:
- BSONObjBuilder(int initsize=512) : b(initsize) {
- b.skip(4); /*leave room for size field*/
- }
+ class BSONObjBuilder {
+ public:
+ BSONObjBuilder(int initsize=512) : b(initsize) {
+ b.skip(4); /*leave room for size field*/
+ }
- /* add all the fields from the object specified to this object */
- BSONObjBuilder& appendElements(BSONObj x);
+ /* add all the fields from the object specified to this object */
+ BSONObjBuilder& appendElements(BSONObj x);
- void append(BSONElement& e) {
- assert( !e.eoo() ); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
- b.append((void*) e.rawdata(), e.size());
- }
+ void append(BSONElement& e) {
+ assert( !e.eoo() ); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
+ b.append((void*) e.rawdata(), e.size());
+ }
- /* append an element but with a new name */
- void appendAs(const BSONElement& e, const char *as) {
- b.append((char) e.type());
- b.append(as);
- b.append((void *) e.value(), e.valuesize());
- }
+ /* append an element but with a new name */
+ void appendAs(const BSONElement& e, const char *as) {
+ b.append((char) e.type());
+ b.append(as);
+ b.append((void *) e.value(), e.valuesize());
+ }
- /* add a subobject as a member */
- void append(const char *fieldName, BSONObj subObj) {
- b.append((char) Object);
- b.append(fieldName);
- b.append((void *) subObj.objdata(), subObj.objsize());
- }
+ /* add a subobject as a member */
+ void append(const char *fieldName, BSONObj subObj) {
+ b.append((char) Object);
+ b.append(fieldName);
+ b.append((void *) subObj.objdata(), subObj.objsize());
+ }
- /* add a subobject as a member with type Array. Thus arr object should have "0", "1", ...
- style fields in it.
- */
- void appendArray(const char *fieldName, BSONObj subObj) {
- b.append((char) Array);
- b.append(fieldName);
- b.append((void *) subObj.objdata(), subObj.objsize());
- }
+ /* add a subobject as a member with type Array. Thus arr object should have "0", "1", ...
+ style fields in it.
+ */
+ void appendArray(const char *fieldName, BSONObj subObj) {
+ b.append((char) Array);
+ b.append(fieldName);
+ b.append((void *) subObj.objdata(), subObj.objsize());
+ }
- void appendBool(const char *fieldName, int val) {
- b.append((char) Bool);
- b.append(fieldName);
- b.append((char) (val?1:0));
- }
- void appendInt(const char *fieldName, int n) {
- b.append((char) NumberInt);
- b.append(fieldName);
- b.append(n);
- }
- BSONObjBuilder& append(const char *fieldName, double n) {
- b.append((char) NumberDouble);
- b.append(fieldName);
- b.append(n);
- return *this;
- }
- void appendOID(const char *fieldName, OID *oid = 0) {
- b.append((char) jstOID);
- b.append(fieldName);
- if ( oid )
- b.append( (void *) oid, 12 );
- else {
- OID tmp;
- memset( &tmp, 0, 12 );
- b.append( (void *) &tmp, 12 );
+ void appendBool(const char *fieldName, int val) {
+ b.append((char) Bool);
+ b.append(fieldName);
+ b.append((char) (val?1:0));
+ }
+ void appendInt(const char *fieldName, int n) {
+ b.append((char) NumberInt);
+ b.append(fieldName);
+ b.append(n);
+ }
+ BSONObjBuilder& append(const char *fieldName, double n) {
+ b.append((char) NumberDouble);
+ b.append(fieldName);
+ b.append(n);
+ return *this;
+ }
+ void appendOID(const char *fieldName, OID *oid = 0) {
+ b.append((char) jstOID);
+ b.append(fieldName);
+ if ( oid )
+ b.append( (void *) oid, 12 );
+ else {
+ OID tmp;
+ memset( &tmp, 0, 12 );
+ b.append( (void *) &tmp, 12 );
+ }
+ }
+ void appendDate(const char *fieldName, unsigned long long dt) {
+ b.append((char) Date);
+ b.append(fieldName);
+ b.append(dt);
+ }
+ void appendRegex(const char *fieldName, const char *regex, const char *options = "") {
+ b.append((char) RegEx);
+ b.append(fieldName);
+ b.append(regex);
+ b.append(options);
+ }
+ void appendCode(const char *fieldName, const char *code) {
+ b.append((char) Code);
+ b.append(fieldName);
+ b.append((int) strlen(code)+1);
+ b.append(code);
+ }
+ BSONObjBuilder& append(const char *fieldName, const char *str) {
+ b.append((char) String);
+ b.append(fieldName);
+ b.append((int) strlen(str)+1);
+ b.append(str);
+ return *this;
+ }
+ void append(const char *fieldName, string str) {
+ append(fieldName, str.c_str());
+ }
+ void appendSymbol(const char *fieldName, const char *symbol) {
+ b.append((char) Symbol);
+ b.append(fieldName);
+ b.append((int) strlen(symbol)+1);
+ b.append(symbol);
+ }
+ void appendNull( const char *fieldName ) {
+ b.append( (char) jstNULL );
+ b.append( fieldName );
+ }
+ // Append an element that is less than all other keys.
+ void appendMinKey( const char *fieldName ) {
+ b.append( (char) MinKey );
+ b.append( fieldName );
+ }
+ // Append an element that is greater than all other keys.
+ void appendMaxKey( const char *fieldName ) {
+ b.append( (char) MaxKey );
+ b.append( fieldName );
+ }
+ void appendDBRef( const char *fieldName, const char *ns, const OID &oid ) {
+ b.append( (char) DBRef );
+ b.append( fieldName );
+ b.append( (int) strlen( ns ) + 1 );
+ b.append( ns );
+ b.append( (void *) &oid, 12 );
+ }
+ void appendBinData( const char *fieldName, int len, BinDataType type, const char *data ) {
+ b.append( (char) BinData );
+ b.append( fieldName );
+ b.append( len );
+ b.append( (char) type );
+ b.append( (void *) data, len );
}
- }
- void appendDate(const char *fieldName, unsigned long long dt) {
- b.append((char) Date);
- b.append(fieldName);
- b.append(dt);
- }
- void appendRegex(const char *fieldName, const char *regex, const char *options = "") {
- b.append((char) RegEx);
- b.append(fieldName);
- b.append(regex);
- b.append(options);
- }
- void appendCode(const char *fieldName, const char *code) {
- b.append((char) Code);
- b.append(fieldName);
- b.append((int) strlen(code)+1);
- b.append(code);
- }
- BSONObjBuilder& append(const char *fieldName, const char *str) {
- b.append((char) String);
- b.append(fieldName);
- b.append((int) strlen(str)+1);
- b.append(str);
- return *this;
- }
- void append(const char *fieldName, string str) {
- append(fieldName, str.c_str());
- }
- void appendSymbol(const char *fieldName, const char *symbol) {
- b.append((char) Symbol);
- b.append(fieldName);
- b.append((int) strlen(symbol)+1);
- b.append(symbol);
- }
- void appendNull( const char *fieldName ) {
- b.append( (char) jstNULL );
- b.append( fieldName );
- }
- // Append an element that is less than all other keys.
- void appendMinKey( const char *fieldName ) {
- b.append( (char) MinKey );
- b.append( fieldName );
- }
- // Append an element that is greater than all other keys.
- void appendMaxKey( const char *fieldName ) {
- b.append( (char) MaxKey );
- b.append( fieldName );
- }
- void appendDBRef( const char *fieldName, const char *ns, const OID &oid ) {
- b.append( (char) DBRef );
- b.append( fieldName );
- b.append( (int) strlen( ns ) + 1 );
- b.append( ns );
- b.append( (void *) &oid, 12 );
- }
- void appendBinData( const char *fieldName, int len, BinDataType type, const char *data ) {
- b.append( (char) BinData );
- b.append( fieldName );
- b.append( len );
- b.append( (char) type );
- b.append( (void *) data, len );
- }
- template < class T >
- void append( const char *fieldName, const vector< T >& vals ) {
- BSONObjBuilder arrBuilder;
- for ( unsigned int i = 0; i < vals.size(); ++i )
- arrBuilder.append( numStr( i ).c_str(), vals[ i ] );
- marshalArray( fieldName, arrBuilder.done() );
- }
+ template < class T >
+ void append( const char *fieldName, const vector< T >& vals ) {
+ BSONObjBuilder arrBuilder;
+ for ( unsigned int i = 0; i < vals.size(); ++i )
+ arrBuilder.append( numStr( i ).c_str(), vals[ i ] );
+ marshalArray( fieldName, arrBuilder.done() );
+ }
- void appendIntArray( const char *fieldName, const vector< int >& vals ) {
- BSONObjBuilder arrBuilder;
- for ( unsigned i = 0; i < vals.size(); ++i )
- arrBuilder.appendInt( numStr( i ).c_str(), vals[ i ] );
- marshalArray( fieldName, arrBuilder.done() );
- }
+ void appendIntArray( const char *fieldName, const vector< int >& vals ) {
+ BSONObjBuilder arrBuilder;
+ for ( unsigned i = 0; i < vals.size(); ++i )
+ arrBuilder.appendInt( numStr( i ).c_str(), vals[ i ] );
+ marshalArray( fieldName, arrBuilder.done() );
+ }
- /* BSONObj will free the buffer when it is finished. */
- BSONObj doneAndDecouple() {
- int l;
- return BSONObj(decouple(l), true);
- }
+ /* BSONObj will free the buffer when it is finished. */
+ BSONObj doneAndDecouple() {
+ int l;
+ return BSONObj(decouple(l), true);
+ }
- /* this version, jsobjbuilder still frees the jsobj
- when the builder goes out of scope. use it this way
- by default, that's simplest.
- */
- BSONObj done() {
- return BSONObj(_done());
- }
+ /* this version, jsobjbuilder still frees the jsobj
+ when the builder goes out of scope. use it this way
+ by default, that's simplest.
+ */
+ BSONObj done() {
+ return BSONObj(_done());
+ }
- /* assume ownership of the buffer - you must then free it (with free()) */
- char* decouple(int& l) {
- char *x = _done();
- assert( x );
- l = b.len();
- b.decouple();
- return x;
- }
- void decouple() {
- b.decouple(); // post done() call version. be sure jsobj frees...
- }
+ /* assume ownership of the buffer - you must then free it (with free()) */
+ char* decouple(int& l) {
+ char *x = _done();
+ assert( x );
+ l = b.len();
+ b.decouple();
+ return x;
+ }
+ void decouple() {
+ b.decouple(); // post done() call version. be sure jsobj frees...
+ }
- static string numStr( int i ) {
- stringstream o;
- o << i;
- return o.str();
- }
+ static string numStr( int i ) {
+ stringstream o;
+ o << i;
+ return o.str();
+ }
- BSONObjBuilderValueStream operator<<(const char * name ) {
- return BSONObjBuilderValueStream( name , this );
- }
+ BSONObjBuilderValueStream operator<<(const char * name ) {
+ return BSONObjBuilderValueStream( name , this );
+ }
- BSONObjBuilderValueStream operator<<( string name ) {
- return BSONObjBuilderValueStream( name.c_str() , this );
- }
+ BSONObjBuilderValueStream operator<<( string name ) {
+ return BSONObjBuilderValueStream( name.c_str() , this );
+ }
-private:
- // Append the provided arr object as an array.
- void marshalArray( const char *fieldName, const BSONObj &arr ) {
- b.append( (char) Array );
- b.append( fieldName );
- b.append( (void *) arr.objdata(), arr.objsize() );
- }
+ private:
+ // Append the provided arr object as an array.
+ void marshalArray( const char *fieldName, const BSONObj &arr ) {
+ b.append( (char) Array );
+ b.append( fieldName );
+ b.append( (void *) arr.objdata(), arr.objsize() );
+ }
- char* _done() {
- b.append((char) EOO);
- char *data = b.buf();
- *((int*)data) = b.len();
- return data;
- }
+ char* _done() {
+ b.append((char) EOO);
+ char *data = b.buf();
+ *((int*)data) = b.len();
+ return data;
+ }
- BufBuilder b;
-};
+ BufBuilder b;
+ };
-/* iterator for a BSONObj
+ /* iterator for a BSONObj
- Note each BSONObj ends with an EOO element: so you will get more() on an empty
- object, although next().eoo() will be true.
-*/
-class BSONObjIterator {
-public:
- BSONObjIterator(const BSONObj& jso) {
- int sz = jso.objsize();
- if ( sz == 0 ) {
- pos = theend = 0;
- return;
- }
- pos = jso.objdata() + 4;
- theend = jso.objdata() + sz;
- }
- bool more() {
- return pos < theend;
- }
- BSONElement next() {
- BSONElement e(pos);
- pos += e.size();
- return e;
- }
-private:
- const char *pos;
- const char *theend;
-};
-
-/* iterator a BSONObj which is an array, in array order.
-class JSArrayIter {
-public:
- BSONObjIterator(const BSONObj& jso) {
-...
- }
- bool more() { return ... }
- BSONElement next() {
-...
- }
-};
-*/
+ Note each BSONObj ends with an EOO element: so you will get more() on an empty
+ object, although next().eoo() will be true.
+ */
+ class BSONObjIterator {
+ public:
+ BSONObjIterator(const BSONObj& jso) {
+ int sz = jso.objsize();
+ if ( sz == 0 ) {
+ pos = theend = 0;
+ return;
+ }
+ pos = jso.objdata() + 4;
+ theend = jso.objdata() + sz;
+ }
+ bool more() {
+ return pos < theend;
+ }
+ BSONElement next() {
+ BSONElement e(pos);
+ pos += e.size();
+ return e;
+ }
+ private:
+ const char *pos;
+ const char *theend;
+ };
+
+ /* iterator a BSONObj which is an array, in array order.
+ class JSArrayIter {
+ public:
+ BSONObjIterator(const BSONObj& jso) {
+ ...
+ }
+ bool more() { return ... }
+ BSONElement next() {
+ ...
+ }
+ };
+ */
} // namespace mongo
@@ -773,107 +773,107 @@ public:
namespace mongo {
-extern BSONObj maxKey;
-extern BSONObj minKey;
+ extern BSONObj maxKey;
+ extern BSONObj minKey;
-/*- just for testing -- */
+ /*- just for testing -- */
#pragma pack(push,1)
-struct JSObj1 {
- JSObj1() {
- totsize=sizeof(JSObj1);
- n = NumberDouble;
- strcpy_s(nname, 5, "abcd");
- N = 3.1;
- s = String;
- strcpy_s(sname, 7, "abcdef");
- slen = 10;
- strcpy_s(sval, 10, "123456789");
- eoo = EOO;
- }
- unsigned totsize;
+ struct JSObj1 {
+ JSObj1() {
+ totsize=sizeof(JSObj1);
+ n = NumberDouble;
+ strcpy_s(nname, 5, "abcd");
+ N = 3.1;
+ s = String;
+ strcpy_s(sname, 7, "abcdef");
+ slen = 10;
+ strcpy_s(sval, 10, "123456789");
+ eoo = EOO;
+ }
+ unsigned totsize;
- char n;
- char nname[5];
- double N;
+ char n;
+ char nname[5];
+ double N;
- char s;
- char sname[7];
- unsigned slen;
- char sval[10];
+ char s;
+ char sname[7];
+ unsigned slen;
+ char sval[10];
- char eoo;
-};
+ char eoo;
+ };
#pragma pack(pop)
-extern JSObj1 js1;
+ extern JSObj1 js1;
-inline BSONObj BSONElement::embeddedObjectUserCheck() {
- uassert( "invalid parameter: expected an object", type()==Object || type()==Array );
- return BSONObj(value());
-}
+ inline BSONObj BSONElement::embeddedObjectUserCheck() {
+ uassert( "invalid parameter: expected an object", type()==Object || type()==Array );
+ return BSONObj(value());
+ }
-inline BSONObj BSONElement::embeddedObject() const {
- assert( type()==Object || type()==Array );
- return BSONObj(value());
-}
+ inline BSONObj BSONElement::embeddedObject() const {
+ assert( type()==Object || type()==Array );
+ return BSONObj(value());
+ }
-inline BSONObj BSONObj::copy() {
- if ( isEmpty() )
- return *this;
+ inline BSONObj BSONObj::copy() {
+ if ( isEmpty() )
+ return *this;
- char *p = (char*) malloc(objsize());
- memcpy(p, objdata(), objsize());
- return BSONObj(p, true);
-}
+ char *p = (char*) malloc(objsize());
+ memcpy(p, objdata(), objsize());
+ return BSONObj(p, true);
+ }
// wrap this element up as a singleton object.
-inline BSONObj BSONElement::wrap() {
- BSONObjBuilder b;
- b.append(*this);
- return b.doneAndDecouple();
-}
-
-inline bool BSONObj::hasElement(const char *name) {
- if ( !isEmpty() ) {
- BSONObjIterator it(*this);
- while ( it.more() ) {
- BSONElement e = it.next();
- if ( strcmp(name, e.fieldName()) == 0 )
- return true;
+ inline BSONObj BSONElement::wrap() {
+ BSONObjBuilder b;
+ b.append(*this);
+ return b.doneAndDecouple();
+ }
+
+ inline bool BSONObj::hasElement(const char *name) {
+ if ( !isEmpty() ) {
+ BSONObjIterator it(*this);
+ while ( it.more() ) {
+ BSONElement e = it.next();
+ if ( strcmp(name, e.fieldName()) == 0 )
+ return true;
+ }
+ }
+ return false;
+ }
+
+ inline BSONElement BSONObj::findElement(const char *name) const {
+ if ( !isEmpty() ) {
+ BSONObjIterator it(*this);
+ while ( it.more() ) {
+ BSONElement e = it.next();
+ if ( strcmp(name, e.fieldName()) == 0 )
+ return e;
+ }
}
+ return BSONElement();
}
- return false;
-}
-inline BSONElement BSONObj::findElement(const char *name) const {
- if ( !isEmpty() ) {
- BSONObjIterator it(*this);
+ /* add all the fields from the object specified to this object */
+ inline BSONObjBuilder& BSONObjBuilder::appendElements(BSONObj x) {
+ BSONObjIterator it(x);
while ( it.more() ) {
BSONElement e = it.next();
- if ( strcmp(name, e.fieldName()) == 0 )
- return e;
+ if ( e.eoo() ) break;
+ append(e);
}
+ return *this;
}
- return BSONElement();
-}
-
-/* add all the fields from the object specified to this object */
-inline BSONObjBuilder& BSONObjBuilder::appendElements(BSONObj x) {
- BSONObjIterator it(x);
- while ( it.more() ) {
- BSONElement e = it.next();
- if ( e.eoo() ) break;
- append(e);
- }
- return *this;
-}
-extern BSONObj emptyObj;
+ extern BSONObj emptyObj;
-inline void BSONObj::validateEmpty() {
- if ( details == 0 )
- *this = emptyObj;
-}
+ inline void BSONObj::validateEmpty() {
+ if ( details == 0 )
+ *this = emptyObj;
+ }
} // namespace mongo
diff --git a/db/json.cpp b/db/json.cpp
index 05cc669486e..9d844f8ba7b 100644
--- a/db/json.cpp
+++ b/db/json.cpp
@@ -24,278 +24,278 @@ using namespace boost::spirit;
namespace mongo {
-struct ObjectBuilder {
- BSONObjBuilder *back() {
- return builders.back().get();
- }
- // Storage for field names of elements within builders.back().
- const char *fieldName() {
- return fieldNames.back().c_str();
- }
- void push() {
- boost::shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
- builders.push_back( b );
- fieldNames.push_back( "" );
- indexes.push_back( 0 );
- }
- BSONObj pop() {
- BSONObj ret = back()->doneAndDecouple();
- builders.pop_back();
- fieldNames.pop_back();
- indexes.pop_back();
- return ret;
- }
- void nameFromIndex() {
- fieldNames.back() = BSONObjBuilder::numStr( indexes.back() );
- }
- string popString() {
- string ret = ss.str();
- ss.str( "" );
- return ret;
- }
- // Cannot use auto_ptr because its copy constructor takes a non const reference.
- vector< boost::shared_ptr< BSONObjBuilder > > builders;
- vector< string > fieldNames;
- vector< int > indexes;
- stringstream ss;
- string ns;
- OID oid;
- string binData;
- BinDataType binDataType;
- string regex;
- string regexOptions;
- unsigned long long date;
-};
-
-struct objectStart {
- objectStart( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char &c ) const {
- b.push();
- }
- ObjectBuilder &b;
-};
-
-struct arrayStart {
- arrayStart( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char &c ) const {
- b.push();
- b.nameFromIndex();
- }
- ObjectBuilder &b;
-};
-
-struct arrayNext {
- arrayNext( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char &c ) const {
- ++b.indexes.back();
- b.nameFromIndex();
- }
- ObjectBuilder &b;
-};
+ struct ObjectBuilder {
+ BSONObjBuilder *back() {
+ return builders.back().get();
+ }
+ // Storage for field names of elements within builders.back().
+ const char *fieldName() {
+ return fieldNames.back().c_str();
+ }
+ void push() {
+ boost::shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
+ builders.push_back( b );
+ fieldNames.push_back( "" );
+ indexes.push_back( 0 );
+ }
+ BSONObj pop() {
+ BSONObj ret = back()->doneAndDecouple();
+ builders.pop_back();
+ fieldNames.pop_back();
+ indexes.pop_back();
+ return ret;
+ }
+ void nameFromIndex() {
+ fieldNames.back() = BSONObjBuilder::numStr( indexes.back() );
+ }
+ string popString() {
+ string ret = ss.str();
+ ss.str( "" );
+ return ret;
+ }
+ // Cannot use auto_ptr because its copy constructor takes a non const reference.
+ vector< boost::shared_ptr< BSONObjBuilder > > builders;
+ vector< string > fieldNames;
+ vector< int > indexes;
+ stringstream ss;
+ string ns;
+ OID oid;
+ string binData;
+ BinDataType binDataType;
+ string regex;
+ string regexOptions;
+ unsigned long long date;
+ };
-struct ch {
- ch( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char c ) const {
- b.ss << c;
- }
- ObjectBuilder &b;
-};
-
-struct chE {
- chE( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char c ) const {
- char o = '\0';
- switch ( c ) {
- case '\"':
- o = '\"';
- break;
- case '\\':
- o = '\\';
- break;
- case '/':
- o = '/';
- break;
- case 'b':
- o = '\b';
- break;
- case 'f':
- o = '\f';
- break;
- case 'n':
- o = '\n';
- break;
- case 'r':
- o = '\r';
- break;
- case 't':
- o = '\t';
- break;
- default:
+ struct objectStart {
+ objectStart( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char &c ) const {
+ b.push();
+ }
+ ObjectBuilder &b;
+ };
+
+ struct arrayStart {
+ arrayStart( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char &c ) const {
+ b.push();
+ b.nameFromIndex();
+ }
+ ObjectBuilder &b;
+ };
+
+ struct arrayNext {
+ arrayNext( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char &c ) const {
+ ++b.indexes.back();
+ b.nameFromIndex();
+ }
+ ObjectBuilder &b;
+ };
+
+ struct ch {
+ ch( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char c ) const {
+ b.ss << c;
+ }
+ ObjectBuilder &b;
+ };
+
+ struct chE {
+ chE( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char c ) const {
+ char o = '\0';
+ switch ( c ) {
+ case '\"':
+ o = '\"';
+ break;
+ case '\\':
+ o = '\\';
+ break;
+ case '/':
+ o = '/';
+ break;
+ case 'b':
+ o = '\b';
+ break;
+ case 'f':
+ o = '\f';
+ break;
+ case 'n':
+ o = '\n';
+ break;
+ case 'r':
+ o = '\r';
+ break;
+ case 't':
+ o = '\t';
+ break;
+ default:
+ assert( false );
+ }
+ b.ss << o;
+ }
+ ObjectBuilder &b;
+ };
+
+ namespace hex {
+ int val( char c ) {
+ if ( '0' <= c && c <= '9' )
+ return c - '0';
+ if ( 'a' <= c && c <= 'f' )
+ return c - 'a' + 10;
+ if ( 'A' <= c && c <= 'F' )
+ return c - 'A' + 10;
assert( false );
+ return 0xff;
}
- b.ss << o;
- }
- ObjectBuilder &b;
-};
-
-namespace hex {
-int val( char c ) {
- if ( '0' <= c && c <= '9' )
- return c - '0';
- if ( 'a' <= c && c <= 'f' )
- return c - 'a' + 10;
- if ( 'A' <= c && c <= 'F' )
- return c - 'A' + 10;
- assert( false );
- return 0xff;
-}
-char val( const char *c ) {
- return ( val( c[ 0 ] ) << 4 ) | val( c[ 1 ] );
-}
-} // namespace hex
-
-struct chU {
- chU( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- unsigned char first = hex::val( start );
- unsigned char second = hex::val( start + 2 );
- if ( first == 0 && second < 0x80 )
- b.ss << second;
- else if ( first < 0x08 ) {
- b.ss << char( 0xc0 | ( ( first << 2 ) | ( second >> 6 ) ) );
- b.ss << char( 0x80 | ( ~0xc0 & second ) );
- } else {
- b.ss << char( 0xe0 | ( first >> 4 ) );
- b.ss << char( 0x80 | ( ~0xc0 & ( ( first << 2 ) | ( second >> 6 ) ) ) );
- b.ss << char( 0x80 | ( ~0xc0 & second ) );
+ char val( const char *c ) {
+ return ( val( c[ 0 ] ) << 4 ) | val( c[ 1 ] );
}
- }
- ObjectBuilder &b;
-};
+ } // namespace hex
+
+ struct chU {
+ chU( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ unsigned char first = hex::val( start );
+ unsigned char second = hex::val( start + 2 );
+ if ( first == 0 && second < 0x80 )
+ b.ss << second;
+ else if ( first < 0x08 ) {
+ b.ss << char( 0xc0 | ( ( first << 2 ) | ( second >> 6 ) ) );
+ b.ss << char( 0x80 | ( ~0xc0 & second ) );
+ } else {
+ b.ss << char( 0xe0 | ( first >> 4 ) );
+ b.ss << char( 0x80 | ( ~0xc0 & ( ( first << 2 ) | ( second >> 6 ) ) ) );
+ b.ss << char( 0x80 | ( ~0xc0 & second ) );
+ }
+ }
+ ObjectBuilder &b;
+ };
-struct chClear {
- chClear( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char c ) const {
- b.popString();
- }
- ObjectBuilder &b;
-};
-
-struct fieldNameEnd {
- fieldNameEnd( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- string name = b.popString();
- massert( "Invalid use of reserved field name",
- name != "$ns" &&
- name != "$id" &&
- name != "$binary" &&
- name != "$type" &&
- name != "$date" &&
- name != "$regex" &&
- name != "$options" );
- b.fieldNames.back() = name;
- }
- ObjectBuilder &b;
-};
+ struct chClear {
+ chClear( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char c ) const {
+ b.popString();
+ }
+ ObjectBuilder &b;
+ };
-struct stringEnd {
- stringEnd( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- b.back()->append( b.fieldName(), b.popString() );
- }
- ObjectBuilder &b;
-};
+ struct fieldNameEnd {
+ fieldNameEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ string name = b.popString();
+ massert( "Invalid use of reserved field name",
+ name != "$ns" &&
+ name != "$id" &&
+ name != "$binary" &&
+ name != "$type" &&
+ name != "$date" &&
+ name != "$regex" &&
+ name != "$options" );
+ b.fieldNames.back() = name;
+ }
+ ObjectBuilder &b;
+ };
-struct numberValue {
- numberValue( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( double d ) const {
- b.back()->append( b.fieldName(), d );
- }
- ObjectBuilder &b;
-};
-
-struct subobjectEnd {
- subobjectEnd( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- BSONObj o = b.pop();
- b.back()->append( b.fieldName(), o );
- }
- ObjectBuilder &b;
-};
-
-struct arrayEnd {
- arrayEnd( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- BSONObj o = b.pop();
- b.back()->appendArray( b.fieldName(), o );
- }
- ObjectBuilder &b;
-};
+ struct stringEnd {
+ stringEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->append( b.fieldName(), b.popString() );
+ }
+ ObjectBuilder &b;
+ };
-struct trueValue {
- trueValue( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- b.back()->appendBool( b.fieldName(), true );
- }
- ObjectBuilder &b;
-};
+ struct numberValue {
+ numberValue( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( double d ) const {
+ b.back()->append( b.fieldName(), d );
+ }
+ ObjectBuilder &b;
+ };
-struct falseValue {
- falseValue( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- b.back()->appendBool( b.fieldName(), false );
- }
- ObjectBuilder &b;
-};
+ struct subobjectEnd {
+ subobjectEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ BSONObj o = b.pop();
+ b.back()->append( b.fieldName(), o );
+ }
+ ObjectBuilder &b;
+ };
-struct nullValue {
- nullValue( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- b.back()->appendNull( b.fieldName() );
- }
- ObjectBuilder &b;
-};
+ struct arrayEnd {
+ arrayEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ BSONObj o = b.pop();
+ b.back()->appendArray( b.fieldName(), o );
+ }
+ ObjectBuilder &b;
+ };
-struct dbrefNS {
- dbrefNS( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- b.ns = b.popString();
- }
- ObjectBuilder &b;
-};
+ struct trueValue {
+ trueValue( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendBool( b.fieldName(), true );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct falseValue {
+ falseValue( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendBool( b.fieldName(), false );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct nullValue {
+ nullValue( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendNull( b.fieldName() );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct dbrefNS {
+ dbrefNS( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.ns = b.popString();
+ }
+ ObjectBuilder &b;
+ };
// NOTE s must be 24 characters.
-OID stringToOid( const char *s ) {
- OID oid;
- char *oidP = (char *)( &oid );
- for ( int i = 0; i < 12; ++i )
- oidP[ i ] = hex::val( s[ i * 2 ] );
- return oid;
-}
-
-struct oidValue {
- oidValue( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- b.oid = stringToOid( start );
- }
- ObjectBuilder &b;
-};
+ OID stringToOid( const char *s ) {
+ OID oid;
+ char *oidP = (char *)( &oid );
+ for ( int i = 0; i < 12; ++i )
+ oidP[ i ] = hex::val( s[ i * 2 ] );
+ return oid;
+ }
+
+ struct oidValue {
+ oidValue( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.oid = stringToOid( start );
+ }
+ ObjectBuilder &b;
+ };
-struct dbrefEnd {
- dbrefEnd( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- b.back()->appendDBRef( b.fieldName(), b.ns.c_str(), b.oid );
- }
- ObjectBuilder &b;
-};
+ struct dbrefEnd {
+ dbrefEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendDBRef( b.fieldName(), b.ns.c_str(), b.oid );
+ }
+ ObjectBuilder &b;
+ };
-struct oidEnd {
- oidEnd( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- b.back()->appendOID( "_id", &b.oid );
- }
- ObjectBuilder &b;
-};
+ struct oidEnd {
+ oidEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendOID( "_id", &b.oid );
+ }
+ ObjectBuilder &b;
+ };
// NOTE The boost base64 library code was originally written for use only by the
// boost::archive package, however a google search reveals that these base64
@@ -303,84 +303,84 @@ struct oidEnd {
// handle '=' padding bytes, so here I replace them with 'A' (the value for 0
// in base64's 6bit encoding) and then drop the garbage zeroes produced by
// boost's conversion.
-struct binDataBinary {
- typedef
- boost::archive::iterators::transform_width
- < boost::archive::iterators::binary_from_base64
- < string::const_iterator >, 8, 6
- > binary_t;
- binDataBinary( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- massert( "Badly formatted bindata", ( end - start ) % 4 == 0 );
- string base64( start, end );
- int len = base64.length();
- int pad = 0;
- for (; len - pad > 0 && base64[ len - 1 - pad ] == '='; ++pad )
- base64[ len - 1 - pad ] = 'A';
- massert( "Badly formatted bindata", pad < 3 );
- b.binData = string( binary_t( base64.begin() ), binary_t( base64.end() ) );
- b.binData.resize( b.binData.length() - pad );
- }
- ObjectBuilder &b;
-};
+ struct binDataBinary {
+ typedef
+ boost::archive::iterators::transform_width
+ < boost::archive::iterators::binary_from_base64
+ < string::const_iterator >, 8, 6
+ > binary_t;
+ binDataBinary( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ massert( "Badly formatted bindata", ( end - start ) % 4 == 0 );
+ string base64( start, end );
+ int len = base64.length();
+ int pad = 0;
+ for (; len - pad > 0 && base64[ len - 1 - pad ] == '='; ++pad )
+ base64[ len - 1 - pad ] = 'A';
+ massert( "Badly formatted bindata", pad < 3 );
+ b.binData = string( binary_t( base64.begin() ), binary_t( base64.end() ) );
+ b.binData.resize( b.binData.length() - pad );
+ }
+ ObjectBuilder &b;
+ };
-struct binDataType {
- binDataType( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- b.binDataType = BinDataType( hex::val( start ) );
- }
- ObjectBuilder &b;
-};
-
-struct binDataEnd {
- binDataEnd( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- b.back()->appendBinData( b.fieldName(), b.binData.length(),
- b.binDataType, b.binData.data() );
- }
- ObjectBuilder &b;
-};
+ struct binDataType {
+ binDataType( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.binDataType = BinDataType( hex::val( start ) );
+ }
+ ObjectBuilder &b;
+ };
-struct dateValue {
- dateValue( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( unsigned long long v ) const {
- b.date = v;
- }
- ObjectBuilder &b;
-};
+ struct binDataEnd {
+ binDataEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendBinData( b.fieldName(), b.binData.length(),
+ b.binDataType, b.binData.data() );
+ }
+ ObjectBuilder &b;
+ };
-struct dateEnd {
- dateEnd( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- b.back()->appendDate( b.fieldName(), b.date );
- }
- ObjectBuilder &b;
-};
+ struct dateValue {
+ dateValue( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( unsigned long long v ) const {
+ b.date = v;
+ }
+ ObjectBuilder &b;
+ };
-struct regexValue {
- regexValue( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- b.regex = b.popString();
- }
- ObjectBuilder &b;
-};
+ struct dateEnd {
+ dateEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendDate( b.fieldName(), b.date );
+ }
+ ObjectBuilder &b;
+ };
-struct regexOptions {
- regexOptions( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- b.regexOptions = string( start, end );
- }
- ObjectBuilder &b;
-};
-
-struct regexEnd {
- regexEnd( ObjectBuilder &_b ) : b( _b ) {}
- void operator() ( const char *start, const char *end ) const {
- b.back()->appendRegex( b.fieldName(), b.regex.c_str(),
- b.regexOptions.c_str() );
- }
- ObjectBuilder &b;
-};
+ struct regexValue {
+ regexValue( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.regex = b.popString();
+ }
+ ObjectBuilder &b;
+ };
+
+ struct regexOptions {
+ regexOptions( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.regexOptions = string( start, end );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct regexEnd {
+ regexEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendRegex( b.fieldName(), b.regex.c_str(),
+ b.regexOptions.c_str() );
+ }
+ ObjectBuilder &b;
+ };
// One gotcha with this parsing library is probably best ilustrated with an
// example. Say we have a production like this:
@@ -396,108 +396,108 @@ struct regexEnd {
// worth noting here that this parser follows a short-circuit convention. So,
// in the original z example on line 3, if the input was "ab", foo() would only
// be called once.
-struct JsonGrammar : public grammar< JsonGrammar > {
+ struct JsonGrammar : public grammar< JsonGrammar > {
public:
- JsonGrammar( ObjectBuilder &_b ) : b( _b ) {}
-
- template < typename ScannerT >
- struct definition {
- definition( JsonGrammar const &self ) {
- object = ch_p( '{' )[ objectStart( self.b ) ] >> !members >> '}';
- members = pair >> !( ',' >> members );
- pair =
- oid[ oidEnd( self.b ) ] |
- str[ fieldNameEnd( self.b ) ] >> ':' >> value;
- array = ch_p( '[' )[ arrayStart( self.b ) ] >> !elements >> ']';
- elements = value >> !( ch_p( ',' )[ arrayNext( self.b ) ] >> elements );
- value =
- dbref[ dbrefEnd( self.b ) ] |
- bindata[ binDataEnd( self.b ) ] |
- date[ dateEnd( self.b ) ] |
- regex[ regexEnd( self.b ) ] |
- str[ stringEnd( self.b ) ] |
- number |
- object[ subobjectEnd( self.b ) ] |
- array[ arrayEnd( self.b ) ] |
- lexeme_d[ str_p( "true" ) ][ trueValue( self.b ) ] |
- lexeme_d[ str_p( "false" ) ][ falseValue( self.b ) ] |
- lexeme_d[ str_p( "null" ) ][ nullValue( self.b ) ];
- // lexeme_d and rules don't mix well, so we have this mess
- str = lexeme_d[ ch_p( '"' )[ chClear( self.b ) ] >>
- *( ( ch_p( '\\' ) >>
- ( ch_p( '"' )[ chE( self.b ) ] |
- ch_p( '\\' )[ chE( self.b ) ] |
- ch_p( '/' )[ chE( self.b ) ] |
- ch_p( 'b' )[ chE( self.b ) ] |
- ch_p( 'f' )[ chE( self.b ) ] |
- ch_p( 'n' )[ chE( self.b ) ] |
- ch_p( 'r' )[ chE( self.b ) ] |
- ch_p( 't' )[ chE( self.b ) ] |
- ( ch_p( 'u' ) >> ( repeat_p( 4 )[ xdigit_p ][ chU( self.b ) ] ) ) ) ) |
- ch_p( '\x7f' )[ ch( self.b ) ] |
- ( ~cntrl_p & ~ch_p( '"' ) & ( ~ch_p( '\\' ) )[ ch( self.b ) ] ) ) >> '"' ];
- // real_p accepts numbers with nonsignificant zero prefixes, which
- // aren't allowed in JSON. Oh well.
- number = real_p[ numberValue( self.b ) ];
-
- dbref = dbrefS | dbrefT;
- dbrefS = ch_p( '{' ) >> "\"$ns\"" >> ':' >>
- str[ dbrefNS( self.b ) ] >> ',' >> "\"$id\"" >> ':' >> quotedOid >> '}';
- dbrefT = str_p( "Dbref" ) >> '(' >> str[ dbrefNS( self.b ) ] >> ',' >>
- quotedOid >> ')';
-
- // FIXME Only object id if top level field?
- oid = oidS | oidT;
- oidS = str_p( "\"_id\"" ) >> ':' >> quotedOid;
- oidT = str_p( "\"_id\"" ) >> ':' >> "ObjectId" >> '(' >> quotedOid >> ')';
-
- quotedOid = lexeme_d[ '"' >> ( repeat_p( 24 )[ xdigit_p ] )[ oidValue( self.b ) ] >> '"' ];
-
- bindata = ch_p( '{' ) >> "\"$binary\"" >> ':' >>
- lexeme_d[ '"' >> ( *( range_p( 'A', 'Z' ) | range_p( 'a', 'z' ) | range_p( '0', '9' ) | ch_p( '+' ) | ch_p( '/' ) ) >> *ch_p( '=' ) )[ binDataBinary( self.b ) ] >> '"' ] >> ',' >> "\"$type\"" >> ':' >>
- lexeme_d[ '"' >> ( repeat_p( 2 )[ xdigit_p ] )[ binDataType( self.b ) ] >> '"' ] >> '}';
-
- date = dateS | dateT;
- dateS = ch_p( '{' ) >> "\"$date\"" >> ':' >> uint_parser< unsigned long long >()[ dateValue( self.b ) ] >> '}';
- dateT = str_p( "Date" ) >> '(' >> uint_parser< unsigned long long >()[ dateValue( self.b ) ] >> ')';
-
- regex = regexS | regexT;
- regexS = ch_p( '{' ) >> "\"$regex\"" >> ':' >> str[ regexValue( self.b ) ] >> ',' >> "\"$options\"" >> ':' >> lexeme_d[ '"' >> ( *( alpha_p ) )[ regexOptions( self.b ) ] >> '"' ] >> '}';
- // FIXME Obviously it would be nice to unify this with str.
- regexT = lexeme_d[ ch_p( '/' )[ chClear( self.b ) ] >>
- *( ( ch_p( '\\' ) >>
- ( ch_p( '"' )[ chE( self.b ) ] |
- ch_p( '\\' )[ chE( self.b ) ] |
- ch_p( '/' )[ chE( self.b ) ] |
- ch_p( 'b' )[ chE( self.b ) ] |
- ch_p( 'f' )[ chE( self.b ) ] |
- ch_p( 'n' )[ chE( self.b ) ] |
- ch_p( 'r' )[ chE( self.b ) ] |
- ch_p( 't' )[ chE( self.b ) ] |
- ( ch_p( 'u' ) >> ( repeat_p( 4 )[ xdigit_p ][ chU( self.b ) ] ) ) ) ) |
- ch_p( '\x7f' )[ ch( self.b ) ] |
- ( ~cntrl_p & ~ch_p( '/' ) & ( ~ch_p( '\\' ) )[ ch( self.b ) ] ) ) >> str_p( "/" )[ regexValue( self.b ) ]
- >> ( *( ch_p( 'i' ) | ch_p( 'g' ) | ch_p( 'm' ) ) )[ regexOptions( self.b ) ] ];
- }
- rule< ScannerT > object, members, pair, array, elements, value, str, number,
- dbref, dbrefS, dbrefT, oid, oidS, oidT, bindata, date, dateS, dateT,
- regex, regexS, regexT, quotedOid;
- const rule< ScannerT > &start() const {
- return object;
- }
+ JsonGrammar( ObjectBuilder &_b ) : b( _b ) {}
+
+ template < typename ScannerT >
+ struct definition {
+ definition( JsonGrammar const &self ) {
+ object = ch_p( '{' )[ objectStart( self.b ) ] >> !members >> '}';
+ members = pair >> !( ',' >> members );
+ pair =
+ oid[ oidEnd( self.b ) ] |
+ str[ fieldNameEnd( self.b ) ] >> ':' >> value;
+ array = ch_p( '[' )[ arrayStart( self.b ) ] >> !elements >> ']';
+ elements = value >> !( ch_p( ',' )[ arrayNext( self.b ) ] >> elements );
+ value =
+ dbref[ dbrefEnd( self.b ) ] |
+ bindata[ binDataEnd( self.b ) ] |
+ date[ dateEnd( self.b ) ] |
+ regex[ regexEnd( self.b ) ] |
+ str[ stringEnd( self.b ) ] |
+ number |
+ object[ subobjectEnd( self.b ) ] |
+ array[ arrayEnd( self.b ) ] |
+ lexeme_d[ str_p( "true" ) ][ trueValue( self.b ) ] |
+ lexeme_d[ str_p( "false" ) ][ falseValue( self.b ) ] |
+ lexeme_d[ str_p( "null" ) ][ nullValue( self.b ) ];
+ // lexeme_d and rules don't mix well, so we have this mess
+ str = lexeme_d[ ch_p( '"' )[ chClear( self.b ) ] >>
+ *( ( ch_p( '\\' ) >>
+ ( ch_p( '"' )[ chE( self.b ) ] |
+ ch_p( '\\' )[ chE( self.b ) ] |
+ ch_p( '/' )[ chE( self.b ) ] |
+ ch_p( 'b' )[ chE( self.b ) ] |
+ ch_p( 'f' )[ chE( self.b ) ] |
+ ch_p( 'n' )[ chE( self.b ) ] |
+ ch_p( 'r' )[ chE( self.b ) ] |
+ ch_p( 't' )[ chE( self.b ) ] |
+ ( ch_p( 'u' ) >> ( repeat_p( 4 )[ xdigit_p ][ chU( self.b ) ] ) ) ) ) |
+ ch_p( '\x7f' )[ ch( self.b ) ] |
+ ( ~cntrl_p & ~ch_p( '"' ) & ( ~ch_p( '\\' ) )[ ch( self.b ) ] ) ) >> '"' ];
+ // real_p accepts numbers with nonsignificant zero prefixes, which
+ // aren't allowed in JSON. Oh well.
+ number = real_p[ numberValue( self.b ) ];
+
+ dbref = dbrefS | dbrefT;
+ dbrefS = ch_p( '{' ) >> "\"$ns\"" >> ':' >>
+ str[ dbrefNS( self.b ) ] >> ',' >> "\"$id\"" >> ':' >> quotedOid >> '}';
+ dbrefT = str_p( "Dbref" ) >> '(' >> str[ dbrefNS( self.b ) ] >> ',' >>
+ quotedOid >> ')';
+
+ // FIXME Only object id if top level field?
+ oid = oidS | oidT;
+ oidS = str_p( "\"_id\"" ) >> ':' >> quotedOid;
+ oidT = str_p( "\"_id\"" ) >> ':' >> "ObjectId" >> '(' >> quotedOid >> ')';
+
+ quotedOid = lexeme_d[ '"' >> ( repeat_p( 24 )[ xdigit_p ] )[ oidValue( self.b ) ] >> '"' ];
+
+ bindata = ch_p( '{' ) >> "\"$binary\"" >> ':' >>
+ lexeme_d[ '"' >> ( *( range_p( 'A', 'Z' ) | range_p( 'a', 'z' ) | range_p( '0', '9' ) | ch_p( '+' ) | ch_p( '/' ) ) >> *ch_p( '=' ) )[ binDataBinary( self.b ) ] >> '"' ] >> ',' >> "\"$type\"" >> ':' >>
+ lexeme_d[ '"' >> ( repeat_p( 2 )[ xdigit_p ] )[ binDataType( self.b ) ] >> '"' ] >> '}';
+
+ date = dateS | dateT;
+ dateS = ch_p( '{' ) >> "\"$date\"" >> ':' >> uint_parser< unsigned long long >()[ dateValue( self.b ) ] >> '}';
+ dateT = str_p( "Date" ) >> '(' >> uint_parser< unsigned long long >()[ dateValue( self.b ) ] >> ')';
+
+ regex = regexS | regexT;
+ regexS = ch_p( '{' ) >> "\"$regex\"" >> ':' >> str[ regexValue( self.b ) ] >> ',' >> "\"$options\"" >> ':' >> lexeme_d[ '"' >> ( *( alpha_p ) )[ regexOptions( self.b ) ] >> '"' ] >> '}';
+ // FIXME Obviously it would be nice to unify this with str.
+ regexT = lexeme_d[ ch_p( '/' )[ chClear( self.b ) ] >>
+ *( ( ch_p( '\\' ) >>
+ ( ch_p( '"' )[ chE( self.b ) ] |
+ ch_p( '\\' )[ chE( self.b ) ] |
+ ch_p( '/' )[ chE( self.b ) ] |
+ ch_p( 'b' )[ chE( self.b ) ] |
+ ch_p( 'f' )[ chE( self.b ) ] |
+ ch_p( 'n' )[ chE( self.b ) ] |
+ ch_p( 'r' )[ chE( self.b ) ] |
+ ch_p( 't' )[ chE( self.b ) ] |
+ ( ch_p( 'u' ) >> ( repeat_p( 4 )[ xdigit_p ][ chU( self.b ) ] ) ) ) ) |
+ ch_p( '\x7f' )[ ch( self.b ) ] |
+ ( ~cntrl_p & ~ch_p( '/' ) & ( ~ch_p( '\\' ) )[ ch( self.b ) ] ) ) >> str_p( "/" )[ regexValue( self.b ) ]
+ >> ( *( ch_p( 'i' ) | ch_p( 'g' ) | ch_p( 'm' ) ) )[ regexOptions( self.b ) ] ];
+ }
+ rule< ScannerT > object, members, pair, array, elements, value, str, number,
+ dbref, dbrefS, dbrefT, oid, oidS, oidT, bindata, date, dateS, dateT,
+ regex, regexS, regexT, quotedOid;
+ const rule< ScannerT > &start() const {
+ return object;
+ }
+ };
+ ObjectBuilder &b;
};
- ObjectBuilder &b;
-};
-
-BSONObj fromjson( const char *str ) {
- ObjectBuilder b;
- JsonGrammar parser( b );
- massert( "Unable to parse JSON string", parse( str, parser, space_p ).full );
- return b.pop();
-}
-
-BSONObj fromjson( const string &str ) {
- return fromjson( str.c_str() );
-}
+
+ BSONObj fromjson( const char *str ) {
+ ObjectBuilder b;
+ JsonGrammar parser( b );
+ massert( "Unable to parse JSON string", parse( str, parser, space_p ).full );
+ return b.pop();
+ }
+
+ BSONObj fromjson( const string &str ) {
+ return fromjson( str.c_str() );
+ }
} // namespace mongo
diff --git a/db/json.h b/db/json.h
index 6c09f5f62fa..566d41f17ca 100644
--- a/db/json.h
+++ b/db/json.h
@@ -23,7 +23,7 @@
namespace mongo {
-BSONObj fromjson(const char *str);
-BSONObj fromjson(const string &str);
+ BSONObj fromjson(const char *str);
+ BSONObj fromjson(const string &str);
} // namespace mongo
diff --git a/db/lasterror.cpp b/db/lasterror.cpp
index 9fc0c3fa83d..48cb70cb783 100644
--- a/db/lasterror.cpp
+++ b/db/lasterror.cpp
@@ -5,6 +5,6 @@
namespace mongo {
-boost::thread_specific_ptr<LastError> lastError;
+ boost::thread_specific_ptr<LastError> lastError;
} // namespace mongo
diff --git a/db/lasterror.h b/db/lasterror.h
index d251a5971c2..10a3f347446 100644
--- a/db/lasterror.h
+++ b/db/lasterror.h
@@ -1,8 +1,8 @@
// lasterror.h
/**
-* Copyright (C) 2009 10gen Inc.
-*
+* Copyright (C) 2009 10gen Inc.
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
@@ -22,33 +22,33 @@
namespace mongo {
-struct LastError {
- string msg;
- int nPrev;
- void raiseError(const char *_msg) {
- msg = _msg;
- nPrev = 1;
- }
- bool haveError() const {
- return !msg.empty();
+ struct LastError {
+ string msg;
+ int nPrev;
+ void raiseError(const char *_msg) {
+ msg = _msg;
+ nPrev = 1;
+ }
+ bool haveError() const {
+ return !msg.empty();
+ }
+ void resetError() {
+ msg.clear();
+ }
+ LastError() {
+ nPrev = 0;
+ }
+ };
+
+ extern boost::thread_specific_ptr<LastError> lastError;
+
+ inline void raiseError(const char *msg) {
+ LastError *le = lastError.get();
+ if ( le == 0 ) {
+ DEV log() << "warning: lastError==0 can't report:" << msg << '\n';
+ return;
+ }
+ le->raiseError(msg);
}
- void resetError() {
- msg.clear();
- }
- LastError() {
- nPrev = 0;
- }
-};
-
-extern boost::thread_specific_ptr<LastError> lastError;
-
-inline void raiseError(const char *msg) {
- LastError *le = lastError.get();
- if ( le == 0 ) {
- DEV log() << "warning: lastError==0 can't report:" << msg << '\n';
- return;
- }
- le->raiseError(msg);
-}
-
-} // namespace mongo
+
+} // namespace mongo
diff --git a/db/matcher.cpp b/db/matcher.cpp
index ee36ea4daea..83bd41cf0f2 100644
--- a/db/matcher.cpp
+++ b/db/matcher.cpp
@@ -35,17 +35,17 @@ using namespace stdext;
namespace mongo {
-typedef const char * MyStr;
-struct less_str {
- bool operator()(const MyStr & x, const MyStr & y) const {
- if ( strcmp(x, y) > 0)
- return true;
+ typedef const char * MyStr;
+ struct less_str {
+ bool operator()(const MyStr & x, const MyStr & y) const {
+ if ( strcmp(x, y) > 0)
+ return true;
- return false;
- }
-};
+ return false;
+ }
+ };
-typedef hash_map<const char*, int, hash_compare<const char *, less_str> > strhashmap;
+ typedef hash_map<const char*, int, hash_compare<const char *, less_str> > strhashmap;
#else
@@ -55,57 +55,57 @@ typedef hash_map<const char*, int, hash_compare<const char *, less_str> > strhas
namespace mongo {
-using namespace __gnu_cxx;
+ using namespace __gnu_cxx;
-typedef const char * MyStr;
-struct eq_str {
- bool operator()(const MyStr & x, const MyStr & y) const {
- if ( strcmp(x, y) == 0)
- return true;
+ typedef const char * MyStr;
+ struct eq_str {
+ bool operator()(const MyStr & x, const MyStr & y) const {
+ if ( strcmp(x, y) == 0)
+ return true;
- return false;
- }
-};
+ return false;
+ }
+ };
-typedef hash_map<const char*, int, hash<const char *>, eq_str > strhashmap;
+ typedef hash_map<const char*, int, hash<const char *>, eq_str > strhashmap;
#endif
//#include "minilex.h"
//MiniLex minilex;
-class Where {
-public:
- Where() {
- jsScope = 0;
- }
- ~Where() {
+ class Where {
+ public:
+ Where() {
+ jsScope = 0;
+ }
+ ~Where() {
#if !defined(NOJNI)
- JavaJS->scopeFree(scope);
+ JavaJS->scopeFree(scope);
#endif
- if ( jsScope )
- delete jsScope;
- scope = 0;
- func = 0;
- }
+ if ( jsScope )
+ delete jsScope;
+ scope = 0;
+ func = 0;
+ }
- jlong scope, func;
- BSONObj *jsScope;
+ jlong scope, func;
+ BSONObj *jsScope;
- void setFunc(const char *code) {
+ void setFunc(const char *code) {
#if !defined(NOJNI)
- func = JavaJS->functionCreate( code );
+ func = JavaJS->functionCreate( code );
#endif
- }
+ }
-};
+ };
-JSMatcher::~JSMatcher() {
- for ( int i = 0; i < nBuilders; i++ )
- delete builders[i];
- delete in;
- delete where;
-}
+ JSMatcher::~JSMatcher() {
+ for ( int i = 0; i < nBuilders; i++ )
+ delete builders[i];
+ delete in;
+ delete where;
+ }
} // namespace mongo
@@ -113,434 +113,434 @@ JSMatcher::~JSMatcher() {
namespace mongo {
-/* _jsobj - the query pattern
- indexKeyPattern - the "key pattern" / template of what is in the keys of the index we are using.
- used to set indexMatches return value from matches()
-*/
-JSMatcher::JSMatcher(BSONObj &_jsobj, BSONObj indexKeyPattern) :
- in(0), where(0), jsobj(_jsobj), nRegex(0)
-{
- checkInIndex = !indexKeyPattern.isEmpty();
- nBuilders = 0;
- BSONObjIterator i(jsobj);
- n = 0;
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( e.eoo() )
- break;
-
- if ( ( e.type() == CodeWScope || e.type() == Code ) && strcmp(e.fieldName(), "$where")==0 ) {
- // $where: function()...
- uassert( "$where occurs twice?", where == 0 );
- where = new Where();
- uassert( "$where query, but jni is disabled", JavaJS );
+ /* _jsobj - the query pattern
+ indexKeyPattern - the "key pattern" / template of what is in the keys of the index we are using.
+ used to set indexMatches return value from matches()
+ */
+ JSMatcher::JSMatcher(BSONObj &_jsobj, BSONObj indexKeyPattern) :
+ in(0), where(0), jsobj(_jsobj), nRegex(0)
+ {
+ checkInIndex = !indexKeyPattern.isEmpty();
+ nBuilders = 0;
+ BSONObjIterator i(jsobj);
+ n = 0;
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+
+ if ( ( e.type() == CodeWScope || e.type() == Code ) && strcmp(e.fieldName(), "$where")==0 ) {
+ // $where: function()...
+ uassert( "$where occurs twice?", where == 0 );
+ where = new Where();
+ uassert( "$where query, but jni is disabled", JavaJS );
#if !defined(NOJNI)
- where->scope = JavaJS->scopeCreate();
- JavaJS->scopeSetString(where->scope, "$client", database->name.c_str());
+ where->scope = JavaJS->scopeCreate();
+ JavaJS->scopeSetString(where->scope, "$client", database->name.c_str());
- if ( e.type() == CodeWScope ) {
- where->setFunc( e.codeWScopeCode() );
- where->jsScope = new BSONObj( e.codeWScopeScopeData() , 0 );
- }
- else {
- const char *code = e.valuestr();
- where->setFunc(code);
- }
+ if ( e.type() == CodeWScope ) {
+ where->setFunc( e.codeWScopeCode() );
+ where->jsScope = new BSONObj( e.codeWScopeScopeData() , 0 );
+ }
+ else {
+ const char *code = e.valuestr();
+ where->setFunc(code);
+ }
#endif
- continue;
- }
-
- if ( e.type() == RegEx ) {
- if ( nRegex >= 4 ) {
- cout << "ERROR: too many regexes in query" << endl;
+ continue;
}
- else {
- pcrecpp::RE_Options options;
- options.set_utf8(true);
- const char *flags = e.regexFlags();
- while ( flags && *flags ) {
- if ( *flags == 'i' )
- options.set_caseless(true);
- else if ( *flags == 'm' )
- options.set_multiline(true);
- else if ( *flags == 'x' )
- options.set_extended(true);
- flags++;
+
+ if ( e.type() == RegEx ) {
+ if ( nRegex >= 4 ) {
+ out() << "ERROR: too many regexes in query" << endl;
}
- RegexMatcher& rm = regexs[nRegex];
- rm.re = new pcrecpp::RE(e.regex(), options);
- rm.fieldName = e.fieldName();
- rm.inIndex = indexKeyPattern.hasElement(rm.fieldName);
+ else {
+ pcrecpp::RE_Options options;
+ options.set_utf8(true);
+ const char *flags = e.regexFlags();
+ while ( flags && *flags ) {
+ if ( *flags == 'i' )
+ options.set_caseless(true);
+ else if ( *flags == 'm' )
+ options.set_multiline(true);
+ else if ( *flags == 'x' )
+ options.set_extended(true);
+ flags++;
+ }
+ RegexMatcher& rm = regexs[nRegex];
+ rm.re = new pcrecpp::RE(e.regex(), options);
+ rm.fieldName = e.fieldName();
+ rm.inIndex = indexKeyPattern.hasElement(rm.fieldName);
- nRegex++;
+ nRegex++;
+ }
+ continue;
}
- continue;
- }
- // greater than / less than...
- // e.g., e == { a : { $gt : 3 } }
- // or
- // { a : { $in : [1,2,3] } }
- if ( e.type() == Object ) {
- // e.g., fe == { $gt : 3 }
- BSONObjIterator j(e.embeddedObject());
- bool ok = false;
- while ( j.more() ) {
- BSONElement fe = j.next();
- if ( fe.eoo() )
- break;
- // BSONElement fe = e.embeddedObject().firstElement();
- const char *fn = fe.fieldName();
- /* TODO: use getGtLtOp() here. this code repeats ourself */
- if ( fn[0] == '$' && fn[1] ) {
- if ( fn[2] == 't' ) {
- int op = Equality;
- if ( fn[1] == 'g' ) {
- if ( fn[3] == 0 ) op = GT;
- else if ( fn[3] == 'e' && fn[4] == 0 ) op = GTE;
+ // greater than / less than...
+ // e.g., e == { a : { $gt : 3 } }
+ // or
+ // { a : { $in : [1,2,3] } }
+ if ( e.type() == Object ) {
+ // e.g., fe == { $gt : 3 }
+ BSONObjIterator j(e.embeddedObject());
+ bool ok = false;
+ while ( j.more() ) {
+ BSONElement fe = j.next();
+ if ( fe.eoo() )
+ break;
+ // BSONElement fe = e.embeddedObject().firstElement();
+ const char *fn = fe.fieldName();
+ /* TODO: use getGtLtOp() here. this code repeats ourself */
+ if ( fn[0] == '$' && fn[1] ) {
+ if ( fn[2] == 't' ) {
+ int op = Equality;
+ if ( fn[1] == 'g' ) {
+ if ( fn[3] == 0 ) op = GT;
+ else if ( fn[3] == 'e' && fn[4] == 0 ) op = GTE;
+ else
+ uassert("invalid $operator", false);
+ }
+ else if ( fn[1] == 'l' ) {
+ if ( fn[3] == 0 ) op = LT;
+ else if ( fn[3] == 'e' && fn[4] == 0 ) op = LTE;
+ else
+ uassert("invalid $operator", false);
+ }
else
uassert("invalid $operator", false);
+ if ( op ) {
+ uassert("too many items to match in query", nBuilders < 8);
+ BSONObjBuilder *b = new BSONObjBuilder();
+ builders[nBuilders++] = b;
+ b->appendAs(fe, e.fieldName());
+ addBasic(b->done().firstElement(), op, indexKeyPattern);
+ ok = true;
+ }
}
- else if ( fn[1] == 'l' ) {
- if ( fn[3] == 0 ) op = LT;
- else if ( fn[3] == 'e' && fn[4] == 0 ) op = LTE;
+ else if ( fn[2] == 'e' ) {
+ if ( fn[1] == 'n' && fn[3] == 0 ) {
+ // $ne
+ uassert("too many items to match in query", nBuilders < 8);
+ BSONObjBuilder *b = new BSONObjBuilder();
+ builders[nBuilders++] = b;
+ b->appendAs(fe, e.fieldName());
+ addBasic(b->done().firstElement(), NE, indexKeyPattern);
+ ok = true;
+ }
else
uassert("invalid $operator", false);
}
- else
- uassert("invalid $operator", false);
- if ( op ) {
- uassert("too many items to match in query", nBuilders < 8);
- BSONObjBuilder *b = new BSONObjBuilder();
- builders[nBuilders++] = b;
- b->appendAs(fe, e.fieldName());
- addBasic(b->done().firstElement(), op, indexKeyPattern);
- ok = true;
- }
- }
- else if ( fn[2] == 'e' ) {
- if ( fn[1] == 'n' && fn[3] == 0 ) {
- // $ne
- uassert("too many items to match in query", nBuilders < 8);
- BSONObjBuilder *b = new BSONObjBuilder();
- builders[nBuilders++] = b;
- b->appendAs(fe, e.fieldName());
- addBasic(b->done().firstElement(), NE, indexKeyPattern);
+ else if ( fn[1] == 'i' && fn[2] == 'n' && fn[3] == 0 && fe.type() == Array ) {
+ // $in
+ uassert( "only 1 $in statement per query supported", in == 0 ); // todo...
+ in = new set<BSONElement,element_lt>();
+ BSONObjIterator i(fe.embeddedObject());
+ if ( i.more() ) {
+ while ( 1 ) {
+ BSONElement ie = i.next();
+ if ( ie.eoo() )
+ break;
+ in->insert(ie);
+ }
+ }
+ addBasic(e, opIN, indexKeyPattern); // e not actually used at the moment for $in
ok = true;
}
else
uassert("invalid $operator", false);
}
- else if ( fn[1] == 'i' && fn[2] == 'n' && fn[3] == 0 && fe.type() == Array ) {
- // $in
- uassert( "only 1 $in statement per query supported", in == 0 ); // todo...
- in = new set<BSONElement,element_lt>();
- BSONObjIterator i(fe.embeddedObject());
- if ( i.more() ) {
- while ( 1 ) {
- BSONElement ie = i.next();
- if ( ie.eoo() )
- break;
- in->insert(ie);
- }
- }
- addBasic(e, opIN, indexKeyPattern); // e not actually used at the moment for $in
- ok = true;
+ else {
+ ok = false;
+ break;
}
- else
- uassert("invalid $operator", false);
- }
- else {
- ok = false;
- break;
}
+ if ( ok )
+ continue;
}
- if ( ok )
- continue;
- }
- // normal, simple case e.g. { a : "foo" }
- addBasic(e, Equality, indexKeyPattern);
+ // normal, simple case e.g. { a : "foo" }
+ addBasic(e, Equality, indexKeyPattern);
+ }
}
-}
-inline int JSMatcher::valuesMatch(BSONElement& l, BSONElement& r, int op) {
- if ( op == 0 )
- return l.valuesEqual(r);
+ inline int JSMatcher::valuesMatch(BSONElement& l, BSONElement& r, int op) {
+ if ( op == 0 )
+ return l.valuesEqual(r);
- if ( op == NE ) {
- return !l.valuesEqual(r);
- }
+ if ( op == NE ) {
+ return !l.valuesEqual(r);
+ }
- if ( op == opIN ) {
- // { $in : [1,2,3] }
- int c = in->count(l);
- return c;
+ if ( op == opIN ) {
+ // { $in : [1,2,3] }
+ int c = in->count(l);
+ return c;
+ }
+
+ /* check LT, GTE, ... */
+ if ( l.type() != r.type() )
+ return false;
+ int c = compareElementValues(l, r);
+ if ( c < -1 ) c = -1;
+ if ( c > 1 ) c = 1;
+ int z = 1 << (c+1);
+ return (op & z);
}
- /* check LT, GTE, ... */
- if ( l.type() != r.type() )
- return false;
- int c = compareElementValues(l, r);
- if ( c < -1 ) c = -1;
- if ( c > 1 ) c = 1;
- int z = 1 << (c+1);
- return (op & z);
-}
-
-/* Check if a particular field matches.
-
- fieldName - field to match "a.b" if we are reaching into an embedded object.
- toMatch - element we want to match.
- obj - database object to check against
- compareOp - Equality, LT, GT, etc.
- deep - out param. set to true/false if we scanned an array
- isArr -
-
- Special forms:
-
- { "a.b" : 3 } means obj.a.b == 3
- { a : { $lt : 3 } } means obj.a < 3
- { a : { $in : [1,2] } } means [1,2].contains(obj.a)
-
- return value
- -1 mismatch
- 0 missing element
- 1 match
-*/
-int JSMatcher::matchesDotted(const char *fieldName, BSONElement& toMatch, BSONObj& obj, int compareOp, bool *deep, bool isArr) {
- {
- const char *p = strchr(fieldName, '.');
- if ( p ) {
- string left(fieldName, p-fieldName);
+ /* Check if a particular field matches.
- BSONElement e = obj.getField(left.c_str());
- if ( e.eoo() )
- return 0;
- if ( e.type() != Object && e.type() != Array )
- return -1;
+ fieldName - field to match "a.b" if we are reaching into an embedded object.
+ toMatch - element we want to match.
+ obj - database object to check against
+ compareOp - Equality, LT, GT, etc.
+ deep - out param. set to true/false if we scanned an array
+ isArr -
+
+ Special forms:
+
+ { "a.b" : 3 } means obj.a.b == 3
+ { a : { $lt : 3 } } means obj.a < 3
+ { a : { $in : [1,2] } } means [1,2].contains(obj.a)
+
+ return value
+ -1 mismatch
+ 0 missing element
+ 1 match
+ */
+ int JSMatcher::matchesDotted(const char *fieldName, BSONElement& toMatch, BSONObj& obj, int compareOp, bool *deep, bool isArr) {
+ {
+ const char *p = strchr(fieldName, '.');
+ if ( p ) {
+ string left(fieldName, p-fieldName);
+
+ BSONElement e = obj.getField(left.c_str());
+ if ( e.eoo() )
+ return 0;
+ if ( e.type() != Object && e.type() != Array )
+ return -1;
+
+ BSONObj eo = e.embeddedObject();
+ return matchesDotted(p+1, toMatch, eo, compareOp, deep, e.type() == Array);
+ }
+ }
- BSONObj eo = e.embeddedObject();
- return matchesDotted(p+1, toMatch, eo, compareOp, deep, e.type() == Array);
+ BSONElement e = obj.getField(fieldName);
+
+ if ( valuesMatch(e, toMatch, compareOp) ) {
+ return 1;
+ }
+ else if ( e.type() == Array ) {
+ BSONObjIterator ai(e.embeddedObject());
+ while ( ai.more() ) {
+ BSONElement z = ai.next();
+ if ( valuesMatch( z, toMatch, compareOp) ) {
+ if ( deep )
+ *deep = true;
+ return 1;
+ }
+ }
+ }
+ else if ( isArr ) {
+ BSONObjIterator ai(obj);
+ while ( ai.more() ) {
+ BSONElement z = ai.next();
+ if ( z.type() == Object ) {
+ BSONObj eo = z.embeddedObject();
+ int cmp = matchesDotted(fieldName, toMatch, eo, compareOp, deep);
+ if ( cmp > 0 ) {
+ if ( deep ) *deep = true;
+ return 1;
+ }
+ }
+ }
+ }
+ else if ( e.eoo() ) {
+ // 0 indicatse "missing element"
+ return 0;
}
+ return -1;
}
- BSONElement e = obj.getField(fieldName);
+ extern int dump;
- if ( valuesMatch(e, toMatch, compareOp) ) {
- return 1;
+ inline bool _regexMatches(RegexMatcher& rm, BSONElement& e) {
+ char buf[64];
+ const char *p = buf;
+ if ( e.type() == String || e.type() == Symbol )
+ p = e.valuestr();
+ else if ( e.isNumber() ) {
+ sprintf(buf, "%f", e.number());
+ }
+ else if ( e.type() == Date ) {
+ unsigned long long d = e.date();
+ time_t t = (d/1000);
+ time_t_to_String(t, buf);
+ }
+ else
+ return false;
+ return rm.re->PartialMatch(p);
}
- else if ( e.type() == Array ) {
+ /* todo: internal dotted notation scans -- not done yet here. */
+ inline bool regexMatches(RegexMatcher& rm, BSONElement& e, bool *deep) {
+ if ( e.type() != Array )
+ return _regexMatches(rm, e);
+
BSONObjIterator ai(e.embeddedObject());
while ( ai.more() ) {
BSONElement z = ai.next();
- if ( valuesMatch( z, toMatch, compareOp) ) {
+ if ( _regexMatches(rm, z) ) {
if ( deep )
*deep = true;
- return 1;
+ return true;
}
}
- }
- else if ( isArr ) {
- BSONObjIterator ai(obj);
- while ( ai.more() ) {
- BSONElement z = ai.next();
- if ( z.type() == Object ) {
- BSONObj eo = z.embeddedObject();
- int cmp = matchesDotted(fieldName, toMatch, eo, compareOp, deep);
- if ( cmp > 0 ) {
- if ( deep ) *deep = true;
- return 1;
- }
- }
- }
- }
- else if ( e.eoo() ) {
- // 0 indicatse "missing element"
- return 0;
- }
- return -1;
-}
-
-extern int dump;
-
-inline bool _regexMatches(RegexMatcher& rm, BSONElement& e) {
- char buf[64];
- const char *p = buf;
- if ( e.type() == String || e.type() == Symbol )
- p = e.valuestr();
- else if ( e.isNumber() ) {
- sprintf(buf, "%f", e.number());
- }
- else if ( e.type() == Date ) {
- unsigned long long d = e.date();
- time_t t = (d/1000);
- time_t_to_String(t, buf);
- }
- else
return false;
- return rm.re->PartialMatch(p);
-}
-/* todo: internal dotted notation scans -- not done yet here. */
-inline bool regexMatches(RegexMatcher& rm, BSONElement& e, bool *deep) {
- if ( e.type() != Array )
- return _regexMatches(rm, e);
-
- BSONObjIterator ai(e.embeddedObject());
- while ( ai.more() ) {
- BSONElement z = ai.next();
- if ( _regexMatches(rm, z) ) {
- if ( deep )
- *deep = true;
- return true;
- }
}
- return false;
-}
-/* See if an object matches the query.
- deep - return true when means we looked into arrays for a match
+ /* See if an object matches the query.
+ deep - return true when means we looked into arrays for a match
- Wondering if it would be worth having
- if( !inIndex && !ok ) continue;
- in each loop to bypass those checks. probably not worth checking as usually we are ok.
-*/
-bool JSMatcher::matches(BSONObj& jsobj, bool *deep) {
- if ( deep )
- *deep = false;
+ Wondering if it would be worth having
+ if( !inIndex && !ok ) continue;
+ in each loop to bypass those checks. probably not worth checking as usually we are ok.
+ */
+ bool JSMatcher::matches(BSONObj& jsobj, bool *deep) {
+ if ( deep )
+ *deep = false;
- /* assuming there is usually only one thing to match. if more this
- could be slow sometimes. */
+ /* assuming there is usually only one thing to match. if more this
+ could be slow sometimes. */
- // check normal non-regex cases:
- for ( int i = 0; i < n; i++ ) {
- BasicMatcher& bm = basics[i];
- BSONElement& m = bm.toMatch;
- // -1=mismatch. 0=missing element. 1=match
- int cmp = matchesDotted(m.fieldName(), m, jsobj, bm.compareOp, deep);
+ // check normal non-regex cases:
+ for ( int i = 0; i < n; i++ ) {
+ BasicMatcher& bm = basics[i];
+ BSONElement& m = bm.toMatch;
+ // -1=mismatch. 0=missing element. 1=match
+ int cmp = matchesDotted(m.fieldName(), m, jsobj, bm.compareOp, deep);
- if ( cmp < 0 )
- return false;
- if ( cmp == 0 ) {
- /* missing is ok iff we were looking for null */
- if ( m.type() == jstNULL || m.type() == Undefined ) {
- if ( bm.compareOp == NE ) {
+ if ( cmp < 0 )
+ return false;
+ if ( cmp == 0 ) {
+ /* missing is ok iff we were looking for null */
+ if ( m.type() == jstNULL || m.type() == Undefined ) {
+ if ( bm.compareOp == NE ) {
+ return false;
+ }
+ } else {
return false;
}
- } else {
- return false;
}
}
- }
-
- for ( int r = 0; r < nRegex; r++ ) {
- RegexMatcher& rm = regexs[r];
- BSONElement e = jsobj.getFieldDotted(rm.fieldName);
- if ( e.eoo() )
- return false;
- if ( !regexMatches(rm, e, deep) )
- return false;
- }
- if ( where ) {
- if ( where->func == 0 ) {
- uassert("$where compile error", false);
- return false; // didn't compile
+ for ( int r = 0; r < nRegex; r++ ) {
+ RegexMatcher& rm = regexs[r];
+ BSONElement e = jsobj.getFieldDotted(rm.fieldName);
+ if ( e.eoo() )
+ return false;
+ if ( !regexMatches(rm, e, deep) )
+ return false;
}
+
+ if ( where ) {
+ if ( where->func == 0 ) {
+ uassert("$where compile error", false);
+ return false; // didn't compile
+ }
#if !defined(NOJNI)
- /**if( 1 || jsobj.objsize() < 200 || where->fullObject ) */
- {
- if ( where->jsScope ) {
- JavaJS->scopeInit( where->scope , where->jsScope );
+ /**if( 1 || jsobj.objsize() < 200 || where->fullObject ) */
+ {
+ if ( where->jsScope ) {
+ JavaJS->scopeInit( where->scope , where->jsScope );
+ }
+ JavaJS->scopeSetThis(where->scope, &jsobj);
+ JavaJS->scopeSetObject(where->scope, "obj", &jsobj);
}
- JavaJS->scopeSetThis(where->scope, &jsobj);
- JavaJS->scopeSetObject(where->scope, "obj", &jsobj);
- }
- /*else {
- BSONObjBuilder b;
- where->buildSubset(jsobj, b);
- BSONObj temp = b.done();
- JavaJS->scopeSetObject(where->scope, "obj", &temp);
- }*/
- if ( JavaJS->invoke(where->scope, where->func) ) {
- uassert("error in invocation of $where function", false);
- return false;
- }
- return JavaJS->scopeGetBoolean(where->scope, "return") != 0;
+ /*else {
+ BSONObjBuilder b;
+ where->buildSubset(jsobj, b);
+ BSONObj temp = b.done();
+ JavaJS->scopeSetObject(where->scope, "obj", &temp);
+ }*/
+ if ( JavaJS->invoke(where->scope, where->func) ) {
+ uassert("error in invocation of $where function", false);
+ return false;
+ }
+ return JavaJS->scopeGetBoolean(where->scope, "return") != 0;
#else
- return false;
+ return false;
#endif
- }
+ }
- return true;
-}
+ return true;
+ }
-struct JSObj1 js1;
+ struct JSObj1 js1;
#pragma pack(push,1)
-struct JSObj2 {
- JSObj2() {
- totsize=sizeof(JSObj2);
- s = String;
- strcpy_s(sname, 7, "abcdef");
- slen = 10;
- strcpy_s(sval, 10, "123456789");
- eoo = EOO;
- }
- unsigned totsize;
- char s;
- char sname[7];
- unsigned slen;
- char sval[10];
- char eoo;
-} js2;
-
-struct JSUnitTest : public UnitTest {
- void run() {
-
- BSONObj j1((const char *) &js1);
- BSONObj j2((const char *) &js2);
- JSMatcher m(j2, BSONObj());
- assert( m.matches(j1) );
- js2.sval[0] = 'z';
- assert( !m.matches(j1) );
- JSMatcher n(j1, BSONObj());
- assert( n.matches(j1) );
- assert( !n.matches(j2) );
-
- BSONObj j0 = emptyObj;
+ struct JSObj2 {
+ JSObj2() {
+ totsize=sizeof(JSObj2);
+ s = String;
+ strcpy_s(sname, 7, "abcdef");
+ slen = 10;
+ strcpy_s(sval, 10, "123456789");
+ eoo = EOO;
+ }
+ unsigned totsize;
+ char s;
+ char sname[7];
+ unsigned slen;
+ char sval[10];
+ char eoo;
+ } js2;
+
+ struct JSUnitTest : public UnitTest {
+ void run() {
+
+ BSONObj j1((const char *) &js1);
+ BSONObj j2((const char *) &js2);
+ JSMatcher m(j2, BSONObj());
+ assert( m.matches(j1) );
+ js2.sval[0] = 'z';
+ assert( !m.matches(j1) );
+ JSMatcher n(j1, BSONObj());
+ assert( n.matches(j1) );
+ assert( !n.matches(j2) );
+
+ BSONObj j0 = emptyObj;
// BSONObj j0((const char *) &js0);
- JSMatcher p(j0, BSONObj());
- assert( p.matches(j1) );
- assert( p.matches(j2) );
- }
-} jsunittest;
+ JSMatcher p(j0, BSONObj());
+ assert( p.matches(j1) );
+ assert( p.matches(j2) );
+ }
+ } jsunittest;
#pragma pack(pop)
-struct RXTest : public UnitTest {
+ struct RXTest : public UnitTest {
- RXTest() {
- }
+ RXTest() {
+ }
- void run() {
- /*
- static const boost::regex e("(\\d{4}[- ]){3}\\d{4}");
- static const boost::regex b(".....");
- cout << "regex result: " << regex_match("hello", e) << endl;
- cout << "regex result: " << regex_match("abcoo", b) << endl;
- */
- pcrecpp::RE re1(")({a}h.*o");
- pcrecpp::RE re("h.llo");
- assert( re.FullMatch("hello") );
- assert( !re1.FullMatch("hello") );
-
-
- pcrecpp::RE_Options options;
- options.set_utf8(true);
- pcrecpp::RE part("dwi", options);
- assert( part.PartialMatch("dwight") );
- }
-} rxtest;
+ void run() {
+ /*
+ static const boost::regex e("(\\d{4}[- ]){3}\\d{4}");
+ static const boost::regex b(".....");
+ out() << "regex result: " << regex_match("hello", e) << endl;
+ out() << "regex result: " << regex_match("abcoo", b) << endl;
+ */
+ pcrecpp::RE re1(")({a}h.*o");
+ pcrecpp::RE re("h.llo");
+ assert( re.FullMatch("hello") );
+ assert( !re1.FullMatch("hello") );
+
+
+ pcrecpp::RE_Options options;
+ options.set_utf8(true);
+ pcrecpp::RE part("dwi", options);
+ assert( part.PartialMatch("dwight") );
+ }
+ } rxtest;
} // namespace mongo
diff --git a/db/matcher.h b/db/matcher.h
index 555a9a96b6b..2a8685de0f6 100644
--- a/db/matcher.h
+++ b/db/matcher.h
@@ -24,115 +24,115 @@
namespace mongo {
-class RegexMatcher {
-public:
- const char *fieldName;
- pcrecpp::RE *re;
- bool inIndex;
- RegexMatcher() {
- re = 0;
- inIndex = false;
- }
- ~RegexMatcher() {
- delete re;
- }
-};
-
-class BasicMatcher {
-public:
- BSONElement toMatch;
- int compareOp;
- bool inIndex;
-};
+ class RegexMatcher {
+ public:
+ const char *fieldName;
+ pcrecpp::RE *re;
+ bool inIndex;
+ RegexMatcher() {
+ re = 0;
+ inIndex = false;
+ }
+ ~RegexMatcher() {
+ delete re;
+ }
+ };
+
+ class BasicMatcher {
+ public:
+ BSONElement toMatch;
+ int compareOp;
+ bool inIndex;
+ };
// SQL where clause equivalent
-class Where;
+ class Where;
-/* Match BSON objects against a query pattern.
+ /* Match BSON objects against a query pattern.
- e.g.
- db.foo.find( { a : 3 } );
+ e.g.
+ db.foo.find( { a : 3 } );
- { a : 3 } is the pattern object.
+ { a : 3 } is the pattern object.
- GT/LT:
- { a : { $gt : 3 } }
+ GT/LT:
+ { a : { $gt : 3 } }
- Not equal:
- { a : { $ne : 3 } }
+ Not equal:
+ { a : { $ne : 3 } }
- TODO: we should rewrite the matcher to be more an AST style.
-*/
-class JSMatcher : boost::noncopyable {
- int matchesDotted(
- const char *fieldName,
- BSONElement& toMatch, BSONObj& obj,
- int compareOp, bool *deep, bool isArr = false);
-
- struct element_lt
- {
- bool operator()(const BSONElement& l, const BSONElement& r) const
+ TODO: we should rewrite the matcher to be more an AST style.
+ */
+ class JSMatcher : boost::noncopyable {
+ int matchesDotted(
+ const char *fieldName,
+ BSONElement& toMatch, BSONObj& obj,
+ int compareOp, bool *deep, bool isArr = false);
+
+ struct element_lt
{
- int x = (int) l.type() - (int) r.type();
- if ( x < 0 ) return true;
- if ( x > 0 ) return false;
- return compareElementValues(l,r) < 0;
+ bool operator()(const BSONElement& l, const BSONElement& r) const
+ {
+ int x = (int) l.type() - (int) r.type();
+ if ( x < 0 ) return true;
+ if ( x > 0 ) return false;
+ return compareElementValues(l,r) < 0;
+ }
+ };
+ public:
+ enum {
+ Equality = 0,
+ LT = 0x1,
+ LTE = 0x3,
+ GTE = 0x6,
+ GT = 0x4,
+ opIN = 0x8, // { x : { $in : [1,2,3] } }
+ NE = 0x9
+ };
+
+ static int opDirection(int op) {
+ return op <= LTE ? -1 : 1;
}
- };
-public:
- enum {
- Equality = 0,
- LT = 0x1,
- LTE = 0x3,
- GTE = 0x6,
- GT = 0x4,
- opIN = 0x8, // { x : { $in : [1,2,3] } }
- NE = 0x9
- };
- static int opDirection(int op) {
- return op <= LTE ? -1 : 1;
- }
+ JSMatcher(BSONObj& pattern, BSONObj indexKeyPattern);
- JSMatcher(BSONObj& pattern, BSONObj indexKeyPattern);
+ ~JSMatcher();
- ~JSMatcher();
-
- /* deep - means we looked into arrays for a match
- */
- bool matches(BSONObj& j, bool *deep = 0);
+ /* deep - means we looked into arrays for a match
+ */
+ bool matches(BSONObj& j, bool *deep = 0);
- int getN() {
- return n;
- }
+ int getN() {
+ return n;
+ }
-private:
- void addBasic(BSONElement e, int c, BSONObj& indexKeyPattern) {
- BasicMatcher bm;
- bm.toMatch = e;
- bm.compareOp = c;
- bm.inIndex = indexKeyPattern.hasElement(e.fieldName());
- basics.push_back(bm);
- n++;
- }
+ private:
+ void addBasic(BSONElement e, int c, BSONObj& indexKeyPattern) {
+ BasicMatcher bm;
+ bm.toMatch = e;
+ bm.compareOp = c;
+ bm.inIndex = indexKeyPattern.hasElement(e.fieldName());
+ basics.push_back(bm);
+ n++;
+ }
- int valuesMatch(BSONElement& l, BSONElement& r, int op);
+ int valuesMatch(BSONElement& l, BSONElement& r, int op);
- set<BSONElement,element_lt> *in; // set if query uses $in
- Where *where; // set if query uses $where
- BSONObj& jsobj; // the query pattern. e.g., { name: "joe" }
+ set<BSONElement,element_lt> *in; // set if query uses $in
+ Where *where; // set if query uses $where
+ BSONObj& jsobj; // the query pattern. e.g., { name: "joe" }
- vector<BasicMatcher> basics;
- int n; // # of basicmatcher items
+ vector<BasicMatcher> basics;
+ int n; // # of basicmatcher items
- RegexMatcher regexs[4];
- int nRegex;
+ RegexMatcher regexs[4];
+ int nRegex;
- // so we delete the mem when we're done:
- BSONObjBuilder *builders[8];
- int nBuilders;
+ // so we delete the mem when we're done:
+ BSONObjBuilder *builders[8];
+ int nBuilders;
- bool checkInIndex;
-};
+ bool checkInIndex;
+ };
} // namespace mongo
diff --git a/db/minilex.h b/db/minilex.h
index 1d2b774f9bf..ef5f2e46bab 100644
--- a/db/minilex.h
+++ b/db/minilex.h
@@ -19,97 +19,97 @@
namespace mongo {
-struct MiniLex {
- strhashmap reserved;
- bool ic[256]; // ic=Identifier Character
- bool starter[256];
+ struct MiniLex {
+ strhashmap reserved;
+ bool ic[256]; // ic=Identifier Character
+ bool starter[256];
- // dm: very dumb about comments and escaped quotes -- but we are faster then at least,
- // albeit returning too much (which is ok for jsbobj current usage).
- void grabVariables(char *code /*modified and must stay in scope*/, strhashmap& vars) {
- char *p = code;
- char last = 0;
- while ( *p ) {
- if ( starter[*p] ) {
- char *q = p+1;
- while ( *q && ic[*q] ) q++;
- const char *identifier = p;
- bool done = *q == 0;
- *q = 0;
- if ( !reserved.count(identifier) ) {
- // we try to be smart about 'obj' but have to be careful as obj.obj
- // can happen; this is so that nFields is right for simplistic where cases
- // so we can stop scanning in jsobj when we find the field of interest.
- if ( strcmp(identifier,"obj")==0 && p>code && p[-1] != '.' )
- ;
- else
- vars[identifier] = 1;
+ // dm: very dumb about comments and escaped quotes -- but we are faster than at least,
+ // albeit returning too much (which is ok for jsobj current usage).
+ void grabVariables(char *code /*modified and must stay in scope*/, strhashmap& vars) {
+ char *p = code;
+ char last = 0;
+ while ( *p ) {
+ if ( starter[*p] ) {
+ char *q = p+1;
+ while ( *q && ic[*q] ) q++;
+ const char *identifier = p;
+ bool done = *q == 0;
+ *q = 0;
+ if ( !reserved.count(identifier) ) {
+ // we try to be smart about 'obj' but have to be careful as obj.obj
+ // can happen; this is so that nFields is right for simplistic where cases
+ // so we can stop scanning in jsobj when we find the field of interest.
+ if ( strcmp(identifier,"obj")==0 && p>code && p[-1] != '.' )
+ ;
+ else
+ vars[identifier] = 1;
+ }
+ if ( done )
+ break;
+ p = q + 1;
+ continue;
}
- if ( done )
- break;
- p = q + 1;
- continue;
- }
- if ( *p == '\'' ) {
- p++;
- while ( *p && *p != '\'' ) p++;
- }
- else if ( *p == '"' ) {
+ if ( *p == '\'' ) {
+ p++;
+ while ( *p && *p != '\'' ) p++;
+ }
+ else if ( *p == '"' ) {
+ p++;
+ while ( *p && *p != '"' ) p++;
+ }
p++;
- while ( *p && *p != '"' ) p++;
}
- p++;
}
- }
- MiniLex() {
- strhashmap atest;
- atest["foo"] = 3;
- assert( atest.count("bar") == 0 );
- assert( atest.count("foo") == 1 );
- assert( atest["foo"] == 3 );
+ MiniLex() {
+ strhashmap atest;
+ atest["foo"] = 3;
+ assert( atest.count("bar") == 0 );
+ assert( atest.count("foo") == 1 );
+ assert( atest["foo"] == 3 );
- for ( int i = 0; i < 256; i++ ) {
- ic[i] = starter[i] = false;
- }
- for ( int i = 'a'; i <= 'z'; i++ )
- ic[i] = starter[i] = true;
- for ( int i = 'A'; i <= 'Z'; i++ )
- ic[i] = starter[i] = true;
- for ( int i = '0'; i <= '9'; i++ )
- ic[i] = true;
- for ( int i = 128; i < 256; i++ )
- ic[i] = starter[i] = true;
- ic['$'] = starter['$'] = true;
- ic['_'] = starter['_'] = true;
+ for ( int i = 0; i < 256; i++ ) {
+ ic[i] = starter[i] = false;
+ }
+ for ( int i = 'a'; i <= 'z'; i++ )
+ ic[i] = starter[i] = true;
+ for ( int i = 'A'; i <= 'Z'; i++ )
+ ic[i] = starter[i] = true;
+ for ( int i = '0'; i <= '9'; i++ )
+ ic[i] = true;
+ for ( int i = 128; i < 256; i++ )
+ ic[i] = starter[i] = true;
+ ic['$'] = starter['$'] = true;
+ ic['_'] = starter['_'] = true;
- reserved["break"] = true;
- reserved["case"] = true;
- reserved["catch"] = true;
- reserved["continue"] = true;
- reserved["default"] = true;
- reserved["delete"] = true;
- reserved["do"] = true;
- reserved["else"] = true;
- reserved["finally"] = true;
- reserved["for"] = true;
- reserved["function"] = true;
- reserved["if"] = true;
- reserved["in"] = true;
- reserved["instanceof"] = true;
- reserved["new"] = true;
- reserved["return"] = true;
- reserved["switch"] = true;
- reserved["this"] = true;
- reserved["throw"] = true;
- reserved["try"] = true;
- reserved["typeof"] = true;
- reserved["var"] = true;
- reserved["void"] = true;
- reserved["while"] = true;
- reserved["with "] = true;
- }
-};
+ reserved["break"] = true;
+ reserved["case"] = true;
+ reserved["catch"] = true;
+ reserved["continue"] = true;
+ reserved["default"] = true;
+ reserved["delete"] = true;
+ reserved["do"] = true;
+ reserved["else"] = true;
+ reserved["finally"] = true;
+ reserved["for"] = true;
+ reserved["function"] = true;
+ reserved["if"] = true;
+ reserved["in"] = true;
+ reserved["instanceof"] = true;
+ reserved["new"] = true;
+ reserved["return"] = true;
+ reserved["switch"] = true;
+ reserved["this"] = true;
+ reserved["throw"] = true;
+ reserved["try"] = true;
+ reserved["typeof"] = true;
+ reserved["var"] = true;
+ reserved["void"] = true;
+ reserved["while"] = true;
+ reserved["with "] = true;
+ }
+ };
} // namespace mongo
diff --git a/db/namespace.cpp b/db/namespace.cpp
index 54731757c7e..d2e7e9d3c66 100644
--- a/db/namespace.cpp
+++ b/db/namespace.cpp
@@ -28,517 +28,517 @@
namespace mongo {
-/* deleted lists -- linked lists of deleted records -- tehy are placed in 'buckets' of various sizes
- so you can look for a deleterecord about the right size.
-*/
-int bucketSizes[] = {
- 32, 64, 128, 256, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000,
- 0x8000, 0x10000, 0x20000, 0x40000, 0x80000, 0x100000, 0x200000,
- 0x400000, 0x800000
-};
+ /* deleted lists -- linked lists of deleted records -- they are placed in 'buckets' of various sizes
+ so you can look for a deleterecord about the right size.
+ */
+ int bucketSizes[] = {
+ 32, 64, 128, 256, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000,
+ 0x8000, 0x10000, 0x20000, 0x40000, 0x80000, 0x100000, 0x200000,
+ 0x400000, 0x800000
+ };
//NamespaceIndexMgr namespaceIndexMgr;
-/* returns true if we created (did not exist) during init() */
-bool NamespaceIndex::init(const char *dir, const char *database) {
- boost::filesystem::path path( dir );
- path /= string( database ) + ".ns";
+ /* returns true if we created (did not exist) during init() */
+ bool NamespaceIndex::init(const char *dir, const char *database) {
+ boost::filesystem::path path( dir );
+ path /= string( database ) + ".ns";
- bool created = !boost::filesystem::exists(path);
+ bool created = !boost::filesystem::exists(path);
- /* if someone manually deleted the datafiles for a database,
- we need to be sure to clear any cached info for the database in
- local.*.
- */
- if ( string("local") != database ) {
- DBInfo i(database);
- i.dbDropped();
- }
+ /* if someone manually deleted the datafiles for a database,
+ we need to be sure to clear any cached info for the database in
+ local.*.
+ */
+ if ( string("local") != database ) {
+ DBInfo i(database);
+ i.dbDropped();
+ }
- const int LEN = 16 * 1024 * 1024;
- string pathString = path.string();
- void *p = f.map(pathString.c_str(), LEN);
- if ( p == 0 ) {
- problem() << "couldn't open namespace.idx " << pathString << " terminating" << endl;
- exit(-3);
- }
- ht = new HashTable<Namespace,NamespaceDetails>(p, LEN, "namespace index");
- return created;
-}
-
-void NamespaceDetails::addDeletedRec(DeletedRecord *d, DiskLoc dloc) {
- {
- // defensive code: try to make us notice if we reference a deleted record
- (unsigned&) (((Record *) d)->data) = 0xeeeeeeee;
+ const int LEN = 16 * 1024 * 1024;
+ string pathString = path.string();
+ void *p = f.map(pathString.c_str(), LEN);
+ if ( p == 0 ) {
+ problem() << "couldn't open namespace.idx " << pathString << " terminating" << endl;
+ exit(-3);
+ }
+ ht = new HashTable<Namespace,NamespaceDetails>(p, LEN, "namespace index");
+ return created;
}
- dassert( dloc.drec() == d );
- DEBUGGING cout << "TEMP: add deleted rec " << dloc.toString() << ' ' << hex << d->extentOfs << endl;
- if ( capped ) {
- if ( !deletedList[ 1 ].isValid() ) {
- // Initial extent allocation. Insert at end.
- d->nextDeleted = DiskLoc();
- if ( deletedList[ 0 ].isNull() )
- deletedList[ 0 ] = dloc;
- else {
- DiskLoc i = deletedList[ 0 ];
- for (; !i.drec()->nextDeleted.isNull(); i = i.drec()->nextDeleted );
- i.drec()->nextDeleted = dloc;
+
+ void NamespaceDetails::addDeletedRec(DeletedRecord *d, DiskLoc dloc) {
+ {
+ // defensive code: try to make us notice if we reference a deleted record
+ (unsigned&) (((Record *) d)->data) = 0xeeeeeeee;
+ }
+ dassert( dloc.drec() == d );
+ DEBUGGING out() << "TEMP: add deleted rec " << dloc.toString() << ' ' << hex << d->extentOfs << endl;
+ if ( capped ) {
+ if ( !deletedList[ 1 ].isValid() ) {
+ // Initial extent allocation. Insert at end.
+ d->nextDeleted = DiskLoc();
+ if ( deletedList[ 0 ].isNull() )
+ deletedList[ 0 ] = dloc;
+ else {
+ DiskLoc i = deletedList[ 0 ];
+ for (; !i.drec()->nextDeleted.isNull(); i = i.drec()->nextDeleted );
+ i.drec()->nextDeleted = dloc;
+ }
+ } else {
+ d->nextDeleted = firstDeletedInCapExtent();
+ firstDeletedInCapExtent() = dloc;
}
} else {
- d->nextDeleted = firstDeletedInCapExtent();
- firstDeletedInCapExtent() = dloc;
+ int b = bucket(d->lengthWithHeaders);
+ DiskLoc& list = deletedList[b];
+ DiskLoc oldHead = list;
+ list = dloc;
+ d->nextDeleted = oldHead;
}
- } else {
- int b = bucket(d->lengthWithHeaders);
- DiskLoc& list = deletedList[b];
- DiskLoc oldHead = list;
- list = dloc;
- d->nextDeleted = oldHead;
}
-}
-/*
- lenToAlloc is WITH header
-*/
-DiskLoc NamespaceDetails::alloc(const char *ns, int lenToAlloc, DiskLoc& extentLoc) {
- lenToAlloc = (lenToAlloc + 3) & 0xfffffffc;
- DiskLoc loc = _alloc(ns, lenToAlloc);
- if ( loc.isNull() )
- return loc;
+ /*
+ lenToAlloc is WITH header
+ */
+ DiskLoc NamespaceDetails::alloc(const char *ns, int lenToAlloc, DiskLoc& extentLoc) {
+ lenToAlloc = (lenToAlloc + 3) & 0xfffffffc;
+ DiskLoc loc = _alloc(ns, lenToAlloc);
+ if ( loc.isNull() )
+ return loc;
- DeletedRecord *r = loc.drec();
+ DeletedRecord *r = loc.drec();
- /* note we want to grab from the front so our next pointers on disk tend
- to go in a forward direction which is important for performance. */
- int regionlen = r->lengthWithHeaders;
- extentLoc.set(loc.a(), r->extentOfs);
- assert( r->extentOfs < loc.getOfs() );
+ /* note we want to grab from the front so our next pointers on disk tend
+ to go in a forward direction which is important for performance. */
+ int regionlen = r->lengthWithHeaders;
+ extentLoc.set(loc.a(), r->extentOfs);
+ assert( r->extentOfs < loc.getOfs() );
- DEBUGGING cout << "TEMP: alloc() returns " << loc.toString() << ' ' << ns << " lentoalloc:" << lenToAlloc << " ext:" << extentLoc.toString() << endl;
+ DEBUGGING out() << "TEMP: alloc() returns " << loc.toString() << ' ' << ns << " lentoalloc:" << lenToAlloc << " ext:" << extentLoc.toString() << endl;
- int left = regionlen - lenToAlloc;
- if ( capped == 0 ) {
- if ( left < 24 || left < (lenToAlloc >> 3) ) {
- // you get the whole thing.
- return loc;
+ int left = regionlen - lenToAlloc;
+ if ( capped == 0 ) {
+ if ( left < 24 || left < (lenToAlloc >> 3) ) {
+ // you get the whole thing.
+ return loc;
+ }
}
- }
- /* split off some for further use. */
- r->lengthWithHeaders = lenToAlloc;
- DiskLoc newDelLoc = loc;
- newDelLoc.inc(lenToAlloc);
- DeletedRecord *newDel = newDelLoc.drec();
- newDel->extentOfs = r->extentOfs;
- newDel->lengthWithHeaders = left;
- newDel->nextDeleted.Null();
+ /* split off some for further use. */
+ r->lengthWithHeaders = lenToAlloc;
+ DiskLoc newDelLoc = loc;
+ newDelLoc.inc(lenToAlloc);
+ DeletedRecord *newDel = newDelLoc.drec();
+ newDel->extentOfs = r->extentOfs;
+ newDel->lengthWithHeaders = left;
+ newDel->nextDeleted.Null();
- addDeletedRec(newDel, newDelLoc);
+ addDeletedRec(newDel, newDelLoc);
- return loc;
-}
+ return loc;
+ }
-/* for non-capped collections.
- returned item is out of the deleted list upon return
-*/
-DiskLoc NamespaceDetails::__stdAlloc(int len) {
- DiskLoc *prev;
- DiskLoc *bestprev = 0;
- DiskLoc bestmatch;
- int bestmatchlen = 0x7fffffff;
- int b = bucket(len);
- DiskLoc cur = deletedList[b];
- prev = &deletedList[b];
- int extra = 5; // look for a better fit, a little.
- int chain = 0;
- while ( 1 ) {
- {
- int a = cur.a();
- if ( a < -1 || a >= 100000 ) {
- problem() << "~~ Assertion - cur out of range in _alloc() " << cur.toString() <<
- " a:" << a << " b:" << b << " chain:" << chain << '\n';
- sayDbContext();
- if ( cur == *prev )
- prev->Null();
- cur.Null();
+ /* for non-capped collections.
+ returned item is out of the deleted list upon return
+ */
+ DiskLoc NamespaceDetails::__stdAlloc(int len) {
+ DiskLoc *prev;
+ DiskLoc *bestprev = 0;
+ DiskLoc bestmatch;
+ int bestmatchlen = 0x7fffffff;
+ int b = bucket(len);
+ DiskLoc cur = deletedList[b];
+ prev = &deletedList[b];
+ int extra = 5; // look for a better fit, a little.
+ int chain = 0;
+ while ( 1 ) {
+ {
+ int a = cur.a();
+ if ( a < -1 || a >= 100000 ) {
+ problem() << "~~ Assertion - cur out of range in _alloc() " << cur.toString() <<
+ " a:" << a << " b:" << b << " chain:" << chain << '\n';
+ sayDbContext();
+ if ( cur == *prev )
+ prev->Null();
+ cur.Null();
+ }
}
- }
- if ( cur.isNull() ) {
- // move to next bucket. if we were doing "extra", just break
- if ( bestmatchlen < 0x7fffffff )
+ if ( cur.isNull() ) {
+ // move to next bucket. if we were doing "extra", just break
+ if ( bestmatchlen < 0x7fffffff )
+ break;
+ b++;
+ if ( b > MaxBucket ) {
+ // out of space. alloc a new extent.
+ return DiskLoc();
+ }
+ cur = deletedList[b];
+ prev = &deletedList[b];
+ continue;
+ }
+ DeletedRecord *r = cur.drec();
+ if ( r->lengthWithHeaders >= len &&
+ r->lengthWithHeaders < bestmatchlen ) {
+ bestmatchlen = r->lengthWithHeaders;
+ bestmatch = cur;
+ bestprev = prev;
+ }
+ if ( bestmatchlen < 0x7fffffff && --extra <= 0 )
break;
- b++;
- if ( b > MaxBucket ) {
- // out of space. alloc a new extent.
- return DiskLoc();
+ if ( ++chain > 30 && b < MaxBucket ) {
+ // too slow, force move to next bucket to grab a big chunk
+ //b++;
+ chain = 0;
+ cur.Null();
}
- cur = deletedList[b];
- prev = &deletedList[b];
- continue;
- }
- DeletedRecord *r = cur.drec();
- if ( r->lengthWithHeaders >= len &&
- r->lengthWithHeaders < bestmatchlen ) {
- bestmatchlen = r->lengthWithHeaders;
- bestmatch = cur;
- bestprev = prev;
- }
- if ( bestmatchlen < 0x7fffffff && --extra <= 0 )
- break;
- if ( ++chain > 30 && b < MaxBucket ) {
- // too slow, force move to next bucket to grab a big chunk
- //b++;
- chain = 0;
- cur.Null();
- }
- else {
- if ( r->nextDeleted.getOfs() == 0 ) {
- problem() << "~~ Assertion - bad nextDeleted " << r->nextDeleted.toString() <<
- " b:" << b << " chain:" << chain << ", fixing.\n";
- r->nextDeleted.Null();
+ else {
+ if ( r->nextDeleted.getOfs() == 0 ) {
+ problem() << "~~ Assertion - bad nextDeleted " << r->nextDeleted.toString() <<
+ " b:" << b << " chain:" << chain << ", fixing.\n";
+ r->nextDeleted.Null();
+ }
+ cur = r->nextDeleted;
+ prev = &r->nextDeleted;
}
- cur = r->nextDeleted;
- prev = &r->nextDeleted;
}
- }
- /* unlink ourself from the deleted list */
- {
- DeletedRecord *bmr = bestmatch.drec();
- *bestprev = bmr->nextDeleted;
- bmr->nextDeleted.setInvalid(); // defensive.
- assert(bmr->extentOfs < bestmatch.getOfs());
+ /* unlink ourself from the deleted list */
+ {
+ DeletedRecord *bmr = bestmatch.drec();
+ *bestprev = bmr->nextDeleted;
+ bmr->nextDeleted.setInvalid(); // defensive.
+ assert(bmr->extentOfs < bestmatch.getOfs());
+ }
+
+ return bestmatch;
}
- return bestmatch;
-}
-
-void NamespaceDetails::dumpDeleted(set<DiskLoc> *extents) {
-// cout << "DUMP deleted chains" << endl;
- for ( int i = 0; i < Buckets; i++ ) {
-// cout << " bucket " << i << endl;
- DiskLoc dl = deletedList[i];
- while ( !dl.isNull() ) {
- DeletedRecord *r = dl.drec();
- DiskLoc extLoc(dl.a(), r->extentOfs);
- if ( extents == 0 || extents->count(extLoc) <= 0 ) {
- cout << " bucket " << i << endl;
- cout << " " << dl.toString() << " ext:" << extLoc.toString();
- if ( extents && extents->count(extLoc) <= 0 )
- cout << '?';
- cout << " len:" << r->lengthWithHeaders << endl;
+ void NamespaceDetails::dumpDeleted(set<DiskLoc> *extents) {
+// out() << "DUMP deleted chains" << endl;
+ for ( int i = 0; i < Buckets; i++ ) {
+// out() << " bucket " << i << endl;
+ DiskLoc dl = deletedList[i];
+ while ( !dl.isNull() ) {
+ DeletedRecord *r = dl.drec();
+ DiskLoc extLoc(dl.a(), r->extentOfs);
+ if ( extents == 0 || extents->count(extLoc) <= 0 ) {
+ out() << " bucket " << i << endl;
+ out() << " " << dl.toString() << " ext:" << extLoc.toString();
+ if ( extents && extents->count(extLoc) <= 0 )
+ out() << '?';
+ out() << " len:" << r->lengthWithHeaders << endl;
+ }
+ dl = r->nextDeleted;
}
- dl = r->nextDeleted;
}
+// out() << endl;
}
-// cout << endl;
-}
-/* combine adjacent deleted records
+ /* combine adjacent deleted records
- this is O(n^2) but we call it for capped tables where typically n==1 or 2!
- (or 3...there will be a little unused sliver at the end of the extent.)
-*/
-void NamespaceDetails::compact() {
- assert(capped);
-
- list<DiskLoc> drecs;
-
- // Pull out capExtent's DRs from deletedList
- DiskLoc i = firstDeletedInCapExtent();
- for (; !i.isNull() && inCapExtent( i ); i = i.drec()->nextDeleted )
- drecs.push_back( i );
- firstDeletedInCapExtent() = i;
-
- // This is the O(n^2) part.
- drecs.sort();
-
- list<DiskLoc>::iterator j = drecs.begin();
- assert( j != drecs.end() );
- DiskLoc a = *j;
- while ( 1 ) {
- j++;
- if ( j == drecs.end() ) {
- DEBUGGING cout << "TEMP: compact adddelrec\n";
- addDeletedRec(a.drec(), a);
- break;
- }
- DiskLoc b = *j;
- while ( a.a() == b.a() && a.getOfs() + a.drec()->lengthWithHeaders == b.getOfs() ) {
- // a & b are adjacent. merge.
- a.drec()->lengthWithHeaders += b.drec()->lengthWithHeaders;
+ this is O(n^2) but we call it for capped tables where typically n==1 or 2!
+ (or 3...there will be a little unused sliver at the end of the extent.)
+ */
+ void NamespaceDetails::compact() {
+ assert(capped);
+
+ list<DiskLoc> drecs;
+
+ // Pull out capExtent's DRs from deletedList
+ DiskLoc i = firstDeletedInCapExtent();
+ for (; !i.isNull() && inCapExtent( i ); i = i.drec()->nextDeleted )
+ drecs.push_back( i );
+ firstDeletedInCapExtent() = i;
+
+ // This is the O(n^2) part.
+ drecs.sort();
+
+ list<DiskLoc>::iterator j = drecs.begin();
+ assert( j != drecs.end() );
+ DiskLoc a = *j;
+ while ( 1 ) {
j++;
if ( j == drecs.end() ) {
- DEBUGGING cout << "temp: compact adddelrec2\n";
+ DEBUGGING out() << "TEMP: compact adddelrec\n";
addDeletedRec(a.drec(), a);
- return;
+ break;
}
- b = *j;
+ DiskLoc b = *j;
+ while ( a.a() == b.a() && a.getOfs() + a.drec()->lengthWithHeaders == b.getOfs() ) {
+ // a & b are adjacent. merge.
+ a.drec()->lengthWithHeaders += b.drec()->lengthWithHeaders;
+ j++;
+ if ( j == drecs.end() ) {
+ DEBUGGING out() << "temp: compact adddelrec2\n";
+ addDeletedRec(a.drec(), a);
+ return;
+ }
+ b = *j;
+ }
+ DEBUGGING out() << "temp: compact adddelrec3\n";
+ addDeletedRec(a.drec(), a);
+ a = b;
}
- DEBUGGING cout << "temp: compact adddelrec3\n";
- addDeletedRec(a.drec(), a);
- a = b;
}
-}
-DiskLoc NamespaceDetails::firstRecord( const DiskLoc &startExtent ) const {
- for (DiskLoc i = startExtent.isNull() ? firstExtent : startExtent;
- !i.isNull(); i = i.ext()->xnext ) {
- if ( !i.ext()->firstRecord.isNull() )
- return i.ext()->firstRecord;
- }
- return DiskLoc();
-}
-
-DiskLoc NamespaceDetails::lastRecord( const DiskLoc &startExtent ) const {
- for (DiskLoc i = startExtent.isNull() ? lastExtent : startExtent;
- !i.isNull(); i = i.ext()->xprev ) {
- if ( !i.ext()->lastRecord.isNull() )
- return i.ext()->lastRecord;
- }
- return DiskLoc();
-}
-
-DiskLoc &NamespaceDetails::firstDeletedInCapExtent() {
- if ( deletedList[ 1 ].isNull() )
- return deletedList[ 0 ];
- else
- return deletedList[ 1 ].drec()->nextDeleted;
-}
-
-bool NamespaceDetails::inCapExtent( const DiskLoc &dl ) const {
- assert( !dl.isNull() );
- // We could have a rec or drec, doesn't matter.
- return dl.drec()->myExtent( dl ) == capExtent.ext();
-}
-
-bool NamespaceDetails::nextIsInCapExtent( const DiskLoc &dl ) const {
- assert( !dl.isNull() );
- DiskLoc next = dl.drec()->nextDeleted;
- if ( next.isNull() )
- return false;
- return inCapExtent( next );
-}
-
-void NamespaceDetails::advanceCapExtent( const char *ns ) {
- // We want deletedList[ 1 ] to be the last DeletedRecord of the prev cap extent
- // (or DiskLoc() if new capExtent == firstExtent)
- if ( capExtent == lastExtent )
- deletedList[ 1 ] = DiskLoc();
- else {
- DiskLoc i = firstDeletedInCapExtent();
- for (; !i.isNull() && nextIsInCapExtent( i ); i = i.drec()->nextDeleted );
- deletedList[ 1 ] = i;
- }
-
- capExtent = theCapExtent()->xnext.isNull() ? firstExtent : theCapExtent()->xnext;
- dassert( theCapExtent()->ns == ns );
- theCapExtent()->assertOk();
- capFirstNewRecord = DiskLoc();
-}
-
-int n_complaints_cap = 0;
-void NamespaceDetails::maybeComplain( const char *ns, int len ) const {
- if ( ++n_complaints_cap < 8 ) {
- cout << "couldn't make room for new record (len: " << len << ") in capped ns " << ns << '\n';
- int i = 0;
- for ( DiskLoc e = firstExtent; !e.isNull(); e = e.ext()->xnext, ++i ) {
- cout << " Extent " << i;
- if ( e == capExtent )
- cout << " (capExtent)";
- cout << '\n';
- cout << " magic: " << hex << e.ext()->magic << dec << " extent->ns: " << e.ext()->ns.buf << '\n';
- cout << " fr: " << e.ext()->firstRecord.toString() <<
- " lr: " << e.ext()->lastRecord.toString() << " extent->len: " << e.ext()->length << '\n';
+ DiskLoc NamespaceDetails::firstRecord( const DiskLoc &startExtent ) const {
+ for (DiskLoc i = startExtent.isNull() ? firstExtent : startExtent;
+ !i.isNull(); i = i.ext()->xnext ) {
+ if ( !i.ext()->firstRecord.isNull() )
+ return i.ext()->firstRecord;
}
- assert( len * 5 > lastExtentSize ); // assume it is unusually large record; if not, something is broken
+ return DiskLoc();
}
-}
-
-DiskLoc NamespaceDetails::__capAlloc( int len ) {
- DiskLoc prev = deletedList[ 1 ];
- DiskLoc i = firstDeletedInCapExtent();
- DiskLoc ret;
- for (; !i.isNull() && inCapExtent( i ); prev = i, i = i.drec()->nextDeleted ) {
- // We need to keep at least one DR per extent in deletedList[ 0 ],
- // so make sure there's space to create a DR at the end.
- if ( i.drec()->lengthWithHeaders >= len + 24 ) {
- ret = i;
- break;
+
+ DiskLoc NamespaceDetails::lastRecord( const DiskLoc &startExtent ) const {
+ for (DiskLoc i = startExtent.isNull() ? lastExtent : startExtent;
+ !i.isNull(); i = i.ext()->xprev ) {
+ if ( !i.ext()->lastRecord.isNull() )
+ return i.ext()->lastRecord;
}
+ return DiskLoc();
}
- /* unlink ourself from the deleted list */
- if ( !ret.isNull() ) {
- if ( prev.isNull() )
- deletedList[ 0 ] = ret.drec()->nextDeleted;
+ DiskLoc &NamespaceDetails::firstDeletedInCapExtent() {
+ if ( deletedList[ 1 ].isNull() )
+ return deletedList[ 0 ];
else
- prev.drec()->nextDeleted = ret.drec()->nextDeleted;
- ret.drec()->nextDeleted.setInvalid(); // defensive.
- assert( ret.drec()->extentOfs < ret.getOfs() );
+ return deletedList[ 1 ].drec()->nextDeleted;
}
- return ret;
-}
+ bool NamespaceDetails::inCapExtent( const DiskLoc &dl ) const {
+ assert( !dl.isNull() );
+ // We could have a rec or drec, doesn't matter.
+ return dl.drec()->myExtent( dl ) == capExtent.ext();
+ }
-void NamespaceDetails::checkMigrate() {
- // migrate old NamespaceDetails format
- if ( capped && capExtent.a() == 0 && capExtent.getOfs() == 0 ) {
- capFirstNewRecord = DiskLoc();
- capFirstNewRecord.setInvalid();
- // put all the DeletedRecords in deletedList[ 0 ]
- for ( int i = 1; i < Buckets; ++i ) {
- DiskLoc first = deletedList[ i ];
- if ( first.isNull() )
- continue;
- DiskLoc last = first;
- for (; !last.drec()->nextDeleted.isNull(); last = last.drec()->nextDeleted );
- last.drec()->nextDeleted = deletedList[ 0 ];
- deletedList[ 0 ] = first;
- deletedList[ i ] = DiskLoc();
+ bool NamespaceDetails::nextIsInCapExtent( const DiskLoc &dl ) const {
+ assert( !dl.isNull() );
+ DiskLoc next = dl.drec()->nextDeleted;
+ if ( next.isNull() )
+ return false;
+ return inCapExtent( next );
+ }
+
+ void NamespaceDetails::advanceCapExtent( const char *ns ) {
+ // We want deletedList[ 1 ] to be the last DeletedRecord of the prev cap extent
+ // (or DiskLoc() if new capExtent == firstExtent)
+ if ( capExtent == lastExtent )
+ deletedList[ 1 ] = DiskLoc();
+ else {
+ DiskLoc i = firstDeletedInCapExtent();
+ for (; !i.isNull() && nextIsInCapExtent( i ); i = i.drec()->nextDeleted );
+ deletedList[ 1 ] = i;
}
- // NOTE deletedList[ 1 ] set to DiskLoc() in above
- // Last, in case we're killed before getting here
- capExtent = firstExtent;
+ capExtent = theCapExtent()->xnext.isNull() ? firstExtent : theCapExtent()->xnext;
+ dassert( theCapExtent()->ns == ns );
+ theCapExtent()->assertOk();
+ capFirstNewRecord = DiskLoc();
}
-}
-
-/* alloc with capped table handling. */
-DiskLoc NamespaceDetails::_alloc(const char *ns, int len) {
- if ( !capped )
- return __stdAlloc(len);
-
- // capped.
-
- // signal done allocating new extents.
- if ( !deletedList[ 1 ].isValid() )
- deletedList[ 1 ] = DiskLoc();
-
- assert( len < 400000000 );
- int passes = 0;
- DiskLoc loc;
-
- // delete records until we have room and the max # objects limit achieved.
- dassert( theCapExtent()->ns == ns );
- theCapExtent()->assertOk();
- DiskLoc firstEmptyExtent;
- while ( 1 ) {
- if ( nrecords < max ) {
- loc = __capAlloc( len );
- if ( !loc.isNull() )
- break;
+
+ int n_complaints_cap = 0;
+ void NamespaceDetails::maybeComplain( const char *ns, int len ) const {
+ if ( ++n_complaints_cap < 8 ) {
+ out() << "couldn't make room for new record (len: " << len << ") in capped ns " << ns << '\n';
+ int i = 0;
+ for ( DiskLoc e = firstExtent; !e.isNull(); e = e.ext()->xnext, ++i ) {
+ out() << " Extent " << i;
+ if ( e == capExtent )
+ out() << " (capExtent)";
+ out() << '\n';
+ out() << " magic: " << hex << e.ext()->magic << dec << " extent->ns: " << e.ext()->ns.buf << '\n';
+ out() << " fr: " << e.ext()->firstRecord.toString() <<
+ " lr: " << e.ext()->lastRecord.toString() << " extent->len: " << e.ext()->length << '\n';
+ }
+ assert( len * 5 > lastExtentSize ); // assume it is unusually large record; if not, something is broken
}
+ }
- // If on first iteration through extents, don't delete anything.
- if ( !capFirstNewRecord.isValid() ) {
- advanceCapExtent( ns );
- if ( capExtent != firstExtent )
- capFirstNewRecord.setInvalid();
- // else signal done with first iteration through extents.
- continue;
+ DiskLoc NamespaceDetails::__capAlloc( int len ) {
+ DiskLoc prev = deletedList[ 1 ];
+ DiskLoc i = firstDeletedInCapExtent();
+ DiskLoc ret;
+ for (; !i.isNull() && inCapExtent( i ); prev = i, i = i.drec()->nextDeleted ) {
+ // We need to keep at least one DR per extent in deletedList[ 0 ],
+ // so make sure there's space to create a DR at the end.
+ if ( i.drec()->lengthWithHeaders >= len + 24 ) {
+ ret = i;
+ break;
+ }
}
- if ( !capFirstNewRecord.isNull() &&
- theCapExtent()->firstRecord == capFirstNewRecord ) {
- // We've deleted all records that were allocated on the previous
- // iteration through this extent.
- advanceCapExtent( ns );
- continue;
+ /* unlink ourself from the deleted list */
+ if ( !ret.isNull() ) {
+ if ( prev.isNull() )
+ deletedList[ 0 ] = ret.drec()->nextDeleted;
+ else
+ prev.drec()->nextDeleted = ret.drec()->nextDeleted;
+ ret.drec()->nextDeleted.setInvalid(); // defensive.
+ assert( ret.drec()->extentOfs < ret.getOfs() );
}
- if ( theCapExtent()->firstRecord.isNull() ) {
- if ( firstEmptyExtent.isNull() )
- firstEmptyExtent = capExtent;
- advanceCapExtent( ns );
- if ( firstEmptyExtent == capExtent ) {
- maybeComplain( ns, len );
- return DiskLoc();
+ return ret;
+ }
+
+ void NamespaceDetails::checkMigrate() {
+ // migrate old NamespaceDetails format
+ if ( capped && capExtent.a() == 0 && capExtent.getOfs() == 0 ) {
+ capFirstNewRecord = DiskLoc();
+ capFirstNewRecord.setInvalid();
+ // put all the DeletedRecords in deletedList[ 0 ]
+ for ( int i = 1; i < Buckets; ++i ) {
+ DiskLoc first = deletedList[ i ];
+ if ( first.isNull() )
+ continue;
+ DiskLoc last = first;
+ for (; !last.drec()->nextDeleted.isNull(); last = last.drec()->nextDeleted );
+ last.drec()->nextDeleted = deletedList[ 0 ];
+ deletedList[ 0 ] = first;
+ deletedList[ i ] = DiskLoc();
}
- continue;
- }
+ // NOTE deletedList[ 1 ] set to DiskLoc() in above
- DiskLoc fr = theCapExtent()->firstRecord;
- theDataFileMgr.deleteRecord(ns, fr.rec(), fr, true);
- compact();
- assert( ++passes < 5000 );
+ // Last, in case we're killed before getting here
+ capExtent = firstExtent;
+ }
}
- // Remember first record allocated on this iteration through capExtent.
- if ( capFirstNewRecord.isValid() && capFirstNewRecord.isNull() )
- capFirstNewRecord = loc;
+ /* alloc with capped table handling. */
+ DiskLoc NamespaceDetails::_alloc(const char *ns, int len) {
+ if ( !capped )
+ return __stdAlloc(len);
+
+ // capped.
+
+ // signal done allocating new extents.
+ if ( !deletedList[ 1 ].isValid() )
+ deletedList[ 1 ] = DiskLoc();
+
+ assert( len < 400000000 );
+ int passes = 0;
+ DiskLoc loc;
+
+ // delete records until we have room and the max # objects limit achieved.
+ dassert( theCapExtent()->ns == ns );
+ theCapExtent()->assertOk();
+ DiskLoc firstEmptyExtent;
+ while ( 1 ) {
+ if ( nrecords < max ) {
+ loc = __capAlloc( len );
+ if ( !loc.isNull() )
+ break;
+ }
+
+ // If on first iteration through extents, don't delete anything.
+ if ( !capFirstNewRecord.isValid() ) {
+ advanceCapExtent( ns );
+ if ( capExtent != firstExtent )
+ capFirstNewRecord.setInvalid();
+ // else signal done with first iteration through extents.
+ continue;
+ }
- return loc;
-}
+ if ( !capFirstNewRecord.isNull() &&
+ theCapExtent()->firstRecord == capFirstNewRecord ) {
+ // We've deleted all records that were allocated on the previous
+ // iteration through this extent.
+ advanceCapExtent( ns );
+ continue;
+ }
-/* you MUST call when adding an index. see pdfile.cpp */
-void NamespaceDetails::addingIndex(const char *thisns, IndexDetails& details) {
- assert( nsdetails(thisns) == this );
- assert( &details == &indexes[nIndexes] );
- nIndexes++;
- NamespaceDetailsTransient::get(thisns).addedIndex();
-}
+ if ( theCapExtent()->firstRecord.isNull() ) {
+ if ( firstEmptyExtent.isNull() )
+ firstEmptyExtent = capExtent;
+ advanceCapExtent( ns );
+ if ( firstEmptyExtent == capExtent ) {
+ maybeComplain( ns, len );
+ return DiskLoc();
+ }
+ continue;
+ }
-/* returns index of the first index in which the field is present. -1 if not present.
- (aug08 - this method not currently used)
-*/
-int NamespaceDetails::fieldIsIndexed(const char *fieldName) {
- for ( int i = 0; i < nIndexes; i++ ) {
- IndexDetails& idx = indexes[i];
- BSONObj idxKey = idx.info.obj().getObjectField("key"); // e.g., { ts : -1 }
- if ( !idxKey.findElement(fieldName).eoo() )
- return i;
+ DiskLoc fr = theCapExtent()->firstRecord;
+ theDataFileMgr.deleteRecord(ns, fr.rec(), fr, true);
+ compact();
+ assert( ++passes < 5000 );
+ }
+
+ // Remember first record allocated on this iteration through capExtent.
+ if ( capFirstNewRecord.isValid() && capFirstNewRecord.isNull() )
+ capFirstNewRecord = loc;
+
+ return loc;
}
- return -1;
-}
-/* ------------------------------------------------------------------------- */
+ /* you MUST call when adding an index. see pdfile.cpp */
+ void NamespaceDetails::addingIndex(const char *thisns, IndexDetails& details) {
+ assert( nsdetails(thisns) == this );
+ assert( &details == &indexes[nIndexes] );
+ nIndexes++;
+ NamespaceDetailsTransient::get(thisns).addedIndex();
+ }
-map<const char *,NamespaceDetailsTransient*> NamespaceDetailsTransient::map;
-typedef map<const char *,NamespaceDetailsTransient*>::iterator ouriter;
+ /* returns index of the first index in which the field is present. -1 if not present.
+ (aug08 - this method not currently used)
+ */
+ int NamespaceDetails::fieldIsIndexed(const char *fieldName) {
+ for ( int i = 0; i < nIndexes; i++ ) {
+ IndexDetails& idx = indexes[i];
+ BSONObj idxKey = idx.info.obj().getObjectField("key"); // e.g., { ts : -1 }
+ if ( !idxKey.findElement(fieldName).eoo() )
+ return i;
+ }
+ return -1;
+ }
+
+ /* ------------------------------------------------------------------------- */
-NamespaceDetailsTransient& NamespaceDetailsTransient::get(const char *ns) {
- NamespaceDetailsTransient*& t = map[ns];
- if ( t == 0 )
- t = new NamespaceDetailsTransient(ns);
- return *t;
-}
+ map<const char *,NamespaceDetailsTransient*> NamespaceDetailsTransient::map;
+ typedef map<const char *,NamespaceDetailsTransient*>::iterator ouriter;
-void NamespaceDetailsTransient::computeIndexKeys() {
- NamespaceDetails *d = nsdetails(ns.c_str());
- for ( int i = 0; i < d->nIndexes; i++ ) {
+ NamespaceDetailsTransient& NamespaceDetailsTransient::get(const char *ns) {
+ NamespaceDetailsTransient*& t = map[ns];
+ if ( t == 0 )
+ t = new NamespaceDetailsTransient(ns);
+ return *t;
+ }
+
+ void NamespaceDetailsTransient::computeIndexKeys() {
+ NamespaceDetails *d = nsdetails(ns.c_str());
+ for ( int i = 0; i < d->nIndexes; i++ ) {
// set<string> fields;
- d->indexes[i].keyPattern().getFieldNames(allIndexKeys);
+ d->indexes[i].keyPattern().getFieldNames(allIndexKeys);
// allIndexKeys.insert(fields.begin(),fields.end());
+ }
}
-}
-/* ------------------------------------------------------------------------- */
+ /* ------------------------------------------------------------------------- */
-/* add a new namespace to the system catalog (<dbname>.system.namespaces).
- options: { capped : ..., size : ... }
-*/
-void addNewNamespaceToCatalog(const char *ns, BSONObj *options = 0) {
- if ( verbose )
- log() << "New namespace: " << ns << '\n';
- if ( strstr(ns, "system.namespaces") ) {
- // system.namespaces holds all the others, so it is not explicitly listed in the catalog.
- // TODO: fix above should not be strstr!
- return;
- }
+ /* add a new namespace to the system catalog (<dbname>.system.namespaces).
+ options: { capped : ..., size : ... }
+ */
+ void addNewNamespaceToCatalog(const char *ns, BSONObj *options = 0) {
+ if ( verbose )
+ log() << "New namespace: " << ns << '\n';
+ if ( strstr(ns, "system.namespaces") ) {
+ // system.namespaces holds all the others, so it is not explicitly listed in the catalog.
+ // TODO: fix above should not be strstr!
+ return;
+ }
- {
- BSONObjBuilder b;
- b.append("name", ns);
- if ( options )
- b.append("options", *options);
- BSONObj j = b.done();
- char database[256];
- nsToClient(ns, database);
- string s = database;
- s += ".system.namespaces";
- theDataFileMgr.insert(s.c_str(), j.objdata(), j.objsize(), true);
+ {
+ BSONObjBuilder b;
+ b.append("name", ns);
+ if ( options )
+ b.append("options", *options);
+ BSONObj j = b.done();
+ char database[256];
+ nsToClient(ns, database);
+ string s = database;
+ s += ".system.namespaces";
+ theDataFileMgr.insert(s.c_str(), j.objdata(), j.objsize(), true);
+ }
}
-}
} // namespace mongo
diff --git a/db/namespace.h b/db/namespace.h
index 76e73a57a13..c93043ffe43 100644
--- a/db/namespace.h
+++ b/db/namespace.h
@@ -28,363 +28,363 @@
namespace mongo {
-class Cursor;
+ class Cursor;
#pragma pack(push,1)
-class Namespace {
-public:
- enum { MaxNsLen = 128 };
- Namespace(const char *ns) {
- *this = ns;
- }
- Namespace& operator=(const char *ns) {
- memset(buf, 0, MaxNsLen); /* this is just to keep stuff clean in the files for easy dumping and reading */
- strcpy_s(buf, MaxNsLen, ns);
- return *this;
- }
-
- void kill() {
- buf[0] = 0x7f;
- }
-
- bool operator==(const char *r) {
- return strcmp(buf, r) == 0;
- }
- bool operator==(const Namespace& r) {
- return strcmp(buf, r.buf) == 0;
- }
- int hash() const {
- unsigned x = 0;
- const char *p = buf;
- while ( *p ) {
- x = x * 131 + *p;
- p++;
+ class Namespace {
+ public:
+ enum { MaxNsLen = 128 };
+ Namespace(const char *ns) {
+ *this = ns;
+ }
+ Namespace& operator=(const char *ns) {
+ memset(buf, 0, MaxNsLen); /* this is just to keep stuff clean in the files for easy dumping and reading */
+ strcpy_s(buf, MaxNsLen, ns);
+ return *this;
}
- return (x & 0x7fffffff) | 0x8000000; // must be > 0
- }
-
-
- /**
- ( foo.bar ).getSisterNS( "blah" ) == foo.blah
- */
- string getSisterNS( const char * local ) {
- assert( local && local[0] != '.' );
- string old(buf);
- if ( old.find( "." ) != string::npos )
- old = old.substr( 0 , old.find( "." ) );
- return old + "." + local;
- }
- char buf[MaxNsLen];
-};
+ void kill() {
+ buf[0] = 0x7f;
+ }
-const int Buckets = 19;
-const int MaxBucket = 18;
-const int MaxIndexes = 10;
+ bool operator==(const char *r) {
+ return strcmp(buf, r) == 0;
+ }
+ bool operator==(const Namespace& r) {
+ return strcmp(buf, r.buf) == 0;
+ }
+ int hash() const {
+ unsigned x = 0;
+ const char *p = buf;
+ while ( *p ) {
+ x = x * 131 + *p;
+ p++;
+ }
+ return (x & 0x7fffffff) | 0x8000000; // must be > 0
+ }
-class IndexDetails {
-public:
- DiskLoc head; /* btree head */
- /* Location of index info object. Format:
+ /**
+ ( foo.bar ).getSisterNS( "blah" ) == foo.blah
+ */
+ string getSisterNS( const char * local ) {
+ assert( local && local[0] != '.' );
+ string old(buf);
+ if ( old.find( "." ) != string::npos )
+ old = old.substr( 0 , old.find( "." ) );
+ return old + "." + local;
+ }
- { name:"nameofindex", ns:"parentnsname", key: {keypattobject} }
+ char buf[MaxNsLen];
+ };
- This object is in the system.indexes collection. Note that since we
- have a pointer to the object here, the object in system.indexes must
- never move.
- */
- DiskLoc info;
+ const int Buckets = 19;
+ const int MaxBucket = 18;
+ const int MaxIndexes = 10;
+
+ class IndexDetails {
+ public:
+ DiskLoc head; /* btree head */
+
+ /* Location of index info object. Format:
+
+ { name:"nameofindex", ns:"parentnsname", key: {keypattobject} }
+
+ This object is in the system.indexes collection. Note that since we
+ have a pointer to the object here, the object in system.indexes must
+ never move.
+ */
+ DiskLoc info;
+
+ /* extract key value from the query object
+ e.g., if key() == { x : 1 },
+ { x : 70, y : 3 } -> { x : 70 }
+ handles our embedded dot notation too.
+ */
+ BSONObj getKeyFromQuery(BSONObj& query) {
+ BSONObj k = keyPattern();
+ BSONObj res = query.extractFieldsUnDotted(k);
+ assert(res.objsize() != 0); // guard against a seg fault if details is 0
+ return res;
+ }
- /* extract key value from the query object
- e.g., if key() == { x : 1 },
- { x : 70, y : 3 } -> { x : 70 }
- handles our embedded dot notation too.
- */
- BSONObj getKeyFromQuery(BSONObj& query) {
- BSONObj k = keyPattern();
- BSONObj res = query.extractFieldsUnDotted(k);
- assert(res.objsize() != 0); // guard against a seg fault if details is 0
- return res;
- }
+ /* pull out the relevant key objects from obj, so we
+ can index them. Note that the set is multiple elements
+ only when it's a "multikey" array.
+ keys will be left empty if key not found in the object.
+ */
+ void getKeysFromObject( const BSONObj& obj, BSONObjSetDefaultOrder& keys) const;
+
+ /* get the key pattern for this object.
+ e.g., { lastname:1, firstname:1 }
+ */
+ BSONObj keyPattern() const {
+ return info.obj().getObjectField("key");
+ }
- /* pull out the relevant key objects from obj, so we
- can index them. Note that the set is multiple elements
- only when it's a "multikey" array.
- keys will be left empty if key not found in the object.
- */
- void getKeysFromObject( const BSONObj& obj, BSONObjSetDefaultOrder& keys) const;
+ // returns name of this index's storage area
+ // database.table.$index
+ string indexNamespace() {
+ BSONObj io = info.obj();
+ string s;
+ s.reserve(Namespace::MaxNsLen);
+ s = io.getStringField("ns");
+ assert( !s.empty() );
+ s += ".$";
+ s += io.getStringField("name");
+ return s;
+ }
- /* get the key pattern for this object.
- e.g., { lastname:1, firstname:1 }
- */
- BSONObj keyPattern() const {
- return info.obj().getObjectField("key");
- }
+ string indexName() const { // e.g. "ts_1"
+ BSONObj io = info.obj();
+ return io.getStringField("name");
+ }
- // returns name of this index's storage area
- // database.table.$index
- string indexNamespace() {
- BSONObj io = info.obj();
- string s;
- s.reserve(Namespace::MaxNsLen);
- s = io.getStringField("ns");
- assert( !s.empty() );
- s += ".$";
- s += io.getStringField("name");
- return s;
- }
+ /* gets not our namespace name (indexNamespace for that),
+ but the collection we index, its name.
+ */
+ string parentNS() const {
+ BSONObj io = info.obj();
+ return io.getStringField("ns");
+ }
- string indexName() const { // e.g. "ts_1"
- BSONObj io = info.obj();
- return io.getStringField("name");
- }
+ /* delete this index. does NOT celan up the system catalog
+ (system.indexes or system.namespaces) -- only NamespaceIndex.
+ */
+ void kill();
+ };
- /* gets not our namespace name (indexNamespace for that),
- but the collection we index, its name.
- */
- string parentNS() const {
- BSONObj io = info.obj();
- return io.getStringField("ns");
- }
+ extern int bucketSizes[];
- /* delete this index. does NOT celan up the system catalog
- (system.indexes or system.namespaces) -- only NamespaceIndex.
+ /* this is the "header" for a collection that has all its details. in the .ns file.
*/
- void kill();
-};
-
-extern int bucketSizes[];
-
-/* this is the "header" for a collection that has all its details. in the .ns file.
-*/
-class NamespaceDetails {
-public:
- NamespaceDetails( const DiskLoc &loc, bool _capped ) {
- /* be sure to initialize new fields here -- doesn't default to zeroes the way we use it */
- firstExtent = lastExtent = capExtent = loc;
- datasize = nrecords = 0;
- lastExtentSize = 0;
- nIndexes = 0;
- capped = _capped;
- max = 0x7fffffff;
- paddingFactor = 1.0;
- flags = 0;
- capFirstNewRecord = DiskLoc();
- // Signal that we are on first allocation iteration through extents.
- capFirstNewRecord.setInvalid();
- // For capped case, signal that we are doing initial extent allocation.
- if ( capped )
- deletedList[ 1 ].setInvalid();
- memset(reserved, 0, sizeof(reserved));
- }
- DiskLoc firstExtent;
- DiskLoc lastExtent;
- DiskLoc deletedList[Buckets];
- long long datasize;
- long long nrecords;
- int lastExtentSize;
- int nIndexes;
- IndexDetails indexes[MaxIndexes];
- int capped;
- int max; // max # of objects for a capped table.
- double paddingFactor; // 1.0 = no padding.
- int flags;
- DiskLoc capExtent;
- DiskLoc capFirstNewRecord;
- char reserved[108];
-
- enum {
- Flag_HaveIdIndex = 1 // set when we have _id index (ONLY if ensureIdIndex was called -- 0 if that has never been called)
- };
-
- /* you MUST call when adding an index. see pdfile.cpp */
- void addingIndex(const char *thisns, IndexDetails& details);
+ class NamespaceDetails {
+ public:
+ NamespaceDetails( const DiskLoc &loc, bool _capped ) {
+ /* be sure to initialize new fields here -- doesn't default to zeroes the way we use it */
+ firstExtent = lastExtent = capExtent = loc;
+ datasize = nrecords = 0;
+ lastExtentSize = 0;
+ nIndexes = 0;
+ capped = _capped;
+ max = 0x7fffffff;
+ paddingFactor = 1.0;
+ flags = 0;
+ capFirstNewRecord = DiskLoc();
+ // Signal that we are on first allocation iteration through extents.
+ capFirstNewRecord.setInvalid();
+ // For capped case, signal that we are doing initial extent allocation.
+ if ( capped )
+ deletedList[ 1 ].setInvalid();
+ memset(reserved, 0, sizeof(reserved));
+ }
+ DiskLoc firstExtent;
+ DiskLoc lastExtent;
+ DiskLoc deletedList[Buckets];
+ long long datasize;
+ long long nrecords;
+ int lastExtentSize;
+ int nIndexes;
+ IndexDetails indexes[MaxIndexes];
+ int capped;
+ int max; // max # of objects for a capped table.
+ double paddingFactor; // 1.0 = no padding.
+ int flags;
+ DiskLoc capExtent;
+ DiskLoc capFirstNewRecord;
+ char reserved[108];
+
+ enum {
+ Flag_HaveIdIndex = 1 // set when we have _id index (ONLY if ensureIdIndex was called -- 0 if that has never been called)
+ };
+
+ /* you MUST call when adding an index. see pdfile.cpp */
+ void addingIndex(const char *thisns, IndexDetails& details);
+
+ void aboutToDeleteAnIndex() {
+ flags &= ~Flag_HaveIdIndex;
+ }
- void aboutToDeleteAnIndex() {
- flags &= ~Flag_HaveIdIndex;
- }
+ /* returns index of the first index in which the field is present. -1 if not present. */
+ int fieldIsIndexed(const char *fieldName);
- /* returns index of the first index in which the field is present. -1 if not present. */
- int fieldIsIndexed(const char *fieldName);
+ void paddingFits() {
+ double x = paddingFactor - 0.01;
+ if ( x >= 1.0 )
+ paddingFactor = x;
+ }
+ void paddingTooSmall() {
+ double x = paddingFactor + 0.6;
+ if ( x <= 2.0 )
+ paddingFactor = x;
+ }
- void paddingFits() {
- double x = paddingFactor - 0.01;
- if ( x >= 1.0 )
- paddingFactor = x;
- }
- void paddingTooSmall() {
- double x = paddingFactor + 0.6;
- if ( x <= 2.0 )
- paddingFactor = x;
- }
+ //returns offset in indexes[]
+ int findIndexByName(const char *name) {
+ for ( int i = 0; i < nIndexes; i++ ) {
+ if ( strcmp(indexes[i].info.obj().getStringField("name"),name) == 0 )
+ return i;
+ }
+ return -1;
+ }
- //returns offset in indexes[]
- int findIndexByName(const char *name) {
- for ( int i = 0; i < nIndexes; i++ ) {
- if ( strcmp(indexes[i].info.obj().getStringField("name"),name) == 0 )
- return i;
+ /* return which "deleted bucket" for this size object */
+ static int bucket(int n) {
+ for ( int i = 0; i < Buckets; i++ )
+ if ( bucketSizes[i] > n )
+ return i;
+ return Buckets-1;
}
- return -1;
- }
- /* return which "deleted bucket" for this size object */
- static int bucket(int n) {
- for ( int i = 0; i < Buckets; i++ )
- if ( bucketSizes[i] > n )
- return i;
- return Buckets-1;
- }
+ /* allocate a new record. lenToAlloc includes headers. */
+ DiskLoc alloc(const char *ns, int lenToAlloc, DiskLoc& extentLoc);
- /* allocate a new record. lenToAlloc includes headers. */
- DiskLoc alloc(const char *ns, int lenToAlloc, DiskLoc& extentLoc);
+ /* add a given record to the deleted chains for this NS */
+ void addDeletedRec(DeletedRecord *d, DiskLoc dloc);
- /* add a given record to the deleted chains for this NS */
- void addDeletedRec(DeletedRecord *d, DiskLoc dloc);
+ void dumpDeleted(set<DiskLoc> *extents = 0);
- void dumpDeleted(set<DiskLoc> *extents = 0);
+ bool capLooped() const {
+ return capped && capFirstNewRecord.isValid();
+ }
- bool capLooped() const {
- return capped && capFirstNewRecord.isValid();
- }
+ // Start from firstExtent by default.
+ DiskLoc firstRecord( const DiskLoc &startExtent = DiskLoc() ) const;
- // Start from firstExtent by default.
- DiskLoc firstRecord( const DiskLoc &startExtent = DiskLoc() ) const;
+ // Start from lastExtent by default.
+ DiskLoc lastRecord( const DiskLoc &startExtent = DiskLoc() ) const;
- // Start from lastExtent by default.
- DiskLoc lastRecord( const DiskLoc &startExtent = DiskLoc() ) const;
+ bool inCapExtent( const DiskLoc &dl ) const;
- bool inCapExtent( const DiskLoc &dl ) const;
+ void checkMigrate();
- void checkMigrate();
+ private:
+ Extent *theCapExtent() const {
+ return capExtent.ext();
+ }
+ void advanceCapExtent( const char *ns );
+ void maybeComplain( const char *ns, int len ) const;
+ DiskLoc __stdAlloc(int len);
+ DiskLoc __capAlloc(int len);
+ DiskLoc _alloc(const char *ns, int len);
+ void compact();
+
+ DiskLoc &firstDeletedInCapExtent();
+ bool nextIsInCapExtent( const DiskLoc &dl ) const;
+ };
-private:
- Extent *theCapExtent() const {
- return capExtent.ext();
- }
- void advanceCapExtent( const char *ns );
- void maybeComplain( const char *ns, int len ) const;
- DiskLoc __stdAlloc(int len);
- DiskLoc __capAlloc(int len);
- DiskLoc _alloc(const char *ns, int len);
- void compact();
+#pragma pack(pop)
- DiskLoc &firstDeletedInCapExtent();
- bool nextIsInCapExtent( const DiskLoc &dl ) const;
-};
+ /* these are things we know / compute about a namespace that are transient -- things
+ we don't actually store in the .ns file. so mainly caching of frequently used
+ information.
-#pragma pack(pop)
+ CAUTION: Are you maintaining this properly on a collection drop()? A dropdatabase()? Be careful.
+ The current field "allIndexKeys" may have too many keys in it on such an occurrence;
+ as currently used that does not cause anything terrible to happen.
+ */
+ class NamespaceDetailsTransient : boost::noncopyable {
+ string ns;
+ bool haveIndexKeys;
+ set<string> allIndexKeys;
+ void computeIndexKeys();
+ public:
+ NamespaceDetailsTransient(const char *_ns) : ns(_ns) {
+ haveIndexKeys=false; /*lazy load them*/
+ }
-/* these are things we know / compute about a namespace that are transient -- things
- we don't actually store in the .ns file. so mainly caching of frequently used
- information.
+ /* get set of index keys for this namespace. handy to quickly check if a given
+ field is indexed (Note it might be a seconary component of a compound index.)
+ */
+ set<string>& indexKeys() {
+ if ( !haveIndexKeys ) {
+ haveIndexKeys=true;
+ computeIndexKeys();
+ }
+ return allIndexKeys;
+ }
- CAUTION: Are you maintaining this properly on a collection drop()? A dropdatabase()? Be careful.
- The current field "allIndexKeys" may have too many keys in it on such an occurrence;
- as currently used that does not cause anything terrible to happen.
-*/
-class NamespaceDetailsTransient : boost::noncopyable {
- string ns;
- bool haveIndexKeys;
- set<string> allIndexKeys;
- void computeIndexKeys();
-public:
- NamespaceDetailsTransient(const char *_ns) : ns(_ns) {
- haveIndexKeys=false; /*lazy load them*/
- }
+ void addedIndex() {
+ haveIndexKeys=false;
+ }
+ private:
+ static std::map<const char *,NamespaceDetailsTransient*> map;
+ public:
+ static NamespaceDetailsTransient& get(const char *ns);
+ };
- /* get set of index keys for this namespace. handy to quickly check if a given
- field is indexed (Note it might be a seconary component of a compound index.)
+ /* NamespaceIndex is the ".ns" file you see in the data directory. It is the "system catalog"
+ if you will: at least the core parts. (Additional info in system.* collections.)
*/
- set<string>& indexKeys() {
- if ( !haveIndexKeys ) {
- haveIndexKeys=true;
- computeIndexKeys();
+ class NamespaceIndex {
+ friend class NamespaceCursor;
+ public:
+ NamespaceIndex() { }
+
+ /* returns true if we created (did not exist) during init() */
+ bool init(const char *dir, const char *database);
+
+ void add(const char *ns, DiskLoc& loc, bool capped) {
+ Namespace n(ns);
+ NamespaceDetails details( loc, capped );
+ ht->put(n, details);
}
- return allIndexKeys;
- }
- void addedIndex() {
- haveIndexKeys=false;
- }
-private:
- static std::map<const char *,NamespaceDetailsTransient*> map;
-public:
- static NamespaceDetailsTransient& get(const char *ns);
-};
-
-/* NamespaceIndex is the ".ns" file you see in the data directory. It is the "system catalog"
- if you will: at least the core parts. (Additional info in system.* collections.)
-*/
-class NamespaceIndex {
- friend class NamespaceCursor;
-public:
- NamespaceIndex() { }
-
- /* returns true if we created (did not exist) during init() */
- bool init(const char *dir, const char *database);
-
- void add(const char *ns, DiskLoc& loc, bool capped) {
- Namespace n(ns);
- NamespaceDetails details( loc, capped );
- ht->put(n, details);
- }
-
- /* just for diagnostics */
- size_t detailsOffset(NamespaceDetails *d) {
- return ((char *) d) - (char *) ht->nodes;
- }
+ /* just for diagnostics */
+ size_t detailsOffset(NamespaceDetails *d) {
+ return ((char *) d) - (char *) ht->nodes;
+ }
- NamespaceDetails* details(const char *ns) {
- Namespace n(ns);
- NamespaceDetails *d = ht->get(n);
- if ( d )
- d->checkMigrate();
- return d;
- }
+ NamespaceDetails* details(const char *ns) {
+ Namespace n(ns);
+ NamespaceDetails *d = ht->get(n);
+ if ( d )
+ d->checkMigrate();
+ return d;
+ }
- void kill(const char *ns) {
- Namespace n(ns);
- ht->kill(n);
- }
+ void kill(const char *ns) {
+ Namespace n(ns);
+ ht->kill(n);
+ }
- bool find(const char *ns, DiskLoc& loc) {
- NamespaceDetails *l = details(ns);
- if ( l ) {
- loc = l->firstExtent;
- return true;
+ bool find(const char *ns, DiskLoc& loc) {
+ NamespaceDetails *l = details(ns);
+ if ( l ) {
+ loc = l->firstExtent;
+ return true;
+ }
+ return false;
}
- return false;
- }
-private:
- MemoryMappedFile f;
- HashTable<Namespace,NamespaceDetails> *ht;
-};
+ private:
+ MemoryMappedFile f;
+ HashTable<Namespace,NamespaceDetails> *ht;
+ };
-extern const char *dbpath;
+ extern const char *dbpath;
// "database.a.b.c" -> "database"
-const int MaxClientLen = 256;
-inline void nsToClient(const char *ns, char *database) {
- const char *p = ns;
- char *q = database;
- while ( *p != '.' ) {
- if ( *p == 0 )
- break;
- *q++ = *p++;
+ const int MaxClientLen = 256;
+ inline void nsToClient(const char *ns, char *database) {
+ const char *p = ns;
+ char *q = database;
+ while ( *p != '.' ) {
+ if ( *p == 0 )
+ break;
+ *q++ = *p++;
+ }
+ *q = 0;
+ if (q-database>=MaxClientLen) {
+ problem() << "nsToClient: ns too long. terminating, buf overrun condition" << endl;
+ dbexit(60);
+ }
}
- *q = 0;
- if (q-database>=MaxClientLen) {
- problem() << "nsToClient: ns too long. terminating, buf overrun condition" << endl;
- dbexit(60);
+ inline string nsToClient(const char *ns) {
+ char buf[MaxClientLen];
+ nsToClient(ns, buf);
+ return buf;
}
-}
-inline string nsToClient(const char *ns) {
- char buf[MaxClientLen];
- nsToClient(ns, buf);
- return buf;
-}
} // namespace mongo
diff --git a/db/pdfile.cpp b/db/pdfile.cpp
index fb8e09389c1..8c226a20952 100644
--- a/db/pdfile.cpp
+++ b/db/pdfile.cpp
@@ -37,963 +37,963 @@ _ disallow system* manipulations from the database.
namespace mongo {
-extern bool quota;
-extern int port;
-
-const char *dbpath = "/data/db/";
-
-DataFileMgr theDataFileMgr;
-map<string,Database*> databases;
-Database *database;
-const char *curNs = "";
-int MAGIC = 0x1000;
-int curOp = -2;
-int callDepth = 0;
-
-extern int otherTraceLevel;
-void addNewNamespaceToCatalog(const char *ns, BSONObj *options = 0);
-
-string getDbContext() {
- stringstream ss;
- if ( database ) {
- ss << database->name << ' ';
- if ( curNs )
- ss << curNs << ' ';
- }
- ss<< "op:" << curOp << ' ' << callDepth;
- return ss.str();
-}
+ extern bool quota;
+ extern int port;
-BSONObj::BSONObj(Record *r) {
- init(r->data, false);
- /*
- _objdata = r->data;
- _objsize = *((int*) _objdata);
- if( _objsize > r->netLength() ) {
- cout << "About to assert fail _objsize <= r->netLength()" << endl;
- cout << " _objsize: " << _objsize << endl;
- cout << " netLength(): " << r->netLength() << endl;
- cout << " extentOfs: " << r->extentOfs << endl;
- cout << " nextOfs: " << r->nextOfs << endl;
- cout << " prevOfs: " << r->prevOfs << endl;
- assert( _objsize <= r->netLength() );
- }
- iFree = false;
- */
-}
-
-/*---------------------------------------------------------------------*/
-
-int initialExtentSize(int len) {
- long long sz = len * 16;
- if ( len < 1000 ) sz = len * 64;
- if ( sz > 1000000000 )
- sz = 1000000000;
- int z = ((int)sz) & 0xffffff00;
- assert( z > len );
- DEV log() << "initialExtentSize(" << len << ") returns " << z << endl;
- return z;
-}
-
-bool _userCreateNS(const char *ns, BSONObj& j, string& err) {
- if ( nsdetails(ns) ) {
- err = "collection already exists";
- return false;
+ const char *dbpath = "/data/db/";
+
+ DataFileMgr theDataFileMgr;
+ map<string,Database*> databases;
+ Database *database;
+ const char *curNs = "";
+ int MAGIC = 0x1000;
+ int curOp = -2;
+ int callDepth = 0;
+
+ extern int otherTraceLevel;
+ void addNewNamespaceToCatalog(const char *ns, BSONObj *options = 0);
+
+ string getDbContext() {
+ stringstream ss;
+ if ( database ) {
+ ss << database->name << ' ';
+ if ( curNs )
+ ss << curNs << ' ';
+ }
+ ss<< "op:" << curOp << ' ' << callDepth;
+ return ss.str();
}
- if ( verbose )
- log() << "create collection " << ns << ' ' << j.toString() << '\n';
+ BSONObj::BSONObj(Record *r) {
+ init(r->data, false);
+ /*
+ _objdata = r->data;
+ _objsize = *((int*) _objdata);
+ if( _objsize > r->netLength() ) {
+ out() << "About to assert fail _objsize <= r->netLength()" << endl;
+ out() << " _objsize: " << _objsize << endl;
+ out() << " netLength(): " << r->netLength() << endl;
+ out() << " extentOfs: " << r->extentOfs << endl;
+ out() << " nextOfs: " << r->nextOfs << endl;
+ out() << " prevOfs: " << r->prevOfs << endl;
+ assert( _objsize <= r->netLength() );
+ }
+ iFree = false;
+ */
+ }
- /* todo: do this only when we have allocated space successfully? or we could insert with a { ok: 0 } field
- and then go back and set to ok : 1 after we are done.
- */
- addNewNamespaceToCatalog(ns, j.isEmpty() ? 0 : &j);
-
- long long size = initialExtentSize(128);
- BSONElement e = j.findElement("size");
- if ( e.isNumber() ) {
- size = (long long) e.number();
- size += 256;
- size &= 0xffffffffffffff00LL;
+ /*---------------------------------------------------------------------*/
+
+ int initialExtentSize(int len) {
+ long long sz = len * 16;
+ if ( len < 1000 ) sz = len * 64;
+ if ( sz > 1000000000 )
+ sz = 1000000000;
+ int z = ((int)sz) & 0xffffff00;
+ assert( z > len );
+ DEV log() << "initialExtentSize(" << len << ") returns " << z << endl;
+ return z;
}
- bool newCapped = false;
- int mx = 0;
- e = j.findElement("capped");
- if ( e.type() == Bool && e.boolean() ) {
- newCapped = true;
- e = j.findElement("max");
- if ( e.isNumber() ) {
- mx = (int) e.number();
+ bool _userCreateNS(const char *ns, BSONObj& j, string& err) {
+ if ( nsdetails(ns) ) {
+ err = "collection already exists";
+ return false;
}
- }
- // $nExtents just for debug/testing. We create '$nExtents' extents,
- // each of size 'size'.
- e = j.findElement( "$nExtents" );
- int nExtents = int( e.number() );
- if ( nExtents > 0 )
- for ( int i = 0; i < nExtents; ++i ) {
- database->suitableFile(size)->newExtent( ns, size, newCapped );
+ if ( verbose )
+ log() << "create collection " << ns << ' ' << j.toString() << '\n';
+
+ /* todo: do this only when we have allocated space successfully? or we could insert with a { ok: 0 } field
+ and then go back and set to ok : 1 after we are done.
+ */
+ addNewNamespaceToCatalog(ns, j.isEmpty() ? 0 : &j);
+
+ long long size = initialExtentSize(128);
+ BSONElement e = j.findElement("size");
+ if ( e.isNumber() ) {
+ size = (long long) e.number();
+ size += 256;
+ size &= 0xffffffffffffff00LL;
}
- else
- while ( size > 0 ) {
- int max = PhysicalDataFile::maxSize() - PDFHeader::headerSize();
- int desiredExtentSize = size > max ? max : size;
- Extent *e = database->suitableFile( desiredExtentSize )->newExtent( ns, desiredExtentSize, newCapped );
- size -= e->length;
+
+ bool newCapped = false;
+ int mx = 0;
+ e = j.findElement("capped");
+ if ( e.type() == Bool && e.boolean() ) {
+ newCapped = true;
+ e = j.findElement("max");
+ if ( e.isNumber() ) {
+ mx = (int) e.number();
+ }
}
- NamespaceDetails *d = nsdetails(ns);
- assert(d);
+ // $nExtents just for debug/testing. We create '$nExtents' extents,
+ // each of size 'size'.
+ e = j.findElement( "$nExtents" );
+ int nExtents = int( e.number() );
+ if ( nExtents > 0 )
+ for ( int i = 0; i < nExtents; ++i ) {
+ database->suitableFile(size)->newExtent( ns, size, newCapped );
+ }
+ else
+ while ( size > 0 ) {
+ int max = PhysicalDataFile::maxSize() - PDFHeader::headerSize();
+ int desiredExtentSize = size > max ? max : size;
+ Extent *e = database->suitableFile( desiredExtentSize )->newExtent( ns, desiredExtentSize, newCapped );
+ size -= e->length;
+ }
- if ( mx > 0 )
- d->max = mx;
+ NamespaceDetails *d = nsdetails(ns);
+ assert(d);
- return true;
-}
+ if ( mx > 0 )
+ d->max = mx;
+
+ return true;
+ }
// { ..., capped: true, size: ..., max: ... }
// returns true if successful
-bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForReplication) {
- j.validateEmpty();
- bool ok = _userCreateNS(ns, j, err);
- if ( logForReplication && ok )
- logOp("c", ns, j);
- return ok;
-}
-
-/*---------------------------------------------------------------------*/
-
-int PhysicalDataFile::maxSize() {
- if ( sizeof( int* ) == 4 )
- return 512 * 1024 * 1024;
- else
- return 0x7ff00000;
-}
-
-int PhysicalDataFile::defaultSize( const char *filename ) const {
- int size;
-
- if ( fileNo <= 4 )
- size = (64*1024*1024) << fileNo;
- else
- size = 0x7ff00000;
-
- if ( strstr(filename, "_hudsonSmall") ) {
- int mult = 1;
- if ( fileNo > 1 && fileNo < 1000 )
- mult = fileNo;
- size = 1024 * 512 * mult;
- log() << "Warning : using small files for _hudsonSmall" << endl;
+ bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForReplication) {
+ j.validateEmpty();
+ bool ok = _userCreateNS(ns, j, err);
+ if ( logForReplication && ok )
+ logOp("c", ns, j);
+ return ok;
}
- return size;
-}
+ /*---------------------------------------------------------------------*/
-void PhysicalDataFile::open( const char *filename, int minSize ) {
- {
- /* check quotas
- very simple temporary implementation - we will in future look up
- the quota from the grid database
- */
- if ( quota && fileNo > 8 && !boost::filesystem::exists(filename) ) {
- /* todo: if we were adding / changing keys in an index did we do some
- work previously that needs cleaning up? Possible. We should
- check code like that and have it catch the exception and do
- something reasonable.
- */
- string s = "db disk space quota exceeded ";
- if ( database )
- s += database->name;
- uasserted(s.c_str());
- }
+ int PhysicalDataFile::maxSize() {
+ if ( sizeof( int* ) == 4 )
+ return 512 * 1024 * 1024;
+ else
+ return 0x7ff00000;
}
- int size = defaultSize( filename );
- while ( size < minSize ) {
- if ( size < maxSize() / 2 )
- size *= 2;
- else {
- size = maxSize();
- break;
+ int PhysicalDataFile::defaultSize( const char *filename ) const {
+ int size;
+
+ if ( fileNo <= 4 )
+ size = (64*1024*1024) << fileNo;
+ else
+ size = 0x7ff00000;
+
+ if ( strstr(filename, "_hudsonSmall") ) {
+ int mult = 1;
+ if ( fileNo > 1 && fileNo < 1000 )
+ mult = fileNo;
+ size = 1024 * 512 * mult;
+ log() << "Warning : using small files for _hudsonSmall" << endl;
}
+
+ return size;
}
- if ( size > maxSize() )
- size = maxSize();
-
- assert( ( size >= 64*1024*1024 ) || ( strstr( filename, "_hudsonSmall" ) ) );
- assert( size % 4096 == 0 );
-
- header = (PDFHeader *) mmf.map(filename, size);
- uassert("can't map file memory", header);
- // If opening an existing file, this is a no-op.
- header->init(fileNo, size);
-}
-
-/* prev - previous extent for this namespace. null=this is the first one. */
-Extent* PhysicalDataFile::newExtent(const char *ns, int approxSize, bool newCapped, int loops) {
- assert( approxSize >= 0 && approxSize <= 0x7ff00000 );
-
- assert( header ); // null if file open failed
- int ExtentSize = approxSize <= header->unusedLength ? approxSize : header->unusedLength;
- DiskLoc loc;
- if ( ExtentSize <= 0 ) {
- /* not there could be a lot of looping here is db just started and
- no files are open yet. we might want to do something about that. */
- if ( loops > 8 ) {
- assert( loops < 10000 );
- cout << "warning: loops=" << loops << " fileno:" << fileNo << ' ' << ns << '\n';
+
+ void PhysicalDataFile::open( const char *filename, int minSize ) {
+ {
+ /* check quotas
+ very simple temporary implementation - we will in future look up
+ the quota from the grid database
+ */
+ if ( quota && fileNo > 8 && !boost::filesystem::exists(filename) ) {
+ /* todo: if we were adding / changing keys in an index did we do some
+ work previously that needs cleaning up? Possible. We should
+ check code like that and have it catch the exception and do
+ something reasonable.
+ */
+ string s = "db disk space quota exceeded ";
+ if ( database )
+ s += database->name;
+ uasserted(s.c_str());
+ }
+ }
+
+ int size = defaultSize( filename );
+ while ( size < minSize ) {
+ if ( size < maxSize() / 2 )
+ size *= 2;
+ else {
+ size = maxSize();
+ break;
+ }
}
- log() << "newExtent: " << ns << " file " << fileNo << " full, adding a new file\n";
- return database->addAFile()->newExtent(ns, approxSize, newCapped, loops+1);
+ if ( size > maxSize() )
+ size = maxSize();
+
+ assert( ( size >= 64*1024*1024 ) || ( strstr( filename, "_hudsonSmall" ) ) );
+ assert( size % 4096 == 0 );
+
+ header = (PDFHeader *) mmf.map(filename, size);
+ uassert("can't map file memory", header);
+ // If opening an existing file, this is a no-op.
+ header->init(fileNo, size);
}
- int offset = header->unused.getOfs();
- header->unused.setOfs( fileNo, offset + ExtentSize );
- header->unusedLength -= ExtentSize;
- loc.setOfs(fileNo, offset);
- Extent *e = _getExtent(loc);
- DiskLoc emptyLoc = e->init(ns, ExtentSize, fileNo, offset);
-
- DiskLoc oldExtentLoc;
- NamespaceIndex *ni = nsindex(ns);
- NamespaceDetails *details = ni->details(ns);
- if ( details ) {
- assert( !details->firstExtent.isNull() );
- e->xprev = details->lastExtent;
- details->lastExtent.ext()->xnext = loc;
- details->lastExtent = loc;
+
+ /* prev - previous extent for this namespace. null=this is the first one. */
+ Extent* PhysicalDataFile::newExtent(const char *ns, int approxSize, bool newCapped, int loops) {
+ assert( approxSize >= 0 && approxSize <= 0x7ff00000 );
+
+ assert( header ); // null if file open failed
+ int ExtentSize = approxSize <= header->unusedLength ? approxSize : header->unusedLength;
+ DiskLoc loc;
+ if ( ExtentSize <= 0 ) {
+ /* not there could be a lot of looping here is db just started and
+ no files are open yet. we might want to do something about that. */
+ if ( loops > 8 ) {
+ assert( loops < 10000 );
+ out() << "warning: loops=" << loops << " fileno:" << fileNo << ' ' << ns << '\n';
+ }
+ log() << "newExtent: " << ns << " file " << fileNo << " full, adding a new file\n";
+ return database->addAFile()->newExtent(ns, approxSize, newCapped, loops+1);
+ }
+ int offset = header->unused.getOfs();
+ header->unused.setOfs( fileNo, offset + ExtentSize );
+ header->unusedLength -= ExtentSize;
+ loc.setOfs(fileNo, offset);
+ Extent *e = _getExtent(loc);
+ DiskLoc emptyLoc = e->init(ns, ExtentSize, fileNo, offset);
+
+ DiskLoc oldExtentLoc;
+ NamespaceIndex *ni = nsindex(ns);
+ NamespaceDetails *details = ni->details(ns);
+ if ( details ) {
+ assert( !details->firstExtent.isNull() );
+ e->xprev = details->lastExtent;
+ details->lastExtent.ext()->xnext = loc;
+ details->lastExtent = loc;
+ }
+ else {
+ ni->add(ns, loc, newCapped);
+ details = ni->details(ns);
+ }
+
+ details->lastExtentSize = approxSize;
+ DEBUGGING out() << "temp: newextent adddelrec " << ns << endl;
+ details->addDeletedRec(emptyLoc.drec(), emptyLoc);
+
+ DEV log() << "new extent " << ns << " size: 0x" << hex << ExtentSize << " loc: 0x" << hex << offset
+ << " emptyLoc:" << hex << emptyLoc.getOfs() << dec << endl;
+ return e;
}
- else {
- ni->add(ns, loc, newCapped);
- details = ni->details(ns);
+
+ /*---------------------------------------------------------------------*/
+
+ /* assumes already zeroed -- insufficient for block 'reuse' perhaps */
+ DiskLoc Extent::init(const char *nsname, int _length, int _fileNo, int _offset) {
+ magic = 0x41424344;
+ myLoc.setOfs(_fileNo, _offset);
+ xnext.Null();
+ xprev.Null();
+ ns = nsname;
+ length = _length;
+ firstRecord.Null();
+ lastRecord.Null();
+
+ DiskLoc emptyLoc = myLoc;
+ emptyLoc.inc( (extentData-(char*)this) );
+
+ DeletedRecord *empty1 = (DeletedRecord *) extentData;
+ DeletedRecord *empty = (DeletedRecord *) getRecord(emptyLoc);
+ assert( empty == empty1 );
+ empty->lengthWithHeaders = _length - (extentData - (char *) this);
+ empty->extentOfs = myLoc.getOfs();
+ return emptyLoc;
}
- details->lastExtentSize = approxSize;
- DEBUGGING cout << "temp: newextent adddelrec " << ns << endl;
- details->addDeletedRec(emptyLoc.drec(), emptyLoc);
-
- DEV log() << "new extent " << ns << " size: 0x" << hex << ExtentSize << " loc: 0x" << hex << offset
- << " emptyLoc:" << hex << emptyLoc.getOfs() << dec << endl;
- return e;
-}
-
-/*---------------------------------------------------------------------*/
-
-/* assumes already zeroed -- insufficient for block 'reuse' perhaps */
-DiskLoc Extent::init(const char *nsname, int _length, int _fileNo, int _offset) {
- magic = 0x41424344;
- myLoc.setOfs(_fileNo, _offset);
- xnext.Null();
- xprev.Null();
- ns = nsname;
- length = _length;
- firstRecord.Null();
- lastRecord.Null();
-
- DiskLoc emptyLoc = myLoc;
- emptyLoc.inc( (extentData-(char*)this) );
-
- DeletedRecord *empty1 = (DeletedRecord *) extentData;
- DeletedRecord *empty = (DeletedRecord *) getRecord(emptyLoc);
- assert( empty == empty1 );
- empty->lengthWithHeaders = _length - (extentData - (char *) this);
- empty->extentOfs = myLoc.getOfs();
- return emptyLoc;
-}
+ /*
+ Record* Extent::newRecord(int len) {
+ if( firstEmptyRegion.isNull() )
+ return 0;
+
+ assert(len > 0);
+ int newRecSize = len + Record::HeaderSize;
+ DiskLoc newRecordLoc = firstEmptyRegion;
+ Record *r = getRecord(newRecordLoc);
+ int left = r->netLength() - len;
+ if( left < 0 ) {
+ //
+ firstEmptyRegion.Null();
+ return 0;
+ }
-/*
-Record* Extent::newRecord(int len) {
- if( firstEmptyRegion.isNull() )
- return 0;
-
- assert(len > 0);
- int newRecSize = len + Record::HeaderSize;
- DiskLoc newRecordLoc = firstEmptyRegion;
- Record *r = getRecord(newRecordLoc);
- int left = r->netLength() - len;
- if( left < 0 ) {
- //
- firstEmptyRegion.Null();
- return 0;
- }
-
- DiskLoc nextEmpty = r->next.getNextEmpty(firstEmptyRegion);
- r->lengthWithHeaders = newRecSize;
- r->next.markAsFirstOrLastInExtent(this); // we're now last in the extent
- if( !lastRecord.isNull() ) {
- assert(getRecord(lastRecord)->next.lastInExtent()); // it was the last one
- getRecord(lastRecord)->next.set(newRecordLoc); // until now
- r->prev.set(lastRecord);
- }
- else {
- r->prev.markAsFirstOrLastInExtent(this); // we are the first in the extent
- assert( firstRecord.isNull() );
- firstRecord = newRecordLoc;
- }
- lastRecord = newRecordLoc;
-
- if( left < Record::HeaderSize + 32 ) {
- firstEmptyRegion.Null();
- }
- else {
- firstEmptyRegion.inc(newRecSize);
- Record *empty = getRecord(firstEmptyRegion);
- empty->next.set(nextEmpty); // not for empty records, unless in-use records, next and prev can be null.
- empty->prev.Null();
- empty->lengthWithHeaders = left;
- }
-
- return r;
-}
-*/
+ DiskLoc nextEmpty = r->next.getNextEmpty(firstEmptyRegion);
+ r->lengthWithHeaders = newRecSize;
+ r->next.markAsFirstOrLastInExtent(this); // we're now last in the extent
+ if( !lastRecord.isNull() ) {
+ assert(getRecord(lastRecord)->next.lastInExtent()); // it was the last one
+ getRecord(lastRecord)->next.set(newRecordLoc); // until now
+ r->prev.set(lastRecord);
+ }
+ else {
+ r->prev.markAsFirstOrLastInExtent(this); // we are the first in the extent
+ assert( firstRecord.isNull() );
+ firstRecord = newRecordLoc;
+ }
+ lastRecord = newRecordLoc;
-/*---------------------------------------------------------------------*/
+ if( left < Record::HeaderSize + 32 ) {
+ firstEmptyRegion.Null();
+ }
+ else {
+ firstEmptyRegion.inc(newRecSize);
+ Record *empty = getRecord(firstEmptyRegion);
+ empty->next.set(nextEmpty); // not for empty records, unless in-use records, next and prev can be null.
+ empty->prev.Null();
+ empty->lengthWithHeaders = left;
+ }
-auto_ptr<Cursor> DataFileMgr::findAll(const char *ns) {
- DiskLoc loc;
- bool found = nsindex(ns)->find(ns, loc);
- if ( !found ) {
- // cout << "info: findAll() namespace does not exist: " << ns << endl;
- return auto_ptr<Cursor>(new BasicCursor(DiskLoc()));
+ return r;
}
+ */
- Extent *e = getExtent(loc);
-
- DEBUGGING {
- cout << "listing extents for " << ns << endl;
- DiskLoc tmp = loc;
- set<DiskLoc> extents;
+ /*---------------------------------------------------------------------*/
- while ( 1 ) {
- Extent *f = getExtent(tmp);
- cout << "extent: " << tmp.toString() << endl;
- extents.insert(tmp);
- tmp = f->xnext;
- if ( tmp.isNull() )
- break;
- f = f->getNextExtent();
+ auto_ptr<Cursor> DataFileMgr::findAll(const char *ns) {
+ DiskLoc loc;
+ bool found = nsindex(ns)->find(ns, loc);
+ if ( !found ) {
+ // out() << "info: findAll() namespace does not exist: " << ns << endl;
+ return auto_ptr<Cursor>(new BasicCursor(DiskLoc()));
}
- cout << endl;
- nsdetails(ns)->dumpDeleted(&extents);
- }
+ Extent *e = getExtent(loc);
+
+ DEBUGGING {
+ out() << "listing extents for " << ns << endl;
+ DiskLoc tmp = loc;
+ set<DiskLoc> extents;
+
+ while ( 1 ) {
+ Extent *f = getExtent(tmp);
+ out() << "extent: " << tmp.toString() << endl;
+ extents.insert(tmp);
+ tmp = f->xnext;
+ if ( tmp.isNull() )
+ break;
+ f = f->getNextExtent();
+ }
- if ( !nsdetails( ns )->capped ) {
- while ( e->firstRecord.isNull() && !e->xnext.isNull() ) {
- /* todo: if extent is empty, free it for reuse elsewhere.
- that is a bit complicated have to clean up the freelists.
- */
- RARELY cout << "info DFM::findAll(): extent " << loc.toString() << " was empty, skipping ahead " << ns << endl;
- // find a nonempty extent
- // it might be nice to free the whole extent here! but have to clean up free recs then.
- e = e->getNextExtent();
+ out() << endl;
+ nsdetails(ns)->dumpDeleted(&extents);
}
- return auto_ptr<Cursor>(new BasicCursor( e->firstRecord ));
- } else {
- return auto_ptr< Cursor >( new ForwardCappedCursor( nsdetails( ns ) ) );
- }
-}
-/* get a table scan cursor, but can be forward or reverse direction.
- order.$natural - if set, > 0 means forward (asc), < 0 backward (desc).
-*/
-auto_ptr<Cursor> findTableScan(const char *ns, const BSONObj& order, bool *isSorted) {
- BSONElement el = order.findElement("$natural"); // e.g., { $natural : -1 }
- if ( !el.eoo() && isSorted )
- *isSorted = true;
-
- if ( el.number() >= 0 )
- return DataFileMgr::findAll(ns);
-
- // "reverse natural order"
- NamespaceDetails *d = nsdetails(ns);
- if ( !d )
- return auto_ptr<Cursor>(new BasicCursor(DiskLoc()));
- if ( !d->capped ) {
- Extent *e = d->lastExtent.ext();
- while ( e->lastRecord.isNull() && !e->xprev.isNull() ) {
- OCCASIONALLY cout << " findTableScan: extent empty, skipping ahead" << endl;
- e = e->getPrevExtent();
+ if ( !nsdetails( ns )->capped ) {
+ while ( e->firstRecord.isNull() && !e->xnext.isNull() ) {
+ /* todo: if extent is empty, free it for reuse elsewhere.
+ that is a bit complicated have to clean up the freelists.
+ */
+ RARELY out() << "info DFM::findAll(): extent " << loc.toString() << " was empty, skipping ahead " << ns << endl;
+ // find a nonempty extent
+ // it might be nice to free the whole extent here! but have to clean up free recs then.
+ e = e->getNextExtent();
+ }
+ return auto_ptr<Cursor>(new BasicCursor( e->firstRecord ));
+ } else {
+ return auto_ptr< Cursor >( new ForwardCappedCursor( nsdetails( ns ) ) );
}
- return auto_ptr<Cursor>(new ReverseCursor( e->lastRecord ));
- } else {
- return auto_ptr< Cursor >( new ReverseCappedCursor( d ) );
}
-}
-
-void aboutToDelete(const DiskLoc& dl);
-/* drop a collection/namespace */
-void dropNS(string& nsToDrop) {
- assert( strstr(nsToDrop.c_str(), ".system.") == 0 );
- {
- // remove from the system catalog
- BSONObjBuilder b;
- b.append("name", nsToDrop.c_str());
- BSONObj cond = b.done(); // { name: "colltodropname" }
- string system_namespaces = database->name + ".system.namespaces";
- int n = deleteObjects(system_namespaces.c_str(), cond, false, true);
- wassert( n == 1 );
- }
- // remove from the catalog hashtable
- database->namespaceIndex.kill(nsToDrop.c_str());
-}
+ /* get a table scan cursor, but can be forward or reverse direction.
+ order.$natural - if set, > 0 means forward (asc), < 0 backward (desc).
+ */
+ auto_ptr<Cursor> findTableScan(const char *ns, const BSONObj& order, bool *isSorted) {
+ BSONElement el = order.findElement("$natural"); // e.g., { $natural : -1 }
+ if ( !el.eoo() && isSorted )
+ *isSorted = true;
-/* delete this index. does NOT clean up the system catalog
- (system.indexes or system.namespaces) -- only NamespaceIndex.
-*/
-void IndexDetails::kill() {
- string ns = indexNamespace(); // e.g. foo.coll.$ts_1
+ if ( el.number() >= 0 )
+ return DataFileMgr::findAll(ns);
- {
- // clean up in system.indexes
- BSONObjBuilder b;
- b.append("name", indexName().c_str());
- b.append("ns", parentNS().c_str());
- BSONObj cond = b.done(); // e.g.: { name: "ts_1", ns: "foo.coll" }
- string system_indexes = database->name + ".system.indexes";
- int n = deleteObjects(system_indexes.c_str(), cond, false, true);
- wassert( n == 1 );
+ // "reverse natural order"
+ NamespaceDetails *d = nsdetails(ns);
+ if ( !d )
+ return auto_ptr<Cursor>(new BasicCursor(DiskLoc()));
+ if ( !d->capped ) {
+ Extent *e = d->lastExtent.ext();
+ while ( e->lastRecord.isNull() && !e->xprev.isNull() ) {
+ OCCASIONALLY out() << " findTableScan: extent empty, skipping ahead" << endl;
+ e = e->getPrevExtent();
+ }
+ return auto_ptr<Cursor>(new ReverseCursor( e->lastRecord ));
+ } else {
+ return auto_ptr< Cursor >( new ReverseCappedCursor( d ) );
+ }
}
- dropNS(ns);
- // database->namespaceIndex.kill(ns.c_str());
- head.setInvalid();
- info.setInvalid();
-}
-
-/* Pull out the relevant key objects from obj, so we
- can index them. Note that the set is multiple elements
- only when it's a "multikey" array.
- Keys will be left empty if key not found in the object.
-*/
-void IndexDetails::getKeysFromObject( const BSONObj& obj, BSONObjSetDefaultOrder& keys) const {
- BSONObj keyPattern = info.obj().getObjectField("key"); // e.g., keyPattern == { ts : 1 }
- if ( keyPattern.objsize() == 0 ) {
- cout << keyPattern.toString() << endl;
- cout << info.obj().toString() << endl;
- assert(false);
- }
- BSONObjBuilder b;
- const char *nameWithinArray;
- BSONObj key = obj.extractFieldsDotted(keyPattern, b, nameWithinArray);
- if ( key.isEmpty() )
- return;
- BSONObjIterator keyIter( key );
- BSONElement arrayElt;
- int arrayPos = -1;
- for ( int i = 0; keyIter.more(); ++i ) {
- BSONElement e = keyIter.next();
- if ( e.eoo() )
- break;
- if ( e.type() == Array ) {
- uassert( "Index cannot be created on parallel arrays.",
- arrayPos == -1 );
- arrayPos = i;
- arrayElt = e;
+ void aboutToDelete(const DiskLoc& dl);
+
+ /* drop a collection/namespace */
+ void dropNS(string& nsToDrop) {
+ assert( strstr(nsToDrop.c_str(), ".system.") == 0 );
+ {
+ // remove from the system catalog
+ BSONObjBuilder b;
+ b.append("name", nsToDrop.c_str());
+ BSONObj cond = b.done(); // { name: "colltodropname" }
+ string system_namespaces = database->name + ".system.namespaces";
+ int n = deleteObjects(system_namespaces.c_str(), cond, false, true);
+ wassert( n == 1 );
}
+ // remove from the catalog hashtable
+ database->namespaceIndex.kill(nsToDrop.c_str());
}
- if ( arrayPos == -1 ) {
- assert( strlen( nameWithinArray ) == 0 );
- BSONObjBuilder b;
- BSONObjIterator keyIter( key );
- while ( keyIter.more() ) {
- BSONElement f = keyIter.next();
- if ( f.eoo() )
- break;
- b.append( f );
+
+ /* delete this index. does NOT clean up the system catalog
+ (system.indexes or system.namespaces) -- only NamespaceIndex.
+ */
+ void IndexDetails::kill() {
+ string ns = indexNamespace(); // e.g. foo.coll.$ts_1
+
+ {
+ // clean up in system.indexes
+ BSONObjBuilder b;
+ b.append("name", indexName().c_str());
+ b.append("ns", parentNS().c_str());
+ BSONObj cond = b.done(); // e.g.: { name: "ts_1", ns: "foo.coll" }
+ string system_indexes = database->name + ".system.indexes";
+ int n = deleteObjects(system_indexes.c_str(), cond, false, true);
+ wassert( n == 1 );
}
- BSONObj o = b.doneAndDecouple();
- assert( !o.isEmpty() );
- keys.insert(o);
- return;
+
+ dropNS(ns);
+ // database->namespaceIndex.kill(ns.c_str());
+ head.setInvalid();
+ info.setInvalid();
}
- BSONObj arr = arrayElt.embeddedObject();
- BSONObjIterator arrIter(arr);
- while ( arrIter.more() ) {
- BSONElement e = arrIter.next();
- if ( e.eoo() )
- break;
-
- if ( strlen( nameWithinArray ) != 0 ) {
- e = e.embeddedObject().getFieldDotted( nameWithinArray );
- if ( e.eoo() )
- continue;
+
+ /* Pull out the relevant key objects from obj, so we
+ can index them. Note that the set is multiple elements
+ only when it's a "multikey" array.
+ Keys will be left empty if key not found in the object.
+ */
+ void IndexDetails::getKeysFromObject( const BSONObj& obj, BSONObjSetDefaultOrder& keys) const {
+ BSONObj keyPattern = info.obj().getObjectField("key"); // e.g., keyPattern == { ts : 1 }
+ if ( keyPattern.objsize() == 0 ) {
+ out() << keyPattern.toString() << endl;
+ out() << info.obj().toString() << endl;
+ assert(false);
}
BSONObjBuilder b;
+ const char *nameWithinArray;
+ BSONObj key = obj.extractFieldsDotted(keyPattern, b, nameWithinArray);
+ if ( key.isEmpty() )
+ return;
BSONObjIterator keyIter( key );
+ BSONElement arrayElt;
+ int arrayPos = -1;
for ( int i = 0; keyIter.more(); ++i ) {
- BSONElement f = keyIter.next();
- if ( f.eoo() )
+ BSONElement e = keyIter.next();
+ if ( e.eoo() )
break;
- if ( i != arrayPos )
+ if ( e.type() == Array ) {
+ uassert( "Index cannot be created on parallel arrays.",
+ arrayPos == -1 );
+ arrayPos = i;
+ arrayElt = e;
+ }
+ }
+ if ( arrayPos == -1 ) {
+ assert( strlen( nameWithinArray ) == 0 );
+ BSONObjBuilder b;
+ BSONObjIterator keyIter( key );
+ while ( keyIter.more() ) {
+ BSONElement f = keyIter.next();
+ if ( f.eoo() )
+ break;
b.append( f );
- else
- b.appendAs( e, "" );
+ }
+ BSONObj o = b.doneAndDecouple();
+ assert( !o.isEmpty() );
+ keys.insert(o);
+ return;
}
+ BSONObj arr = arrayElt.embeddedObject();
+ BSONObjIterator arrIter(arr);
+ while ( arrIter.more() ) {
+ BSONElement e = arrIter.next();
+ if ( e.eoo() )
+ break;
- BSONObj o = b.doneAndDecouple();
- assert( !o.isEmpty() );
- keys.insert(o);
- }
-}
-
-int nUnindexes = 0;
-
-void _unindexRecord(const char *ns, IndexDetails& id, BSONObj& obj, const DiskLoc& dl) {
- BSONObjSetDefaultOrder keys;
- id.getKeysFromObject(obj, keys);
- for ( set<BSONObj>::iterator i=keys.begin(); i != keys.end(); i++ ) {
- BSONObj j = *i;
-// cout << "UNINDEX: j:" << j.toString() << " head:" << id.head.toString() << dl.toString() << endl;
- if ( otherTraceLevel >= 5 ) {
- cout << "_unindexRecord() " << obj.toString();
- cout << "\n unindex:" << j.toString() << endl;
- }
- nUnindexes++;
- bool ok = false;
- try {
- ok = id.head.btree()->unindex(id.head, id, j, dl);
- }
- catch (AssertionException&) {
- problem() << "Assertion failure: _unindex failed " << id.indexNamespace() << endl;
- cout << "Assertion failure: _unindex failed" << '\n';
- cout << " obj:" << obj.toString() << '\n';
- cout << " key:" << j.toString() << '\n';
- cout << " dl:" << dl.toString() << endl;
- sayDbContext();
- }
+ if ( strlen( nameWithinArray ) != 0 ) {
+ e = e.embeddedObject().getFieldDotted( nameWithinArray );
+ if ( e.eoo() )
+ continue;
+ }
+ BSONObjBuilder b;
+ BSONObjIterator keyIter( key );
+ for ( int i = 0; keyIter.more(); ++i ) {
+ BSONElement f = keyIter.next();
+ if ( f.eoo() )
+ break;
+ if ( i != arrayPos )
+ b.append( f );
+ else
+ b.appendAs( e, "" );
+ }
- if ( !ok ) {
- cout << "unindex failed (key too big?) " << id.indexNamespace() << '\n';
+ BSONObj o = b.doneAndDecouple();
+ assert( !o.isEmpty() );
+ keys.insert(o);
}
}
-}
-
-/* unindex all keys in all indexes for this record. */
-void unindexRecord(const char *ns, NamespaceDetails *d, Record *todelete, const DiskLoc& dl) {
- if ( d->nIndexes == 0 ) return;
- BSONObj obj(todelete);
- for ( int i = 0; i < d->nIndexes; i++ ) {
- _unindexRecord(ns, d->indexes[i], obj, dl);
- }
-}
-void DataFileMgr::deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK)
-{
- dassert( todelete == dl.rec() );
+ int nUnindexes = 0;
+
+ void _unindexRecord(const char *ns, IndexDetails& id, BSONObj& obj, const DiskLoc& dl) {
+ BSONObjSetDefaultOrder keys;
+ id.getKeysFromObject(obj, keys);
+ for ( set<BSONObj>::iterator i=keys.begin(); i != keys.end(); i++ ) {
+ BSONObj j = *i;
+// out() << "UNINDEX: j:" << j.toString() << " head:" << id.head.toString() << dl.toString() << endl;
+ if ( otherTraceLevel >= 5 ) {
+ out() << "_unindexRecord() " << obj.toString();
+ out() << "\n unindex:" << j.toString() << endl;
+ }
+ nUnindexes++;
+ bool ok = false;
+ try {
+ ok = id.head.btree()->unindex(id.head, id, j, dl);
+ }
+ catch (AssertionException&) {
+ problem() << "Assertion failure: _unindex failed " << id.indexNamespace() << endl;
+ out() << "Assertion failure: _unindex failed" << '\n';
+ out() << " obj:" << obj.toString() << '\n';
+ out() << " key:" << j.toString() << '\n';
+ out() << " dl:" << dl.toString() << endl;
+ sayDbContext();
+ }
- NamespaceDetails* d = nsdetails(ns);
- if ( d->capped && !cappedOK ) {
- cout << "failing remove on a capped ns " << ns << endl;
- return;
+ if ( !ok ) {
+ out() << "unindex failed (key too big?) " << id.indexNamespace() << '\n';
+ }
+ }
}
- /* check if any cursors point to us. if so, advance them. */
- aboutToDelete(dl);
-
- unindexRecord(ns, d, todelete, dl);
-
- /* remove ourself from the record next/prev chain */
- {
- if ( todelete->prevOfs != DiskLoc::NullOfs )
- todelete->getPrev(dl).rec()->nextOfs = todelete->nextOfs;
- if ( todelete->nextOfs != DiskLoc::NullOfs )
- todelete->getNext(dl).rec()->prevOfs = todelete->prevOfs;
+ /* unindex all keys in all indexes for this record. */
+ void unindexRecord(const char *ns, NamespaceDetails *d, Record *todelete, const DiskLoc& dl) {
+ if ( d->nIndexes == 0 ) return;
+ BSONObj obj(todelete);
+ for ( int i = 0; i < d->nIndexes; i++ ) {
+ _unindexRecord(ns, d->indexes[i], obj, dl);
+ }
}
- /* remove ourself from extent pointers */
+ void DataFileMgr::deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK)
{
- Extent *e = todelete->myExtent(dl);
- if ( e->firstRecord == dl ) {
- if ( todelete->nextOfs == DiskLoc::NullOfs )
- e->firstRecord.Null();
- else
- e->firstRecord.setOfs(dl.a(), todelete->nextOfs);
+ dassert( todelete == dl.rec() );
+
+ NamespaceDetails* d = nsdetails(ns);
+ if ( d->capped && !cappedOK ) {
+ out() << "failing remove on a capped ns " << ns << endl;
+ return;
}
- if ( e->lastRecord == dl ) {
- if ( todelete->prevOfs == DiskLoc::NullOfs )
- e->lastRecord.Null();
- else
- e->lastRecord.setOfs(dl.a(), todelete->prevOfs);
+
+ /* check if any cursors point to us. if so, advance them. */
+ aboutToDelete(dl);
+
+ unindexRecord(ns, d, todelete, dl);
+
+ /* remove ourself from the record next/prev chain */
+ {
+ if ( todelete->prevOfs != DiskLoc::NullOfs )
+ todelete->getPrev(dl).rec()->nextOfs = todelete->nextOfs;
+ if ( todelete->nextOfs != DiskLoc::NullOfs )
+ todelete->getNext(dl).rec()->prevOfs = todelete->prevOfs;
}
- }
- /* add to the free list */
- {
- d->nrecords--;
- d->datasize -= todelete->netLength();
- /* temp: if in system.indexes, don't reuse, and zero out: we want to be
- careful until validated more, as IndexDetails has pointers
- to this disk location. so an incorrectly done remove would cause
- a lot of problems.
- */
- if ( strstr(ns, ".system.indexes") ) {
- memset(todelete, 0, todelete->lengthWithHeaders);
+ /* remove ourself from extent pointers */
+ {
+ Extent *e = todelete->myExtent(dl);
+ if ( e->firstRecord == dl ) {
+ if ( todelete->nextOfs == DiskLoc::NullOfs )
+ e->firstRecord.Null();
+ else
+ e->firstRecord.setOfs(dl.a(), todelete->nextOfs);
+ }
+ if ( e->lastRecord == dl ) {
+ if ( todelete->prevOfs == DiskLoc::NullOfs )
+ e->lastRecord.Null();
+ else
+ e->lastRecord.setOfs(dl.a(), todelete->prevOfs);
+ }
}
- else {
- DEV memset(todelete->data, 0, todelete->netLength()); // attempt to notice invalid reuse.
- d->addDeletedRec((DeletedRecord*)todelete, dl);
+
+ /* add to the free list */
+ {
+ d->nrecords--;
+ d->datasize -= todelete->netLength();
+ /* temp: if in system.indexes, don't reuse, and zero out: we want to be
+ careful until validated more, as IndexDetails has pointers
+ to this disk location. so an incorrectly done remove would cause
+ a lot of problems.
+ */
+ if ( strstr(ns, ".system.indexes") ) {
+ memset(todelete, 0, todelete->lengthWithHeaders);
+ }
+ else {
+ DEV memset(todelete->data, 0, todelete->netLength()); // attempt to notice invalid reuse.
+ d->addDeletedRec((DeletedRecord*)todelete, dl);
+ }
}
}
-}
-
-void setDifference(BSONObjSetDefaultOrder &l, BSONObjSetDefaultOrder &r, vector<BSONObj*> &diff) {
- BSONObjSetDefaultOrder::iterator i = l.begin();
- BSONObjSetDefaultOrder::iterator j = r.begin();
- while ( 1 ) {
- if ( i == l.end() )
- break;
- while ( j != r.end() && j->woCompare( *i ) < 0 )
- j++;
- if ( j == r.end() || i->woCompare(*j) != 0 ) {
- const BSONObj *jo = &*i;
- diff.push_back( (BSONObj *) jo );
+
+ void setDifference(BSONObjSetDefaultOrder &l, BSONObjSetDefaultOrder &r, vector<BSONObj*> &diff) {
+ BSONObjSetDefaultOrder::iterator i = l.begin();
+ BSONObjSetDefaultOrder::iterator j = r.begin();
+ while ( 1 ) {
+ if ( i == l.end() )
+ break;
+ while ( j != r.end() && j->woCompare( *i ) < 0 )
+ j++;
+ if ( j == r.end() || i->woCompare(*j) != 0 ) {
+ const BSONObj *jo = &*i;
+ diff.push_back( (BSONObj *) jo );
+ }
+ i++;
}
- i++;
}
-}
-/** Note: as written so far, if the object shrinks a lot, we don't free up space. */
-void DataFileMgr::update(
- const char *ns,
- Record *toupdate, const DiskLoc& dl,
- const char *buf, int len, stringstream& ss)
-{
- dassert( toupdate == dl.rec() );
+ /** Note: as written so far, if the object shrinks a lot, we don't free up space. */
+ void DataFileMgr::update(
+ const char *ns,
+ Record *toupdate, const DiskLoc& dl,
+ const char *buf, int len, stringstream& ss)
+ {
+ dassert( toupdate == dl.rec() );
- NamespaceDetails *d = nsdetails(ns);
+ NamespaceDetails *d = nsdetails(ns);
- if ( toupdate->netLength() < len ) {
- // doesn't fit. must reallocate.
+ if ( toupdate->netLength() < len ) {
+ // doesn't fit. must reallocate.
- if ( d && d->capped ) {
- ss << " failing a growing update on a capped ns " << ns << endl;
+ if ( d && d->capped ) {
+ ss << " failing a growing update on a capped ns " << ns << endl;
+ return;
+ }
+
+ d->paddingTooSmall();
+ if ( database->profile )
+ ss << " moved ";
+ deleteRecord(ns, toupdate, dl);
+ insert(ns, buf, len);
return;
}
- d->paddingTooSmall();
- if ( database->profile )
- ss << " moved ";
- deleteRecord(ns, toupdate, dl);
- insert(ns, buf, len);
- return;
- }
-
- d->paddingFits();
-
- /* has any index keys changed? */
- {
- NamespaceDetails *d = nsdetails(ns);
- if ( d->nIndexes ) {
- BSONObj newObj(buf);
- BSONObj oldObj = dl.obj();
- for ( int i = 0; i < d->nIndexes; i++ ) {
- IndexDetails& idx = d->indexes[i];
- BSONObj idxKey = idx.info.obj().getObjectField("key");
-
- BSONObjSetDefaultOrder oldkeys;
- BSONObjSetDefaultOrder newkeys;
- idx.getKeysFromObject(oldObj, oldkeys);
- idx.getKeysFromObject(newObj, newkeys);
- vector<BSONObj*> removed;
- setDifference(oldkeys, newkeys, removed);
- string idxns = idx.indexNamespace();
- for ( unsigned i = 0; i < removed.size(); i++ ) {
- try {
- idx.head.btree()->unindex(idx.head, idx, *removed[i], dl);
+ d->paddingFits();
+
+ /* has any index keys changed? */
+ {
+ NamespaceDetails *d = nsdetails(ns);
+ if ( d->nIndexes ) {
+ BSONObj newObj(buf);
+ BSONObj oldObj = dl.obj();
+ for ( int i = 0; i < d->nIndexes; i++ ) {
+ IndexDetails& idx = d->indexes[i];
+ BSONObj idxKey = idx.info.obj().getObjectField("key");
+
+ BSONObjSetDefaultOrder oldkeys;
+ BSONObjSetDefaultOrder newkeys;
+ idx.getKeysFromObject(oldObj, oldkeys);
+ idx.getKeysFromObject(newObj, newkeys);
+ vector<BSONObj*> removed;
+ setDifference(oldkeys, newkeys, removed);
+ string idxns = idx.indexNamespace();
+ for ( unsigned i = 0; i < removed.size(); i++ ) {
+ try {
+ idx.head.btree()->unindex(idx.head, idx, *removed[i], dl);
+ }
+ catch (AssertionException&) {
+ ss << " exception update unindex ";
+ problem() << " caught assertion update unindex " << idxns.c_str() << endl;
+ }
}
- catch (AssertionException&) {
- ss << " exception update unindex ";
- problem() << " caught assertion update unindex " << idxns.c_str() << endl;
+ vector<BSONObj*> added;
+ setDifference(newkeys, oldkeys, added);
+ assert( !dl.isNull() );
+ for ( unsigned i = 0; i < added.size(); i++ ) {
+ try {
+ idx.head.btree()->insert(
+ idx.head,
+ dl, *added[i], idxKey, false, idx, true);
+ }
+ catch (AssertionException&) {
+ ss << " exception update index ";
+ out() << " caught assertion update index " << idxns.c_str() << '\n';
+ problem() << " caught assertion update index " << idxns.c_str() << endl;
+ }
}
- }
- vector<BSONObj*> added;
- setDifference(newkeys, oldkeys, added);
- assert( !dl.isNull() );
- for ( unsigned i = 0; i < added.size(); i++ ) {
- try {
- idx.head.btree()->insert(
- idx.head,
- dl, *added[i], idxKey, false, idx, true);
- }
- catch (AssertionException&) {
- ss << " exception update index ";
- cout << " caught assertion update index " << idxns.c_str() << '\n';
- problem() << " caught assertion update index " << idxns.c_str() << endl;
- }
- }
- if ( database->profile )
- ss << "<br>" << added.size() << " key updates ";
+ if ( database->profile )
+ ss << "<br>" << added.size() << " key updates ";
+ }
}
}
+
+ // update in place
+ memcpy(toupdate->data, buf, len);
}
- // update in place
- memcpy(toupdate->data, buf, len);
-}
-
-int followupExtentSize(int len, int lastExtentLen) {
- int x = initialExtentSize(len);
- int y = (int) (lastExtentLen < 4000000 ? lastExtentLen * 4.0 : lastExtentLen * 1.2);
- int sz = y > x ? y : x;
- sz = ((int)sz) & 0xffffff00;
- assert( sz > len );
- return sz;
-}
-
-int deb=0;
-
-/* add keys to indexes for a new record */
-void _indexRecord(IndexDetails& idx, BSONObj& obj, DiskLoc newRecordLoc) {
-
- BSONObjSetDefaultOrder keys;
- idx.getKeysFromObject(obj, keys);
- BSONObj order = idx.keyPattern();
- for ( set<BSONObj>::iterator i=keys.begin(); i != keys.end(); i++ ) {
- assert( !newRecordLoc.isNull() );
- try {
- idx.head.btree()->insert(idx.head, newRecordLoc,
- (BSONObj&) *i, order, false, idx, true);
- }
- catch (AssertionException&) {
- problem() << " caught assertion _indexRecord " << idx.indexNamespace() << endl;
+ int followupExtentSize(int len, int lastExtentLen) {
+ int x = initialExtentSize(len);
+ int y = (int) (lastExtentLen < 4000000 ? lastExtentLen * 4.0 : lastExtentLen * 1.2);
+ int sz = y > x ? y : x;
+ sz = ((int)sz) & 0xffffff00;
+ assert( sz > len );
+ return sz;
+ }
+
+ int deb=0;
+
+ /* add keys to indexes for a new record */
+ void _indexRecord(IndexDetails& idx, BSONObj& obj, DiskLoc newRecordLoc) {
+
+ BSONObjSetDefaultOrder keys;
+ idx.getKeysFromObject(obj, keys);
+ BSONObj order = idx.keyPattern();
+ for ( set<BSONObj>::iterator i=keys.begin(); i != keys.end(); i++ ) {
+ assert( !newRecordLoc.isNull() );
+ try {
+ idx.head.btree()->insert(idx.head, newRecordLoc,
+ (BSONObj&) *i, order, false, idx, true);
+ }
+ catch (AssertionException&) {
+ problem() << " caught assertion _indexRecord " << idx.indexNamespace() << endl;
+ }
}
}
-}
-
-/* note there are faster ways to build an index in bulk, that can be
- done eventually */
-void addExistingToIndex(const char *ns, IndexDetails& idx) {
- Timer t;
- Logstream& l = log();
- l << "building new index for " << ns << "...";
- l.flush();
- int n = 0;
- auto_ptr<Cursor> c = theDataFileMgr.findAll(ns);
- while ( c->ok() ) {
- BSONObj js = c->current();
- _indexRecord(idx, js, c->currLoc());
- c->advance();
- n++;
- };
- l << "done for " << n << " records" << endl;
-}
-
-/* add keys to indexes for a new record */
-void indexRecord(NamespaceDetails *d, const void *buf, int len, DiskLoc newRecordLoc) {
- BSONObj obj((const char *)buf);
- for ( int i = 0; i < d->nIndexes; i++ ) {
- _indexRecord(d->indexes[i], obj, newRecordLoc);
+
+ /* note there are faster ways to build an index in bulk, that can be
+ done eventually */
+ void addExistingToIndex(const char *ns, IndexDetails& idx) {
+ Timer t;
+ Logstream& l = log();
+ l << "building new index for " << ns << "...";
+ l.flush();
+ int n = 0;
+ auto_ptr<Cursor> c = theDataFileMgr.findAll(ns);
+ while ( c->ok() ) {
+ BSONObj js = c->current();
+ _indexRecord(idx, js, c->currLoc());
+ c->advance();
+ n++;
+ };
+ l << "done for " << n << " records" << endl;
+ }
+
+ /* add keys to indexes for a new record */
+ void indexRecord(NamespaceDetails *d, const void *buf, int len, DiskLoc newRecordLoc) {
+ BSONObj obj((const char *)buf);
+ for ( int i = 0; i < d->nIndexes; i++ ) {
+ _indexRecord(d->indexes[i], obj, newRecordLoc);
+ }
}
-}
-extern BSONObj emptyObj;
-extern BSONObj id_obj;
+ extern BSONObj emptyObj;
+ extern BSONObj id_obj;
-void ensureHaveIdIndex(const char *ns) {
- NamespaceDetails *d = nsdetails(ns);
- if ( d == 0 || (d->flags & NamespaceDetails::Flag_HaveIdIndex) )
- return;
+ void ensureHaveIdIndex(const char *ns) {
+ NamespaceDetails *d = nsdetails(ns);
+ if ( d == 0 || (d->flags & NamespaceDetails::Flag_HaveIdIndex) )
+ return;
- d->flags |= NamespaceDetails::Flag_HaveIdIndex;
+ d->flags |= NamespaceDetails::Flag_HaveIdIndex;
- string system_indexes = database->name + ".system.indexes";
+ string system_indexes = database->name + ".system.indexes";
- BSONObjBuilder b;
- b.append("name", "_id_");
- b.append("ns", ns);
- b.append("key", id_obj);
- BSONObj o = b.done();
+ BSONObjBuilder b;
+ b.append("name", "_id_");
+ b.append("ns", ns);
+ b.append("key", id_obj);
+ BSONObj o = b.done();
- /* edge case: note the insert could fail if we have hit maxindexes already */
- theDataFileMgr.insert(system_indexes.c_str(), o.objdata(), o.objsize());
-}
+ /* edge case: note the insert could fail if we have hit maxindexes already */
+ theDataFileMgr.insert(system_indexes.c_str(), o.objdata(), o.objsize());
+ }
-DiskLoc DataFileMgr::insert(const char *ns, const void *buf, int len, bool god) {
- bool addIndex = false;
- const char *sys = strstr(ns, "system.");
- if ( sys ) {
- if ( sys == ns ) {
- cout << "ERROR: attempt to insert for invalid database 'system': " << ns << endl;
- return DiskLoc();
- }
- if ( strstr(ns, ".system.") ) {
- if ( strstr(ns, ".system.indexes") )
- addIndex = true;
- else if ( !god ) {
- cout << "ERROR: attempt to insert in system namespace " << ns << endl;
+ DiskLoc DataFileMgr::insert(const char *ns, const void *buf, int len, bool god) {
+ bool addIndex = false;
+ const char *sys = strstr(ns, "system.");
+ if ( sys ) {
+ if ( sys == ns ) {
+ out() << "ERROR: attempt to insert for invalid database 'system': " << ns << endl;
return DiskLoc();
}
+ if ( strstr(ns, ".system.") ) {
+ if ( strstr(ns, ".system.indexes") )
+ addIndex = true;
+ else if ( !god ) {
+ out() << "ERROR: attempt to insert in system namespace " << ns << endl;
+ return DiskLoc();
+ }
+ }
}
- }
- NamespaceDetails *d = nsdetails(ns);
- if ( d == 0 ) {
- addNewNamespaceToCatalog(ns);
- /* todo: shouldn't be in the namespace catalog until after the allocations here work.
- also if this is an addIndex, those checks should happen before this!
- */
- database->newestFile()->newExtent(ns, initialExtentSize(len));
- d = nsdetails(ns);
- }
- d->paddingFits();
+ NamespaceDetails *d = nsdetails(ns);
+ if ( d == 0 ) {
+ addNewNamespaceToCatalog(ns);
+ /* todo: shouldn't be in the namespace catalog until after the allocations here work.
+ also if this is an addIndex, those checks should happen before this!
+ */
+ database->newestFile()->newExtent(ns, initialExtentSize(len));
+ d = nsdetails(ns);
+ }
+ d->paddingFits();
- NamespaceDetails *tableToIndex = 0;
+ NamespaceDetails *tableToIndex = 0;
- string tabletoidxns;
- if ( addIndex ) {
- BSONObj io((const char *) buf);
- const char *name = io.getStringField("name"); // name of the index
- tabletoidxns = io.getStringField("ns"); // table it indexes
+ string tabletoidxns;
+ if ( addIndex ) {
+ BSONObj io((const char *) buf);
+ const char *name = io.getStringField("name"); // name of the index
+ tabletoidxns = io.getStringField("ns"); // table it indexes
- if ( database->name != nsToClient(tabletoidxns.c_str()) ) {
- uassert("bad table to index name on add index attempt", false);
- return DiskLoc();
- }
+ if ( database->name != nsToClient(tabletoidxns.c_str()) ) {
+ uassert("bad table to index name on add index attempt", false);
+ return DiskLoc();
+ }
- BSONObj key = io.getObjectField("key");
- if ( *name == 0 || tabletoidxns.empty() || key.isEmpty() || key.objsize() > 2048 ) {
- cout << "user warning: bad add index attempt name:" << (name?name:"") << "\n ns:" <<
- tabletoidxns << "\n ourns:" << ns;
- cout << "\n idxobj:" << io.toString() << endl;
- return DiskLoc();
- }
- tableToIndex = nsdetails(tabletoidxns.c_str());
- if ( tableToIndex == 0 ) {
- // try to create it
- string err;
- if ( !userCreateNS(tabletoidxns.c_str(), emptyObj, err, false) ) {
- problem() << "ERROR: failed to create collection while adding its index. " << tabletoidxns << endl;
+ BSONObj key = io.getObjectField("key");
+ if ( *name == 0 || tabletoidxns.empty() || key.isEmpty() || key.objsize() > 2048 ) {
+ out() << "user warning: bad add index attempt name:" << (name?name:"") << "\n ns:" <<
+ tabletoidxns << "\n ourns:" << ns;
+ out() << "\n idxobj:" << io.toString() << endl;
return DiskLoc();
}
tableToIndex = nsdetails(tabletoidxns.c_str());
- log() << "info: creating collection " << tabletoidxns << " on add index\n";
- assert( tableToIndex );
+ if ( tableToIndex == 0 ) {
+ // try to create it
+ string err;
+ if ( !userCreateNS(tabletoidxns.c_str(), emptyObj, err, false) ) {
+ problem() << "ERROR: failed to create collection while adding its index. " << tabletoidxns << endl;
+ return DiskLoc();
+ }
+ tableToIndex = nsdetails(tabletoidxns.c_str());
+ log() << "info: creating collection " << tabletoidxns << " on add index\n";
+ assert( tableToIndex );
+ }
+ if ( tableToIndex->nIndexes >= MaxIndexes ) {
+ log() << "user warning: bad add index attempt, too many indexes for:" << tabletoidxns << endl;
+ return DiskLoc();
+ }
+ if ( tableToIndex->findIndexByName(name) >= 0 ) {
+ //out() << "INFO: index:" << name << " already exists for:" << tabletoidxns << endl;
+ return DiskLoc();
+ }
+ //indexFullNS = tabletoidxns;
+ //indexFullNS += ".$";
+ //indexFullNS += name; // database.table.$index -- note this doesn't contain jsobjs, it contains BtreeBuckets.
}
- if ( tableToIndex->nIndexes >= MaxIndexes ) {
- log() << "user warning: bad add index attempt, too many indexes for:" << tabletoidxns << endl;
- return DiskLoc();
+
+ DiskLoc extentLoc;
+ int lenWHdr = len + Record::HeaderSize;
+ lenWHdr = (int) (lenWHdr * d->paddingFactor);
+ if ( lenWHdr == 0 ) {
+ // old datafiles, backward compatible here.
+ assert( d->paddingFactor == 0 );
+ d->paddingFactor = 1.0;
+ lenWHdr = len + Record::HeaderSize;
}
- if ( tableToIndex->findIndexByName(name) >= 0 ) {
- //cout << "INFO: index:" << name << " already exists for:" << tabletoidxns << endl;
- return DiskLoc();
+ DiskLoc loc = d->alloc(ns, lenWHdr, extentLoc);
+ if ( loc.isNull() ) {
+ // out of space
+ if ( d->capped == 0 ) { // size capped doesn't grow
+ DEV log() << "allocating new extent for " << ns << " padding:" << d->paddingFactor << endl;
+ database->newestFile()->newExtent(ns, followupExtentSize(len, d->lastExtentSize));
+ loc = d->alloc(ns, lenWHdr, extentLoc);
+ }
+ if ( loc.isNull() ) {
+ log() << "out of space in datafile " << ns << " capped:" << d->capped << endl;
+ assert(d->capped);
+ return DiskLoc();
+ }
}
- //indexFullNS = tabletoidxns;
- //indexFullNS += ".$";
- //indexFullNS += name; // database.table.$index -- note this doesn't contain jsobjs, it contains BtreeBuckets.
- }
- DiskLoc extentLoc;
- int lenWHdr = len + Record::HeaderSize;
- lenWHdr = (int) (lenWHdr * d->paddingFactor);
- if ( lenWHdr == 0 ) {
- // old datafiles, backward compatible here.
- assert( d->paddingFactor == 0 );
- d->paddingFactor = 1.0;
- lenWHdr = len + Record::HeaderSize;
- }
- DiskLoc loc = d->alloc(ns, lenWHdr, extentLoc);
- if ( loc.isNull() ) {
- // out of space
- if ( d->capped == 0 ) { // size capped doesn't grow
- DEV log() << "allocating new extent for " << ns << " padding:" << d->paddingFactor << endl;
- database->newestFile()->newExtent(ns, followupExtentSize(len, d->lastExtentSize));
- loc = d->alloc(ns, lenWHdr, extentLoc);
+ Record *r = loc.rec();
+ assert( r->lengthWithHeaders >= lenWHdr );
+ memcpy(r->data, buf, len);
+ Extent *e = r->myExtent(loc);
+ if ( e->lastRecord.isNull() ) {
+ e->firstRecord = e->lastRecord = loc;
+ r->prevOfs = r->nextOfs = DiskLoc::NullOfs;
}
- if ( loc.isNull() ) {
- log() << "out of space in datafile " << ns << " capped:" << d->capped << endl;
- assert(d->capped);
- return DiskLoc();
+ else {
+
+ Record *oldlast = e->lastRecord.rec();
+ r->prevOfs = e->lastRecord.getOfs();
+ r->nextOfs = DiskLoc::NullOfs;
+ oldlast->nextOfs = loc.getOfs();
+ e->lastRecord = loc;
}
- }
- Record *r = loc.rec();
- assert( r->lengthWithHeaders >= lenWHdr );
- memcpy(r->data, buf, len);
- Extent *e = r->myExtent(loc);
- if ( e->lastRecord.isNull() ) {
- e->firstRecord = e->lastRecord = loc;
- r->prevOfs = r->nextOfs = DiskLoc::NullOfs;
- }
- else {
+ d->nrecords++;
+ d->datasize += r->netLength();
- Record *oldlast = e->lastRecord.rec();
- r->prevOfs = e->lastRecord.getOfs();
- r->nextOfs = DiskLoc::NullOfs;
- oldlast->nextOfs = loc.getOfs();
- e->lastRecord = loc;
- }
+ if ( tableToIndex ) {
+ IndexDetails& idxinfo = tableToIndex->indexes[tableToIndex->nIndexes];
+ idxinfo.info = loc;
+ idxinfo.head = BtreeBucket::addHead(idxinfo);
+ tableToIndex->addingIndex(tabletoidxns.c_str(), idxinfo);
+ /* todo: index existing records here */
+ addExistingToIndex(tabletoidxns.c_str(), idxinfo);
+ }
- d->nrecords++;
- d->datasize += r->netLength();
+ /* add this record to our indexes */
+ if ( d->nIndexes )
+ indexRecord(d, buf, len, loc);
- if ( tableToIndex ) {
- IndexDetails& idxinfo = tableToIndex->indexes[tableToIndex->nIndexes];
- idxinfo.info = loc;
- idxinfo.head = BtreeBucket::addHead(idxinfo);
- tableToIndex->addingIndex(tabletoidxns.c_str(), idxinfo);
- /* todo: index existing records here */
- addExistingToIndex(tabletoidxns.c_str(), idxinfo);
+// out() << " inserted at loc:" << hex << loc.getOfs() << " lenwhdr:" << hex << lenWHdr << dec << ' ' << ns << endl;
+ return loc;
}
- /* add this record to our indexes */
- if ( d->nIndexes )
- indexRecord(d, buf, len, loc);
-
-// cout << " inserted at loc:" << hex << loc.getOfs() << " lenwhdr:" << hex << lenWHdr << dec << ' ' << ns << endl;
- return loc;
-}
+ /* special version of insert for transaction logging -- streamlined a bit.
+ assumes ns is capped and no indexes
+ */
+ Record* DataFileMgr::fast_oplog_insert(NamespaceDetails *d, const char *ns, int len) {
+ RARELY assert( d == nsdetails(ns) );
-/* special version of insert for transaction logging -- streamlined a bit.
- assumes ns is capped and no indexes
-*/
-Record* DataFileMgr::fast_oplog_insert(NamespaceDetails *d, const char *ns, int len) {
- RARELY assert( d == nsdetails(ns) );
-
- DiskLoc extentLoc;
- int lenWHdr = len + Record::HeaderSize;
- DiskLoc loc = d->alloc(ns, lenWHdr, extentLoc);
- if ( loc.isNull() ) {
- assert(false);
- return 0;
- }
+ DiskLoc extentLoc;
+ int lenWHdr = len + Record::HeaderSize;
+ DiskLoc loc = d->alloc(ns, lenWHdr, extentLoc);
+ if ( loc.isNull() ) {
+ assert(false);
+ return 0;
+ }
- Record *r = loc.rec();
- assert( r->lengthWithHeaders >= lenWHdr );
+ Record *r = loc.rec();
+ assert( r->lengthWithHeaders >= lenWHdr );
- Extent *e = r->myExtent(loc);
- if ( e->lastRecord.isNull() ) {
- e->firstRecord = e->lastRecord = loc;
- r->prevOfs = r->nextOfs = DiskLoc::NullOfs;
- }
- else {
- Record *oldlast = e->lastRecord.rec();
- r->prevOfs = e->lastRecord.getOfs();
- r->nextOfs = DiskLoc::NullOfs;
- oldlast->nextOfs = loc.getOfs();
- e->lastRecord = loc;
- }
+ Extent *e = r->myExtent(loc);
+ if ( e->lastRecord.isNull() ) {
+ e->firstRecord = e->lastRecord = loc;
+ r->prevOfs = r->nextOfs = DiskLoc::NullOfs;
+ }
+ else {
+ Record *oldlast = e->lastRecord.rec();
+ r->prevOfs = e->lastRecord.getOfs();
+ r->nextOfs = DiskLoc::NullOfs;
+ oldlast->nextOfs = loc.getOfs();
+ e->lastRecord = loc;
+ }
- d->nrecords++;
+ d->nrecords++;
- return r;
-}
+ return r;
+ }
-void DataFileMgr::init(const char *dir) {
- /* boost::filesystem::path path( dir );
- path /= "temp.dat";
- string pathString = path.string();
- temp.open(pathString.c_str(), 64 * 1024 * 1024);
- */
-}
+ void DataFileMgr::init(const char *dir) {
+ /* boost::filesystem::path path( dir );
+ path /= "temp.dat";
+ string pathString = path.string();
+ temp.open(pathString.c_str(), 64 * 1024 * 1024);
+ */
+ }
-void pdfileInit() {
+ void pdfileInit() {
// namespaceIndex.init(dbpath);
- theDataFileMgr.init(dbpath);
-}
+ theDataFileMgr.init(dbpath);
+ }
} // namespace mongo
@@ -1001,163 +1001,163 @@ void pdfileInit() {
namespace mongo {
-void dropDatabase(const char *ns) {
- // ns is of the form "<dbname>.$cmd"
- char cl[256];
- nsToClient(ns, cl);
- problem() << "dropDatabase " << cl << endl;
- assert( database->name == cl );
+ void dropDatabase(const char *ns) {
+ // ns is of the form "<dbname>.$cmd"
+ char cl[256];
+ nsToClient(ns, cl);
+ problem() << "dropDatabase " << cl << endl;
+ assert( database->name == cl );
- closeClient( cl );
- _deleteDataFiles(cl);
-}
+ closeClient( cl );
+ _deleteDataFiles(cl);
+ }
-typedef boost::filesystem::path Path;
+ typedef boost::filesystem::path Path;
// back up original database files to 'temp' dir
-void _renameForBackup( const char *database, const Path &reservedPath ) {
- class Renamer : public FileOp {
- public:
- Renamer( const Path &reservedPath ) : reservedPath_( reservedPath ) {}
- private:
- const boost::filesystem::path &reservedPath_;
- virtual bool apply( const Path &p ) {
- if ( !boost::filesystem::exists( p ) )
- return false;
- boost::filesystem::rename( p, reservedPath_ / ( p.leaf() + ".bak" ) );
- return true;
- }
- virtual const char * op() const {
- return "renaming";
- }
- } renamer( reservedPath );
- _applyOpToDataFiles( database, renamer );
-}
+ void _renameForBackup( const char *database, const Path &reservedPath ) {
+ class Renamer : public FileOp {
+ public:
+ Renamer( const Path &reservedPath ) : reservedPath_( reservedPath ) {}
+ private:
+ const boost::filesystem::path &reservedPath_;
+ virtual bool apply( const Path &p ) {
+ if ( !boost::filesystem::exists( p ) )
+ return false;
+ boost::filesystem::rename( p, reservedPath_ / ( p.leaf() + ".bak" ) );
+ return true;
+ }
+ virtual const char * op() const {
+ return "renaming";
+ }
+ } renamer( reservedPath );
+ _applyOpToDataFiles( database, renamer );
+ }
// move temp files to standard data dir
-void _replaceWithRecovered( const char *database, const char *reservedPathString ) {
- class : public FileOp {
- virtual bool apply( const Path &p ) {
- if ( !boost::filesystem::exists( p ) )
- return false;
- boost::filesystem::rename( p, boost::filesystem::path(dbpath) / p.leaf() );
- return true;
- }
- virtual const char * op() const {
- return "renaming";
- }
- } renamer;
- _applyOpToDataFiles( database, renamer, reservedPathString );
-}
+ void _replaceWithRecovered( const char *database, const char *reservedPathString ) {
+ class : public FileOp {
+ virtual bool apply( const Path &p ) {
+ if ( !boost::filesystem::exists( p ) )
+ return false;
+ boost::filesystem::rename( p, boost::filesystem::path(dbpath) / p.leaf() );
+ return true;
+ }
+ virtual const char * op() const {
+ return "renaming";
+ }
+ } renamer;
+ _applyOpToDataFiles( database, renamer, reservedPathString );
+ }
// generate a directory name for storing temp data files
-Path uniqueReservedPath( const char *prefix ) {
- Path dbPath = Path( dbpath );
- Path reservedPath;
- int i = 0;
- bool exists = false;
- do {
- stringstream ss;
- ss << prefix << "_repairDatabase_" << i++;
- reservedPath = dbPath / ss.str();
- BOOST_CHECK_EXCEPTION( exists = boost::filesystem::exists( reservedPath ) );
- } while ( exists );
- return reservedPath;
-}
-
-boost::intmax_t dbSize( const char *database ) {
- class SizeAccumulator : public FileOp {
- public:
- SizeAccumulator() : totalSize_( 0 ) {}
- boost::intmax_t size() const {
- return totalSize_;
- }
- private:
- virtual bool apply( const boost::filesystem::path &p ) {
- if ( !boost::filesystem::exists( p ) )
- return false;
- totalSize_ += boost::filesystem::file_size( p );
- return true;
- }
- virtual const char *op() const {
- return "checking size";
- }
- boost::intmax_t totalSize_;
- };
- SizeAccumulator sa;
- _applyOpToDataFiles( database, sa );
- return sa.size();
-}
+ Path uniqueReservedPath( const char *prefix ) {
+ Path dbPath = Path( dbpath );
+ Path reservedPath;
+ int i = 0;
+ bool exists = false;
+ do {
+ stringstream ss;
+ ss << prefix << "_repairDatabase_" << i++;
+ reservedPath = dbPath / ss.str();
+ BOOST_CHECK_EXCEPTION( exists = boost::filesystem::exists( reservedPath ) );
+ } while ( exists );
+ return reservedPath;
+ }
+
+ boost::intmax_t dbSize( const char *database ) {
+ class SizeAccumulator : public FileOp {
+ public:
+ SizeAccumulator() : totalSize_( 0 ) {}
+ boost::intmax_t size() const {
+ return totalSize_;
+ }
+ private:
+ virtual bool apply( const boost::filesystem::path &p ) {
+ if ( !boost::filesystem::exists( p ) )
+ return false;
+ totalSize_ += boost::filesystem::file_size( p );
+ return true;
+ }
+ virtual const char *op() const {
+ return "checking size";
+ }
+ boost::intmax_t totalSize_;
+ };
+ SizeAccumulator sa;
+ _applyOpToDataFiles( database, sa );
+ return sa.size();
+ }
#if !defined(_WIN32)
} // namespace mongo
#include <sys/statvfs.h>
namespace mongo {
#endif
-boost::intmax_t freeSpace() {
+ boost::intmax_t freeSpace() {
#if !defined(_WIN32)
- struct statvfs info;
- assert( !statvfs( dbpath, &info ) );
- return boost::intmax_t( info.f_bavail ) * info.f_frsize;
+ struct statvfs info;
+ assert( !statvfs( dbpath, &info ) );
+ return boost::intmax_t( info.f_bavail ) * info.f_frsize;
#else
- return -1;
+ return -1;
#endif
-}
-
-bool repairDatabase( const char *ns, string &errmsg,
- bool preserveClonedFilesOnFailure, bool backupOriginalFiles ) {
- stringstream ss;
- ss << "localhost:" << port;
- string localhost = ss.str();
-
- // ns is of the form "<dbname>.$cmd"
- char dbName[256];
- nsToClient(ns, dbName);
- problem() << "repairDatabase " << dbName << endl;
- assert( database->name == dbName );
-
- boost::intmax_t totalSize = dbSize( dbName );
- boost::intmax_t freeSize = freeSpace();
- if ( freeSize > -1 && freeSize < totalSize ) {
- stringstream ss;
- ss << "Cannot repair database " << dbName << " having size: " << totalSize
- << " (bytes) because free disk space is: " << freeSize << " (bytes)";
- errmsg = ss.str();
- problem() << errmsg << endl;
- return false;
}
- Path reservedPath =
- uniqueReservedPath( ( preserveClonedFilesOnFailure || backupOriginalFiles ) ?
- "backup" : "tmp" );
- BOOST_CHECK_EXCEPTION( boost::filesystem::create_directory( reservedPath ) );
- string reservedPathString = reservedPath.native_directory_string();
- assert( setClient( dbName, reservedPathString.c_str() ) );
-
- bool res = cloneFrom(localhost.c_str(), errmsg, dbName, /*logForReplication=*/false, /*slaveok*/false);
- closeClient( dbName, reservedPathString.c_str() );
+ bool repairDatabase( const char *ns, string &errmsg,
+ bool preserveClonedFilesOnFailure, bool backupOriginalFiles ) {
+ stringstream ss;
+ ss << "localhost:" << port;
+ string localhost = ss.str();
+
+ // ns is of the form "<dbname>.$cmd"
+ char dbName[256];
+ nsToClient(ns, dbName);
+ problem() << "repairDatabase " << dbName << endl;
+ assert( database->name == dbName );
+
+ boost::intmax_t totalSize = dbSize( dbName );
+ boost::intmax_t freeSize = freeSpace();
+ if ( freeSize > -1 && freeSize < totalSize ) {
+ stringstream ss;
+ ss << "Cannot repair database " << dbName << " having size: " << totalSize
+ << " (bytes) because free disk space is: " << freeSize << " (bytes)";
+ errmsg = ss.str();
+ problem() << errmsg << endl;
+ return false;
+ }
- if ( !res ) {
- problem() << "clone failed for " << dbName << " with error: " << errmsg << endl;
- if ( !preserveClonedFilesOnFailure )
- BOOST_CHECK_EXCEPTION( boost::filesystem::remove_all( reservedPath ) );
- return false;
- }
+ Path reservedPath =
+ uniqueReservedPath( ( preserveClonedFilesOnFailure || backupOriginalFiles ) ?
+ "backup" : "tmp" );
+ BOOST_CHECK_EXCEPTION( boost::filesystem::create_directory( reservedPath ) );
+ string reservedPathString = reservedPath.native_directory_string();
+ assert( setClient( dbName, reservedPathString.c_str() ) );
+
+ bool res = cloneFrom(localhost.c_str(), errmsg, dbName, /*logForReplication=*/false, /*slaveok*/false);
+ closeClient( dbName, reservedPathString.c_str() );
+
+ if ( !res ) {
+ problem() << "clone failed for " << dbName << " with error: " << errmsg << endl;
+ if ( !preserveClonedFilesOnFailure )
+ BOOST_CHECK_EXCEPTION( boost::filesystem::remove_all( reservedPath ) );
+ return false;
+ }
- assert( !setClientTempNs( dbName ) );
- closeClient( dbName );
+ assert( !setClientTempNs( dbName ) );
+ closeClient( dbName );
- if ( backupOriginalFiles )
- _renameForBackup( dbName, reservedPath );
- else
- _deleteDataFiles( dbName );
+ if ( backupOriginalFiles )
+ _renameForBackup( dbName, reservedPath );
+ else
+ _deleteDataFiles( dbName );
- _replaceWithRecovered( dbName, reservedPathString.c_str() );
+ _replaceWithRecovered( dbName, reservedPathString.c_str() );
- if ( !backupOriginalFiles )
- BOOST_CHECK_EXCEPTION( boost::filesystem::remove_all( reservedPath ) );
+ if ( !backupOriginalFiles )
+ BOOST_CHECK_EXCEPTION( boost::filesystem::remove_all( reservedPath ) );
- return true;
-}
+ return true;
+ }
} // namespace mongo
diff --git a/db/pdfile.h b/db/pdfile.h
index dbd7e893ddd..d2378313a9a 100644
--- a/db/pdfile.h
+++ b/db/pdfile.h
@@ -37,252 +37,252 @@ const int VERSION_MINOR = 4;
namespace mongo {
-class PDFHeader;
-class Extent;
-class Record;
-class Cursor;
-
-extern bool verbose;
-void dropDatabase(const char *ns);
-bool repairDatabase(const char *ns, string &errmsg, bool preserveClonedFilesOnFailure = false, bool backupOriginalFiles = false);
-void dropNS(string& dropNs);;
-bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForReplication);
-auto_ptr<Cursor> findTableScan(const char *ns, const BSONObj& order, bool *isSorted=0);
+ class PDFHeader;
+ class Extent;
+ class Record;
+ class Cursor;
+
+ extern bool verbose;
+ void dropDatabase(const char *ns);
+ bool repairDatabase(const char *ns, string &errmsg, bool preserveClonedFilesOnFailure = false, bool backupOriginalFiles = false);
+ void dropNS(string& dropNs);;
+ bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForReplication);
+ auto_ptr<Cursor> findTableScan(const char *ns, const BSONObj& order, bool *isSorted=0);
// -1 if library unavailable.
-boost::intmax_t freeSpace();
+ boost::intmax_t freeSpace();
-/*---------------------------------------------------------------------*/
+ /*---------------------------------------------------------------------*/
-class PDFHeader;
-class PhysicalDataFile {
- friend class DataFileMgr;
- friend class BasicCursor;
-public:
- PhysicalDataFile(int fn) : fileNo(fn) { }
- void open(const char *filename, int requestedDataSize = 0);
+ class PDFHeader;
+ class PhysicalDataFile {
+ friend class DataFileMgr;
+ friend class BasicCursor;
+ public:
+ PhysicalDataFile(int fn) : fileNo(fn) { }
+ void open(const char *filename, int requestedDataSize = 0);
- Extent* newExtent(const char *ns, int approxSize, bool newCapped = false, int loops = 0);
- PDFHeader *getHeader() {
- return header;
- }
- static int maxSize();
-private:
- int defaultSize( const char *filename ) const;
-
- Extent* getExtent(DiskLoc loc);
- Extent* _getExtent(DiskLoc loc);
- Record* recordAt(DiskLoc dl);
-
- MemoryMappedFile mmf;
- PDFHeader *header;
- int __unUsEd;
- // int length;
- int fileNo;
-};
-
-class DataFileMgr {
- friend class BasicCursor;
-public:
- void init(const char *);
-
- void update(
- const char *ns,
- Record *toupdate, const DiskLoc& dl,
- const char *buf, int len, stringstream& profiling);
- DiskLoc insert(const char *ns, const void *buf, int len, bool god = false);
- void deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK = false);
- static auto_ptr<Cursor> findAll(const char *ns);
-
- /* special version of insert for transaction logging -- streamlined a bit.
- assumes ns is capped and no indexes
- */
- Record* fast_oplog_insert(NamespaceDetails *d, const char *ns, int len);
-
- static Extent* getExtent(const DiskLoc& dl);
- static Record* getRecord(const DiskLoc& dl);
-private:
- vector<PhysicalDataFile *> files;
-};
-
-extern DataFileMgr theDataFileMgr;
+ Extent* newExtent(const char *ns, int approxSize, bool newCapped = false, int loops = 0);
+ PDFHeader *getHeader() {
+ return header;
+ }
+ static int maxSize();
+ private:
+ int defaultSize( const char *filename ) const;
+
+ Extent* getExtent(DiskLoc loc);
+ Extent* _getExtent(DiskLoc loc);
+ Record* recordAt(DiskLoc dl);
+
+ MemoryMappedFile mmf;
+ PDFHeader *header;
+ int __unUsEd;
+ // int length;
+ int fileNo;
+ };
+
+ class DataFileMgr {
+ friend class BasicCursor;
+ public:
+ void init(const char *);
+
+ void update(
+ const char *ns,
+ Record *toupdate, const DiskLoc& dl,
+ const char *buf, int len, stringstream& profiling);
+ DiskLoc insert(const char *ns, const void *buf, int len, bool god = false);
+ void deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK = false);
+ static auto_ptr<Cursor> findAll(const char *ns);
+
+ /* special version of insert for transaction logging -- streamlined a bit.
+ assumes ns is capped and no indexes
+ */
+ Record* fast_oplog_insert(NamespaceDetails *d, const char *ns, int len);
+
+ static Extent* getExtent(const DiskLoc& dl);
+ static Record* getRecord(const DiskLoc& dl);
+ private:
+ vector<PhysicalDataFile *> files;
+ };
+
+ extern DataFileMgr theDataFileMgr;
#pragma pack(push,1)
-class DeletedRecord {
-public:
- int lengthWithHeaders;
- int extentOfs;
- DiskLoc nextDeleted;
- Extent* myExtent(const DiskLoc& myLoc) {
- return DataFileMgr::getExtent(DiskLoc(myLoc.a(), extentOfs));
- }
-};
-
-/* Record is a record in a datafile. DeletedRecord is similar but for deleted space.
-
-*11:03:20 AM) dm10gen: regarding extentOfs...
-(11:03:42 AM) dm10gen: an extent is a continugous disk area, which contains many Records and DeleteRecords
-(11:03:56 AM) dm10gen: a DiskLoc has two pieces, the fileno and ofs. (64 bit total)
-(11:04:16 AM) dm10gen: to keep the headesr small, instead of storing a 64 bit ptr to the full extent address, we keep just the offset
-(11:04:29 AM) dm10gen: we can do this as we know the record's address, and it has the same fileNo
-(11:04:33 AM) dm10gen: see class DiskLoc for more info
-(11:04:43 AM) dm10gen: so that is how Record::myExtent() works
-(11:04:53 AM) dm10gen: on an alloc(), when we build a new Record, we must popular its extentOfs then
-*/
-class Record {
-public:
- enum { HeaderSize = 16 };
- int lengthWithHeaders;
- int extentOfs;
- int nextOfs;
- int prevOfs;
- char data[4];
- int netLength() {
- return lengthWithHeaders - HeaderSize;
- }
- //void setNewLength(int netlen) { lengthWithHeaders = netlen + HeaderSize; }
+ class DeletedRecord {
+ public:
+ int lengthWithHeaders;
+ int extentOfs;
+ DiskLoc nextDeleted;
+ Extent* myExtent(const DiskLoc& myLoc) {
+ return DataFileMgr::getExtent(DiskLoc(myLoc.a(), extentOfs));
+ }
+ };
+
+ /* Record is a record in a datafile. DeletedRecord is similar but for deleted space.
+
+ *11:03:20 AM) dm10gen: regarding extentOfs...
+ (11:03:42 AM) dm10gen: an extent is a continugous disk area, which contains many Records and DeleteRecords
+ (11:03:56 AM) dm10gen: a DiskLoc has two pieces, the fileno and ofs. (64 bit total)
+ (11:04:16 AM) dm10gen: to keep the headesr small, instead of storing a 64 bit ptr to the full extent address, we keep just the offset
+ (11:04:29 AM) dm10gen: we can do this as we know the record's address, and it has the same fileNo
+ (11:04:33 AM) dm10gen: see class DiskLoc for more info
+ (11:04:43 AM) dm10gen: so that is how Record::myExtent() works
+ (11:04:53 AM) dm10gen: on an alloc(), when we build a new Record, we must popular its extentOfs then
+ */
+ class Record {
+ public:
+ enum { HeaderSize = 16 };
+ int lengthWithHeaders;
+ int extentOfs;
+ int nextOfs;
+ int prevOfs;
+ char data[4];
+ int netLength() {
+ return lengthWithHeaders - HeaderSize;
+ }
+ //void setNewLength(int netlen) { lengthWithHeaders = netlen + HeaderSize; }
- /* use this when a record is deleted. basically a union with next/prev fields */
- DeletedRecord& asDeleted() {
- return *((DeletedRecord*) this);
- }
+ /* use this when a record is deleted. basically a union with next/prev fields */
+ DeletedRecord& asDeleted() {
+ return *((DeletedRecord*) this);
+ }
- Extent* myExtent(const DiskLoc& myLoc) {
- return DataFileMgr::getExtent(DiskLoc(myLoc.a(), extentOfs));
- }
- /* get the next record in the namespace, traversing extents as necessary */
- DiskLoc getNext(const DiskLoc& myLoc);
- DiskLoc getPrev(const DiskLoc& myLoc);
-};
+ Extent* myExtent(const DiskLoc& myLoc) {
+ return DataFileMgr::getExtent(DiskLoc(myLoc.a(), extentOfs));
+ }
+ /* get the next record in the namespace, traversing extents as necessary */
+ DiskLoc getNext(const DiskLoc& myLoc);
+ DiskLoc getPrev(const DiskLoc& myLoc);
+ };
-/* extents are datafile regions where all the records within the region
- belong to the same namespace.
+ /* extents are datafile regions where all the records within the region
+ belong to the same namespace.
-(11:12:35 AM) dm10gen: when the extent is allocated, all its empty space is stuck into one big DeletedRecord
-(11:12:55 AM) dm10gen: and that is placed on the free list
-*/
-class Extent {
-public:
- unsigned magic;
- DiskLoc myLoc;
- DiskLoc xnext, xprev; /* next/prev extent for this namespace */
- Namespace ns; /* which namespace this extent is for. this is just for troubleshooting really */
- int length; /* size of the extent, including these fields */
- DiskLoc firstRecord, lastRecord;
- char extentData[4];
-
- bool validates() {
- return !(firstRecord.isNull() ^ lastRecord.isNull()) &&
- length >= 0 && !myLoc.isNull();
- }
+ (11:12:35 AM) dm10gen: when the extent is allocated, all its empty space is stuck into one big DeletedRecord
+ (11:12:55 AM) dm10gen: and that is placed on the free list
+ */
+ class Extent {
+ public:
+ unsigned magic;
+ DiskLoc myLoc;
+ DiskLoc xnext, xprev; /* next/prev extent for this namespace */
+ Namespace ns; /* which namespace this extent is for. this is just for troubleshooting really */
+ int length; /* size of the extent, including these fields */
+ DiskLoc firstRecord, lastRecord;
+ char extentData[4];
+
+ bool validates() {
+ return !(firstRecord.isNull() ^ lastRecord.isNull()) &&
+ length >= 0 && !myLoc.isNull();
+ }
- void dump(iostream& s) {
- s << " loc:" << myLoc.toString() << " xnext:" << xnext.toString() << " xprev:" << xprev.toString() << '\n';
- s << " ns:" << ns.buf << '\n';
- s << " size:" << length << " firstRecord:" << firstRecord.toString() << " lastRecord:" << lastRecord.toString() << '\n';
- }
+ void dump(iostream& s) {
+ s << " loc:" << myLoc.toString() << " xnext:" << xnext.toString() << " xprev:" << xprev.toString() << '\n';
+ s << " ns:" << ns.buf << '\n';
+ s << " size:" << length << " firstRecord:" << firstRecord.toString() << " lastRecord:" << lastRecord.toString() << '\n';
+ }
- /* assumes already zeroed -- insufficient for block 'reuse' perhaps
- Returns a DeletedRecord location which is the data in the extent ready for us.
- Caller will need to add that to the freelist structure in namespacedetail.
- */
- DiskLoc init(const char *nsname, int _length, int _fileNo, int _offset);
+ /* assumes already zeroed -- insufficient for block 'reuse' perhaps
+ Returns a DeletedRecord location which is the data in the extent ready for us.
+ Caller will need to add that to the freelist structure in namespacedetail.
+ */
+ DiskLoc init(const char *nsname, int _length, int _fileNo, int _offset);
- void assertOk() {
- assert(magic == 0x41424344);
- }
+ void assertOk() {
+ assert(magic == 0x41424344);
+ }
- Record* newRecord(int len);
+ Record* newRecord(int len);
- Record* getRecord(DiskLoc dl) {
- assert( !dl.isNull() );
- assert( dl.sameFile(myLoc) );
- int x = dl.getOfs() - myLoc.getOfs();
- assert( x > 0 );
- return (Record *) (((char *) this) + x);
- }
+ Record* getRecord(DiskLoc dl) {
+ assert( !dl.isNull() );
+ assert( dl.sameFile(myLoc) );
+ int x = dl.getOfs() - myLoc.getOfs();
+ assert( x > 0 );
+ return (Record *) (((char *) this) + x);
+ }
- Extent* getNextExtent() {
- return xnext.isNull() ? 0 : DataFileMgr::getExtent(xnext);
- }
- Extent* getPrevExtent() {
- return xprev.isNull() ? 0 : DataFileMgr::getExtent(xprev);
- }
-};
-
-/*
- ----------------------
- Header
- ----------------------
- Extent (for a particular namespace)
- Record
- ...
- Record (some chained for unused space)
- ----------------------
- more Extents...
- ----------------------
-*/
+ Extent* getNextExtent() {
+ return xnext.isNull() ? 0 : DataFileMgr::getExtent(xnext);
+ }
+ Extent* getPrevExtent() {
+ return xprev.isNull() ? 0 : DataFileMgr::getExtent(xprev);
+ }
+ };
+
+ /*
+ ----------------------
+ Header
+ ----------------------
+ Extent (for a particular namespace)
+ Record
+ ...
+ Record (some chained for unused space)
+ ----------------------
+ more Extents...
+ ----------------------
+ */
-/* data file header */
-class PDFHeader {
-public:
- int version;
- int versionMinor;
- int fileLength;
- DiskLoc unused; /* unused is the portion of the file that doesn't belong to any allocated extents. -1 = no more */
- int unusedLength;
- char reserved[8192 - 4*4 - 8];
+ /* data file header */
+ class PDFHeader {
+ public:
+ int version;
+ int versionMinor;
+ int fileLength;
+ DiskLoc unused; /* unused is the portion of the file that doesn't belong to any allocated extents. -1 = no more */
+ int unusedLength;
+ char reserved[8192 - 4*4 - 8];
- char data[4];
+ char data[4];
- static int headerSize() {
- return sizeof(PDFHeader) - 4;
- }
+ static int headerSize() {
+ return sizeof(PDFHeader) - 4;
+ }
- bool currentVersion() const {
- return ( version == VERSION ) && ( versionMinor == VERSION_MINOR );
- }
+ bool currentVersion() const {
+ return ( version == VERSION ) && ( versionMinor == VERSION_MINOR );
+ }
- bool uninitialized() {
- if ( version == 0 ) return true;
- return false;
- }
+ bool uninitialized() {
+ if ( version == 0 ) return true;
+ return false;
+ }
- Record* getRecord(DiskLoc dl) {
- int ofs = dl.getOfs();
- assert( ofs >= headerSize() );
- return (Record*) (((char *) this) + ofs);
- }
+ Record* getRecord(DiskLoc dl) {
+ int ofs = dl.getOfs();
+ assert( ofs >= headerSize() );
+ return (Record*) (((char *) this) + ofs);
+ }
- void init(int fileno, int filelength) {
- if ( uninitialized() ) {
- assert(filelength > 32768 );
- assert( headerSize() == 8192 );
- fileLength = filelength;
- version = VERSION;
- versionMinor = VERSION_MINOR;
- unused.setOfs( fileno, headerSize() );
- assert( (data-(char*)this) == headerSize() );
- unusedLength = fileLength - headerSize() - 16;
- memcpy(data+unusedLength, " \nthe end\n", 16);
+ void init(int fileno, int filelength) {
+ if ( uninitialized() ) {
+ assert(filelength > 32768 );
+ assert( headerSize() == 8192 );
+ fileLength = filelength;
+ version = VERSION;
+ versionMinor = VERSION_MINOR;
+ unused.setOfs( fileno, headerSize() );
+ assert( (data-(char*)this) == headerSize() );
+ unusedLength = fileLength - headerSize() - 16;
+ memcpy(data+unusedLength, " \nthe end\n", 16);
+ }
}
- }
-};
+ };
#pragma pack(pop)
-inline Extent* PhysicalDataFile::_getExtent(DiskLoc loc) {
- loc.assertOk();
- Extent *e = (Extent *) (((char *)header) + loc.getOfs());
- return e;
-}
+ inline Extent* PhysicalDataFile::_getExtent(DiskLoc loc) {
+ loc.assertOk();
+ Extent *e = (Extent *) (((char *)header) + loc.getOfs());
+ return e;
+ }
-inline Extent* PhysicalDataFile::getExtent(DiskLoc loc) {
- Extent *e = _getExtent(loc);
- e->assertOk();
- return e;
-}
+ inline Extent* PhysicalDataFile::getExtent(DiskLoc loc) {
+ Extent *e = _getExtent(loc);
+ e->assertOk();
+ return e;
+ }
} // namespace mongo
@@ -290,60 +290,60 @@ inline Extent* PhysicalDataFile::getExtent(DiskLoc loc) {
namespace mongo {
-inline Record* PhysicalDataFile::recordAt(DiskLoc dl) {
- return header->getRecord(dl);
-}
+ inline Record* PhysicalDataFile::recordAt(DiskLoc dl) {
+ return header->getRecord(dl);
+ }
-inline DiskLoc Record::getNext(const DiskLoc& myLoc) {
- if ( nextOfs != DiskLoc::NullOfs ) {
- /* defensive */
- if ( nextOfs >= 0 && nextOfs < 10 ) {
- sayDbContext("Assertion failure - Record::getNext() referencing a deleted record?");
- return DiskLoc();
+ inline DiskLoc Record::getNext(const DiskLoc& myLoc) {
+ if ( nextOfs != DiskLoc::NullOfs ) {
+ /* defensive */
+ if ( nextOfs >= 0 && nextOfs < 10 ) {
+ sayDbContext("Assertion failure - Record::getNext() referencing a deleted record?");
+ return DiskLoc();
+ }
+
+ return DiskLoc(myLoc.a(), nextOfs);
+ }
+ Extent *e = myExtent(myLoc);
+ while ( 1 ) {
+ if ( e->xnext.isNull() )
+ return DiskLoc(); // end of table.
+ e = e->xnext.ext();
+ if ( !e->firstRecord.isNull() )
+ break;
+ // entire extent could be empty, keep looking
}
+ return e->firstRecord;
+ }
+ inline DiskLoc Record::getPrev(const DiskLoc& myLoc) {
+ if ( prevOfs != DiskLoc::NullOfs )
+ return DiskLoc(myLoc.a(), prevOfs);
+ Extent *e = myExtent(myLoc);
+ if ( e->xprev.isNull() )
+ return DiskLoc();
+ return e->xprev.ext()->lastRecord;
+ }
- return DiskLoc(myLoc.a(), nextOfs);
+ inline Record* DiskLoc::rec() const {
+ return DataFileMgr::getRecord(*this);
+ }
+ inline BSONObj DiskLoc::obj() const {
+ return BSONObj(rec());
}
- Extent *e = myExtent(myLoc);
- while ( 1 ) {
- if ( e->xnext.isNull() )
- return DiskLoc(); // end of table.
- e = e->xnext.ext();
- if ( !e->firstRecord.isNull() )
- break;
- // entire extent could be empty, keep looking
+ inline DeletedRecord* DiskLoc::drec() const {
+ assert( fileNo != -1 );
+ return (DeletedRecord*) rec();
}
- return e->firstRecord;
-}
-inline DiskLoc Record::getPrev(const DiskLoc& myLoc) {
- if ( prevOfs != DiskLoc::NullOfs )
- return DiskLoc(myLoc.a(), prevOfs);
- Extent *e = myExtent(myLoc);
- if ( e->xprev.isNull() )
- return DiskLoc();
- return e->xprev.ext()->lastRecord;
-}
-
-inline Record* DiskLoc::rec() const {
- return DataFileMgr::getRecord(*this);
-}
-inline BSONObj DiskLoc::obj() const {
- return BSONObj(rec());
-}
-inline DeletedRecord* DiskLoc::drec() const {
- assert( fileNo != -1 );
- return (DeletedRecord*) rec();
-}
-inline Extent* DiskLoc::ext() const {
- return DataFileMgr::getExtent(*this);
-}
-
-inline BtreeBucket* DiskLoc::btree() const {
- assert( fileNo != -1 );
- return (BtreeBucket*) rec()->data;
-}
-
-/*---------------------------------------------------------------------*/
+ inline Extent* DiskLoc::ext() const {
+ return DataFileMgr::getExtent(*this);
+ }
+
+ inline BtreeBucket* DiskLoc::btree() const {
+ assert( fileNo != -1 );
+ return (BtreeBucket*) rec()->data;
+ }
+
+ /*---------------------------------------------------------------------*/
} // namespace mongo
@@ -362,87 +362,87 @@ namespace mongo {
assert( false ); \
}
-class FileOp {
-public:
- virtual bool apply( const boost::filesystem::path &p ) = 0;
- virtual const char * op() const = 0;
-};
-
-inline void _applyOpToDataFiles( const char *database, FileOp &fo, const char *path = dbpath ) {
- string c = database;
- c += '.';
- boost::filesystem::path p(path);
- boost::filesystem::path q;
- q = p / (c+"ns");
- bool ok = false;
- BOOST_CHECK_EXCEPTION( ok = fo.apply( q ) );
- if ( ok && verbose )
- log() << fo.op() << " file " << q.string() << '\n';
- int i = 0;
- int extra = 10; // should not be necessary, this is defensive in case there are missing files
- while ( 1 ) {
- assert( i <= DiskLoc::MaxFiles );
- stringstream ss;
- ss << c << i;
- q = p / ss.str();
- BOOST_CHECK_EXCEPTION( ok = fo.apply(q) );
- if ( ok ) {
- if ( verbose || extra != 10 )
- log() << fo.op() << " file " << q.string() << '\n';
- if ( extra != 10 )
- log() << " _applyOpToDataFiles() warning: extra == " << extra << endl;
+ class FileOp {
+ public:
+ virtual bool apply( const boost::filesystem::path &p ) = 0;
+ virtual const char * op() const = 0;
+ };
+
+ inline void _applyOpToDataFiles( const char *database, FileOp &fo, const char *path = dbpath ) {
+ string c = database;
+ c += '.';
+ boost::filesystem::path p(path);
+ boost::filesystem::path q;
+ q = p / (c+"ns");
+ bool ok = false;
+ BOOST_CHECK_EXCEPTION( ok = fo.apply( q ) );
+ if ( ok && verbose )
+ log() << fo.op() << " file " << q.string() << '\n';
+ int i = 0;
+ int extra = 10; // should not be necessary, this is defensive in case there are missing files
+ while ( 1 ) {
+ assert( i <= DiskLoc::MaxFiles );
+ stringstream ss;
+ ss << c << i;
+ q = p / ss.str();
+ BOOST_CHECK_EXCEPTION( ok = fo.apply(q) );
+ if ( ok ) {
+ if ( verbose || extra != 10 )
+ log() << fo.op() << " file " << q.string() << '\n';
+ if ( extra != 10 )
+ log() << " _applyOpToDataFiles() warning: extra == " << extra << endl;
+ }
+ else if ( --extra <= 0 )
+ break;
+ i++;
}
- else if ( --extra <= 0 )
- break;
- i++;
}
-}
-inline void _deleteDataFiles(const char *database) {
- class : public FileOp {
- virtual bool apply( const boost::filesystem::path &p ) {
- return boost::filesystem::remove( p );
- }
- virtual const char * op() const {
- return "remove";
- }
- } deleter;
- _applyOpToDataFiles( database, deleter );
-}
-
-boost::intmax_t dbSize( const char *database );
-
-inline NamespaceIndex* nsindex(const char *ns) {
- DEV {
- char buf[256];
- nsToClient(ns, buf);
- if ( database->name != buf ) {
- cout << "ERROR: attempt to write to wrong database database\n";
- cout << " ns:" << ns << '\n';
- cout << " database->name:" << database->name << endl;
- assert( database->name == buf );
+ inline void _deleteDataFiles(const char *database) {
+ class : public FileOp {
+ virtual bool apply( const boost::filesystem::path &p ) {
+ return boost::filesystem::remove( p );
+ }
+ virtual const char * op() const {
+ return "remove";
+ }
+ } deleter;
+ _applyOpToDataFiles( database, deleter );
+ }
+
+ boost::intmax_t dbSize( const char *database );
+
+ inline NamespaceIndex* nsindex(const char *ns) {
+ DEV {
+ char buf[256];
+ nsToClient(ns, buf);
+ if ( database->name != buf ) {
+ out() << "ERROR: attempt to write to wrong database database\n";
+ out() << " ns:" << ns << '\n';
+ out() << " database->name:" << database->name << endl;
+ assert( database->name == buf );
+ }
}
+ return &database->namespaceIndex;
+ }
+
+ inline NamespaceDetails* nsdetails(const char *ns) {
+ return nsindex(ns)->details(ns);
+ }
+
+ inline PhysicalDataFile& DiskLoc::pdf() const {
+ assert( fileNo != -1 );
+ return *database->getFile(fileNo);
+ }
+
+ inline Extent* DataFileMgr::getExtent(const DiskLoc& dl) {
+ assert( dl.a() != -1 );
+ return database->getFile(dl.a())->getExtent(dl);
+ }
+
+ inline Record* DataFileMgr::getRecord(const DiskLoc& dl) {
+ assert( dl.a() != -1 );
+ return database->getFile(dl.a())->recordAt(dl);
}
- return &database->namespaceIndex;
-}
-
-inline NamespaceDetails* nsdetails(const char *ns) {
- return nsindex(ns)->details(ns);
-}
-
-inline PhysicalDataFile& DiskLoc::pdf() const {
- assert( fileNo != -1 );
- return *database->getFile(fileNo);
-}
-
-inline Extent* DataFileMgr::getExtent(const DiskLoc& dl) {
- assert( dl.a() != -1 );
- return database->getFile(dl.a())->getExtent(dl);
-}
-
-inline Record* DataFileMgr::getRecord(const DiskLoc& dl) {
- assert( dl.a() != -1 );
- return database->getFile(dl.a())->recordAt(dl);
-}
} // namespace mongo
diff --git a/db/query.cpp b/db/query.cpp
index dcfe4705ebd..9481c71c8fe 100644
--- a/db/query.cpp
+++ b/db/query.cpp
@@ -33,858 +33,858 @@
namespace mongo {
-/* We cut off further objects once we cross this threshold; thus, you might get
- a little bit more than this, it is a threshold rather than a limit.
-*/
-const int MaxBytesToReturnToClientAtOnce = 4 * 1024 * 1024;
+ /* We cut off further objects once we cross this threshold; thus, you might get
+ a little bit more than this, it is a threshold rather than a limit.
+ */
+ const int MaxBytesToReturnToClientAtOnce = 4 * 1024 * 1024;
//ns->query->DiskLoc
-LRUishMap<BSONObj,DiskLoc,5> lrutest(123);
-
-int nextCursorId = 1;
-extern bool useCursors;
-
-void appendElementHandlingGtLt(BSONObjBuilder& b, BSONElement& e);
-
-int matchDirection( const BSONObj &index, const BSONObj &sort ) {
- int direction = 0;
- BSONObjIterator i( index );
- BSONObjIterator s( sort );
- while ( 1 ) {
- BSONElement ie = i.next();
- BSONElement se = s.next();
- if ( ie.eoo() ) {
- if ( !se.eoo() )
- return 0;
- return direction;
- }
- if ( strcmp( ie.fieldName(), se.fieldName() ) != 0 )
- return 0;
+ LRUishMap<BSONObj,DiskLoc,5> lrutest(123);
- int d = ie.number() == se.number() ? 1 : -1;
- if ( direction == 0 )
- direction = d;
- else if ( direction != d )
- return 0;
- }
-}
+ int nextCursorId = 1;
+ extern bool useCursors;
-/* todo: _ cache query plans
- _ use index on partial match with the query
+ void appendElementHandlingGtLt(BSONObjBuilder& b, BSONElement& e);
- parameters
- query - the query, e.g., { name: 'joe' }
- order - order by spec, e.g., { name: 1 } 1=ASC, -1=DESC
- simpleKeyMatch - set to true if the query is purely for a single key value
- unchanged otherwise.
-*/
-auto_ptr<Cursor> getIndexCursor(const char *ns, BSONObj& query, BSONObj& order, bool *simpleKeyMatch = 0, bool *isSorted = 0, string *hint = 0) {
- NamespaceDetails *d = nsdetails(ns);
- if ( d == 0 ) return auto_ptr<Cursor>();
-
- if ( hint && !hint->empty() ) {
- /* todo: more work needed. doesn't handle $lt & $gt for example.
- waiting for query optimizer rewrite (see queryoptimizer.h) before finishing the work.
- */
- for (int i = 0; i < d->nIndexes; i++ ) {
- IndexDetails& ii = d->indexes[i];
- if ( ii.indexName() == *hint ) {
- BSONObj startKey = ii.getKeyFromQuery(query);
- int direction = 1;
- if ( simpleKeyMatch )
- *simpleKeyMatch = query.nFields() == startKey.nFields();
- if ( isSorted ) *isSorted = false;
- return auto_ptr<Cursor>(
- new BtreeCursor(ii, startKey, direction, query));
+ int matchDirection( const BSONObj &index, const BSONObj &sort ) {
+ int direction = 0;
+ BSONObjIterator i( index );
+ BSONObjIterator s( sort );
+ while ( 1 ) {
+ BSONElement ie = i.next();
+ BSONElement se = s.next();
+ if ( ie.eoo() ) {
+ if ( !se.eoo() )
+ return 0;
+ return direction;
}
+ if ( strcmp( ie.fieldName(), se.fieldName() ) != 0 )
+ return 0;
+
+ int d = ie.number() == se.number() ? 1 : -1;
+ if ( direction == 0 )
+ direction = d;
+ else if ( direction != d )
+ return 0;
}
}
- if ( !order.isEmpty() ) {
- // order by
- for (int i = 0; i < d->nIndexes; i++ ) {
- BSONObj idxInfo = d->indexes[i].info.obj(); // { name:, ns:, key: }
- assert( strcmp(ns, idxInfo.getStringField("ns")) == 0 );
- BSONObj idxKey = idxInfo.getObjectField("key");
- int direction = matchDirection( idxKey, order );
- if ( direction != 0 ) {
- BSONObjBuilder b;
- DEV cout << " using index " << d->indexes[i].indexNamespace() << '\n';
- if ( isSorted )
- *isSorted = true;
-
- return auto_ptr<Cursor>(new BtreeCursor(d->indexes[i], BSONObj(), direction, query));
+ /* todo: _ cache query plans
+ _ use index on partial match with the query
+
+ parameters
+ query - the query, e.g., { name: 'joe' }
+ order - order by spec, e.g., { name: 1 } 1=ASC, -1=DESC
+ simpleKeyMatch - set to true if the query is purely for a single key value
+ unchanged otherwise.
+ */
+ auto_ptr<Cursor> getIndexCursor(const char *ns, BSONObj& query, BSONObj& order, bool *simpleKeyMatch = 0, bool *isSorted = 0, string *hint = 0) {
+ NamespaceDetails *d = nsdetails(ns);
+ if ( d == 0 ) return auto_ptr<Cursor>();
+
+ if ( hint && !hint->empty() ) {
+ /* todo: more work needed. doesn't handle $lt & $gt for example.
+ waiting for query optimizer rewrite (see queryoptimizer.h) before finishing the work.
+ */
+ for (int i = 0; i < d->nIndexes; i++ ) {
+ IndexDetails& ii = d->indexes[i];
+ if ( ii.indexName() == *hint ) {
+ BSONObj startKey = ii.getKeyFromQuery(query);
+ int direction = 1;
+ if ( simpleKeyMatch )
+ *simpleKeyMatch = query.nFields() == startKey.nFields();
+ if ( isSorted ) *isSorted = false;
+ return auto_ptr<Cursor>(
+ new BtreeCursor(ii, startKey, direction, query));
+ }
}
}
- }
- // queryFields, e.g. { 'name' }
- set<string> queryFields;
- query.getFieldNames(queryFields);
-
- // regular query without order by
- for (int i = 0; i < d->nIndexes; i++ ) {
- BSONObj idxInfo = d->indexes[i].info.obj(); // { name:, ns:, key: }
- BSONObj idxKey = idxInfo.getObjectField("key");
- set<string> keyFields;
- idxKey.getFieldNames(keyFields);
-
- // keyFields: e.g. { "name" }
- bool match = keyFields == queryFields;
- if ( 0 && !match && queryFields.size() > 1 && simpleKeyMatch == 0 && keyFields.size() == 1 ) {
- // TEMP
- string s = *(keyFields.begin());
- match = queryFields.count(s) == 1;
+ if ( !order.isEmpty() ) {
+ // order by
+ for (int i = 0; i < d->nIndexes; i++ ) {
+ BSONObj idxInfo = d->indexes[i].info.obj(); // { name:, ns:, key: }
+ assert( strcmp(ns, idxInfo.getStringField("ns")) == 0 );
+ BSONObj idxKey = idxInfo.getObjectField("key");
+ int direction = matchDirection( idxKey, order );
+ if ( direction != 0 ) {
+ BSONObjBuilder b;
+ DEV out() << " using index " << d->indexes[i].indexNamespace() << '\n';
+ if ( isSorted )
+ *isSorted = true;
+
+ return auto_ptr<Cursor>(new BtreeCursor(d->indexes[i], BSONObj(), direction, query));
+ }
+ }
}
- if ( match ) {
- bool simple = true;
- //BSONObjBuilder b;
- BSONObj q = query.extractFieldsUnDotted(idxKey);
- assert(q.objsize() != 0); // guard against a seg fault if details is 0
- /* regexp: only supported if form is /^text/ */
- BSONObjBuilder b2;
- BSONObjIterator it(q);
- bool first = true;
- while ( it.more() ) {
- BSONElement e = it.next();
- if ( e.eoo() )
- break;
+ // queryFields, e.g. { 'name' }
+ set<string> queryFields;
+ query.getFieldNames(queryFields);
- // GT/LT
- if ( e.type() == Object ) {
- int op = getGtLtOp(e);
- if ( op ) {
- if ( !first || !it.next().eoo() ) {
- // compound keys with GT/LT not supported yet via index.
- goto fail;
- }
- if ( op >= JSMatcher::opIN ) {
- // $in does not use an index (at least yet, should when # of elems is tiny)
- // likewise $ne
- goto fail;
- }
+ // regular query without order by
+ for (int i = 0; i < d->nIndexes; i++ ) {
+ BSONObj idxInfo = d->indexes[i].info.obj(); // { name:, ns:, key: }
+ BSONObj idxKey = idxInfo.getObjectField("key");
+ set<string> keyFields;
+ idxKey.getFieldNames(keyFields);
+
+ // keyFields: e.g. { "name" }
+ bool match = keyFields == queryFields;
+ if ( 0 && !match && queryFields.size() > 1 && simpleKeyMatch == 0 && keyFields.size() == 1 ) {
+ // TEMP
+ string s = *(keyFields.begin());
+ match = queryFields.count(s) == 1;
+ }
- {
- BSONObjIterator k(e.embeddedObject());
- k.next();
- if ( !k.next().eoo() ) {
- /* compound query like { $lt : 9, $gt : 2 }
- for those our method below won't work.
- need more work on "stopOnMiss" in general -- may
- be issues with it. so fix this to use index after
- that is fixed.
- */
- OCCASIONALLY cout << "finish query optimizer for lt gt compound\n";
+ if ( match ) {
+ bool simple = true;
+ //BSONObjBuilder b;
+ BSONObj q = query.extractFieldsUnDotted(idxKey);
+ assert(q.objsize() != 0); // guard against a seg fault if details is 0
+ /* regexp: only supported if form is /^text/ */
+ BSONObjBuilder b2;
+ BSONObjIterator it(q);
+ bool first = true;
+ while ( it.more() ) {
+ BSONElement e = it.next();
+ if ( e.eoo() )
+ break;
+
+ // GT/LT
+ if ( e.type() == Object ) {
+ int op = getGtLtOp(e);
+ if ( op ) {
+ if ( !first || !it.next().eoo() ) {
+ // compound keys with GT/LT not supported yet via index.
goto fail;
}
- }
+ if ( op >= JSMatcher::opIN ) {
+ // $in does not use an index (at least yet, should when # of elems is tiny)
+ // likewise $ne
+ goto fail;
+ }
+
+ {
+ BSONObjIterator k(e.embeddedObject());
+ k.next();
+ if ( !k.next().eoo() ) {
+ /* compound query like { $lt : 9, $gt : 2 }
+ for those our method below won't work.
+ need more work on "stopOnMiss" in general -- may
+ be issues with it. so fix this to use index after
+ that is fixed.
+ */
+ OCCASIONALLY out() << "finish query optimizer for lt gt compound\n";
+ goto fail;
+ }
+ }
- int direction = - JSMatcher::opDirection(op);
- return auto_ptr<Cursor>( new BtreeCursor(
- d->indexes[i],
- BSONObj(),
- direction, query) );
+ int direction = - JSMatcher::opDirection(op);
+ return auto_ptr<Cursor>( new BtreeCursor(
+ d->indexes[i],
+ BSONObj(),
+ direction, query) );
+ }
}
- }
- first = false;
- if ( e.type() == RegEx ) {
- simple = false;
- if ( *e.regexFlags() )
- goto fail;
- const char *re = e.regex();
- const char *p = re;
- if ( *p++ != '^' ) goto fail;
- while ( *p ) {
- if ( *p == ' ' || (*p>='0'&&*p<='9') || (*p>='@'&&*p<='Z') || (*p>='a'&&*p<='z') )
- ;
- else
+ first = false;
+ if ( e.type() == RegEx ) {
+ simple = false;
+ if ( *e.regexFlags() )
goto fail;
- p++;
+ const char *re = e.regex();
+ const char *p = re;
+ if ( *p++ != '^' ) goto fail;
+ while ( *p ) {
+ if ( *p == ' ' || (*p>='0'&&*p<='9') || (*p>='@'&&*p<='Z') || (*p>='a'&&*p<='z') )
+ ;
+ else
+ goto fail;
+ p++;
+ }
+ if ( it.more() && !it.next().eoo() ) // we must be the last part of the key (for now until we are smarter)
+ goto fail;
+ // ok!
+ b2.append(e.fieldName(), re+1);
+ break;
+ }
+ else {
+ b2.append(e);
+ //appendElementHandlingGtLt(b2, e);
}
- if ( it.more() && !it.next().eoo() ) // we must be the last part of the key (for now until we are smarter)
- goto fail;
- // ok!
- b2.append(e.fieldName(), re+1);
- break;
- }
- else {
- b2.append(e);
- //appendElementHandlingGtLt(b2, e);
}
+ BSONObj q2 = b2.done();
+ DEV out() << "using index " << d->indexes[i].indexNamespace() << endl;
+ if ( simple && simpleKeyMatch ) *simpleKeyMatch = true;
+ return auto_ptr<Cursor>(
+ new BtreeCursor(d->indexes[i], q2, 1, query));
}
- BSONObj q2 = b2.done();
- DEV cout << "using index " << d->indexes[i].indexNamespace() << endl;
- if ( simple && simpleKeyMatch ) *simpleKeyMatch = true;
- return auto_ptr<Cursor>(
- new BtreeCursor(d->indexes[i], q2, 1, query));
}
- }
fail:
- DEV cout << "getIndexCursor fail " << ns << '\n';
- return auto_ptr<Cursor>();
-}
-
-/* ns: namespace, e.g. <database>.<collection>
- pattern: the "where" clause / criteria
- justOne: stop after 1 match
-*/
-int deleteObjects(const char *ns, BSONObj pattern, bool justOne, bool god) {
- if ( strstr(ns, ".system.") && !god ) {
- /*if( strstr(ns, ".system.namespaces") ){
- cout << "info: delete on system namespace " << ns << '\n';
- }
- else if( strstr(ns, ".system.indexes") ) {
- cout << "info: delete on system namespace " << ns << '\n';
- }
- else*/ {
- cout << "ERROR: attempt to delete in system namespace " << ns << endl;
- return -1;
- }
- }
-
- int nDeleted = 0;
- BSONObj order;
- auto_ptr<Cursor> c = getIndexCursor(ns, pattern, order);
- if ( c.get() == 0 )
- c = theDataFileMgr.findAll(ns);
- JSMatcher matcher(pattern, c->indexKeyPattern());
-
- while ( c->ok() ) {
- Record *r = c->_current();
- DiskLoc rloc = c->currLoc();
- BSONObj js(r);
-
- bool deep;
- if ( !matcher.matches(js, &deep) ) {
- c->advance(); // advance must be after noMoreMatches() because it uses currKey()
- }
- else {
- c->advance(); // must advance before deleting as the next ptr will die
- assert( !deep || !c->getsetdup(rloc) ); // can't be a dup, we deleted it!
- if ( !justOne )
- c->noteLocation();
-
- theDataFileMgr.deleteRecord(ns, r, rloc);
- nDeleted++;
- if ( justOne )
- break;
- c->checkLocation();
- }
+ DEV out() << "getIndexCursor fail " << ns << '\n';
+ return auto_ptr<Cursor>();
}
- return nDeleted;
-}
-
-struct Mod {
- enum Op { INC, SET } op;
- const char *fieldName;
- double *ndouble;
- int *nint;
- void setn(double n) {
- if ( ndouble ) *ndouble = n;
- else *nint = (int) n;
- }
- double getn() {
- return ndouble ? *ndouble : *nint;
- }
- int type;
- static void getMods(vector<Mod>& mods, BSONObj from);
- static void applyMods(vector<Mod>& mods, BSONObj obj);
-};
-
-void Mod::applyMods(vector<Mod>& mods, BSONObj obj) {
- for ( vector<Mod>::iterator i = mods.begin(); i != mods.end(); i++ ) {
- Mod& m = *i;
- BSONElement e = obj.findElement(m.fieldName);
- if ( e.isNumber() ) {
- if ( m.op == INC ) {
- e.setNumber( e.number() + m.getn() );
- m.setn( e.number() );
- // *m.n = e.number() += *m.n;
- } else {
- e.setNumber( m.getn() ); // $set or $SET
+ /* ns: namespace, e.g. <database>.<collection>
+ pattern: the "where" clause / criteria
+ justOne: stop after 1 match
+ */
+ int deleteObjects(const char *ns, BSONObj pattern, bool justOne, bool god) {
+ if ( strstr(ns, ".system.") && !god ) {
+ /*if( strstr(ns, ".system.namespaces") ){
+ out() << "info: delete on system namespace " << ns << '\n';
}
- }
- }
-}
-
-/* get special operations like $inc
- { $inc: { a:1, b:1 } }
- { $set: { a:77 } }
- NOTE: MODIFIES source from object!
-*/
-void Mod::getMods(vector<Mod>& mods, BSONObj from) {
- BSONObjIterator it(from);
- while ( it.more() ) {
- BSONElement e = it.next();
- const char *fn = e.fieldName();
- if ( *fn == '$' && e.type() == Object &&
- fn[4] == 0 ) {
- BSONObj j = e.embeddedObject();
- BSONObjIterator jt(j);
- Op op = Mod::SET;
- if ( strcmp("$inc",fn) == 0 ) {
- op = Mod::INC;
- // we rename to $SET instead of $set so that on an op like
- // { $set: {x:1}, $inc: {y:1} }
- // we don't get two "$set" fields which isn't allowed
- strcpy((char *) fn, "$SET");
+ else if( strstr(ns, ".system.indexes") ) {
+ out() << "info: delete on system namespace " << ns << '\n';
}
- while ( jt.more() ) {
- BSONElement f = jt.next();
- if ( f.eoo() )
- break;
- Mod m;
- m.op = op;
- m.fieldName = f.fieldName();
- if ( f.isNumber() ) {
- if ( f.type() == NumberDouble ) {
- m.ndouble = (double *) f.value();
- m.nint = 0;
- }
- else {
- m.ndouble = 0;
- m.nint = (int *) f.value();
- }
- mods.push_back( m );
- }
+ else*/ {
+ out() << "ERROR: attempt to delete in system namespace " << ns << endl;
+ return -1;
}
}
- }
-}
-
-/* todo:
- _ smart requery find record immediately
- returns:
- 2: we did applyMods() but didn't logOp()
- 5: we did applyMods() and did logOp() (so don't do it again)
- (clean these up later...)
-*/
-int _updateObjects(const char *ns, BSONObj updateobj, BSONObj pattern, bool upsert, stringstream& ss, bool logop=false) {
- //cout << "TEMP BAD";
- //lrutest.find(updateobj);
-
- int profile = database->profile;
-
- // cout << "update ns:" << ns << " objsize:" << updateobj.objsize() << " queryobjsize:" <<
- // pattern.objsize();
-
- if ( strstr(ns, ".system.") ) {
- cout << "\nERROR: attempt to update in system namespace " << ns << endl;
- ss << " can't update system namespace ";
- return 0;
- }
- int nscanned = 0;
- {
+ int nDeleted = 0;
BSONObj order;
auto_ptr<Cursor> c = getIndexCursor(ns, pattern, order);
if ( c.get() == 0 )
c = theDataFileMgr.findAll(ns);
JSMatcher matcher(pattern, c->indexKeyPattern());
+
while ( c->ok() ) {
Record *r = c->_current();
- nscanned++;
+ DiskLoc rloc = c->currLoc();
BSONObj js(r);
- if ( !matcher.matches(js) ) {
+
+ bool deep;
+ if ( !matcher.matches(js, &deep) ) {
+ c->advance(); // advance must be after noMoreMatches() because it uses currKey()
}
else {
- /* note: we only update one row and quit. if you do multiple later,
- be careful or multikeys in arrays could break things badly. best
- to only allow updating a single row with a multikey lookup.
- */
+ c->advance(); // must advance before deleting as the next ptr will die
+ assert( !deep || !c->getsetdup(rloc) ); // can't be a dup, we deleted it!
+ if ( !justOne )
+ c->noteLocation();
+
+ theDataFileMgr.deleteRecord(ns, r, rloc);
+ nDeleted++;
+ if ( justOne )
+ break;
+ c->checkLocation();
+ }
+ }
- if ( profile )
- ss << " nscanned:" << nscanned;
-
- /* look for $inc etc. note as listed here, all fields to inc must be this type, you can't set some
- regular ones at the moment. */
- const char *firstField = updateobj.firstElement().fieldName();
- if ( firstField[0] == '$' ) {
- vector<Mod> mods;
- Mod::getMods(mods, updateobj);
- NamespaceDetailsTransient& ndt = NamespaceDetailsTransient::get(ns);
- set<string>& idxKeys = ndt.indexKeys();
- for ( vector<Mod>::iterator i = mods.begin(); i != mods.end(); i++ ) {
- if ( idxKeys.count(i->fieldName) ) {
- uassert("can't $inc/$set an indexed field", false);
+ return nDeleted;
+ }
+
+ struct Mod {
+ enum Op { INC, SET } op;
+ const char *fieldName;
+ double *ndouble;
+ int *nint;
+ void setn(double n) {
+ if ( ndouble ) *ndouble = n;
+ else *nint = (int) n;
+ }
+ double getn() {
+ return ndouble ? *ndouble : *nint;
+ }
+ int type;
+ static void getMods(vector<Mod>& mods, BSONObj from);
+ static void applyMods(vector<Mod>& mods, BSONObj obj);
+ };
+
+ void Mod::applyMods(vector<Mod>& mods, BSONObj obj) {
+ for ( vector<Mod>::iterator i = mods.begin(); i != mods.end(); i++ ) {
+ Mod& m = *i;
+ BSONElement e = obj.findElement(m.fieldName);
+ if ( e.isNumber() ) {
+ if ( m.op == INC ) {
+ e.setNumber( e.number() + m.getn() );
+ m.setn( e.number() );
+ // *m.n = e.number() += *m.n;
+ } else {
+ e.setNumber( m.getn() ); // $set or $SET
+ }
+ }
+ }
+ }
+
+ /* get special operations like $inc
+ { $inc: { a:1, b:1 } }
+ { $set: { a:77 } }
+ NOTE: MODIFIES source from object!
+ */
+ void Mod::getMods(vector<Mod>& mods, BSONObj from) {
+ BSONObjIterator it(from);
+ while ( it.more() ) {
+ BSONElement e = it.next();
+ const char *fn = e.fieldName();
+ if ( *fn == '$' && e.type() == Object &&
+ fn[4] == 0 ) {
+ BSONObj j = e.embeddedObject();
+ BSONObjIterator jt(j);
+ Op op = Mod::SET;
+ if ( strcmp("$inc",fn) == 0 ) {
+ op = Mod::INC;
+ // we rename to $SET instead of $set so that on an op like
+ // { $set: {x:1}, $inc: {y:1} }
+ // we don't get two "$set" fields which isn't allowed
+ strcpy((char *) fn, "$SET");
+ }
+ while ( jt.more() ) {
+ BSONElement f = jt.next();
+ if ( f.eoo() )
+ break;
+ Mod m;
+ m.op = op;
+ m.fieldName = f.fieldName();
+ if ( f.isNumber() ) {
+ if ( f.type() == NumberDouble ) {
+ m.ndouble = (double *) f.value();
+ m.nint = 0;
}
+ else {
+ m.ndouble = 0;
+ m.nint = (int *) f.value();
+ }
+ mods.push_back( m );
}
+ }
+ }
+ }
+ }
+
+ /* todo:
+ _ smart requery find record immediately
+ returns:
+ 2: we did applyMods() but didn't logOp()
+ 5: we did applyMods() and did logOp() (so don't do it again)
+ (clean these up later...)
+ */
+ int _updateObjects(const char *ns, BSONObj updateobj, BSONObj pattern, bool upsert, stringstream& ss, bool logop=false) {
+ //out() << "TEMP BAD";
+ //lrutest.find(updateobj);
+
+ int profile = database->profile;
+
+ // out() << "update ns:" << ns << " objsize:" << updateobj.objsize() << " queryobjsize:" <<
+ // pattern.objsize();
+
+ if ( strstr(ns, ".system.") ) {
+ out() << "\nERROR: attempt to update in system namespace " << ns << endl;
+ ss << " can't update system namespace ";
+ return 0;
+ }
+
+ int nscanned = 0;
+ {
+ BSONObj order;
+ auto_ptr<Cursor> c = getIndexCursor(ns, pattern, order);
+ if ( c.get() == 0 )
+ c = theDataFileMgr.findAll(ns);
+ JSMatcher matcher(pattern, c->indexKeyPattern());
+ while ( c->ok() ) {
+ Record *r = c->_current();
+ nscanned++;
+ BSONObj js(r);
+ if ( !matcher.matches(js) ) {
+ }
+ else {
+ /* note: we only update one row and quit. if you do multiple later,
+ be careful or multikeys in arrays could break things badly. best
+ to only allow updating a single row with a multikey lookup.
+ */
- Mod::applyMods(mods, c->currLoc().obj());
if ( profile )
- ss << " fastmod ";
- if ( logop ) {
- if ( mods.size() ) {
- logOp("u", ns, updateobj, &pattern, &upsert);
- return 5;
+ ss << " nscanned:" << nscanned;
+
+ /* look for $inc etc. note as listed here, all fields to inc must be this type, you can't set some
+ regular ones at the moment. */
+ const char *firstField = updateobj.firstElement().fieldName();
+ if ( firstField[0] == '$' ) {
+ vector<Mod> mods;
+ Mod::getMods(mods, updateobj);
+ NamespaceDetailsTransient& ndt = NamespaceDetailsTransient::get(ns);
+ set<string>& idxKeys = ndt.indexKeys();
+ for ( vector<Mod>::iterator i = mods.begin(); i != mods.end(); i++ ) {
+ if ( idxKeys.count(i->fieldName) ) {
+ uassert("can't $inc/$set an indexed field", false);
+ }
}
+
+ Mod::applyMods(mods, c->currLoc().obj());
+ if ( profile )
+ ss << " fastmod ";
+ if ( logop ) {
+ if ( mods.size() ) {
+ logOp("u", ns, updateobj, &pattern, &upsert);
+ return 5;
+ }
+ }
+ return 2;
}
- return 2;
- }
- theDataFileMgr.update(ns, r, c->currLoc(), updateobj.objdata(), updateobj.objsize(), ss);
- return 1;
+ theDataFileMgr.update(ns, r, c->currLoc(), updateobj.objdata(), updateobj.objsize(), ss);
+ return 1;
+ }
+ c->advance();
}
- c->advance();
}
- }
- if ( profile )
- ss << " nscanned:" << nscanned;
-
- if ( upsert ) {
- if ( updateobj.firstElement().fieldName()[0] == '$' ) {
- /* upsert of an $inc. build a default */
- vector<Mod> mods;
- Mod::getMods(mods, updateobj);
- BSONObjBuilder b;
- b.appendElements(pattern);
- for ( vector<Mod>::iterator i = mods.begin(); i != mods.end(); i++ )
- b.append(i->fieldName, i->getn());
- BSONObj obj = b.done();
- theDataFileMgr.insert(ns, (void*) obj.objdata(), obj.objsize());
+ if ( profile )
+ ss << " nscanned:" << nscanned;
+
+ if ( upsert ) {
+ if ( updateobj.firstElement().fieldName()[0] == '$' ) {
+ /* upsert of an $inc. build a default */
+ vector<Mod> mods;
+ Mod::getMods(mods, updateobj);
+ BSONObjBuilder b;
+ b.appendElements(pattern);
+ for ( vector<Mod>::iterator i = mods.begin(); i != mods.end(); i++ )
+ b.append(i->fieldName, i->getn());
+ BSONObj obj = b.done();
+ theDataFileMgr.insert(ns, (void*) obj.objdata(), obj.objsize());
+ if ( profile )
+ ss << " fastmodinsert ";
+ return 3;
+ }
if ( profile )
- ss << " fastmodinsert ";
- return 3;
+ ss << " upsert ";
+ theDataFileMgr.insert(ns, (void*) updateobj.objdata(), updateobj.objsize());
+ return 4;
}
- if ( profile )
- ss << " upsert ";
- theDataFileMgr.insert(ns, (void*) updateobj.objdata(), updateobj.objsize());
- return 4;
+ return 0;
+ }
+ /* todo: we can optimize replication by just doing insert when an upsert triggers.
+ */
+ void updateObjects(const char *ns, BSONObj updateobj, BSONObj pattern, bool upsert, stringstream& ss) {
+ int rc = _updateObjects(ns, updateobj, pattern, upsert, ss, true);
+ if ( rc != 5 )
+ logOp("u", ns, updateobj, &pattern, &upsert);
}
- return 0;
-}
-/* todo: we can optimize replication by just doing insert when an upsert triggers.
-*/
-void updateObjects(const char *ns, BSONObj updateobj, BSONObj pattern, bool upsert, stringstream& ss) {
- int rc = _updateObjects(ns, updateobj, pattern, upsert, ss, true);
- if ( rc != 5 )
- logOp("u", ns, updateobj, &pattern, &upsert);
-}
-int queryTraceLevel = 0;
-int otherTraceLevel = 0;
+ int queryTraceLevel = 0;
+ int otherTraceLevel = 0;
-int initialExtentSize(int len);
+ int initialExtentSize(int len);
-bool _runCommands(const char *ns, BSONObj& jsobj, stringstream& ss, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl);
+ bool _runCommands(const char *ns, BSONObj& jsobj, stringstream& ss, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl);
-bool runCommands(const char *ns, BSONObj& jsobj, stringstream& ss, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl) {
- try {
- return _runCommands(ns, jsobj, ss, b, anObjBuilder, fromRepl);
- }
- catch ( AssertionException e ) {
- if ( !e.msg.empty() )
- anObjBuilder.append("assertion", e.msg);
- }
- ss << " assertion ";
- anObjBuilder.append("errmsg", "db assertion failure");
- anObjBuilder.append("ok", 0.0);
- BSONObj x = anObjBuilder.done();
- b.append((void*) x.objdata(), x.objsize());
- return true;
-}
-
-int nCaught = 0;
-
-void killCursors(int n, long long *ids) {
- int k = 0;
- for ( int i = 0; i < n; i++ ) {
- if ( ClientCursor::erase(ids[i]) )
- k++;
+ bool runCommands(const char *ns, BSONObj& jsobj, stringstream& ss, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl) {
+ try {
+ return _runCommands(ns, jsobj, ss, b, anObjBuilder, fromRepl);
+ }
+ catch ( AssertionException e ) {
+ if ( !e.msg.empty() )
+ anObjBuilder.append("assertion", e.msg);
+ }
+ ss << " assertion ";
+ anObjBuilder.append("errmsg", "db assertion failure");
+ anObjBuilder.append("ok", 0.0);
+ BSONObj x = anObjBuilder.done();
+ b.append((void*) x.objdata(), x.objsize());
+ return true;
}
- log() << "killCursors: found " << k << " of " << n << '\n';
-}
-BSONObj id_obj = fromjson("{\"_id\":ObjectId( \"000000000000000000000000\" )}");
-BSONObj empty_obj = fromjson("{}");
+ int nCaught = 0;
-/* { count: "collectionname"[, query: <query>] }
- returns -1 on error.
-*/
-int runCount(const char *ns, BSONObj& cmd, string& err) {
- NamespaceDetails *d = nsdetails(ns);
- if ( d == 0 ) {
- err = "ns does not exist";
- return -1;
+ void killCursors(int n, long long *ids) {
+ int k = 0;
+ for ( int i = 0; i < n; i++ ) {
+ if ( ClientCursor::erase(ids[i]) )
+ k++;
+ }
+ log() << "killCursors: found " << k << " of " << n << '\n';
}
- BSONObj query = cmd.getObjectField("query");
+ BSONObj id_obj = fromjson("{\"_id\":ObjectId( \"000000000000000000000000\" )}");
+ BSONObj empty_obj = fromjson("{}");
- if ( query.isEmpty() ) {
- // count of all objects
- return (int) d->nrecords;
- }
+ /* { count: "collectionname"[, query: <query>] }
+ returns -1 on error.
+ */
+ int runCount(const char *ns, BSONObj& cmd, string& err) {
+ NamespaceDetails *d = nsdetails(ns);
+ if ( d == 0 ) {
+ err = "ns does not exist";
+ return -1;
+ }
- auto_ptr<Cursor> c;
+ BSONObj query = cmd.getObjectField("query");
- bool simpleKeyToMatch = false;
- c = getIndexCursor(ns, query, empty_obj, &simpleKeyToMatch);
+ if ( query.isEmpty() ) {
+ // count of all objects
+ return (int) d->nrecords;
+ }
- if ( c.get() ) {
- if ( simpleKeyToMatch ) {
- /* Here we only look at the btree keys to determine if a match, instead of looking
- into the records, which would be much slower.
- */
- int count = 0;
- BtreeCursor *bc = dynamic_cast<BtreeCursor *>(c.get());
- if ( c->ok() && !query.woCompare( bc->currKeyNode().key, BSONObj(), false ) ) {
- BSONObj firstMatch = bc->currKeyNode().key;
- count++;
- while ( c->advance() ) {
- if ( !firstMatch.woEqual( bc->currKeyNode().key ) )
- break;
+ auto_ptr<Cursor> c;
+
+ bool simpleKeyToMatch = false;
+ c = getIndexCursor(ns, query, empty_obj, &simpleKeyToMatch);
+
+ if ( c.get() ) {
+ if ( simpleKeyToMatch ) {
+ /* Here we only look at the btree keys to determine if a match, instead of looking
+ into the records, which would be much slower.
+ */
+ int count = 0;
+ BtreeCursor *bc = dynamic_cast<BtreeCursor *>(c.get());
+ if ( c->ok() && !query.woCompare( bc->currKeyNode().key, BSONObj(), false ) ) {
+ BSONObj firstMatch = bc->currKeyNode().key;
count++;
+ while ( c->advance() ) {
+ if ( !firstMatch.woEqual( bc->currKeyNode().key ) )
+ break;
+ count++;
+ }
}
+ return count;
}
- return count;
+ } else {
+ c = findTableScan(ns, empty_obj);
}
- } else {
- c = findTableScan(ns, empty_obj);
- }
- int count = 0;
- auto_ptr<JSMatcher> matcher(new JSMatcher(query, c->indexKeyPattern()));
- while ( c->ok() ) {
- BSONObj js = c->current();
- bool deep;
- if ( !matcher->matches(js, &deep) ) {
- }
- else if ( !deep || !c->getsetdup(c->currLoc()) ) { // i.e., check for dups on deep items only
- // got a match.
- count++;
+ int count = 0;
+ auto_ptr<JSMatcher> matcher(new JSMatcher(query, c->indexKeyPattern()));
+ while ( c->ok() ) {
+ BSONObj js = c->current();
+ bool deep;
+ if ( !matcher->matches(js, &deep) ) {
+ }
+ else if ( !deep || !c->getsetdup(c->currLoc()) ) { // i.e., check for dups on deep items only
+ // got a match.
+ count++;
+ }
+ c->advance();
}
- c->advance();
+ return count;
}
- return count;
-}
-/* This is for languages whose "objects" are not well ordered (JSON is well ordered).
- [ { a : ... } , { b : ... } ] -> { a : ..., b : ... }
-*/
-inline BSONObj transformOrderFromArrayFormat(BSONObj order) {
- /* note: this is slow, but that is ok as order will have very few pieces */
- BSONObjBuilder b;
- char p[2] = "0";
-
- while ( 1 ) {
- BSONObj j = order.getObjectField(p);
- if ( j.isEmpty() )
- break;
- BSONElement e = j.firstElement();
- uassert("bad order array", !e.eoo());
- uassert("bad order array [2]", e.isNumber());
- b.append(e);
- (*p)++;
- uassert("too many ordering elements", *p <= '9');
- }
+ /* This is for languages whose "objects" are not well ordered (JSON is well ordered).
+ [ { a : ... } , { b : ... } ] -> { a : ..., b : ... }
+ */
+ inline BSONObj transformOrderFromArrayFormat(BSONObj order) {
+ /* note: this is slow, but that is ok as order will have very few pieces */
+ BSONObjBuilder b;
+ char p[2] = "0";
- return b.doneAndDecouple();
-}
-
-QueryResult* runQuery(Message& message, const char *ns, int ntoskip, int _ntoreturn, BSONObj jsobj,
- auto_ptr< set<string> > filter, stringstream& ss, int queryOptions)
-{
- Timer t;
- int nscanned = 0;
- bool wantMore = true;
- int ntoreturn = _ntoreturn;
- if ( _ntoreturn < 0 ) {
- ntoreturn = -_ntoreturn;
- wantMore = false;
- }
- ss << "query " << ns << " ntoreturn:" << ntoreturn;
+ while ( 1 ) {
+ BSONObj j = order.getObjectField(p);
+ if ( j.isEmpty() )
+ break;
+ BSONElement e = j.firstElement();
+ uassert("bad order array", !e.eoo());
+ uassert("bad order array [2]", e.isNumber());
+ b.append(e);
+ (*p)++;
+ uassert("too many ordering elements", *p <= '9');
+ }
- int n = 0;
- BufBuilder b(32768);
- BSONObjBuilder cmdResBuf;
- long long cursorid = 0;
+ return b.doneAndDecouple();
+ }
- b.skip(sizeof(QueryResult));
+ QueryResult* runQuery(Message& message, const char *ns, int ntoskip, int _ntoreturn, BSONObj jsobj,
+ auto_ptr< set<string> > filter, stringstream& ss, int queryOptions)
+ {
+ Timer t;
+ int nscanned = 0;
+ bool wantMore = true;
+ int ntoreturn = _ntoreturn;
+ if ( _ntoreturn < 0 ) {
+ ntoreturn = -_ntoreturn;
+ wantMore = false;
+ }
+ ss << "query " << ns << " ntoreturn:" << ntoreturn;
- /* we assume you are using findOne() for running a cmd... */
- if ( ntoreturn == 1 && runCommands(ns, jsobj, ss, b, cmdResBuf, false) ) {
- n = 1;
- }
- else {
+ int n = 0;
+ BufBuilder b(32768);
+ BSONObjBuilder cmdResBuf;
+ long long cursorid = 0;
- uassert("not master", isMaster() || (queryOptions & Option_SlaveOk));
+ b.skip(sizeof(QueryResult));
- string hint;
- bool explain = false;
- bool _gotquery = false;
- BSONObj query;// = jsobj.getObjectField("query");
- {
- BSONElement e = jsobj.findElement("query");
- if ( !e.eoo() && (e.type() == Object || e.type() == Array) ) {
- query = e.embeddedObject();
- _gotquery = true;
- }
- }
- BSONObj order;
- {
- BSONElement e = jsobj.findElement("orderby");
- if ( !e.eoo() ) {
- order = e.embeddedObjectUserCheck();
- if ( e.type() == Array )
- order = transformOrderFromArrayFormat(order);
- }
+ /* we assume you are using findOne() for running a cmd... */
+ if ( ntoreturn == 1 && runCommands(ns, jsobj, ss, b, cmdResBuf, false) ) {
+ n = 1;
}
- if ( !_gotquery && order.isEmpty() )
- query = jsobj;
else {
- explain = jsobj.getBoolField("$explain");
- hint = jsobj.getStringField("$hint");
- }
-
- /* The ElemIter will not be happy if this isn't really an object. So throw exception
- here when that is true.
- (Which may indicate bad data from appserver?)
- */
- if ( query.objsize() == 0 ) {
- cout << "Bad query object?\n jsobj:";
- cout << jsobj.toString() << "\n query:";
- cout << query.toString() << endl;
- uassert("bad query object", false);
- }
-
- bool isSorted = false;
- auto_ptr<Cursor> c = getSpecialCursor(ns);
- if ( c.get() == 0 )
- c = getIndexCursor(ns, query, order, 0, &isSorted, &hint);
- if ( c.get() == 0 )
- c = findTableScan(ns, order, &isSorted);
+ uassert("not master", isMaster() || (queryOptions & Option_SlaveOk));
+
+ string hint;
+ bool explain = false;
+ bool _gotquery = false;
+ BSONObj query;// = jsobj.getObjectField("query");
+ {
+ BSONElement e = jsobj.findElement("query");
+ if ( !e.eoo() && (e.type() == Object || e.type() == Array) ) {
+ query = e.embeddedObject();
+ _gotquery = true;
+ }
+ }
+ BSONObj order;
+ {
+ BSONElement e = jsobj.findElement("orderby");
+ if ( !e.eoo() ) {
+ order = e.embeddedObjectUserCheck();
+ if ( e.type() == Array )
+ order = transformOrderFromArrayFormat(order);
+ }
+ }
+ if ( !_gotquery && order.isEmpty() )
+ query = jsobj;
+ else {
+ explain = jsobj.getBoolField("$explain");
+ hint = jsobj.getStringField("$hint");
+ }
- auto_ptr<JSMatcher> matcher(new JSMatcher(query, c->indexKeyPattern()));
- JSMatcher &debug1 = *matcher;
- assert( debug1.getN() < 1000 );
-
- auto_ptr<ScanAndOrder> so;
- bool ordering = false;
- if ( !order.isEmpty() && !isSorted ) {
- ordering = true;
- ss << " scanAndOrder ";
- so = auto_ptr<ScanAndOrder>(new ScanAndOrder(ntoskip, ntoreturn,order));
- wantMore = false;
- // scanAndOrder(b, c.get(), order, ntoreturn);
- }
+ /* The ElemIter will not be happy if this isn't really an object. So throw exception
+ here when that is true.
+ (Which may indicate bad data from appserver?)
+ */
+ if ( query.objsize() == 0 ) {
+ out() << "Bad query object?\n jsobj:";
+ out() << jsobj.toString() << "\n query:";
+ out() << query.toString() << endl;
+ uassert("bad query object", false);
+ }
- while ( c->ok() ) {
- BSONObj js = c->current();
- //if( queryTraceLevel >= 50 )
- // cout << " checking against:\n " << js.toString() << endl;
- nscanned++;
- bool deep;
- if ( !matcher->matches(js, &deep) ) {
+ bool isSorted = false;
+ auto_ptr<Cursor> c = getSpecialCursor(ns);
+
+ if ( c.get() == 0 )
+ c = getIndexCursor(ns, query, order, 0, &isSorted, &hint);
+ if ( c.get() == 0 )
+ c = findTableScan(ns, order, &isSorted);
+
+ auto_ptr<JSMatcher> matcher(new JSMatcher(query, c->indexKeyPattern()));
+ JSMatcher &debug1 = *matcher;
+ assert( debug1.getN() < 1000 );
+
+ auto_ptr<ScanAndOrder> so;
+ bool ordering = false;
+ if ( !order.isEmpty() && !isSorted ) {
+ ordering = true;
+ ss << " scanAndOrder ";
+ so = auto_ptr<ScanAndOrder>(new ScanAndOrder(ntoskip, ntoreturn,order));
+ wantMore = false;
+ // scanAndOrder(b, c.get(), order, ntoreturn);
}
- else if ( !deep || !c->getsetdup(c->currLoc()) ) { // i.e., check for dups on deep items only
- // got a match.
- assert( js.objsize() >= 0 ); //defensive for segfaults
- if ( ordering ) {
- // note: no cursors for non-indexed, ordered results. results must be fairly small.
- so->add(js);
+
+ while ( c->ok() ) {
+ BSONObj js = c->current();
+ //if( queryTraceLevel >= 50 )
+ // out() << " checking against:\n " << js.toString() << endl;
+ nscanned++;
+ bool deep;
+ if ( !matcher->matches(js, &deep) ) {
}
- else if ( ntoskip > 0 ) {
- ntoskip--;
- } else {
- if ( explain ) {
- n++;
- if ( n >= ntoreturn && !wantMore )
- break; // .limit() was used, show just that much.
+ else if ( !deep || !c->getsetdup(c->currLoc()) ) { // i.e., check for dups on deep items only
+ // got a match.
+ assert( js.objsize() >= 0 ); //defensive for segfaults
+ if ( ordering ) {
+ // note: no cursors for non-indexed, ordered results. results must be fairly small.
+ so->add(js);
}
- else {
- bool ok = fillQueryResultFromObj(b, filter.get(), js);
- if ( ok ) n++;
- if ( ok ) {
- if ( (ntoreturn>0 && (n >= ntoreturn || b.len() > MaxBytesToReturnToClientAtOnce)) ||
- (ntoreturn==0 && (b.len()>1*1024*1024 || n>=101)) ) {
- /* if ntoreturn is zero, we return up to 101 objects. on the subsequent getmore, there
- is only a size limit. The idea is that on a find() where one doesn't use much results,
- we don't return much, but once getmore kicks in, we start pushing significant quantities.
-
- The n limit (vs. size) is important when someone fetches only one small field from big
- objects, which causes massive scanning server-side.
- */
- /* if only 1 requested, no cursor saved for efficiency...we assume it is findOne() */
- if ( wantMore && ntoreturn != 1 ) {
- if ( useCursors ) {
- c->advance();
- if ( c->ok() ) {
- // more...so save a cursor
- ClientCursor *cc = new ClientCursor();
- cc->c = c;
- cursorid = cc->cursorid;
- DEV cout << " query has more, cursorid: " << cursorid << endl;
- //cc->pattern = query;
- cc->matcher = matcher;
- cc->ns = ns;
- cc->pos = n;
- cc->filter = filter;
- cc->originalMessage = message;
- cc->updateLocation();
+ else if ( ntoskip > 0 ) {
+ ntoskip--;
+ } else {
+ if ( explain ) {
+ n++;
+ if ( n >= ntoreturn && !wantMore )
+ break; // .limit() was used, show just that much.
+ }
+ else {
+ bool ok = fillQueryResultFromObj(b, filter.get(), js);
+ if ( ok ) n++;
+ if ( ok ) {
+ if ( (ntoreturn>0 && (n >= ntoreturn || b.len() > MaxBytesToReturnToClientAtOnce)) ||
+ (ntoreturn==0 && (b.len()>1*1024*1024 || n>=101)) ) {
+ /* if ntoreturn is zero, we return up to 101 objects. on the subsequent getmore, there
+ is only a size limit. The idea is that on a find() where one doesn't use much results,
+ we don't return much, but once getmore kicks in, we start pushing significant quantities.
+
+ The n limit (vs. size) is important when someone fetches only one small field from big
+ objects, which causes massive scanning server-side.
+ */
+ /* if only 1 requested, no cursor saved for efficiency...we assume it is findOne() */
+ if ( wantMore && ntoreturn != 1 ) {
+ if ( useCursors ) {
+ c->advance();
+ if ( c->ok() ) {
+ // more...so save a cursor
+ ClientCursor *cc = new ClientCursor();
+ cc->c = c;
+ cursorid = cc->cursorid;
+ DEV out() << " query has more, cursorid: " << cursorid << endl;
+ //cc->pattern = query;
+ cc->matcher = matcher;
+ cc->ns = ns;
+ cc->pos = n;
+ cc->filter = filter;
+ cc->originalMessage = message;
+ cc->updateLocation();
+ }
}
}
+ break;
}
- break;
}
}
}
}
+ c->advance();
+ } // end while
+
+ if ( explain ) {
+ BSONObjBuilder builder;
+ builder.append("cursor", c->toString());
+ builder.append("nscanned", nscanned);
+ builder.append("n", ordering ? so->size() : n);
+ if ( ordering )
+ builder.append("scanAndOrder", true);
+ builder.append("millis", t.millis());
+ BSONObj obj = builder.done();
+ fillQueryResultFromObj(b, 0, obj);
+ n = 1;
+ } else if ( ordering ) {
+ so->fill(b, filter.get(), n);
+ }
+ else if ( cursorid == 0 && (queryOptions & Option_CursorTailable) && c->tailable() ) {
+ c->setAtTail();
+ ClientCursor *cc = new ClientCursor();
+ cc->c = c;
+ cursorid = cc->cursorid;
+ DEV out() << " query has no more but tailable, cursorid: " << cursorid << endl;
+ //cc->pattern = query;
+ cc->matcher = matcher;
+ cc->ns = ns;
+ cc->pos = n;
+ cc->filter = filter;
+ cc->originalMessage = message;
+ cc->updateLocation();
}
- c->advance();
- } // end while
-
- if ( explain ) {
- BSONObjBuilder builder;
- builder.append("cursor", c->toString());
- builder.append("nscanned", nscanned);
- builder.append("n", ordering ? so->size() : n);
- if ( ordering )
- builder.append("scanAndOrder", true);
- builder.append("millis", t.millis());
- BSONObj obj = builder.done();
- fillQueryResultFromObj(b, 0, obj);
- n = 1;
- } else if ( ordering ) {
- so->fill(b, filter.get(), n);
- }
- else if ( cursorid == 0 && (queryOptions & Option_CursorTailable) && c->tailable() ) {
- c->setAtTail();
- ClientCursor *cc = new ClientCursor();
- cc->c = c;
- cursorid = cc->cursorid;
- DEV cout << " query has no more but tailable, cursorid: " << cursorid << endl;
- //cc->pattern = query;
- cc->matcher = matcher;
- cc->ns = ns;
- cc->pos = n;
- cc->filter = filter;
- cc->originalMessage = message;
- cc->updateLocation();
}
- }
- QueryResult *qr = (QueryResult *) b.buf();
- qr->resultFlags() = 0;
- qr->len = b.len();
- ss << " reslen:" << b.len();
- // qr->channel = 0;
- qr->setOperation(opReply);
- qr->cursorId = cursorid;
- qr->startingFrom = 0;
- qr->nReturned = n;
- b.decouple();
-
- int duration = t.millis();
- if ( (database && database->profile) || duration >= 100 ) {
- ss << " nscanned:" << nscanned << ' ';
- if ( ntoskip )
- ss << " ntoskip:" << ntoskip;
- if ( database && database->profile )
- ss << " <br>query: ";
- ss << jsobj.toString() << ' ';
+ QueryResult *qr = (QueryResult *) b.buf();
+ qr->resultFlags() = 0;
+ qr->len = b.len();
+ ss << " reslen:" << b.len();
+ // qr->channel = 0;
+ qr->setOperation(opReply);
+ qr->cursorId = cursorid;
+ qr->startingFrom = 0;
+ qr->nReturned = n;
+ b.decouple();
+
+ int duration = t.millis();
+ if ( (database && database->profile) || duration >= 100 ) {
+ ss << " nscanned:" << nscanned << ' ';
+ if ( ntoskip )
+ ss << " ntoskip:" << ntoskip;
+ if ( database && database->profile )
+ ss << " <br>query: ";
+ ss << jsobj.toString() << ' ';
+ }
+ ss << " nreturned:" << n;
+ return qr;
}
- ss << " nreturned:" << n;
- return qr;
-}
//int dump = 0;
-/* empty result for error conditions */
-QueryResult* emptyMoreResult(long long cursorid) {
- BufBuilder b(32768);
- b.skip(sizeof(QueryResult));
- QueryResult *qr = (QueryResult *) b.buf();
- qr->cursorId = 0; // 0 indicates no more data to retrieve.
- qr->startingFrom = 0;
- qr->len = b.len();
- qr->setOperation(opReply);
- qr->nReturned = 0;
- b.decouple();
- return qr;
-}
-
-QueryResult* getMore(const char *ns, int ntoreturn, long long cursorid) {
- BufBuilder b(32768);
-
- ClientCursor *cc = ClientCursor::find(cursorid);
-
- b.skip(sizeof(QueryResult));
-
- int resultFlags = 0;
- int start = 0;
- int n = 0;
-
- if ( !cc ) {
- DEV log() << "getMore: cursorid not found " << ns << " " << cursorid << endl;
- cursorid = 0;
- resultFlags = QueryResult::ResultFlag_CursorNotFound;
+ /* empty result for error conditions */
+ QueryResult* emptyMoreResult(long long cursorid) {
+ BufBuilder b(32768);
+ b.skip(sizeof(QueryResult));
+ QueryResult *qr = (QueryResult *) b.buf();
+ qr->cursorId = 0; // 0 indicates no more data to retrieve.
+ qr->startingFrom = 0;
+ qr->len = b.len();
+ qr->setOperation(opReply);
+ qr->nReturned = 0;
+ b.decouple();
+ return qr;
}
- else {
- start = cc->pos;
- Cursor *c = cc->c.get();
- c->checkLocation();
- c->tailResume();
- while ( 1 ) {
- if ( !c->ok() ) {
- if ( c->tailing() ) {
- c->setAtTail();
+
+ QueryResult* getMore(const char *ns, int ntoreturn, long long cursorid) {
+ BufBuilder b(32768);
+
+ ClientCursor *cc = ClientCursor::find(cursorid);
+
+ b.skip(sizeof(QueryResult));
+
+ int resultFlags = 0;
+ int start = 0;
+ int n = 0;
+
+ if ( !cc ) {
+ DEV log() << "getMore: cursorid not found " << ns << " " << cursorid << endl;
+ cursorid = 0;
+ resultFlags = QueryResult::ResultFlag_CursorNotFound;
+ }
+ else {
+ start = cc->pos;
+ Cursor *c = cc->c.get();
+ c->checkLocation();
+ c->tailResume();
+ while ( 1 ) {
+ if ( !c->ok() ) {
+ if ( c->tailing() ) {
+ c->setAtTail();
+ break;
+ }
+ DEV log() << " getmore: last batch, erasing cursor " << cursorid << endl;
+ bool ok = ClientCursor::erase(cursorid);
+ assert(ok);
+ cursorid = 0;
+ cc = 0;
break;
}
- DEV log() << " getmore: last batch, erasing cursor " << cursorid << endl;
- bool ok = ClientCursor::erase(cursorid);
- assert(ok);
- cursorid = 0;
- cc = 0;
- break;
- }
- BSONObj js = c->current();
+ BSONObj js = c->current();
- bool deep;
- if ( !cc->matcher->matches(js, &deep) ) {
- }
- else {
- //cout << "matches " << c->currLoc().toString() << ' ' << deep << '\n';
- if ( deep && c->getsetdup(c->currLoc()) ) {
- //cout << " but it's a dup \n";
+ bool deep;
+ if ( !cc->matcher->matches(js, &deep) ) {
}
else {
- bool ok = fillQueryResultFromObj(b, cc->filter.get(), js);
- if ( ok ) {
- n++;
- if ( (ntoreturn>0 && (n >= ntoreturn || b.len() > MaxBytesToReturnToClientAtOnce)) ||
- (ntoreturn==0 && b.len()>1*1024*1024) ) {
- c->advance();
- if ( c->tailing() && !c->ok() )
- c->setAtTail();
- cc->pos += n;
- //cc->updateLocation();
- break;
+ //out() << "matches " << c->currLoc().toString() << ' ' << deep << '\n';
+ if ( deep && c->getsetdup(c->currLoc()) ) {
+ //out() << " but it's a dup \n";
+ }
+ else {
+ bool ok = fillQueryResultFromObj(b, cc->filter.get(), js);
+ if ( ok ) {
+ n++;
+ if ( (ntoreturn>0 && (n >= ntoreturn || b.len() > MaxBytesToReturnToClientAtOnce)) ||
+ (ntoreturn==0 && b.len()>1*1024*1024) ) {
+ c->advance();
+ if ( c->tailing() && !c->ok() )
+ c->setAtTail();
+ cc->pos += n;
+ //cc->updateLocation();
+ break;
+ }
}
}
}
+ c->advance();
}
- c->advance();
+ if ( cc )
+ cc->updateLocation();
}
- if ( cc )
- cc->updateLocation();
- }
- QueryResult *qr = (QueryResult *) b.buf();
- qr->len = b.len();
- qr->setOperation(opReply);
- qr->resultFlags() = resultFlags;
- qr->cursorId = cursorid;
- qr->startingFrom = start;
- qr->nReturned = n;
- b.decouple();
-
- return qr;
-}
+ QueryResult *qr = (QueryResult *) b.buf();
+ qr->len = b.len();
+ qr->setOperation(opReply);
+ qr->resultFlags() = resultFlags;
+ qr->cursorId = cursorid;
+ qr->startingFrom = start;
+ qr->nReturned = n;
+ b.decouple();
+
+ return qr;
+ }
} // namespace mongo
diff --git a/db/query.h b/db/query.h
index 4a121b8c1d7..2c8ea78b2ab 100644
--- a/db/query.h
+++ b/db/query.h
@@ -69,16 +69,16 @@
namespace mongo {
// for an existing query (ie a ClientCursor), send back additional information.
-QueryResult* getMore(const char *ns, int ntoreturn, long long cursorid);
+ QueryResult* getMore(const char *ns, int ntoreturn, long long cursorid);
// caller must free() returned QueryResult.
-QueryResult* runQuery(Message&, const char *ns, int ntoskip, int ntoreturn,
- BSONObj j, auto_ptr< set<string> > fieldFilter,
- stringstream&, int queryOptions);
+ QueryResult* runQuery(Message&, const char *ns, int ntoskip, int ntoreturn,
+ BSONObj j, auto_ptr< set<string> > fieldFilter,
+ stringstream&, int queryOptions);
-void updateObjects(const char *ns, BSONObj updateobj, BSONObj pattern, bool upsert, stringstream& ss);
+ void updateObjects(const char *ns, BSONObj updateobj, BSONObj pattern, bool upsert, stringstream& ss);
-int deleteObjects(const char *ns, BSONObj pattern, bool justOne, bool god=false);
+ int deleteObjects(const char *ns, BSONObj pattern, bool justOne, bool god=false);
} // namespace mongo
diff --git a/db/queryoptimizer.cpp b/db/queryoptimizer.cpp
index 9393796c29b..456afe92bc8 100644
--- a/db/queryoptimizer.cpp
+++ b/db/queryoptimizer.cpp
@@ -32,17 +32,17 @@
namespace mongo {
-QueryPlan QueryOptimizer::getPlan(
- const char *ns,
- BSONObj* query,
- BSONObj* order,
- BSONObj* hint)
-{
- QueryPlan plan;
+ QueryPlan QueryOptimizer::getPlan(
+ const char *ns,
+ BSONObj* query,
+ BSONObj* order,
+ BSONObj* hint)
+ {
+ QueryPlan plan;
- return plan;
-}
+ return plan;
+ }
} // namespace mongo
diff --git a/db/queryoptimizer.h b/db/queryoptimizer.h
index c00b00bea72..a1af76e1718 100644
--- a/db/queryoptimizer.h
+++ b/db/queryoptimizer.h
@@ -20,34 +20,34 @@
namespace mongo {
-class QueryPlan {
-public:
- QueryPlan() {
- scanAndOrderRequired = false;
- simpleKeyMatch = false;
- }
-
- auto_ptr<Cursor> cursor;
-
- /* ScanAndOrder processing will be required if true */
- bool scanAndOrderRequired;
-
- /* When true, the index we are using has keys such that it can completely resolve the
- query expression to match by itself without ever checking the main object.
- */
- bool simpleKeyMatch;
-};
-
-/* We put these objects inside the Database objects: that way later if we want to do
- stats, it's in the right place.
-*/
-class QueryOptimizer {
-public:
- QueryPlan getPlan(
- const char *ns,
- BSONObj* query,
- BSONObj* order = 0,
- BSONObj* hint = 0);
-};
+ class QueryPlan {
+ public:
+ QueryPlan() {
+ scanAndOrderRequired = false;
+ simpleKeyMatch = false;
+ }
+
+ auto_ptr<Cursor> cursor;
+
+ /* ScanAndOrder processing will be required if true */
+ bool scanAndOrderRequired;
+
+ /* When true, the index we are using has keys such that it can completely resolve the
+ query expression to match by itself without ever checking the main object.
+ */
+ bool simpleKeyMatch;
+ };
+
+ /* We put these objects inside the Database objects: that way later if we want to do
+ stats, it's in the right place.
+ */
+ class QueryOptimizer {
+ public:
+ QueryPlan getPlan(
+ const char *ns,
+ BSONObj* query,
+ BSONObj* order = 0,
+ BSONObj* hint = 0);
+ };
} // namespace mongo
diff --git a/db/repl.cpp b/db/repl.cpp
index c516335547b..9a649d812de 100644
--- a/db/repl.cpp
+++ b/db/repl.cpp
@@ -47,25 +47,25 @@
namespace mongo {
-extern bool quiet;
-extern boost::mutex dbMutex;
-extern long long oplogSize;
-int _updateObjects(const char *ns, BSONObj updateobj, BSONObj pattern, bool upsert, stringstream& ss, bool logOp=false);
-bool _runCommands(const char *ns, BSONObj& jsobj, stringstream& ss, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl);
-void ensureHaveIdIndex(const char *ns);
-
-/* if 1 sync() is running */
-int syncing = 0;
-
-/* if true replace our peer in a replication pair -- don't worry about if his
- local.oplog.$main is empty.
-*/
-bool replacePeer = false;
+ extern bool quiet;
+ extern boost::mutex dbMutex;
+ extern long long oplogSize;
+ int _updateObjects(const char *ns, BSONObj updateobj, BSONObj pattern, bool upsert, stringstream& ss, bool logOp=false);
+ bool _runCommands(const char *ns, BSONObj& jsobj, stringstream& ss, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl);
+ void ensureHaveIdIndex(const char *ns);
+
+ /* if 1 sync() is running */
+ int syncing = 0;
+
+ /* if true replace our peer in a replication pair -- don't worry about if his
+ local.oplog.$main is empty.
+ */
+ bool replacePeer = false;
-/* "dead" means something really bad happened like replication falling completely out of sync.
- when non-null, we are dead and the string is informational
-*/
-const char *allDead = 0;
+ /* "dead" means something really bad happened like replication falling completely out of sync.
+ when non-null, we are dead and the string is informational
+ */
+ const char *allDead = 0;
} // namespace mongo
@@ -76,1120 +76,1120 @@ namespace mongo {
#define debugrepl(z) log() << "debugrepl " << z << '\n'
//define debugrepl
-PairSync *pairSync = new PairSync();
-bool getInitialSyncCompleted() {
- return pairSync->initialSyncCompleted();
-}
+ PairSync *pairSync = new PairSync();
+ bool getInitialSyncCompleted() {
+ return pairSync->initialSyncCompleted();
+ }
-/* --- ReplPair -------------------------------- */
+ /* --- ReplPair -------------------------------- */
-ReplPair *replPair = 0;
+ ReplPair *replPair = 0;
-/* output by the web console */
-const char *replInfo = "";
-struct ReplInfo {
- ReplInfo(const char *msg) {
- replInfo = msg;
- }
- ~ReplInfo() {
- replInfo = "?";
- }
-};
-
-void ReplPair::setMaster(int n, const char *_comment ) {
- if ( n == State_Master && !getInitialSyncCompleted() )
- return;
- info = _comment;
- if ( n != state && !quiet )
- log() << "pair: setting master=" << n << " was " << state << '\n';
- state = n;
-}
-
-/* peer unreachable, try our arbiter */
-void ReplPair::arbitrate() {
- ReplInfo r("arbitrate");
-
- if ( arbHost == "-" ) {
- // no arbiter. we are up, let's assume he is down and network is not partitioned.
- setMasterLocked(State_Master, "remote unreachable");
- return;
- }
+ /* output by the web console */
+ const char *replInfo = "";
+ struct ReplInfo {
+ ReplInfo(const char *msg) {
+ replInfo = msg;
+ }
+ ~ReplInfo() {
+ replInfo = "?";
+ }
+ };
- auto_ptr<DBClientConnection> conn( newClientConnection() );
- string errmsg;
- if ( !conn->connect(arbHost.c_str(), errmsg) ) {
- setMasterLocked(State_CantArb, "can't connect to arb");
- return;
+ void ReplPair::setMaster(int n, const char *_comment ) {
+ if ( n == State_Master && !getInitialSyncCompleted() )
+ return;
+ info = _comment;
+ if ( n != state && !quiet )
+ log() << "pair: setting master=" << n << " was " << state << '\n';
+ state = n;
}
- /* todo: make an arbitrate command we send to the arbiter instead of this */
- bool is_master;
- bool ok = conn->isMaster(is_master);
- if ( !ok ) {
- setMasterLocked(State_CantArb, "can't arb 2");
- return;
- }
+ /* peer unreachable, try our arbiter */
+ void ReplPair::arbitrate() {
+ ReplInfo r("arbitrate");
- setMasterLocked(State_Master, "remote down, arbiter reached");
-}
+ if ( arbHost == "-" ) {
+ // no arbiter. we are up, let's assume he is down and network is not partitioned.
+ setMasterLocked(State_Master, "remote unreachable");
+ return;
+ }
-/* --------------------------------------------- */
+ auto_ptr<DBClientConnection> conn( newClientConnection() );
+ string errmsg;
+ if ( !conn->connect(arbHost.c_str(), errmsg) ) {
+ setMasterLocked(State_CantArb, "can't connect to arb");
+ return;
+ }
-class CmdReplacePeer : public Command {
-public:
- virtual bool slaveOk() {
- return true;
- }
- virtual bool adminOnly() {
- return true;
- }
- virtual bool logTheOp() {
- return false;
+ /* todo: make an arbitrate command we send to the arbiter instead of this */
+ bool is_master;
+ bool ok = conn->isMaster(is_master);
+ if ( !ok ) {
+ setMasterLocked(State_CantArb, "can't arb 2");
+ return;
+ }
+
+ setMasterLocked(State_Master, "remote down, arbiter reached");
}
- CmdReplacePeer() : Command("replacepeer") { }
- virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- if ( replPair == 0 ) {
- errmsg = "not paired";
- return false;
+
+ /* --------------------------------------------- */
+
+ class CmdReplacePeer : public Command {
+ public:
+ virtual bool slaveOk() {
+ return true;
}
- if ( !getInitialSyncCompleted() ) {
- errmsg = "not caught up cannot replace peer";
- return false;
+ virtual bool adminOnly() {
+ return true;
}
- if ( syncing < 0 ) {
- errmsg = "replacepeer already invoked";
+ virtual bool logTheOp() {
return false;
}
- Timer t;
- while ( 1 ) {
- if ( syncing == 0 || t.millis() > 20000 )
- break;
- {
- dbtemprelease t;
- sleepmillis(10);
+ CmdReplacePeer() : Command("replacepeer") { }
+ virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if ( replPair == 0 ) {
+ errmsg = "not paired";
+ return false;
}
- }
- if ( syncing ) {
- assert( syncing > 0 );
- errmsg = "timeout waiting for sync() to finish";
- return false;
- }
- {
- vector<ReplSource*> sources;
- ReplSource::loadAll(sources);
- if ( sources.size() != 1 ) {
- errmsg = "local.sources.count() != 1, cannot replace peer";
+ if ( !getInitialSyncCompleted() ) {
+ errmsg = "not caught up cannot replace peer";
return false;
}
+ if ( syncing < 0 ) {
+ errmsg = "replacepeer already invoked";
+ return false;
+ }
+ Timer t;
+ while ( 1 ) {
+ if ( syncing == 0 || t.millis() > 20000 )
+ break;
+ {
+ dbtemprelease t;
+ sleepmillis(10);
+ }
+ }
+ if ( syncing ) {
+ assert( syncing > 0 );
+ errmsg = "timeout waiting for sync() to finish";
+ return false;
+ }
+ {
+ vector<ReplSource*> sources;
+ ReplSource::loadAll(sources);
+ if ( sources.size() != 1 ) {
+ errmsg = "local.sources.count() != 1, cannot replace peer";
+ return false;
+ }
+ }
+ {
+ emptyCollection("local.sources");
+ BSONObj o = fromjson("{\"replacepeer\":1}");
+ putSingleton("local.pair.startup", o);
+ }
+ syncing = -1;
+ allDead = "replacepeer invoked -- adjust local.sources hostname then restart this db process";
+ result.append("info", "adjust local.sources hostname; db restart now required");
+ return true;
}
- {
- emptyCollection("local.sources");
- BSONObj o = fromjson("{\"replacepeer\":1}");
- putSingleton("local.pair.startup", o);
- }
- syncing = -1;
- allDead = "replacepeer invoked -- adjust local.sources hostname then restart this db process";
- result.append("info", "adjust local.sources hostname; db restart now required");
- return true;
- }
-} cmdReplacePeer;
+ } cmdReplacePeer;
-class CmdIsMaster : public Command {
-public:
- virtual bool slaveOk() {
- return true;
- }
- CmdIsMaster() : Command("ismaster") { }
- virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
- if ( allDead ) {
- result.append("ismaster", 0.0);
- if ( replPair )
+ class CmdIsMaster : public Command {
+ public:
+ virtual bool slaveOk() {
+ return true;
+ }
+ CmdIsMaster() : Command("ismaster") { }
+ virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
+ if ( allDead ) {
+ result.append("ismaster", 0.0);
+ if ( replPair )
+ result.append("remote", replPair->remote);
+ result.append("info", allDead);
+ }
+ else if ( replPair ) {
+ result.append("ismaster", replPair->state);
result.append("remote", replPair->remote);
- result.append("info", allDead);
+ if ( replPair->info.empty() )
+ result.append("info", replPair->info);
+ }
+ else {
+ result.append("ismaster", 1);
+ result.append("msg", "not paired");
+ }
+
+ return true;
}
- else if ( replPair ) {
- result.append("ismaster", replPair->state);
- result.append("remote", replPair->remote);
- if ( replPair->info.empty() )
- result.append("info", replPair->info);
+ } cmdismaster;
+
+ /* negotiate who is master
+
+ -1=not set (probably means we just booted)
+ 0=was slave
+ 1=was master
+
+ remote,local -> new remote,local
+ !1,1 -> 0,1
+ 1,!1 -> 1,0
+ -1,-1 -> dominant->1, nondom->0
+ 0,0 -> dominant->1, nondom->0
+ 1,1 -> dominant->1, nondom->0
+
+ { negotiatemaster:1, i_was:<state>, your_name:<hostname> }
+ returns:
+ { ok:1, you_are:..., i_am:... }
+ */
+ class CmdNegotiateMaster : public Command {
+ public:
+ CmdNegotiateMaster() : Command("negotiatemaster") { }
+ virtual bool slaveOk() {
+ return true;
}
- else {
- result.append("ismaster", 1);
- result.append("msg", "not paired");
+ virtual bool adminOnly() {
+ return true;
}
- return true;
- }
-} cmdismaster;
+ virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ if ( replPair == 0 ) {
+ problem() << "got negotiatemaster cmd but we are not in paired mode." << endl;
+ errmsg = "not paired";
+ return false;
+ }
-/* negotiate who is master
+ int was = cmdObj.getIntField("i_was");
+ string myname = cmdObj.getStringField("your_name");
+ if ( myname.empty() || was < -1 ) {
+ errmsg = "your_name/i_was not specified";
+ return false;
+ }
- -1=not set (probably means we just booted)
- 0=was slave
- 1=was master
+ int N = ReplPair::State_Negotiating;
+ int M = ReplPair::State_Master;
+ int S = ReplPair::State_Slave;
- remote,local -> new remote,local
- !1,1 -> 0,1
- 1,!1 -> 1,0
- -1,-1 -> dominant->1, nondom->0
- 0,0 -> dominant->1, nondom->0
- 1,1 -> dominant->1, nondom->0
+ if ( !replPair->dominant( myname ) ) {
+ result.append( "you_are", N );
+ result.append( "i_am", N );
+ return true;
+ }
- { negotiatemaster:1, i_was:<state>, your_name:<hostname> }
- returns:
- { ok:1, you_are:..., i_am:... }
-*/
-class CmdNegotiateMaster : public Command {
-public:
- CmdNegotiateMaster() : Command("negotiatemaster") { }
- virtual bool slaveOk() {
- return true;
- }
- virtual bool adminOnly() {
- return true;
- }
+ int me, you;
+ if ( !getInitialSyncCompleted() || ( replPair->state != M && was == M ) ) {
+ me=S;
+ you=M;
+ }
+ else {
+ me=M;
+ you=S;
+ }
+ replPair->setMaster( me, "CmdNegotiateMaster::run()" );
- virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
- if ( replPair == 0 ) {
- problem() << "got negotiatemaster cmd but we are not in paired mode." << endl;
- errmsg = "not paired";
- return false;
- }
+ result.append("you_are", you);
+ result.append("i_am", me);
- int was = cmdObj.getIntField("i_was");
- string myname = cmdObj.getStringField("your_name");
- if ( myname.empty() || was < -1 ) {
- errmsg = "your_name/i_was not specified";
- return false;
+ return true;
+ }
+ } cmdnegotiatemaster;
+
+ void ReplPair::negotiate(DBClientConnection *conn) {
+ BSONObjBuilder b;
+ b.append("negotiatemaster",1);
+ b.append("i_was", state);
+ b.append("your_name", remoteHost);
+ BSONObj cmd = b.done();
+ BSONObj res = conn->findOne("admin.$cmd", cmd);
+ if ( res.getIntField("ok") != 1 ) {
+ problem() << "negotiate fails: " << res.toString() << '\n';
+ setMasterLocked(State_Confused);
+ return;
+ }
+ int x = res.getIntField("you_are");
+ // State_Negotiating means the remote node is not dominant and cannot
+ // choose who is master.
+ if ( x != State_Slave && x != State_Master && x != State_Negotiating ) {
+ problem() << "negotiate: bad you_are value " << res.toString() << endl;
+ return;
}
+ if ( x != State_Negotiating ) {
+ // Don't actually have to lock here, since we only get here if not the
+ // dominant node.
+ setMaster(x);
+ }
+ }
- int N = ReplPair::State_Negotiating;
- int M = ReplPair::State_Master;
- int S = ReplPair::State_Slave;
+ OpTime last(0, 0);
- if ( !replPair->dominant( myname ) ) {
- result.append( "you_are", N );
- result.append( "i_am", N );
- return true;
+ OpTime OpTime::now() {
+ unsigned t = (unsigned) time(0);
+ if ( last.secs == t ) {
+ last.i++;
+ return last;
}
+ last = OpTime(t, 1);
+ return last;
+ }
- int me, you;
- if ( !getInitialSyncCompleted() || ( replPair->state != M && was == M ) ) {
- me=S;
- you=M;
- }
- else {
- me=M;
- you=S;
+ struct TestOpTime {
+ TestOpTime() {
+ OpTime t;
+ for ( int i = 0; i < 10; i++ ) {
+ OpTime s = OpTime::now();
+ assert( s != t );
+ t = s;
+ }
+ OpTime q = t;
+ assert( q == t );
+ assert( !(q != t) );
}
- replPair->setMaster( me, "CmdNegotiateMaster::run()" );
+ } testoptime;
- result.append("you_are", you);
- result.append("i_am", me);
+ /* --------------------------------------------------------------*/
- return true;
- }
-} cmdnegotiatemaster;
-
-void ReplPair::negotiate(DBClientConnection *conn) {
- BSONObjBuilder b;
- b.append("negotiatemaster",1);
- b.append("i_was", state);
- b.append("your_name", remoteHost);
- BSONObj cmd = b.done();
- BSONObj res = conn->findOne("admin.$cmd", cmd);
- if ( res.getIntField("ok") != 1 ) {
- problem() << "negotiate fails: " << res.toString() << '\n';
- setMasterLocked(State_Confused);
- return;
- }
- int x = res.getIntField("you_are");
- // State_Negotiating means the remote node is not dominant and cannot
- // choose who is master.
- if ( x != State_Slave && x != State_Master && x != State_Negotiating ) {
- problem() << "negotiate: bad you_are value " << res.toString() << endl;
- return;
- }
- if ( x != State_Negotiating ) {
- // Don't actually have to lock here, since we only get here if not the
- // dominant node.
- setMaster(x);
+ ReplSource::ReplSource() {
+ replacing = false;
+ nClonedThisPass = 0;
+ paired = false;
}
-}
-OpTime last(0, 0);
+ ReplSource::ReplSource(BSONObj o) : nClonedThisPass(0) {
+ replacing = false;
+ paired = false;
+ only = o.getStringField("only");
+ hostName = o.getStringField("host");
+ _sourceName = o.getStringField("source");
+ uassert( "'host' field not set in sources collection object", !hostName.empty() );
+ uassert( "only source='main' allowed for now with replication", sourceName() == "main" );
+ BSONElement e = o.getField("syncedTo");
+ if ( !e.eoo() ) {
+ uassert( "bad sources 'syncedTo' field value", e.type() == Date );
+ OpTime tmp( e.date() );
+ syncedTo = tmp;
+ //syncedTo.asDate() = e.date();
+ }
-OpTime OpTime::now() {
- unsigned t = (unsigned) time(0);
- if ( last.secs == t ) {
- last.i++;
- return last;
- }
- last = OpTime(t, 1);
- return last;
-}
-
-struct TestOpTime {
- TestOpTime() {
- OpTime t;
- for ( int i = 0; i < 10; i++ ) {
- OpTime s = OpTime::now();
- assert( s != t );
- t = s;
- }
- OpTime q = t;
- assert( q == t );
- assert( !(q != t) );
- }
-} testoptime;
-
-/* --------------------------------------------------------------*/
-
-ReplSource::ReplSource() {
- replacing = false;
- nClonedThisPass = 0;
- paired = false;
-}
-
-ReplSource::ReplSource(BSONObj o) : nClonedThisPass(0) {
- replacing = false;
- paired = false;
- only = o.getStringField("only");
- hostName = o.getStringField("host");
- _sourceName = o.getStringField("source");
- uassert( "'host' field not set in sources collection object", !hostName.empty() );
- uassert( "only source='main' allowed for now with replication", sourceName() == "main" );
- BSONElement e = o.getField("syncedTo");
- if ( !e.eoo() ) {
- uassert( "bad sources 'syncedTo' field value", e.type() == Date );
- OpTime tmp( e.date() );
- syncedTo = tmp;
- //syncedTo.asDate() = e.date();
+ BSONObj dbsObj = o.getObjectField("dbs");
+ if ( !dbsObj.isEmpty() ) {
+ BSONObjIterator i(dbsObj);
+ while ( 1 ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ dbs.insert( e.fieldName() );
+ }
+ }
}
- BSONObj dbsObj = o.getObjectField("dbs");
- if ( !dbsObj.isEmpty() ) {
- BSONObjIterator i(dbsObj);
- while ( 1 ) {
- BSONElement e = i.next();
- if ( e.eoo() )
- break;
- dbs.insert( e.fieldName() );
+ /* Turn our C++ Source object into a BSONObj */
+ BSONObj ReplSource::jsobj() {
+ BSONObjBuilder b;
+ b.append("host", hostName);
+ b.append("source", sourceName());
+ if ( !only.empty() )
+ b.append("only", only);
+ if ( !syncedTo.isNull() )
+ b.appendDate("syncedTo", syncedTo.asDate());
+
+ BSONObjBuilder dbs_builder;
+ int n = 0;
+ for ( set<string>::iterator i = dbs.begin(); i != dbs.end(); i++ ) {
+ n++;
+ dbs_builder.appendBool(i->c_str(), 1);
}
+ if ( n )
+ b.append("dbs", dbs_builder.done());
+
+ return b.doneAndDecouple();
}
-}
-
-/* Turn our C++ Source object into a BSONObj */
-BSONObj ReplSource::jsobj() {
- BSONObjBuilder b;
- b.append("host", hostName);
- b.append("source", sourceName());
- if ( !only.empty() )
- b.append("only", only);
- if ( !syncedTo.isNull() )
- b.appendDate("syncedTo", syncedTo.asDate());
-
- BSONObjBuilder dbs_builder;
- int n = 0;
- for ( set<string>::iterator i = dbs.begin(); i != dbs.end(); i++ ) {
- n++;
- dbs_builder.appendBool(i->c_str(), 1);
- }
- if ( n )
- b.append("dbs", dbs_builder.done());
-
- return b.doneAndDecouple();
-}
-
-void ReplSource::save() {
- BSONObjBuilder b;
- assert( !hostName.empty() );
- b.append("host", hostName);
- // todo: finish allowing multiple source configs.
- // this line doesn't work right when source is null, if that is allowed as it is now:
- //b.append("source", _sourceName);
- BSONObj pattern = b.done();
-
- BSONObj o = jsobj();
-
- stringstream ss;
- setClient("local.sources");
- int u = _updateObjects("local.sources", o, pattern, true/*upsert for pair feature*/, ss);
- assert( u == 1 || u == 4 );
- database = 0;
-
- if ( replacing ) {
- /* if we were in "replace" mode, we now have synced up with the replacement,
- so turn that off.
- */
- replacing = false;
- wassert( replacePeer );
- replacePeer = false;
- emptyCollection("local.pair.startup");
- }
-}
-void ReplSource::cleanup(vector<ReplSource*>& v) {
- for ( vector<ReplSource*>::iterator i = v.begin(); i != v.end(); i++ )
- delete *i;
-}
+ void ReplSource::save() {
+ BSONObjBuilder b;
+ assert( !hostName.empty() );
+ b.append("host", hostName);
+ // todo: finish allowing multiple source configs.
+ // this line doesn't work right when source is null, if that is allowed as it is now:
+ //b.append("source", _sourceName);
+ BSONObj pattern = b.done();
-string dashDashSource;
+ BSONObj o = jsobj();
-static void addSourceToList(vector<ReplSource*>&v, ReplSource& s, vector<ReplSource*>&old) {
- for ( vector<ReplSource*>::iterator i = old.begin(); i != old.end(); ) {
- if ( s == **i ) {
- v.push_back(*i);
- old.erase(i);
- return;
+ stringstream ss;
+ setClient("local.sources");
+ int u = _updateObjects("local.sources", o, pattern, true/*upsert for pair feature*/, ss);
+ assert( u == 1 || u == 4 );
+ database = 0;
+
+ if ( replacing ) {
+ /* if we were in "replace" mode, we now have synced up with the replacement,
+ so turn that off.
+ */
+ replacing = false;
+ wassert( replacePeer );
+ replacePeer = false;
+ emptyCollection("local.pair.startup");
}
- i++;
}
- v.push_back( new ReplSource(s) );
-}
+ void ReplSource::cleanup(vector<ReplSource*>& v) {
+ for ( vector<ReplSource*>::iterator i = v.begin(); i != v.end(); i++ )
+ delete *i;
+ }
-/* we reuse our existing objects so that we can keep our existing connection
- and cursor in effect.
-*/
-void ReplSource::loadAll(vector<ReplSource*>& v) {
- vector<ReplSource *> old = v;
- v.erase(v.begin(), v.end());
+ string dashDashSource;
- bool gotPairWith = false;
+ static void addSourceToList(vector<ReplSource*>&v, ReplSource& s, vector<ReplSource*>&old) {
+ for ( vector<ReplSource*>::iterator i = old.begin(); i != old.end(); ) {
+ if ( s == **i ) {
+ v.push_back(*i);
+ old.erase(i);
+ return;
+ }
+ i++;
+ }
+
+ v.push_back( new ReplSource(s) );
+ }
+
+ /* we reuse our existing objects so that we can keep our existing connection
+ and cursor in effect.
+ */
+ void ReplSource::loadAll(vector<ReplSource*>& v) {
+ vector<ReplSource *> old = v;
+ v.erase(v.begin(), v.end());
+
+ bool gotPairWith = false;
+
+ if ( !dashDashSource.empty() ) {
+ setClient("local.sources");
+ // --source <host> specified.
+ // check that no items are in sources other than that
+ // add if missing
+ auto_ptr<Cursor> c = findTableScan("local.sources", emptyObj);
+ int n = 0;
+ while ( c->ok() ) {
+ n++;
+ ReplSource tmp(c->current());
+ if ( tmp.hostName != dashDashSource ) {
+ problem() << "--source " << dashDashSource << " != " << tmp.hostName << " from local.sources collection" << endl;
+ log() << "terminating after 30 seconds" << endl;
+ sleepsecs(30);
+ dbexit(18);
+ }
+ c->advance();
+ }
+ uassert( "local.sources collection corrupt?", n<2 );
+ if ( n == 0 ) {
+ // source missing. add.
+ ReplSource s;
+ s.hostName = dashDashSource;
+ s.save();
+ }
+ }
- if ( !dashDashSource.empty() ) {
setClient("local.sources");
- // --source <host> specified.
- // check that no items are in sources other than that
- // add if missing
auto_ptr<Cursor> c = findTableScan("local.sources", emptyObj);
- int n = 0;
while ( c->ok() ) {
- n++;
ReplSource tmp(c->current());
- if ( tmp.hostName != dashDashSource ) {
- problem() << "--source " << dashDashSource << " != " << tmp.hostName << " from local.sources collection" << endl;
- log() << "terminating after 30 seconds" << endl;
- sleepsecs(30);
- dbexit(18);
+ if ( replPair && tmp.hostName == replPair->remote && tmp.sourceName() == "main" ) {
+ gotPairWith = true;
+ tmp.paired = true;
+ if ( replacePeer ) {
+ // peer was replaced -- start back at the beginning.
+ tmp.syncedTo = OpTime();
+ tmp.replacing = true;
+ }
}
+ addSourceToList(v, tmp, old);
c->advance();
}
- uassert( "local.sources collection corrupt?", n<2 );
- if ( n == 0 ) {
- // source missing. add.
- ReplSource s;
- s.hostName = dashDashSource;
- s.save();
+ database = 0;
+
+ if ( !gotPairWith && replPair ) {
+ /* add the --pairwith server */
+ ReplSource *s = new ReplSource();
+ s->paired = true;
+ s->hostName = replPair->remote;
+ s->replacing = replacePeer;
+ v.push_back(s);
}
+
+ for ( vector<ReplSource*>::iterator i = old.begin(); i != old.end(); i++ )
+ delete *i;
}
- setClient("local.sources");
- auto_ptr<Cursor> c = findTableScan("local.sources", emptyObj);
- while ( c->ok() ) {
- ReplSource tmp(c->current());
- if ( replPair && tmp.hostName == replPair->remote && tmp.sourceName() == "main" ) {
- gotPairWith = true;
- tmp.paired = true;
- if ( replacePeer ) {
- // peer was replaced -- start back at the beginning.
- tmp.syncedTo = OpTime();
- tmp.replacing = true;
- }
+ BSONObj opTimeQuery = fromjson("{\"getoptime\":1}");
+
+ bool ReplSource::resync(string db) {
+ {
+ log() << "resync: dropping database " << db << endl;
+ string dummyns = db + ".";
+ assert( database->name == db );
+ dropDatabase(dummyns.c_str());
+ setClientTempNs(dummyns.c_str());
}
- addSourceToList(v, tmp, old);
- c->advance();
- }
- database = 0;
-
- if ( !gotPairWith && replPair ) {
- /* add the --pairwith server */
- ReplSource *s = new ReplSource();
- s->paired = true;
- s->hostName = replPair->remote;
- s->replacing = replacePeer;
- v.push_back(s);
- }
- for ( vector<ReplSource*>::iterator i = old.begin(); i != old.end(); i++ )
- delete *i;
-}
+ {
+ log() << "resync: cloning database " << db << endl;
+ ReplInfo r("resync: cloning a database");
+ string errmsg;
+ bool ok = cloneFrom(hostName.c_str(), errmsg, database->name, false, /*slaveok*/ true);
+ if ( !ok ) {
+ problem() << "resync of " << db << " from " << hostName << " failed " << errmsg << endl;
+ throw SyncException();
+ }
+ }
-BSONObj opTimeQuery = fromjson("{\"getoptime\":1}");
+ log() << "resync: done " << db << endl;
-bool ReplSource::resync(string db) {
- {
- log() << "resync: dropping database " << db << endl;
- string dummyns = db + ".";
- assert( database->name == db );
- dropDatabase(dummyns.c_str());
- setClientTempNs(dummyns.c_str());
+ /* add the db to our dbs array which we will write back to local.sources.
+ note we are not in a consistent state until the oplog gets applied,
+ which happens next when this returns.
+ */
+ dbs.insert(db);
+ return true;
}
- {
- log() << "resync: cloning database " << db << endl;
- ReplInfo r("resync: cloning a database");
- string errmsg;
- bool ok = cloneFrom(hostName.c_str(), errmsg, database->name, false, /*slaveok*/ true);
- if ( !ok ) {
- problem() << "resync of " << db << " from " << hostName << " failed " << errmsg << endl;
+ /* local.$oplog.main is of the form:
+ { ts: ..., op: <optype>, ns: ..., o: <obj> , o2: <extraobj>, b: <boolflag> }
+ ...
+ see logOp() comments.
+ */
+ void ReplSource::sync_pullOpLog_applyOperation(BSONObj& op) {
+ char clientName[MaxClientLen];
+ const char *ns = op.getStringField("ns");
+ nsToClient(ns, clientName);
+
+ if ( *ns == '.' ) {
+ problem() << "skipping bad op in oplog: " << op.toString() << endl;
+ return;
+ }
+ else if ( *ns == 0 ) {
+ problem() << "halting replication, bad op in oplog:\n " << op.toString() << endl;
+ allDead = "bad object in oplog";
throw SyncException();
}
- }
-
- log() << "resync: done " << db << endl;
- /* add the db to our dbs array which we will write back to local.sources.
- note we are not in a consistent state until the oplog gets applied,
- which happens next when this returns.
- */
- dbs.insert(db);
- return true;
-}
-
-/* local.$oplog.main is of the form:
- { ts: ..., op: <optype>, ns: ..., o: <obj> , o2: <extraobj>, b: <boolflag> }
- ...
- see logOp() comments.
-*/
-void ReplSource::sync_pullOpLog_applyOperation(BSONObj& op) {
- char clientName[MaxClientLen];
- const char *ns = op.getStringField("ns");
- nsToClient(ns, clientName);
-
- if ( *ns == '.' ) {
- problem() << "skipping bad op in oplog: " << op.toString() << endl;
- return;
- }
- else if ( *ns == 0 ) {
- problem() << "halting replication, bad op in oplog:\n " << op.toString() << endl;
- allDead = "bad object in oplog";
- throw SyncException();
- }
+ if ( !only.empty() && only != clientName )
+ return;
- if ( !only.empty() && only != clientName )
- return;
+ bool newDb = dbs.count(clientName) == 0;
+ if ( newDb && nClonedThisPass ) {
+ /* we only clone one database per pass, even if a lot need done. This helps us
+ avoid overflowing the master's transaction log by doing too much work before going
+ back to read more transactions. (Imagine a scenario of slave startup where we try to
+ clone 100 databases in one pass.)
+ */
+ addDbNextPass.insert(clientName);
+ return;
+ }
- bool newDb = dbs.count(clientName) == 0;
- if ( newDb && nClonedThisPass ) {
- /* we only clone one database per pass, even if a lot need done. This helps us
- avoid overflowing the master's transaction log by doing too much work before going
- back to read more transactions. (Imagine a scenario of slave startup where we try to
- clone 100 databases in one pass.)
- */
- addDbNextPass.insert(clientName);
- return;
- }
+ dblock lk;
+ bool justCreated;
+ try {
+ justCreated = setClientTempNs(ns);
+ } catch ( AssertionException& ) {
+ problem() << "skipping bad(?) op in oplog, setClient() failed, ns: '" << ns << "'\n";
+ addDbNextPass.erase(clientName);
+ return;
+ }
- dblock lk;
- bool justCreated;
- try {
- justCreated = setClientTempNs(ns);
- } catch ( AssertionException& ) {
- problem() << "skipping bad(?) op in oplog, setClient() failed, ns: '" << ns << "'\n";
- addDbNextPass.erase(clientName);
- return;
- }
+ if ( allDead ) {
+ // hmmm why is this check here and not at top of this function? does it get set between top and here?
+ log() << "allDead, throwing SyncException\n";
+ throw SyncException();
+ }
- if ( allDead ) {
- // hmmm why is this check here and not at top of this function? does it get set between top and here?
- log() << "allDead, throwing SyncException\n";
- throw SyncException();
- }
+ // operation type -- see logOp() comments for types
+ const char *opType = op.getStringField("op");
- // operation type -- see logOp() comments for types
- const char *opType = op.getStringField("op");
-
- if ( justCreated || /* datafiles were missing. so we need everything, no matter what sources object says */
- newDb ) /* if not in dbs, we've never synced this database before, so we need everything */
- {
- if ( op.getBoolField("first") &&
- pairSync->initialSyncCompleted() /*<- when false, we are a replacement volume for a pair and need a full sync */
- ) {
- log() << "pull: got {first:true} op ns:" << ns << '\n';
- /* this is the first thing in the oplog ever, so we don't need to resync(). */
- if ( newDb )
- dbs.insert(clientName);
- else
- problem() << "warning: justCreated && !newDb in repl " << op.toString() << endl;
- }
- else if ( paired && !justCreated ) {
- if ( strcmp(opType,"db") == 0 && strcmp(ns, "admin.") == 0 ) {
- // "admin" is a special namespace we use for priviledged commands -- ok if it exists first on
- // either side
+ if ( justCreated || /* datafiles were missing. so we need everything, no matter what sources object says */
+ newDb ) /* if not in dbs, we've never synced this database before, so we need everything */
+ {
+ if ( op.getBoolField("first") &&
+ pairSync->initialSyncCompleted() /*<- when false, we are a replacement volume for a pair and need a full sync */
+ ) {
+ log() << "pull: got {first:true} op ns:" << ns << '\n';
+ /* this is the first thing in the oplog ever, so we don't need to resync(). */
+ if ( newDb )
+ dbs.insert(clientName);
+ else
+ problem() << "warning: justCreated && !newDb in repl " << op.toString() << endl;
+ }
+ else if ( paired && !justCreated ) {
+ if ( strcmp(opType,"db") == 0 && strcmp(ns, "admin.") == 0 ) {
+ // "admin" is a special namespace we use for priviledged commands -- ok if it exists first on
+ // either side
+ }
+ else {
+ /* the other half of our pair has some operations. yet we already had a db on our
+ disk even though the db in question is not listed in the source.
+ */
+ allDead = "pair: historical image missing for a db";
+ problem() << "pair: historical image missing for " << clientName << ", setting allDead=true" << endl;
+ log() << "op:" << op.toString() << endl;
+ /*
+ log() << "TEMP: pair: assuming we have the historical image for: " <<
+ clientName << ". add extra checks here." << endl;
+ dbs.insert(clientName);
+ */
+ }
}
else {
- /* the other half of our pair has some operations. yet we already had a db on our
- disk even though the db in question is not listed in the source.
- */
- allDead = "pair: historical image missing for a db";
- problem() << "pair: historical image missing for " << clientName << ", setting allDead=true" << endl;
- log() << "op:" << op.toString() << endl;
- /*
- log() << "TEMP: pair: assuming we have the historical image for: " <<
- clientName << ". add extra checks here." << endl;
- dbs.insert(clientName);
- */
+ nClonedThisPass++;
+ resync(database->name);
}
+ addDbNextPass.erase(clientName);
}
- else {
- nClonedThisPass++;
- resync(database->name);
- }
- addDbNextPass.erase(clientName);
- }
- stringstream ss;
- BSONObj o = op.getObjectField("o");
- try {
- if ( *opType == 'i' ) {
- const char *p = strchr(ns, '.');
- if ( p && strcmp(p, ".system.indexes") == 0 ) {
- // updates aren't allowed for indexes -- so we will do a regular insert. if index already
- // exists, that is ok.
- theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
- }
- else {
- // do upserts for inserts as we might get replayed more than once
- OID *oid = o.getOID();
- if ( oid == 0 ) {
- _updateObjects(ns, o, o, true, ss);
+ stringstream ss;
+ BSONObj o = op.getObjectField("o");
+ try {
+ if ( *opType == 'i' ) {
+ const char *p = strchr(ns, '.');
+ if ( p && strcmp(p, ".system.indexes") == 0 ) {
+ // updates aren't allowed for indexes -- so we will do a regular insert. if index already
+ // exists, that is ok.
+ theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
}
else {
- BSONObjBuilder b;
- b.appendOID("_id", oid);
- RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
- _updateObjects(ns, o, b.done(), true, ss);
+ // do upserts for inserts as we might get replayed more than once
+ OID *oid = o.getOID();
+ if ( oid == 0 ) {
+ _updateObjects(ns, o, o, true, ss);
+ }
+ else {
+ BSONObjBuilder b;
+ b.appendOID("_id", oid);
+ RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
+ _updateObjects(ns, o, b.done(), true, ss);
+ }
}
}
+ else if ( *opType == 'u' ) {
+ RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
+ _updateObjects(ns, o, op.getObjectField("o2"), op.getBoolField("b"), ss);
+ }
+ else if ( *opType == 'd' ) {
+ if ( opType[1] == 0 )
+ deleteObjects(ns, o, op.getBoolField("b"));
+ else
+ assert( opType[1] == 'b' ); // "db" advertisement
+ }
+ else {
+ BufBuilder bb;
+ BSONObjBuilder ob;
+ assert( *opType == 'c' );
+ _runCommands(ns, o, ss, bb, ob, true);
+ }
}
- else if ( *opType == 'u' ) {
- RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
- _updateObjects(ns, o, op.getObjectField("o2"), op.getBoolField("b"), ss);
+ catch ( UserAssertionException& e ) {
+ log() << "sync: caught user assertion " << e.msg << '\n';
}
- else if ( *opType == 'd' ) {
- if ( opType[1] == 0 )
- deleteObjects(ns, o, op.getBoolField("b"));
- else
- assert( opType[1] == 'b' ); // "db" advertisement
+ database = 0;
+ }
+
+ /* note: not yet in mutex at this point. */
+ bool ReplSource::sync_pullOpLog() {
+ string ns = string("local.oplog.$") + sourceName();
+ debugrepl( "sync_pullOpLog " << ns << " syncedTo:" << syncedTo.toStringLong() );
+
+ bool tailing = true;
+ DBClientCursor *c = cursor.get();
+ if ( c && c->isDead() ) {
+ log() << "pull: old cursor isDead, initiating a new one\n";
+ c = 0;
+ }
+
+ if ( c == 0 ) {
+ BSONObjBuilder q;
+ q.appendDate("$gte", syncedTo.asDate());
+ BSONObjBuilder query;
+ query.append("ts", q.done());
+ BSONObj queryObj = query.done();
+ // queryObj = { ts: { $gte: syncedTo } }
+
+ debugrepl( ns << ".find(" << queryObj.toString() << ')' );
+ cursor = conn->query( ns.c_str(), queryObj, 0, 0, 0, Option_CursorTailable | Option_SlaveOk );
+ c = cursor.get();
+ tailing = false;
}
else {
- BufBuilder bb;
- BSONObjBuilder ob;
- assert( *opType == 'c' );
- _runCommands(ns, o, ss, bb, ob, true);
+ debugrepl( "tailing=true" );
}
- }
- catch ( UserAssertionException& e ) {
- log() << "sync: caught user assertion " << e.msg << '\n';
- }
- database = 0;
-}
-
-/* note: not yet in mutex at this point. */
-bool ReplSource::sync_pullOpLog() {
- string ns = string("local.oplog.$") + sourceName();
- debugrepl( "sync_pullOpLog " << ns << " syncedTo:" << syncedTo.toStringLong() );
-
- bool tailing = true;
- DBClientCursor *c = cursor.get();
- if ( c && c->isDead() ) {
- log() << "pull: old cursor isDead, initiating a new one\n";
- c = 0;
- }
-
- if ( c == 0 ) {
- BSONObjBuilder q;
- q.appendDate("$gte", syncedTo.asDate());
- BSONObjBuilder query;
- query.append("ts", q.done());
- BSONObj queryObj = query.done();
- // queryObj = { ts: { $gte: syncedTo } }
-
- debugrepl( ns << ".find(" << queryObj.toString() << ')' );
- cursor = conn->query( ns.c_str(), queryObj, 0, 0, 0, Option_CursorTailable | Option_SlaveOk );
- c = cursor.get();
- tailing = false;
- }
- else {
- debugrepl( "tailing=true" );
- }
- if ( c == 0 ) {
- problem() << "pull: dbclient::query returns null (conn closed?)" << endl;
- resetConnection();
- sleepsecs(3);
- return false;
- }
+ if ( c == 0 ) {
+ problem() << "pull: dbclient::query returns null (conn closed?)" << endl;
+ resetConnection();
+ sleepsecs(3);
+ return false;
+ }
- // show any deferred database creates from a previous pass
- {
- set<string>::iterator i = addDbNextPass.begin();
- if ( i != addDbNextPass.end() ) {
- BSONObjBuilder b;
- b.append("ns", *i + '.');
- b.append("op", "db");
- BSONObj op = b.done();
- sync_pullOpLog_applyOperation(op);
+ // show any deferred database creates from a previous pass
+ {
+ set<string>::iterator i = addDbNextPass.begin();
+ if ( i != addDbNextPass.end() ) {
+ BSONObjBuilder b;
+ b.append("ns", *i + '.');
+ b.append("op", "db");
+ BSONObj op = b.done();
+ sync_pullOpLog_applyOperation(op);
+ }
}
- }
- if ( !c->more() ) {
- if ( tailing ) {
- debugrepl( "tailing & no new activity" );
- } else
- log() << "pull: " << ns << " oplog is empty\n";
- sleepsecs(3);
- return true;
- }
+ if ( !c->more() ) {
+ if ( tailing ) {
+ debugrepl( "tailing & no new activity" );
+ } else
+ log() << "pull: " << ns << " oplog is empty\n";
+ sleepsecs(3);
+ return true;
+ }
- int n = 0;
- BSONObj op = c->next();
- BSONElement ts = op.findElement("ts");
- if ( ts.type() != Date ) {
- string err = op.getStringField("$err");
- if ( !err.empty() ) {
- problem() << "pull: $err reading remote oplog: " + err << '\n';
- massert( "got $err reading remote oplog", false );
+ int n = 0;
+ BSONObj op = c->next();
+ BSONElement ts = op.findElement("ts");
+ if ( ts.type() != Date ) {
+ string err = op.getStringField("$err");
+ if ( !err.empty() ) {
+ problem() << "pull: $err reading remote oplog: " + err << '\n';
+ massert( "got $err reading remote oplog", false );
+ }
+ else {
+ problem() << "pull: bad object read from remote oplog: " << op.toString() << '\n';
+ massert("pull: bad object read from remote oplog", false);
+ }
}
- else {
- problem() << "pull: bad object read from remote oplog: " << op.toString() << '\n';
- massert("pull: bad object read from remote oplog", false);
+ OpTime nextOpTime( ts.date() );
+ debugrepl( "first op time received: " << nextOpTime.toString() );
+ bool initial = syncedTo.isNull();
+ if ( initial || tailing ) {
+ if ( tailing ) {
+ assert( syncedTo < nextOpTime );
+ }
+ else {
+ log() << "pull: initial run\n";
+ }
+ {
+ sync_pullOpLog_applyOperation(op);
+ n++;
+ }
}
- }
- OpTime nextOpTime( ts.date() );
- debugrepl( "first op time received: " << nextOpTime.toString() );
- bool initial = syncedTo.isNull();
- if ( initial || tailing ) {
- if ( tailing ) {
+ else if ( nextOpTime != syncedTo ) {
+ Logstream& l = log();
+ l << "pull: nextOpTime " << nextOpTime.toStringLong() << ' ';
+ if ( nextOpTime < syncedTo )
+ l << "<??";
+ else
+ l << ">";
+
+ l << " syncedTo " << syncedTo.toStringLong() << '\n';
+ log() << "pull: time diff: " << (nextOpTime.getSecs() - syncedTo.getSecs()) << "sec\n";
+ log() << "pull: tailing: " << tailing << '\n';
+ log() << "pull: data too stale, halting replication" << endl;
+ replInfo = allDead = "data too stale halted replication";
assert( syncedTo < nextOpTime );
+ throw SyncException();
}
else {
- log() << "pull: initial run\n";
+ /* t == syncedTo, so the first op was applied previously, no need to redo it. */
}
+
+ // apply operations
{
- sync_pullOpLog_applyOperation(op);
- n++;
- }
- }
- else if ( nextOpTime != syncedTo ) {
- Logstream& l = log();
- l << "pull: nextOpTime " << nextOpTime.toStringLong() << ' ';
- if ( nextOpTime < syncedTo )
- l << "<??";
- else
- l << ">";
-
- l << " syncedTo " << syncedTo.toStringLong() << '\n';
- log() << "pull: time diff: " << (nextOpTime.getSecs() - syncedTo.getSecs()) << "sec\n";
- log() << "pull: tailing: " << tailing << '\n';
- log() << "pull: data too stale, halting replication" << endl;
- replInfo = allDead = "data too stale halted replication";
- assert( syncedTo < nextOpTime );
- throw SyncException();
- }
- else {
- /* t == syncedTo, so the first op was applied previously, no need to redo it. */
- }
+ while ( 1 ) {
+ if ( !c->more() ) {
+ log() << "pull: applied " << n << " operations" << endl;
+ syncedTo = nextOpTime;
+ debugrepl( "end sync_pullOpLog syncedTo: " << syncedTo.toStringLong() );
+ dblock lk;
+ save(); // note how far we are synced up to now
+ break;
+ }
+ /* todo: get out of the mutex for the next()? */
+ BSONObj op = c->next();
+ ts = op.findElement("ts");
+ assert( ts.type() == Date );
+ OpTime last = nextOpTime;
+ OpTime tmp( ts.date() );
+ nextOpTime = tmp;
+ if ( !( last < nextOpTime ) ) {
+ problem() << "sync error: last " << last.toString() << " >= nextOpTime " << nextOpTime.toString() << endl;
+ uassert("bad 'ts' value in sources", false);
+ }
- // apply operations
- {
- while ( 1 ) {
- if ( !c->more() ) {
- log() << "pull: applied " << n << " operations" << endl;
- syncedTo = nextOpTime;
- debugrepl( "end sync_pullOpLog syncedTo: " << syncedTo.toStringLong() );
- dblock lk;
- save(); // note how far we are synced up to now
- break;
- }
- /* todo: get out of the mutex for the next()? */
- BSONObj op = c->next();
- ts = op.findElement("ts");
- assert( ts.type() == Date );
- OpTime last = nextOpTime;
- OpTime tmp( ts.date() );
- nextOpTime = tmp;
- if ( !( last < nextOpTime ) ) {
- problem() << "sync error: last " << last.toString() << " >= nextOpTime " << nextOpTime.toString() << endl;
- uassert("bad 'ts' value in sources", false);
+ sync_pullOpLog_applyOperation(op);
+ n++;
}
-
- sync_pullOpLog_applyOperation(op);
- n++;
}
- }
- return true;
-}
-
-/* note: not yet in mutex at this point.
- returns true if everything happy. return false if you want to reconnect.
-*/
-bool ReplSource::sync() {
- ReplInfo r("sync");
- if ( !quiet )
- log() << "pull: " << sourceName() << '@' << hostName << endl;
- nClonedThisPass = 0;
-
- if ( (string("localhost") == hostName || string("127.0.0.1") == hostName) && port == DBPort ) {
- log() << "pull: can't sync from self (localhost). sources configuration may be wrong." << endl;
- sleepsecs(5);
- return false;
+ return true;
}
- if ( conn.get() == 0 ) {
- conn = auto_ptr<DBClientConnection>(new DBClientConnection());
- string errmsg;
- ReplInfo r("trying to connect to sync source");
- if ( !conn->connect(hostName.c_str(), errmsg) ) {
- resetConnection();
- log() << "pull: cantconn " << errmsg << endl;
- if ( replPair && paired ) {
- assert( startsWith(hostName.c_str(), replPair->remoteHost.c_str()) );
- replPair->arbitrate();
- }
- {
- ReplInfo r("can't connect to sync source, sleeping");
- sleepsecs(1);
- }
+ /* note: not yet in mutex at this point.
+ returns true if everything happy. return false if you want to reconnect.
+ */
+ bool ReplSource::sync() {
+ ReplInfo r("sync");
+ if ( !quiet )
+ log() << "pull: " << sourceName() << '@' << hostName << endl;
+ nClonedThisPass = 0;
+
+ if ( (string("localhost") == hostName || string("127.0.0.1") == hostName) && port == DBPort ) {
+ log() << "pull: can't sync from self (localhost). sources configuration may be wrong." << endl;
+ sleepsecs(5);
return false;
}
- }
- if ( paired )
- replPair->negotiate(conn.get());
+ if ( conn.get() == 0 ) {
+ conn = auto_ptr<DBClientConnection>(new DBClientConnection());
+ string errmsg;
+ ReplInfo r("trying to connect to sync source");
+ if ( !conn->connect(hostName.c_str(), errmsg) ) {
+ resetConnection();
+ log() << "pull: cantconn " << errmsg << endl;
+ if ( replPair && paired ) {
+ assert( startsWith(hostName.c_str(), replPair->remoteHost.c_str()) );
+ replPair->arbitrate();
+ }
+ {
+ ReplInfo r("can't connect to sync source, sleeping");
+ sleepsecs(1);
+ }
+ return false;
+ }
+ }
- /*
- // get current mtime at the server.
- BSONObj o = conn->findOne("admin.$cmd", opTimeQuery);
- BSONElement e = o.findElement("optime");
- if( e.eoo() ) {
- log() << "pull: failed to get cur optime from master" << endl;
- log() << " " << o.toString() << endl;
- return false;
- }
- uassert( e.type() == Date );
- OpTime serverCurTime;
- serverCurTime.asDate() = e.date();
- */
- return sync_pullOpLog();
-}
+ if ( paired )
+ replPair->negotiate(conn.get());
+
+ /*
+ // get current mtime at the server.
+ BSONObj o = conn->findOne("admin.$cmd", opTimeQuery);
+ BSONElement e = o.findElement("optime");
+ if( e.eoo() ) {
+ log() << "pull: failed to get cur optime from master" << endl;
+ log() << " " << o.toString() << endl;
+ return false;
+ }
+ uassert( e.type() == Date );
+ OpTime serverCurTime;
+ serverCurTime.asDate() = e.date();
+ */
+ return sync_pullOpLog();
+ }
-/* -- Logging of operations -------------------------------------*/
+ /* -- Logging of operations -------------------------------------*/
// cached copies of these...
-NamespaceDetails *localOplogMainDetails = 0;
-Database *localOplogClient = 0;
-
-/* we write to local.opload.$main:
- { ts : ..., op: ..., ns: ..., o: ... }
- ts: an OpTime timestamp
- op:
- "i" insert
- "u" update
- "d" delete
- "c" db cmd
- "db" declares presence of a database (ns is set to the db name + '.')
- bb:
- if not null, specifies a boolean to pass along to the other side as b: param.
- used for "justOne" or "upsert" flags on 'd', 'u'
- first: true
- when set, indicates this is the first thing we have logged for this database.
- thus, the slave does not need to copy down all the data when it sees this.
-*/
-void _logOp(const char *opstr, const char *ns, BSONObj& obj, BSONObj *o2, bool *bb) {
- if ( strncmp(ns, "local.", 6) == 0 )
- return;
-
- Database *oldClient = database;
- bool haveLogged = database && database->haveLogged();
-
- /* we jump through a bunch of hoops here to avoid copying the obj buffer twice --
- instead we do a single copy to the destination position in the memory mapped file.
+ NamespaceDetails *localOplogMainDetails = 0;
+ Database *localOplogClient = 0;
+
+ /* we write to local.opload.$main:
+ { ts : ..., op: ..., ns: ..., o: ... }
+ ts: an OpTime timestamp
+ op:
+ "i" insert
+ "u" update
+ "d" delete
+ "c" db cmd
+ "db" declares presence of a database (ns is set to the db name + '.')
+ bb:
+ if not null, specifies a boolean to pass along to the other side as b: param.
+ used for "justOne" or "upsert" flags on 'd', 'u'
+ first: true
+ when set, indicates this is the first thing we have logged for this database.
+ thus, the slave does not need to copy down all the data when it sees this.
*/
+ void _logOp(const char *opstr, const char *ns, BSONObj& obj, BSONObj *o2, bool *bb) {
+ if ( strncmp(ns, "local.", 6) == 0 )
+ return;
- BSONObjBuilder b;
- b.appendDate("ts", OpTime::now().asDate());
- b.append("op", opstr);
- b.append("ns", ns);
- if ( bb )
- b.appendBool("b", *bb);
- if ( o2 )
- b.append("o2", *o2);
- if ( !haveLogged ) {
- b.appendBool("first", true);
- if ( database ) // null on dropDatabase()'s logging.
- database->setHaveLogged();
- }
- BSONObj partial = b.done();
- int posz = partial.objsize();
- int len = posz + obj.objsize() + 1 + 2 /*o:*/;
-
- if ( localOplogMainDetails == 0 ) {
- setClientTempNs("local.");
- localOplogClient = database;
- localOplogMainDetails = nsdetails("local.oplog.$main");
- }
- database = localOplogClient;
-
- Record *r = theDataFileMgr.fast_oplog_insert(localOplogMainDetails, "local.oplog.$main", len);
+ Database *oldClient = database;
+ bool haveLogged = database && database->haveLogged();
- char *p = r->data;
- memcpy(p, partial.objdata(), posz);
- *((unsigned *)p) += obj.objsize() + 1 + 2;
- p += posz - 1;
- *p++ = (char) Object;
- *p++ = 'o';
- *p++ = 0;
- memcpy(p, obj.objdata(), obj.objsize());
- p += obj.objsize();
- *p = EOO;
+ /* we jump through a bunch of hoops here to avoid copying the obj buffer twice --
+ instead we do a single copy to the destination position in the memory mapped file.
+ */
- //BSONObj temp(r);
- //cout << "temp:" << temp.toString() << endl;
+ BSONObjBuilder b;
+ b.appendDate("ts", OpTime::now().asDate());
+ b.append("op", opstr);
+ b.append("ns", ns);
+ if ( bb )
+ b.appendBool("b", *bb);
+ if ( o2 )
+ b.append("o2", *o2);
+ if ( !haveLogged ) {
+ b.appendBool("first", true);
+ if ( database ) // null on dropDatabase()'s logging.
+ database->setHaveLogged();
+ }
+ BSONObj partial = b.done();
+ int posz = partial.objsize();
+ int len = posz + obj.objsize() + 1 + 2 /*o:*/;
+
+ if ( localOplogMainDetails == 0 ) {
+ setClientTempNs("local.");
+ localOplogClient = database;
+ localOplogMainDetails = nsdetails("local.oplog.$main");
+ }
+ database = localOplogClient;
- database = oldClient;
-}
+ Record *r = theDataFileMgr.fast_oplog_insert(localOplogMainDetails, "local.oplog.$main", len);
-/* --------------------------------------------------------------*/
+ char *p = r->data;
+ memcpy(p, partial.objdata(), posz);
+ *((unsigned *)p) += obj.objsize() + 1 + 2;
+ p += posz - 1;
+ *p++ = (char) Object;
+ *p++ = 'o';
+ *p++ = 0;
+ memcpy(p, obj.objdata(), obj.objsize());
+ p += obj.objsize();
+ *p = EOO;
-/*
-TODO:
-_ source has autoptr to the cursor
-_ reuse that cursor when we can
-*/
+ //BSONObj temp(r);
+ //out() << "temp:" << temp.toString() << endl;
-/* returns: # of seconds to sleep before next pass */
-int _replMain(vector<ReplSource*>& sources) {
- {
- ReplInfo r("replMain load sources");
- dblock lk;
- ReplSource::loadAll(sources);
+ database = oldClient;
}
- if ( sources.empty() ) {
- /* replication is not configured yet (for --slave) in local.sources. Poll for config it
- every 20 seconds.
- */
- return 20;
- }
+ /* --------------------------------------------------------------*/
- bool sleep = true;
- for ( vector<ReplSource*>::iterator i = sources.begin(); i != sources.end(); i++ ) {
- ReplSource *s = *i;
- bool ok = false;
- try {
- ok = s->sync();
- bool moreToSync = s->haveMoreDbsToSync();
- sleep = !moreToSync;
- if ( ok && !moreToSync /*&& !s->syncedTo.isNull()*/ ) {
- pairSync->setInitialSyncCompletedLocking();
- }
- }
- catch ( SyncException& ) {
- log() << "caught SyncException, sleeping 10 secs" << endl;
- return 10;
- }
- catch ( AssertionException& e ) {
- if ( e.severe() ) {
- log() << "replMain caught AssertionException, sleeping 1 minutes" << endl;
- return 60;
- }
- else {
- log() << e.toString() << '\n';
- }
- replInfo = "replMain caught AssertionException";
- }
- if ( !ok )
- s->resetConnection();
- }
- if ( sleep ) {
- return 3;
- }
- return 0;
-}
+ /*
+ TODO:
+ _ source has autoptr to the cursor
+ _ reuse that cursor when we can
+ */
-void replMain() {
- vector<ReplSource*> sources;
- while ( 1 ) {
- int s = 0;
+ /* returns: # of seconds to sleep before next pass */
+ int _replMain(vector<ReplSource*>& sources) {
{
+ ReplInfo r("replMain load sources");
dblock lk;
- if ( allDead )
- break;
- assert( syncing == 0 );
- syncing++;
+ ReplSource::loadAll(sources);
}
- try {
- s = _replMain(sources);
- } catch (...) {
- cout << "TEMP: caught exception in _replMain" << endl;
+
+ if ( sources.empty() ) {
+ /* replication is not configured yet (for --slave) in local.sources. Poll for config it
+ every 20 seconds.
+ */
+ return 20;
}
- {
- dblock lk;
- assert( syncing == 1 );
- syncing--;
+
+ bool sleep = true;
+ for ( vector<ReplSource*>::iterator i = sources.begin(); i != sources.end(); i++ ) {
+ ReplSource *s = *i;
+ bool ok = false;
+ try {
+ ok = s->sync();
+ bool moreToSync = s->haveMoreDbsToSync();
+ sleep = !moreToSync;
+ if ( ok && !moreToSync /*&& !s->syncedTo.isNull()*/ ) {
+ pairSync->setInitialSyncCompletedLocking();
+ }
+ }
+ catch ( SyncException& ) {
+ log() << "caught SyncException, sleeping 10 secs" << endl;
+ return 10;
+ }
+ catch ( AssertionException& e ) {
+ if ( e.severe() ) {
+ log() << "replMain caught AssertionException, sleeping 1 minutes" << endl;
+ return 60;
+ }
+ else {
+ log() << e.toString() << '\n';
+ }
+ replInfo = "replMain caught AssertionException";
+ }
+ if ( !ok )
+ s->resetConnection();
}
- if ( s ) {
- stringstream ss;
- ss << "replMain: sleep " << s << " before next pass";
- string msg = ss.str();
- ReplInfo r(msg.c_str());
- sleepsecs(s);
+ if ( sleep ) {
+ return 3;
}
+ return 0;
}
+ void replMain() {
+ vector<ReplSource*> sources;
+ while ( 1 ) {
+ int s = 0;
+ {
+ dblock lk;
+ if ( allDead )
+ break;
+ assert( syncing == 0 );
+ syncing++;
+ }
+ try {
+ s = _replMain(sources);
+ } catch (...) {
+ out() << "TEMP: caught exception in _replMain" << endl;
+ }
+ {
+ dblock lk;
+ assert( syncing == 1 );
+ syncing--;
+ }
+ if ( s ) {
+ stringstream ss;
+ ss << "replMain: sleep " << s << " before next pass";
+ string msg = ss.str();
+ ReplInfo r(msg.c_str());
+ sleepsecs(s);
+ }
+ }
+
// assert(false);
// ReplSource::cleanup(sources);
-}
+ }
-int debug_stop_repl = 0;
+ int debug_stop_repl = 0;
-void replSlaveThread() {
- sleepsecs(1);
+ void replSlaveThread() {
+ sleepsecs(1);
- {
- dblock lk;
- BSONObj obj;
- if ( getSingleton("local.pair.startup", obj) ) {
- // should be: {replacepeer:1}
- replacePeer = true;
- pairSync->setInitialSyncCompleted(); // we are the half that has all the data
+ {
+ dblock lk;
+ BSONObj obj;
+ if ( getSingleton("local.pair.startup", obj) ) {
+ // should be: {replacepeer:1}
+ replacePeer = true;
+ pairSync->setInitialSyncCompleted(); // we are the half that has all the data
+ }
}
- }
- while ( 1 ) {
- try {
- replMain();
- if ( debug_stop_repl )
- break;
- sleepsecs(5);
- }
- catch ( AssertionException& ) {
- ReplInfo r("Assertion in replSlaveThread(): sleeping 5 minutes before retry");
- problem() << "Assertion in replSlaveThread(): sleeping 5 minutes before retry" << endl;
- sleepsecs(300);
- }
- }
-}
-
-/* used to verify that slave knows what databases we have */
-void logOurDbsPresence() {
- path dbs(dbpath);
- directory_iterator end;
- directory_iterator i(dbs);
-
- dblock lk;
-
- while ( i != end ) {
- path p = *i;
- string f = p.leaf();
- if ( endsWith(f.c_str(), ".ns") ) {
- /* note: we keep trailing "." so that when slave calls setClient(ns) everything is happy; e.g.,
- valid namespaces must always have a dot, even though here it is just a placeholder not
- a real one
- */
- string dbname = string(f.c_str(), f.size() - 2);
- if ( dbname != "local." ) {
- setClientTempNs(dbname.c_str());
- logOp("db", dbname.c_str(), emptyObj);
+ while ( 1 ) {
+ try {
+ replMain();
+ if ( debug_stop_repl )
+ break;
+ sleepsecs(5);
+ }
+ catch ( AssertionException& ) {
+ ReplInfo r("Assertion in replSlaveThread(): sleeping 5 minutes before retry");
+ problem() << "Assertion in replSlaveThread(): sleeping 5 minutes before retry" << endl;
+ sleepsecs(300);
}
}
- i++;
}
- database = 0;
-}
+ /* used to verify that slave knows what databases we have */
+ void logOurDbsPresence() {
+ path dbs(dbpath);
+ directory_iterator end;
+ directory_iterator i(dbs);
-/* we have to log the db presence periodically as that "advertisement" will roll out of the log
- as it is of finite length. also as we only do one db cloning per pass, we could skip over a bunch of
- advertisements and thus need to see them again later. so this mechanism can actually be very slow to
- work, and should be improved.
-*/
-void replMasterThread() {
- sleepsecs(15);
- logOurDbsPresence();
+ dblock lk;
- // if you are testing, you might finish test and shutdown in less than 10
- // minutes yet not have done something in first 15 -- this is to exercise
- // this code some.
- sleepsecs(90);
- logOurDbsPresence();
+ while ( i != end ) {
+ path p = *i;
+ string f = p.leaf();
+ if ( endsWith(f.c_str(), ".ns") ) {
+ /* note: we keep trailing "." so that when slave calls setClient(ns) everything is happy; e.g.,
+ valid namespaces must always have a dot, even though here it is just a placeholder not
+ a real one
+ */
+ string dbname = string(f.c_str(), f.size() - 2);
+ if ( dbname != "local." ) {
+ setClientTempNs(dbname.c_str());
+ logOp("db", dbname.c_str(), emptyObj);
+ }
+ }
+ i++;
+ }
- while ( 1 ) {
- logOurDbsPresence();
- sleepsecs(60 * 10);
+ database = 0;
}
-}
-
-void tempThread() {
- while ( 1 ) {
- cout << dbMutexInfo.isLocked() << endl;
- sleepmillis(100);
- }
-}
+ /* we have to log the db presence periodically as that "advertisement" will roll out of the log
+ as it is of finite length. also as we only do one db cloning per pass, we could skip over a bunch of
+ advertisements and thus need to see them again later. so this mechanism can actually be very slow to
+ work, and should be improved.
+ */
+ void replMasterThread() {
+ sleepsecs(15);
+ logOurDbsPresence();
-void startReplication() {
- /* this was just to see if anything locks for longer than it should -- we need to be careful
- not to be locked when trying to connect() or query() the other side.
- */
- //boost::thread tempt(tempThread);
+ // if you are testing, you might finish test and shutdown in less than 10
+ // minutes yet not have done something in first 15 -- this is to exercise
+ // this code some.
+ sleepsecs(90);
+ logOurDbsPresence();
- if ( !slave && !master && !replPair )
- return;
+ while ( 1 ) {
+ logOurDbsPresence();
+ sleepsecs(60 * 10);
+ }
- {
- dblock lk;
- pairSync->init();
}
- if ( slave || replPair ) {
- if ( slave && !quiet )
- log() << "slave=true" << endl;
- slave = true;
- boost::thread repl_thread(replSlaveThread);
+ void tempThread() {
+ while ( 1 ) {
+ out() << dbMutexInfo.isLocked() << endl;
+ sleepmillis(100);
+ }
}
- if ( master || replPair ) {
- if ( master && !quiet )
- log() << "master=true" << endl;
- master = true;
+ void startReplication() {
+ /* this was just to see if anything locks for longer than it should -- we need to be careful
+ not to be locked when trying to connect() or query() the other side.
+ */
+ //boost::thread tempt(tempThread);
+
+ if ( !slave && !master && !replPair )
+ return;
+
{
dblock lk;
- /* create an oplog collection, if it doesn't yet exist. */
- BSONObjBuilder b;
- double sz;
- if ( oplogSize != 0 )
- sz = oplogSize;
- else {
- sz = 50.0 * 1000 * 1000;
- if ( sizeof(int *) >= 8 ) {
- sz = 990.0 * 1000 * 1000;
- boost::intmax_t free = freeSpace(); //-1 if call not supported.
- double fivePct = free * 0.05;
- if ( fivePct > sz )
- sz = fivePct;
+ pairSync->init();
+ }
+
+ if ( slave || replPair ) {
+ if ( slave && !quiet )
+ log() << "slave=true" << endl;
+ slave = true;
+ boost::thread repl_thread(replSlaveThread);
+ }
+
+ if ( master || replPair ) {
+ if ( master && !quiet )
+ log() << "master=true" << endl;
+ master = true;
+ {
+ dblock lk;
+ /* create an oplog collection, if it doesn't yet exist. */
+ BSONObjBuilder b;
+ double sz;
+ if ( oplogSize != 0 )
+ sz = oplogSize;
+ else {
+ sz = 50.0 * 1000 * 1000;
+ if ( sizeof(int *) >= 8 ) {
+ sz = 990.0 * 1000 * 1000;
+ boost::intmax_t free = freeSpace(); //-1 if call not supported.
+ double fivePct = free * 0.05;
+ if ( fivePct > sz )
+ sz = fivePct;
+ }
}
+ b.append("size", sz);
+ b.appendBool("capped", 1);
+ setClientTempNs("local.oplog.$main");
+ string err;
+ BSONObj o = b.done();
+ userCreateNS("local.oplog.$main", o, err, false);
+ database = 0;
}
- b.append("size", sz);
- b.appendBool("capped", 1);
- setClientTempNs("local.oplog.$main");
- string err;
- BSONObj o = b.done();
- userCreateNS("local.oplog.$main", o, err, false);
- database = 0;
- }
- boost::thread mt(replMasterThread);
+ boost::thread mt(replMasterThread);
+ }
}
-}
-/* called from main at server startup */
-void pairWith(const char *remoteEnd, const char *arb) {
- replPair = new ReplPair(remoteEnd, arb);
-}
+ /* called from main at server startup */
+ void pairWith(const char *remoteEnd, const char *arb) {
+ replPair = new ReplPair(remoteEnd, arb);
+ }
} // namespace mongo
diff --git a/db/repl.h b/db/repl.h
index b8b2d4417f1..23ad1f63d76 100644
--- a/db/repl.h
+++ b/db/repl.h
@@ -32,157 +32,157 @@
namespace mongo {
-class DBClientConnection;
-class DBClientCursor;
-extern bool slave;
-extern bool master;
+ class DBClientConnection;
+ class DBClientCursor;
+ extern bool slave;
+ extern bool master;
-bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication, bool slaveOk);
+ bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication, bool slaveOk);
#pragma pack(push,4)
-class OpTime {
- unsigned i;
- unsigned secs;
-public:
- unsigned getSecs() const {
- return secs;
- }
- OpTime(unsigned long long date) {
- reinterpret_cast<unsigned long long&>(*this) = date;
- }
- OpTime(unsigned a, unsigned b) {
- secs = a;
- i = b;
- }
- OpTime() {
- secs = 0;
- i = 0;
- }
- static OpTime now();
-
- /* We store OpTime's in the database as Javascript Date datatype -- we needed some sort of
- 64 bit "container" for these values. While these are not really "Dates", that seems a
- better choice for now than say, Number, which is floating point. Note the BinData type
- is perhaps the cleanest choice, lacking a true unsigned64 datatype, but BinData has a
- couple bytes of overhead.
- */
- unsigned long long asDate() const {
- return *((unsigned long long *) &i);
- }
+ class OpTime {
+ unsigned i;
+ unsigned secs;
+ public:
+ unsigned getSecs() const {
+ return secs;
+ }
+ OpTime(unsigned long long date) {
+ reinterpret_cast<unsigned long long&>(*this) = date;
+ }
+ OpTime(unsigned a, unsigned b) {
+ secs = a;
+ i = b;
+ }
+ OpTime() {
+ secs = 0;
+ i = 0;
+ }
+ static OpTime now();
+
+ /* We store OpTime's in the database as Javascript Date datatype -- we needed some sort of
+ 64 bit "container" for these values. While these are not really "Dates", that seems a
+ better choice for now than say, Number, which is floating point. Note the BinData type
+ is perhaps the cleanest choice, lacking a true unsigned64 datatype, but BinData has a
+ couple bytes of overhead.
+ */
+ unsigned long long asDate() const {
+ return *((unsigned long long *) &i);
+ }
// unsigned long long& asDate() { return *((unsigned long long *) &i); }
- bool isNull() {
- return secs == 0;
- }
-
- string toStringLong() const {
- char buf[64];
- time_t_to_String(secs, buf);
- stringstream ss;
- ss << buf << ' ';
- ss << hex << secs << ':' << i;
- return ss.str();
- }
-
- string toString() const {
- stringstream ss;
- ss << hex << secs << ':' << i;
- return ss.str();
- }
- bool operator==(const OpTime& r) const {
- return i == r.i && secs == r.secs;
- }
- bool operator!=(const OpTime& r) const {
- return !(*this == r);
- }
- bool operator<(const OpTime& r) const {
- if ( secs != r.secs )
- return secs < r.secs;
- return i < r.i;
- }
-};
+ bool isNull() {
+ return secs == 0;
+ }
+
+ string toStringLong() const {
+ char buf[64];
+ time_t_to_String(secs, buf);
+ stringstream ss;
+ ss << buf << ' ';
+ ss << hex << secs << ':' << i;
+ return ss.str();
+ }
+
+ string toString() const {
+ stringstream ss;
+ ss << hex << secs << ':' << i;
+ return ss.str();
+ }
+ bool operator==(const OpTime& r) const {
+ return i == r.i && secs == r.secs;
+ }
+ bool operator!=(const OpTime& r) const {
+ return !(*this == r);
+ }
+ bool operator<(const OpTime& r) const {
+ if ( secs != r.secs )
+ return secs < r.secs;
+ return i < r.i;
+ }
+ };
#pragma pack(pop)
-/* A replication exception */
-struct SyncException {
-};
+ /* A replication exception */
+ struct SyncException {
+ };
-/* A Source is a source from which we can pull (replicate) data.
- stored in collection local.sources.
+ /* A Source is a source from which we can pull (replicate) data.
+ stored in collection local.sources.
- Can be a group of things to replicate for several databases.
+ Can be a group of things to replicate for several databases.
- { host: ..., source: ..., syncedTo: ..., dbs: { ... } }
+ { host: ..., source: ..., syncedTo: ..., dbs: { ... } }
- 'source' defaults to 'main'; support for multiple source names is
- not done (always use main for now).
-*/
-class ReplSource {
- bool resync(string db);
- bool sync_pullOpLog();
- void sync_pullOpLog_applyOperation(BSONObj& op);
-
- auto_ptr<DBClientConnection> conn;
- auto_ptr<DBClientCursor> cursor;
-
- set<string> addDbNextPass;
-
- ReplSource();
-public:
- bool replacing; // in "replace mode" -- see CmdReplacePeer
- bool paired; // --pair in use
- string hostName; // ip addr or hostname plus optionally, ":<port>"
- string _sourceName; // a logical source name.
- string sourceName() const {
- return _sourceName.empty() ? "main" : _sourceName;
- }
- string only; // only a certain db. note that in the sources collection, this may not be changed once you start replicating.
-
- /* the last time point we have already synced up to. */
- OpTime syncedTo;
-
- /* list of databases that we have synced.
- we need this so that if we encounter a new one, we know
- to go fetch the old data.
+ 'source' defaults to 'main'; support for multiple source names is
+ not done (always use main for now).
*/
- set<string> dbs;
-
- int nClonedThisPass;
-
- static void loadAll(vector<ReplSource*>&);
- static void cleanup(vector<ReplSource*>&);
- ReplSource(BSONObj);
- bool sync();
- void save(); // write ourself to local.sources
- void resetConnection() {
- conn = auto_ptr<DBClientConnection>(0);
- cursor = auto_ptr<DBClientCursor>(0);
- }
-
- // make a jsobj from our member fields of the form
- // { host: ..., source: ..., syncedTo: ... }
- BSONObj jsobj();
-
- bool operator==(const ReplSource&r) const {
- return hostName == r.hostName && sourceName() == r.sourceName();
- }
-
- bool haveMoreDbsToSync() const {
- return !addDbNextPass.empty();
+ class ReplSource {
+ bool resync(string db);
+ bool sync_pullOpLog();
+ void sync_pullOpLog_applyOperation(BSONObj& op);
+
+ auto_ptr<DBClientConnection> conn;
+ auto_ptr<DBClientCursor> cursor;
+
+ set<string> addDbNextPass;
+
+ ReplSource();
+ public:
+ bool replacing; // in "replace mode" -- see CmdReplacePeer
+ bool paired; // --pair in use
+ string hostName; // ip addr or hostname plus optionally, ":<port>"
+ string _sourceName; // a logical source name.
+ string sourceName() const {
+ return _sourceName.empty() ? "main" : _sourceName;
+ }
+ string only; // only a certain db. note that in the sources collection, this may not be changed once you start replicating.
+
+ /* the last time point we have already synced up to. */
+ OpTime syncedTo;
+
+ /* list of databases that we have synced.
+ we need this so that if we encounter a new one, we know
+ to go fetch the old data.
+ */
+ set<string> dbs;
+
+ int nClonedThisPass;
+
+ static void loadAll(vector<ReplSource*>&);
+ static void cleanup(vector<ReplSource*>&);
+ ReplSource(BSONObj);
+ bool sync();
+ void save(); // write ourself to local.sources
+ void resetConnection() {
+ conn = auto_ptr<DBClientConnection>(0);
+ cursor = auto_ptr<DBClientCursor>(0);
+ }
+
+ // make a jsobj from our member fields of the form
+ // { host: ..., source: ..., syncedTo: ... }
+ BSONObj jsobj();
+
+ bool operator==(const ReplSource&r) const {
+ return hostName == r.hostName && sourceName() == r.sourceName();
+ }
+
+ bool haveMoreDbsToSync() const {
+ return !addDbNextPass.empty();
+ }
+ };
+
+ /* Write operation to the log (local.oplog.$main)
+ "i" insert
+ "u" update
+ "d" delete
+ "c" db cmd
+ "db" declares presence of a database (ns is set to the db name + '.')
+ */
+ void _logOp(const char *opstr, const char *ns, BSONObj& obj, BSONObj *patt, bool *b);
+ inline void logOp(const char *opstr, const char *ns, BSONObj& obj, BSONObj *patt = 0, bool *b = 0) {
+ if ( master )
+ _logOp(opstr, ns, obj, patt, b);
}
-};
-
-/* Write operation to the log (local.oplog.$main)
- "i" insert
- "u" update
- "d" delete
- "c" db cmd
- "db" declares presence of a database (ns is set to the db name + '.')
-*/
-void _logOp(const char *opstr, const char *ns, BSONObj& obj, BSONObj *patt, bool *b);
-inline void logOp(const char *opstr, const char *ns, BSONObj& obj, BSONObj *patt = 0, bool *b = 0) {
- if ( master )
- _logOp(opstr, ns, obj, patt, b);
-}
} // namespace mongo
diff --git a/db/replset.h b/db/replset.h
index c08372aeb47..7cb5b4fd386 100644
--- a/db/replset.h
+++ b/db/replset.h
@@ -23,157 +23,157 @@
namespace mongo {
-extern int port;
-extern const char *allDead;
-
-/* ReplPair is a pair of db servers replicating to one another and cooperating.
-
- Only one member of the pair is active at a time; so this is a smart master/slave
- configuration basically.
-
- You may read from the slave at anytime though (if you don't mind the slight lag).
-
- todo: Could be extended to be more than a pair, thus the name 'Set' -- for example,
- a set of 3...
-*/
-
-class ReplPair {
-public:
- enum {
- State_CantArb = -3,
- State_Confused = -2,
- State_Negotiating = -1,
- State_Slave = 0,
- State_Master = 1
- };
-
- int state;
- string info; // commentary about our current state
- string arbHost; // "-" for no arbiter. "host[:port]"
- int remotePort;
- string remoteHost;
- string remote; // host:port if port specified.
+ extern int port;
+ extern const char *allDead;
+
+ /* ReplPair is a pair of db servers replicating to one another and cooperating.
+
+ Only one member of the pair is active at a time; so this is a smart master/slave
+ configuration basically.
+
+ You may read from the slave at anytime though (if you don't mind the slight lag).
+
+ todo: Could be extended to be more than a pair, thus the name 'Set' -- for example,
+ a set of 3...
+ */
+
+ class ReplPair {
+ public:
+ enum {
+ State_CantArb = -3,
+ State_Confused = -2,
+ State_Negotiating = -1,
+ State_Slave = 0,
+ State_Master = 1
+ };
+
+ int state;
+ string info; // commentary about our current state
+ string arbHost; // "-" for no arbiter. "host[:port]"
+ int remotePort;
+ string remoteHost;
+ string remote; // host:port if port specified.
// int date; // -1 not yet set; 0=slave; 1=master
- string getInfo() {
- stringstream ss;
- ss << " state: ";
- if ( state == 1 ) ss << "1 State_Master ";
- else if ( state == 0 ) ss << "0 State_Slave";
- else
- ss << "<b>" << state << "</b>";
- ss << '\n';
- ss << " info: " << info << '\n';
- ss << " arbhost: " << arbHost << '\n';
- ss << " remote: " << remoteHost << ':' << remotePort << '\n';
+ string getInfo() {
+ stringstream ss;
+ ss << " state: ";
+ if ( state == 1 ) ss << "1 State_Master ";
+ else if ( state == 0 ) ss << "0 State_Slave";
+ else
+ ss << "<b>" << state << "</b>";
+ ss << '\n';
+ ss << " info: " << info << '\n';
+ ss << " arbhost: " << arbHost << '\n';
+ ss << " remote: " << remoteHost << ':' << remotePort << '\n';
// ss << " date: " << date << '\n';
- return ss.str();
- }
+ return ss.str();
+ }
- ReplPair(const char *remoteEnd, const char *arbiter);
+ ReplPair(const char *remoteEnd, const char *arbiter);
- bool dominant(const string& myname) {
- if ( myname == remoteHost )
- return port > remotePort;
- return myname > remoteHost;
- }
+ bool dominant(const string& myname) {
+ if ( myname == remoteHost )
+ return port > remotePort;
+ return myname > remoteHost;
+ }
- void setMasterLocked( int n, const char *_comment = "" ) {
- dblock p;
- setMaster( n, _comment );
- }
+ void setMasterLocked( int n, const char *_comment = "" ) {
+ dblock p;
+ setMaster( n, _comment );
+ }
- void setMaster(int n, const char *_comment = "");
+ void setMaster(int n, const char *_comment = "");
- /* negotiate with our peer who is master */
- void negotiate(DBClientConnection *conn);
-
- /* peer unreachable, try our arbitrator */
- void arbitrate();
-
- virtual
- DBClientConnection *newClientConnection() const {
- return new DBClientConnection();
- }
-};
+ /* negotiate with our peer who is master */
+ void negotiate(DBClientConnection *conn);
-extern ReplPair *replPair;
+ /* peer unreachable, try our arbitrator */
+ void arbitrate();
-/* note we always return true for the "local" namespace.
+ virtual
+ DBClientConnection *newClientConnection() const {
+ return new DBClientConnection();
+ }
+ };
- we should not allow most operations when not the master
- also we report not master if we are "dead".
+ extern ReplPair *replPair;
- See also CmdIsMaster.
+ /* note we always return true for the "local" namespace.
-*/
-inline bool isMaster() {
- if ( allDead ) {
- return database->name == "local";
- }
+ we should not allow most operations when not the master
+ also we report not master if we are "dead".
- if ( replPair == 0 || replPair->state == ReplPair::State_Master )
- return true;
-
- return database->name == "local";
-}
-
-inline ReplPair::ReplPair(const char *remoteEnd, const char *arb) {
- state = -1;
- remote = remoteEnd;
- remotePort = DBPort;
- remoteHost = remoteEnd;
- const char *p = strchr(remoteEnd, ':');
- if ( p ) {
- remoteHost = string(remoteEnd, p-remoteEnd);
- remotePort = atoi(p+1);
- uassert("bad port #", remotePort > 0 && remotePort < 0x10000 );
- if ( remotePort == DBPort )
- remote = remoteHost; // don't include ":27017" as it is default; in case ran in diff ways over time to normalizke the hostname format in sources collection
- }
+ See also CmdIsMaster.
- uassert("arbiter parm is missing, use '-' for none", arb);
- arbHost = arb;
- uassert("arbiter parm is empty", !arbHost.empty());
-}
-
-/* This is set to true if we have EVER been up to date -- this way a new pair member
- which is a replacement won't go online as master until we have initially fully synced.
- */
-class PairSync {
- int initialsynccomplete;
-public:
- PairSync() {
- initialsynccomplete = -1;
- }
+ */
+ inline bool isMaster() {
+ if ( allDead ) {
+ return database->name == "local";
+ }
- /* call before using the class. from dbmutex */
- void init() {
- BSONObj o;
- initialsynccomplete = 0;
- if ( getSingleton("local.pair.sync", o) )
- initialsynccomplete = 1;
- }
+ if ( replPair == 0 || replPair->state == ReplPair::State_Master )
+ return true;
- bool initialSyncCompleted() {
- return initialsynccomplete != 0;
+ return database->name == "local";
}
- void setInitialSyncCompleted() {
- BSONObj o = fromjson("{\"initialsynccomplete\":1}");
- putSingleton("local.pair.sync", o);
- initialsynccomplete = 1;
+ inline ReplPair::ReplPair(const char *remoteEnd, const char *arb) {
+ state = -1;
+ remote = remoteEnd;
+ remotePort = DBPort;
+ remoteHost = remoteEnd;
+ const char *p = strchr(remoteEnd, ':');
+ if ( p ) {
+ remoteHost = string(remoteEnd, p-remoteEnd);
+ remotePort = atoi(p+1);
+ uassert("bad port #", remotePort > 0 && remotePort < 0x10000 );
+ if ( remotePort == DBPort )
+ remote = remoteHost; // don't include ":27017" as it is default; in case ran in diff ways over time to normalizke the hostname format in sources collection
+ }
+
+ uassert("arbiter parm is missing, use '-' for none", arb);
+ arbHost = arb;
+ uassert("arbiter parm is empty", !arbHost.empty());
}
- void setInitialSyncCompletedLocking() {
- if ( initialsynccomplete == 1 )
- return;
- dblock lk;
- BSONObj o = fromjson("{\"initialsynccomplete\":1}");
- putSingleton("local.pair.sync", o);
- initialsynccomplete = 1;
- }
-};
+ /* This is set to true if we have EVER been up to date -- this way a new pair member
+ which is a replacement won't go online as master until we have initially fully synced.
+ */
+ class PairSync {
+ int initialsynccomplete;
+ public:
+ PairSync() {
+ initialsynccomplete = -1;
+ }
+
+ /* call before using the class. from dbmutex */
+ void init() {
+ BSONObj o;
+ initialsynccomplete = 0;
+ if ( getSingleton("local.pair.sync", o) )
+ initialsynccomplete = 1;
+ }
+
+ bool initialSyncCompleted() {
+ return initialsynccomplete != 0;
+ }
+
+ void setInitialSyncCompleted() {
+ BSONObj o = fromjson("{\"initialsynccomplete\":1}");
+ putSingleton("local.pair.sync", o);
+ initialsynccomplete = 1;
+ }
+
+ void setInitialSyncCompletedLocking() {
+ if ( initialsynccomplete == 1 )
+ return;
+ dblock lk;
+ BSONObj o = fromjson("{\"initialsynccomplete\":1}");
+ putSingleton("local.pair.sync", o);
+ initialsynccomplete = 1;
+ }
+ };
} // namespace mongo
diff --git a/db/scanandorder.h b/db/scanandorder.h
index 695672b7682..061c86ea9a4 100644
--- a/db/scanandorder.h
+++ b/db/scanandorder.h
@@ -22,120 +22,120 @@
namespace mongo {
-/* todo:
- _ handle compound keys with differing directions. we don't handle this yet: neither here nor in indexes i think!!!
- _ limit amount of data
-*/
-
-/* see also IndexDetails::getKeysFromObject, which needs some merging with this. */
-
-class KeyType : boost::noncopyable {
-public:
- BSONObj pattern; // e.g., { ts : -1 }
-public:
- KeyType(BSONObj _keyPattern) {
- pattern = _keyPattern;
- assert( !pattern.isEmpty() );
- }
-
- // returns the key value for o
- BSONObj getKeyFromObject(BSONObj o) {
- return o.extractFields(pattern);
- }
-};
+ /* todo:
+ _ handle compound keys with differing directions. we don't handle this yet: neither here nor in indexes i think!!!
+ _ limit amount of data
+ */
+
+ /* see also IndexDetails::getKeysFromObject, which needs some merging with this. */
+
+ class KeyType : boost::noncopyable {
+ public:
+ BSONObj pattern; // e.g., { ts : -1 }
+ public:
+ KeyType(BSONObj _keyPattern) {
+ pattern = _keyPattern;
+ assert( !pattern.isEmpty() );
+ }
-/* todo:
- _ respect limit
- _ check for excess mem usage
- _ response size limit from runquery; push it up a bit.
-*/
+ // returns the key value for o
+ BSONObj getKeyFromObject(BSONObj o) {
+ return o.extractFields(pattern);
+ }
+ };
+
+ /* todo:
+ _ respect limit
+ _ check for excess mem usage
+ _ response size limit from runquery; push it up a bit.
+ */
+
+ inline bool fillQueryResultFromObj(BufBuilder& b, set<string> *filter, BSONObj& js) {
+ if ( filter ) {
+ BSONObj x;
+ bool ok = x.addFields(js, *filter) > 0;
+ if ( ok )
+ b.append((void*) x.objdata(), x.objsize());
+ return ok;
+ }
-inline bool fillQueryResultFromObj(BufBuilder& b, set<string> *filter, BSONObj& js) {
- if ( filter ) {
- BSONObj x;
- bool ok = x.addFields(js, *filter) > 0;
- if ( ok )
- b.append((void*) x.objdata(), x.objsize());
- return ok;
+ b.append((void*) js.objdata(), js.objsize());
+ return true;
}
- b.append((void*) js.objdata(), js.objsize());
- return true;
-}
-
-typedef multimap<BSONObj,BSONObj,BSONObjCmp> BestMap;
-class ScanAndOrder {
- BestMap best; // key -> full object
- int startFrom;
- int limit; // max to send back.
- KeyType order;
- unsigned approxSize;
-
- void _add(BSONObj& k, BSONObj o) {
- best.insert(make_pair(k,o));
- }
+ typedef multimap<BSONObj,BSONObj,BSONObjCmp> BestMap;
+ class ScanAndOrder {
+ BestMap best; // key -> full object
+ int startFrom;
+ int limit; // max to send back.
+ KeyType order;
+ unsigned approxSize;
- void _addIfBetter(BSONObj& k, BSONObj o, BestMap::iterator i) {
- const BSONObj& worstBestKey = i->first;
- int c = worstBestKey.woCompare(k, order.pattern);
- if ( c > 0 ) {
- // k is better, 'upgrade'
- best.erase(i);
- _add(k, o);
+ void _add(BSONObj& k, BSONObj o) {
+ best.insert(make_pair(k,o));
}
- }
-public:
- ScanAndOrder(int _startFrom, int _limit, BSONObj _order) :
- best( BSONObjCmp( _order ) ),
- startFrom(_startFrom), order(_order) {
- limit = _limit > 0 ? _limit + startFrom : 0x7fffffff;
- approxSize = 0;
- }
+ void _addIfBetter(BSONObj& k, BSONObj o, BestMap::iterator i) {
+ const BSONObj& worstBestKey = i->first;
+ int c = worstBestKey.woCompare(k, order.pattern);
+ if ( c > 0 ) {
+ // k is better, 'upgrade'
+ best.erase(i);
+ _add(k, o);
+ }
+ }
- int size() const {
- return best.size();
- }
+ public:
+ ScanAndOrder(int _startFrom, int _limit, BSONObj _order) :
+ best( BSONObjCmp( _order ) ),
+ startFrom(_startFrom), order(_order) {
+ limit = _limit > 0 ? _limit + startFrom : 0x7fffffff;
+ approxSize = 0;
+ }
- void add(BSONObj o) {
- BSONObj k = order.getKeyFromObject(o);
- if ( (int) best.size() < limit ) {
- approxSize += k.objsize();
- uassert( "too much key data for sort() with no index", approxSize < 1 * 1024 * 1024 );
- _add(k, o);
- return;
+ int size() const {
+ return best.size();
}
- BestMap::iterator i;
- assert( best.end() != best.begin() );
- i = best.end();
- i--;
- _addIfBetter(k, o, i);
- }
- void _fill(BufBuilder& b, set<string> *filter, int& nout, BestMap::iterator begin, BestMap::iterator end) {
- int n = 0;
- int nFilled = 0;
- for ( BestMap::iterator i = begin; i != end; i++ ) {
- n++;
- if ( n <= startFrom )
- continue;
- BSONObj& o = i->second;
- if ( fillQueryResultFromObj(b, filter, o) ) {
- nFilled++;
- if ( nFilled >= limit )
- goto done;
- uassert( "too much data for sort() with no index", b.len() < 4000000 ); // appserver limit
+ void add(BSONObj o) {
+ BSONObj k = order.getKeyFromObject(o);
+ if ( (int) best.size() < limit ) {
+ approxSize += k.objsize();
+ uassert( "too much key data for sort() with no index", approxSize < 1 * 1024 * 1024 );
+ _add(k, o);
+ return;
}
+ BestMap::iterator i;
+ assert( best.end() != best.begin() );
+ i = best.end();
+ i--;
+ _addIfBetter(k, o, i);
}
+
+ void _fill(BufBuilder& b, set<string> *filter, int& nout, BestMap::iterator begin, BestMap::iterator end) {
+ int n = 0;
+ int nFilled = 0;
+ for ( BestMap::iterator i = begin; i != end; i++ ) {
+ n++;
+ if ( n <= startFrom )
+ continue;
+ BSONObj& o = i->second;
+ if ( fillQueryResultFromObj(b, filter, o) ) {
+ nFilled++;
+ if ( nFilled >= limit )
+ goto done;
+ uassert( "too much data for sort() with no index", b.len() < 4000000 ); // appserver limit
+ }
+ }
done:
- nout = nFilled;
- }
+ nout = nFilled;
+ }
- /* scanning complete. stick the query result in b for n objects. */
- void fill(BufBuilder& b, set<string> *filter, int& nout) {
- _fill(b, filter, nout, best.begin(), best.end());
- }
+ /* scanning complete. stick the query result in b for n objects. */
+ void fill(BufBuilder& b, set<string> *filter, int& nout) {
+ _fill(b, filter, nout, best.begin(), best.end());
+ }
-};
+ };
} // namespace mongo
diff --git a/db/security.cpp b/db/security.cpp
index b8da6553d8b..09df4a61955 100644
--- a/db/security.cpp
+++ b/db/security.cpp
@@ -6,38 +6,38 @@
namespace mongo {
-extern "C" int do_md5_test(void);
+ extern "C" int do_md5_test(void);
-boost::thread_specific_ptr<AuthenticationInfo> authInfo;
+ boost::thread_specific_ptr<AuthenticationInfo> authInfo;
-typedef unsigned long long nonce;
+ typedef unsigned long long nonce;
-struct Security {
- ifstream *devrandom;
+ struct Security {
+ ifstream *devrandom;
- nonce getNonce() {
- nonce n;
+ nonce getNonce() {
+ nonce n;
#if defined(__linux__)
- devrandom->read((char*)&n, sizeof(n));
- massert("devrandom failed", !devrandom->fail());
+ devrandom->read((char*)&n, sizeof(n));
+ massert("devrandom failed", !devrandom->fail());
#else
- n = ((unsigned long long)rand())<<32 | rand();
+ n = ((unsigned long long)rand())<<32 | rand();
#endif
- return n;
- }
+ return n;
+ }
- Security()
- {
+ Security()
+ {
#if defined(__linux__)
- devrandom = new ifstream("/dev/urandom", ios::binary|ios::in);
- massert( "can't open dev/urandom", devrandom->is_open() );
+ devrandom = new ifstream("/dev/urandom", ios::binary|ios::in);
+ massert( "can't open dev/urandom", devrandom->is_open() );
#endif
- assert( sizeof(nonce) == 8 );
+ assert( sizeof(nonce) == 8 );
- if ( do_md5_test() )
- massert("md5 unit test fails", false);
- }
-} security;
+ if ( do_md5_test() )
+ massert("md5 unit test fails", false);
+ }
+ } security;
} // namespace mongo
@@ -46,34 +46,34 @@ struct Security {
namespace mongo {
-class CmdGetNonce : public Command {
-public:
- virtual bool logTheOp() {
- return false;
- }
- virtual bool slaveOk() {
- return true;
- }
- CmdGetNonce() : Command("getnonce") {}
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- result.append("nonce", (double) security.getNonce());
- return true;
- }
-} cmdGetNonce;
-
-class CmdAuthenticate : public Command {
-public:
- virtual bool logTheOp() {
- return false;
- }
- virtual bool slaveOk() {
- return true;
- }
- CmdAuthenticate() : Command("authenticate") {}
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- return false;
- }
-} cmdAuthenticate;
+ class CmdGetNonce : public Command {
+ public:
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() {
+ return true;
+ }
+ CmdGetNonce() : Command("getnonce") {}
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ result.append("nonce", (double) security.getNonce());
+ return true;
+ }
+ } cmdGetNonce;
+
+ class CmdAuthenticate : public Command {
+ public:
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() {
+ return true;
+ }
+ CmdAuthenticate() : Command("authenticate") {}
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ return false;
+ }
+ } cmdAuthenticate;
} // namespace mongo
diff --git a/db/security.h b/db/security.h
index d827fc87624..1bb4d1a5ca7 100644
--- a/db/security.h
+++ b/db/security.h
@@ -1,8 +1,8 @@
// security.h
/**
-* Copyright (C) 2009 10gen Inc.
-*
+* Copyright (C) 2009 10gen Inc.
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
@@ -22,14 +22,14 @@
namespace mongo {
-class AuthenticationInfo : boost::noncopyable {
-public:
- AuthenticationInfo() { }
- ~AuthenticationInfo() {
-// cout << "TEMP: auth info was cleaned up ********************************************" << endl;
- }
-};
+ class AuthenticationInfo : boost::noncopyable {
+ public:
+ AuthenticationInfo() { }
+ ~AuthenticationInfo() {
+// stdout() << "TEMP: auth info was cleaned up ********************************************" << endl;
+ }
+ };
-extern boost::thread_specific_ptr<AuthenticationInfo> authInfo;
+ extern boost::thread_specific_ptr<AuthenticationInfo> authInfo;
} // namespace mongo
diff --git a/db/storage.h b/db/storage.h
index 64aaf776e3d..9a0e8b55750 100644
--- a/db/storage.h
+++ b/db/storage.h
@@ -26,123 +26,123 @@ namespace mongo {
#pragma pack(push,1)
-class Record;
-class DeletedRecord;
-class Extent;
-class BtreeBucket;
-class BSONObj;
-class PhysicalDataFile;
-
-class DiskLoc {
- int fileNo; /* this will be volume, file #, etc. */
- int ofs;
-public:
- enum { NullOfs = -1, MaxFiles=4000 };
- int a() const {
- return fileNo;
- }
- DiskLoc(int a, int b) : fileNo(a), ofs(b) {
- assert(ofs!=0);
- }
- DiskLoc() {
- fileNo = -1;
- ofs = NullOfs;
- }
-
- DiskLoc(const DiskLoc& l) {
- fileNo=l.fileNo;
- ofs=l.ofs;
- }
-
- bool questionable() {
- return ofs < -1 ||
- fileNo < -1 ||
- fileNo > 20;
- }
-
- bool isNull() const {
- return ofs == NullOfs;
- }
- void Null() {
- fileNo = -1;
- ofs = NullOfs;
- }
- void assertOk() {
- assert(!isNull());
- }
- void setInvalid() {
- fileNo = -2;
- }
- bool isValid() const {
- return fileNo != -2;
- }
-
- string toString() const {
- if ( isNull() )
- return "null";
- stringstream ss;
- ss << hex << fileNo << ':' << ofs;
- return ss.str();
- }
-
- int& GETOFS() {
- return ofs;
- }
- int getOfs() const {
- return ofs;
- }
- void set(int a, int b) {
- fileNo=a;
- ofs=b;
- }
- void setOfs(int _fileNo, int _ofs) {
- fileNo = _fileNo;
- ofs = _ofs;
- }
-
- void inc(int amt) {
- assert( !isNull() );
- ofs += amt;
- }
-
- bool sameFile(DiskLoc b) {
- return fileNo == b.fileNo;
- }
-
- bool operator==(const DiskLoc& b) const {
- return fileNo==b.fileNo && ofs == b.ofs;
- }
- bool operator!=(const DiskLoc& b) const {
- return !(*this==b);
- }
- const DiskLoc& operator=(const DiskLoc& b) {
- fileNo=b.fileNo;
- ofs = b.ofs;
- assert(ofs!=0);
- return *this;
- }
- int compare(const DiskLoc& b) const {
- int x = fileNo - b.fileNo;
- if ( x )
- return x;
- return ofs - b.ofs;
- }
- bool operator<(const DiskLoc& b) const {
- return compare(b) < 0;
- }
-
- /* get the "thing" associated with this disk location.
- it is assumed the object is what it is -- you must asure that:
- think of this as an unchecked type cast.
- */
- BSONObj obj() const;
- Record* rec() const;
- DeletedRecord* drec() const;
- Extent* ext() const;
- BtreeBucket* btree() const;
-
- PhysicalDataFile& pdf() const;
-};
+ class Record;
+ class DeletedRecord;
+ class Extent;
+ class BtreeBucket;
+ class BSONObj;
+ class PhysicalDataFile;
+
+ class DiskLoc {
+ int fileNo; /* this will be volume, file #, etc. */
+ int ofs;
+ public:
+ enum { NullOfs = -1, MaxFiles=4000 };
+ int a() const {
+ return fileNo;
+ }
+ DiskLoc(int a, int b) : fileNo(a), ofs(b) {
+ assert(ofs!=0);
+ }
+ DiskLoc() {
+ fileNo = -1;
+ ofs = NullOfs;
+ }
+
+ DiskLoc(const DiskLoc& l) {
+ fileNo=l.fileNo;
+ ofs=l.ofs;
+ }
+
+ bool questionable() {
+ return ofs < -1 ||
+ fileNo < -1 ||
+ fileNo > 20;
+ }
+
+ bool isNull() const {
+ return ofs == NullOfs;
+ }
+ void Null() {
+ fileNo = -1;
+ ofs = NullOfs;
+ }
+ void assertOk() {
+ assert(!isNull());
+ }
+ void setInvalid() {
+ fileNo = -2;
+ }
+ bool isValid() const {
+ return fileNo != -2;
+ }
+
+ string toString() const {
+ if ( isNull() )
+ return "null";
+ stringstream ss;
+ ss << hex << fileNo << ':' << ofs;
+ return ss.str();
+ }
+
+ int& GETOFS() {
+ return ofs;
+ }
+ int getOfs() const {
+ return ofs;
+ }
+ void set(int a, int b) {
+ fileNo=a;
+ ofs=b;
+ }
+ void setOfs(int _fileNo, int _ofs) {
+ fileNo = _fileNo;
+ ofs = _ofs;
+ }
+
+ void inc(int amt) {
+ assert( !isNull() );
+ ofs += amt;
+ }
+
+ bool sameFile(DiskLoc b) {
+ return fileNo == b.fileNo;
+ }
+
+ bool operator==(const DiskLoc& b) const {
+ return fileNo==b.fileNo && ofs == b.ofs;
+ }
+ bool operator!=(const DiskLoc& b) const {
+ return !(*this==b);
+ }
+ const DiskLoc& operator=(const DiskLoc& b) {
+ fileNo=b.fileNo;
+ ofs = b.ofs;
+ assert(ofs!=0);
+ return *this;
+ }
+ int compare(const DiskLoc& b) const {
+ int x = fileNo - b.fileNo;
+ if ( x )
+ return x;
+ return ofs - b.ofs;
+ }
+ bool operator<(const DiskLoc& b) const {
+ return compare(b) < 0;
+ }
+
+ /* get the "thing" associated with this disk location.
+ it is assumed the object is what it is -- you must asure that:
+ think of this as an unchecked type cast.
+ */
+ BSONObj obj() const;
+ Record* rec() const;
+ DeletedRecord* drec() const;
+ Extent* ext() const;
+ BtreeBucket* btree() const;
+
+ PhysicalDataFile& pdf() const;
+ };
#pragma pack(pop)
diff --git a/db/tests.cpp b/db/tests.cpp
index a26487e52a7..9011414e3fb 100644
--- a/db/tests.cpp
+++ b/db/tests.cpp
@@ -24,44 +24,44 @@
namespace mongo {
-int test2_old() {
- cout << "test2" << endl;
- printStackTrace();
- if ( 1 )
- return 1;
+ int test2_old() {
+ out() << "test2" << endl;
+ printStackTrace();
+ if ( 1 )
+ return 1;
- MemoryMappedFile f;
+ MemoryMappedFile f;
- char *p = (char *) f.map("/tmp/test.dat", 64*1024*1024);
- char *start = p;
- char *end = p + 64*1024*1024-2;
- end[1] = 'z';
- int i;
- while ( p < end ) {
- *p++ = ' ';
- if ( ++i%64 == 0 ) {
- *p++ = '\n';
- *p++ = 'x';
+ char *p = (char *) f.map("/tmp/test.dat", 64*1024*1024);
+ char *start = p;
+ char *end = p + 64*1024*1024-2;
+ end[1] = 'z';
+ int i;
+ while ( p < end ) {
+ *p++ = ' ';
+ if ( ++i%64 == 0 ) {
+ *p++ = '\n';
+ *p++ = 'x';
+ }
}
- }
- *p = 'a';
+ *p = 'a';
- f.flush(true);
- cout << "done" << endl;
+ f.flush(true);
+ out() << "done" << endl;
- char *x = start + 32 * 1024 * 1024;
- char *y = start + 48 * 1024 * 1024;
- char *z = start + 62 * 1024 * 1024;
+ char *x = start + 32 * 1024 * 1024;
+ char *y = start + 48 * 1024 * 1024;
+ char *z = start + 62 * 1024 * 1024;
- strcpy(z, "zfoo");
- cout << "y" << endl;
- strcpy(y, "yfoo");
- strcpy(x, "xfoo");
- strcpy(start, "xfoo");
+ strcpy(z, "zfoo");
+ out() << "y" << endl;
+ strcpy(y, "yfoo");
+ strcpy(x, "xfoo");
+ strcpy(start, "xfoo");
- exit(3);
+ exit(3);
- return 1;
-}
+ return 1;
+ }
} // namespace mongo
diff --git a/dbgrid/dbgrid.cpp b/dbgrid/dbgrid.cpp
index 5d5cc6f4540..872416caac2 100644
--- a/dbgrid/dbgrid.cpp
+++ b/dbgrid/dbgrid.cpp
@@ -24,15 +24,15 @@
namespace mongo {
-bool dashDashInfer = false;
-vector<string> dashDashGridDb;
-int port = 27017;
-const char *curNs = "";
-Database *database = 0;
-
-string getDbContext() {
- return "?";
-}
+ bool dashDashInfer = false;
+ vector<string> dashDashGridDb;
+ int port = 27017;
+ const char *curNs = "";
+ Database *database = 0;
+
+ string getDbContext() {
+ return "?";
+ }
#if !defined(_WIN32)
@@ -42,93 +42,93 @@ string getDbContext() {
namespace mongo {
-void pipeSigHandler( int signal ) {
- psignal( signal, "Signal Received : ");
-}
+ void pipeSigHandler( int signal ) {
+ psignal( signal, "Signal Received : ");
+ }
#else
-void setupSignals() {}
+ void setupSignals() {}
#endif
-void usage() {
- cout << "Mongo dbgrid usage:\n\n";
- cout << " --port <portno>\n";
- cout << " --griddb <griddbname> [<griddbname>...]\n";
- cout << " --infer infer griddbname by replacing \"-n<n>\"\n";
- cout << " in our hostname with \"-grid\".\n";
- cout << endl;
-}
-
-MessagingPort *grab = 0;
-void processRequest(Message&, MessagingPort&);
-
-void _dbGridConnThread() {
- MessagingPort& dbMsgPort = *grab;
- grab = 0;
- Message m;
- while ( 1 ) {
- m.reset();
+ void usage() {
+ out() << "Mongo dbgrid usage:\n\n";
+ out() << " --port <portno>\n";
+ out() << " --griddb <griddbname> [<griddbname>...]\n";
+ out() << " --infer infer griddbname by replacing \"-n<n>\"\n";
+ out() << " in our hostname with \"-grid\".\n";
+ out() << endl;
+ }
- if ( !dbMsgPort.recv(m) ) {
- log() << "end connection " << dbMsgPort.farEnd.toString() << endl;
- dbMsgPort.shutdown();
- break;
- }
+ MessagingPort *grab = 0;
+ void processRequest(Message&, MessagingPort&);
- processRequest(m, dbMsgPort);
- }
+ void _dbGridConnThread() {
+ MessagingPort& dbMsgPort = *grab;
+ grab = 0;
+ Message m;
+ while ( 1 ) {
+ m.reset();
-}
+ if ( !dbMsgPort.recv(m) ) {
+ log() << "end connection " << dbMsgPort.farEnd.toString() << endl;
+ dbMsgPort.shutdown();
+ break;
+ }
-void dbGridConnThread() {
- MessagingPort *p = grab;
- try {
- _dbGridConnThread();
- } catch ( ... ) {
- problem() << "uncaught exception in dbgridconnthread, closing connection" << endl;
- delete p;
- }
-}
+ processRequest(m, dbMsgPort);
+ }
-class DbGridListener : public Listener {
-public:
- DbGridListener(int p) : Listener(p) { }
- virtual void accepted(MessagingPort *mp) {
- assert( grab == 0 );
- grab = mp;
- boost::thread thr(dbGridConnThread);
- while ( grab )
- sleepmillis(1);
}
-};
-void start() {
- gridDatabase.init();
- /*
+ void dbGridConnThread() {
+ MessagingPort *p = grab;
try {
- cout << "TEMP" << endl;
- {
- ScopedDbConnection c("localhost");
- cout << c.conn().findOne("dwight.bar", emptyObj).toString() << endl;
- c.done();
- cout << "OK1" << endl;
- }
- {
- ScopedDbConnection c("localhost");
- c.conn().findOne("dwight.bar", emptyObj);
- c.done();
- cout << "OK1" << endl;
+ _dbGridConnThread();
+ } catch ( ... ) {
+ problem() << "uncaught exception in dbgridconnthread, closing connection" << endl;
+ delete p;
+ }
}
- cout << "OK2" << endl;
- } catch(...) {
- cout << "exception" << endl;
+
+ class DbGridListener : public Listener {
+ public:
+ DbGridListener(int p) : Listener(p) { }
+ virtual void accepted(MessagingPort *mp) {
+ assert( grab == 0 );
+ grab = mp;
+ boost::thread thr(dbGridConnThread);
+ while ( grab )
+ sleepmillis(1);
+ }
+ };
+
+ void start() {
+ gridDatabase.init();
+ /*
+ try {
+ out() << "TEMP" << endl;
+ {
+ ScopedDbConnection c("localhost");
+ out() << c.conn().findOne("dwight.bar", emptyObj).toString() << endl;
+ c.done();
+ out() << "OK1" << endl;
}
- */
+ {
+ ScopedDbConnection c("localhost");
+ c.conn().findOne("dwight.bar", emptyObj);
+ c.done();
+ out() << "OK1" << endl;
+ }
+ out() << "OK2" << endl;
+ } catch(...) {
+ out() << "exception" << endl;
+ }
+ */
- log() << "waiting for connections on port " << port << "..." << endl;
- DbGridListener l(port);
- l.listen();
-}
+ log() << "waiting for connections on port " << port << "..." << endl;
+ DbGridListener l(port);
+ l.listen();
+ }
} // namespace mongo
@@ -161,11 +161,11 @@ int main(int argc, char* argv[], char *envp[] ) {
n++;
}
if ( n == 0 ) {
- cout << "error: no args for --griddb\n";
+ out() << "error: no args for --griddb\n";
return 4;
}
if ( n > 2 ) {
- cout << "error: --griddb does not support more than 2 parameters yet\n";
+ out() << "error: --griddb does not support more than 2 parameters yet\n";
return 5;
}
}
@@ -192,9 +192,9 @@ int main(int argc, char* argv[], char *envp[] ) {
namespace mongo {
#undef exit
-void dbexit(int rc, const char *why) {
- log() << "dbexit: " << why << " rc:" << rc << endl;
- exit(rc);
-}
+ void dbexit(int rc, const char *why) {
+ log() << "dbexit: " << why << " rc:" << rc << endl;
+ exit(rc);
+ }
} // namespace mongo
diff --git a/dbgrid/dbgrid_commands.cpp b/dbgrid/dbgrid_commands.cpp
index fa62f299f1b..1cd3fd7f026 100644
--- a/dbgrid/dbgrid_commands.cpp
+++ b/dbgrid/dbgrid_commands.cpp
@@ -38,49 +38,49 @@
namespace mongo {
-extern string ourHostname;
+ extern string ourHostname;
-namespace dbgrid_cmds {
+ namespace dbgrid_cmds {
-class NetStatCmd : public Command {
-public:
- virtual bool slaveOk() {
- return true;
- }
- NetStatCmd() : Command("netstat") { }
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
- result.append("griddb", gridDatabase.toString());
- result.append("isdbgrid", 1);
- return true;
- }
-} netstat;
+ class NetStatCmd : public Command {
+ public:
+ virtual bool slaveOk() {
+ return true;
+ }
+ NetStatCmd() : Command("netstat") { }
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ result.append("griddb", gridDatabase.toString());
+ result.append("isdbgrid", 1);
+ return true;
+ }
+ } netstat;
-class IsDbGridCmd : public Command {
-public:
- virtual bool slaveOk() {
- return true;
- }
- IsDbGridCmd() : Command("isdbgrid") { }
- bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
- result.append("isdbgrid", 1);
- result.append("hostname", ourHostname);
- return true;
- }
-} isdbgrid;
+ class IsDbGridCmd : public Command {
+ public:
+ virtual bool slaveOk() {
+ return true;
+ }
+ IsDbGridCmd() : Command("isdbgrid") { }
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ result.append("isdbgrid", 1);
+ result.append("hostname", ourHostname);
+ return true;
+ }
+ } isdbgrid;
-class CmdIsMaster : public Command {
-public:
- virtual bool slaveOk() {
- return true;
- }
- CmdIsMaster() : Command("ismaster") { }
- virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
- result.append("ismaster", 0.0);
- result.append("msg", "isdbgrid");
- return true;
- }
-} ismaster;
+ class CmdIsMaster : public Command {
+ public:
+ virtual bool slaveOk() {
+ return true;
+ }
+ CmdIsMaster() : Command("ismaster") { }
+ virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
+ result.append("ismaster", 0.0);
+ result.append("msg", "isdbgrid");
+ return true;
+ }
+ } ismaster;
-}
+ }
} // namespace mongo
diff --git a/dbgrid/gridconfig.cpp b/dbgrid/gridconfig.cpp
index 5f6e90121e1..85ac97213d9 100644
--- a/dbgrid/gridconfig.cpp
+++ b/dbgrid/gridconfig.cpp
@@ -26,44 +26,44 @@
namespace mongo {
-/* --- Machine --- */
+ /* --- Machine --- */
-map<string, Machine*> Machine::machines;
+ map<string, Machine*> Machine::machines;
-/* --- GridConfig --- */
+ /* --- GridConfig --- */
//static boost::mutex loc_mutex;
-Grid grid;
+ Grid grid;
-ClientConfig* GridConfig::getClientConfig(string database) {
- ClientConfig*& cc = databases[database];
- if ( cc == 0 ) {
- cc = new ClientConfig();
- if ( !cc->loadByName(database.c_str()) ) {
- log() << "couldn't find database " << database << " in grid db" << endl;
- // note here that cc->primary == 0.
+ ClientConfig* GridConfig::getClientConfig(string database) {
+ ClientConfig*& cc = databases[database];
+ if ( cc == 0 ) {
+ cc = new ClientConfig();
+ if ( !cc->loadByName(database.c_str()) ) {
+ log() << "couldn't find database " << database << " in grid db" << endl;
+ // note here that cc->primary == 0.
+ }
}
+ return cc;
}
- return cc;
-}
-/* --- Grid --- */
+ /* --- Grid --- */
-Machine* Grid::owner(const char *ns, BSONObj& objOrKey) {
- ClientConfig *cc = gc.getClientConfig( nsToClient(ns) );
- if ( cc == 0 ) {
- throw UserAssertionException(
- string("dbgrid: no config for db for ") + ns);
- }
+ Machine* Grid::owner(const char *ns, BSONObj& objOrKey) {
+ ClientConfig *cc = gc.getClientConfig( nsToClient(ns) );
+ if ( cc == 0 ) {
+ throw UserAssertionException(
+ string("dbgrid: no config for db for ") + ns);
+ }
- if ( !cc->partitioned ) {
- if ( !cc->primary )
- throw UserAssertionException(string("dbgrid: no primary for ")+ns);
- return cc->primary;
- }
+ if ( !cc->partitioned ) {
+ if ( !cc->primary )
+ throw UserAssertionException(string("dbgrid: no primary for ")+ns);
+ return cc->primary;
+ }
- uassert("dbgrid: not implemented 100", false);
- return 0;
-}
+ uassert("dbgrid: not implemented 100", false);
+ return 0;
+ }
} // namespace mongo
diff --git a/dbgrid/gridconfig.h b/dbgrid/gridconfig.h
index 54fce505517..cec04f155ff 100644
--- a/dbgrid/gridconfig.h
+++ b/dbgrid/gridconfig.h
@@ -29,81 +29,81 @@
namespace mongo {
-/* Machine is the concept of a host that runs the db process.
-*/
-class Machine {
- static map<string, Machine*> machines;
- string name;
-public:
- string getName() const {
- return name;
- }
-
- Machine(string _name) : name(_name) { }
-
- enum {
- Port = 27018 /* default port # for dbs that are downstream of a dbgrid */
+ /* Machine is the concept of a host that runs the db process.
+ */
+ class Machine {
+ static map<string, Machine*> machines;
+ string name;
+ public:
+ string getName() const {
+ return name;
+ }
+
+ Machine(string _name) : name(_name) { }
+
+ enum {
+ Port = 27018 /* default port # for dbs that are downstream of a dbgrid */
+ };
+
+ static Machine* get(string name) {
+ map<string,Machine*>::iterator i = machines.find(name);
+ if ( i != machines.end() )
+ return i->second;
+ return machines[name] = new Machine(name);
+ }
};
- static Machine* get(string name) {
- map<string,Machine*>::iterator i = machines.find(name);
- if ( i != machines.end() )
- return i->second;
- return machines[name] = new Machine(name);
- }
-};
-
//typedef map<string,Machine*> ObjLocs;
-/* top level grid configuration for an entire database */
-class ClientConfig : public Model {
-public:
- string name; // e.g. "alleyinsider"
- Machine *primary;
- bool partitioned;
-
- ClientConfig() : primary(0), partitioned(false) { }
-
- virtual const char * getNS() {
- return "grid.db.database";
- }
- virtual void serialize(BSONObjBuilder& to) {
- to.append("name", name);
- to.appendBool("partitioned", partitioned);
- if ( primary )
- to.append("primary", primary->getName());
- }
- virtual void unserialize(BSONObj& from) {
- name = from.getStringField("name");
- partitioned = from.getBoolField("partitioned");
- string p = from.getStringField("primary");
- if ( !p.empty() )
- primary = Machine::get(p);
- }
-
- bool loadByName(const char *nm) {
- BSONObjBuilder b;
- b.append("name", nm);
- BSONObj q = b.done();
- return load(q);
- }
-};
-
-class GridConfig {
- map<string,ClientConfig*> databases;
-public:
- ClientConfig* getClientConfig(string database);
-};
-
-class Grid {
- GridConfig gc;
-public:
- /* return which machine "owns" the object in question -- ie which partition
- we should go to.
- */
- Machine* owner(const char *ns, BSONObj& objOrKey);
-};
-
-extern Grid grid;
+ /* top level grid configuration for an entire database */
+ class ClientConfig : public Model {
+ public:
+ string name; // e.g. "alleyinsider"
+ Machine *primary;
+ bool partitioned;
+
+ ClientConfig() : primary(0), partitioned(false) { }
+
+ virtual const char * getNS() {
+ return "grid.db.database";
+ }
+ virtual void serialize(BSONObjBuilder& to) {
+ to.append("name", name);
+ to.appendBool("partitioned", partitioned);
+ if ( primary )
+ to.append("primary", primary->getName());
+ }
+ virtual void unserialize(BSONObj& from) {
+ name = from.getStringField("name");
+ partitioned = from.getBoolField("partitioned");
+ string p = from.getStringField("primary");
+ if ( !p.empty() )
+ primary = Machine::get(p);
+ }
+
+ bool loadByName(const char *nm) {
+ BSONObjBuilder b;
+ b.append("name", nm);
+ BSONObj q = b.done();
+ return load(q);
+ }
+ };
+
+ class GridConfig {
+ map<string,ClientConfig*> databases;
+ public:
+ ClientConfig* getClientConfig(string database);
+ };
+
+ class Grid {
+ GridConfig gc;
+ public:
+ /* return which machine "owns" the object in question -- ie which partition
+ we should go to.
+ */
+ Machine* owner(const char *ns, BSONObj& objOrKey);
+ };
+
+ extern Grid grid;
} // namespace mongo
diff --git a/dbgrid/griddatabase.cpp b/dbgrid/griddatabase.cpp
index d038a5f8691..0dacb180ae1 100644
--- a/dbgrid/griddatabase.cpp
+++ b/dbgrid/griddatabase.cpp
@@ -27,125 +27,125 @@
namespace mongo {
-static boost::mutex griddb_mutex;
-GridDatabase gridDatabase;
-DBClientWithCommands *Model::globalConn = gridDatabase.conn;
-string ourHostname;
-extern vector<string> dashDashGridDb;
-extern bool dashDashInfer;
-
-GridDatabase::GridDatabase() {
- conn = 0;
-}
-
-GridDatabase::~GridDatabase() {
- delete conn;
- conn = 0; // defensive
-}
-
-void GridDatabase::init() {
- string hn = getHostName();
- if ( hn.empty() ) {
- sleepsecs(5);
- exit(16);
+ static boost::mutex griddb_mutex;
+ GridDatabase gridDatabase;
+ DBClientWithCommands *Model::globalConn = gridDatabase.conn;
+ string ourHostname;
+ extern vector<string> dashDashGridDb;
+ extern bool dashDashInfer;
+
+ GridDatabase::GridDatabase() {
+ conn = 0;
}
- ourHostname = hn;
- char buf[256];
- strcpy(buf, hn.c_str());
+ GridDatabase::~GridDatabase() {
+ delete conn;
+ conn = 0; // defensive
+ }
- if ( dashDashGridDb.empty() ) {
- char *p = strchr(buf, '-');
- if ( p )
- p = strchr(p+1, '-');
- if ( !p ) {
- log() << "can't parse server's hostname, expect <city>-<locname>-n<nodenum>, got: " << buf << endl;
+ void GridDatabase::init() {
+ string hn = getHostName();
+ if ( hn.empty() ) {
sleepsecs(5);
- exit(17);
+ exit(16);
+ }
+ ourHostname = hn;
+
+ char buf[256];
+ strcpy(buf, hn.c_str());
+
+ if ( dashDashGridDb.empty() ) {
+ char *p = strchr(buf, '-');
+ if ( p )
+ p = strchr(p+1, '-');
+ if ( !p ) {
+ log() << "can't parse server's hostname, expect <city>-<locname>-n<nodenum>, got: " << buf << endl;
+ sleepsecs(5);
+ exit(17);
+ }
+ p[1] = 0;
}
- p[1] = 0;
- }
- string left, right; // with :port#
- string hostLeft, hostRight;
+ string left, right; // with :port#
+ string hostLeft, hostRight;
- if ( dashDashGridDb.empty() ) {
- if ( !dashDashInfer ) {
- cout << "--griddb or --infer required\n";
- exit(7);
- }
- stringstream sl, sr;
- sl << buf << "grid-l";
- sr << buf << "grid-r";
- hostLeft = sl.str();
- hostRight = sr.str();
- sl << ":" << Port;
- sr << ":" << Port;
- left = sl.str();
- right = sr.str();
- }
- else {
- stringstream sl, sr;
- sl << dashDashGridDb[0];
- hostLeft = sl.str();
- sl << ":" << Port;
- left = sl.str();
-
- if ( dashDashGridDb.size() > 1 ) {
- sr << dashDashGridDb[1];
+ if ( dashDashGridDb.empty() ) {
+ if ( !dashDashInfer ) {
+ out() << "--griddb or --infer required\n";
+ exit(7);
+ }
+ stringstream sl, sr;
+ sl << buf << "grid-l";
+ sr << buf << "grid-r";
+ hostLeft = sl.str();
hostRight = sr.str();
+ sl << ":" << Port;
sr << ":" << Port;
+ left = sl.str();
right = sr.str();
}
- }
+ else {
+ stringstream sl, sr;
+ sl << dashDashGridDb[0];
+ hostLeft = sl.str();
+ sl << ":" << Port;
+ left = sl.str();
+
+ if ( dashDashGridDb.size() > 1 ) {
+ sr << dashDashGridDb[1];
+ hostRight = sr.str();
+ sr << ":" << Port;
+ right = sr.str();
+ }
+ }
- if ( !isdigit(left[0]) )
- /* this loop is not really necessary, we we print out if we can't connect
- but it gives much prettier error msg this way if the config is totally
- wrong so worthwhile.
- */
- while ( 1 ) {
- if ( hostbyname(hostLeft.c_str()).empty() ) {
- log() << "can't resolve DNS for " << hostLeft << ", sleeping and then trying again" << endl;
- sleepsecs(15);
- continue;
- }
- if ( !hostRight.empty() && hostbyname(hostRight.c_str()).empty() ) {
- log() << "can't resolve DNS for " << hostRight << ", sleeping and then trying again" << endl;
- sleepsecs(15);
- continue;
+ if ( !isdigit(left[0]) )
+ /* this loop is not really necessary, we we print out if we can't connect
+ but it gives much prettier error msg this way if the config is totally
+ wrong so worthwhile.
+ */
+ while ( 1 ) {
+ if ( hostbyname(hostLeft.c_str()).empty() ) {
+ log() << "can't resolve DNS for " << hostLeft << ", sleeping and then trying again" << endl;
+ sleepsecs(15);
+ continue;
+ }
+ if ( !hostRight.empty() && hostbyname(hostRight.c_str()).empty() ) {
+ log() << "can't resolve DNS for " << hostRight << ", sleeping and then trying again" << endl;
+ sleepsecs(15);
+ continue;
+ }
+ break;
}
- break;
- }
- Logstream& l = log();
- l << "connecting to griddb ";
-
- bool ok;
- if ( !hostRight.empty() ) {
- // connect in paired mode
- l << "L:" << left << " R:" << right << "...";
- l.flush();
- DBClientPaired *dbp = new DBClientPaired();
- conn = dbp;
- ok = dbp->connect(left.c_str(),right.c_str());
- }
- else {
- l << left << "...";
- l.flush();
- DBClientConnection *dcc = new DBClientConnection(/*autoreconnect=*/true);
- conn = dcc;
- string errmsg;
- ok = dcc->connect(left.c_str(), errmsg);
- }
+ Logstream& l = log();
+ l << "connecting to griddb ";
+
+ bool ok;
+ if ( !hostRight.empty() ) {
+ // connect in paired mode
+ l << "L:" << left << " R:" << right << "...";
+ l.flush();
+ DBClientPaired *dbp = new DBClientPaired();
+ conn = dbp;
+ ok = dbp->connect(left.c_str(),right.c_str());
+ }
+ else {
+ l << left << "...";
+ l.flush();
+ DBClientConnection *dcc = new DBClientConnection(/*autoreconnect=*/true);
+ conn = dcc;
+ string errmsg;
+ ok = dcc->connect(left.c_str(), errmsg);
+ }
- if ( !ok ) {
- l << '\n';
- log() << " griddb connect failure at startup (will retry)" << endl;
- } else {
- l << "ok" << endl;
+ if ( !ok ) {
+ l << '\n';
+ log() << " griddb connect failure at startup (will retry)" << endl;
+ } else {
+ l << "ok" << endl;
+ }
}
-}
} // namespace mongo
diff --git a/dbgrid/griddatabase.h b/dbgrid/griddatabase.h
index 5b274234d77..2c739d530fd 100644
--- a/dbgrid/griddatabase.h
+++ b/dbgrid/griddatabase.h
@@ -27,21 +27,21 @@
namespace mongo {
-class GridDatabase {
-public:
- DBClientWithCommands *conn;
+ class GridDatabase {
+ public:
+ DBClientWithCommands *conn;
// DBClientPaired conn;
- enum { Port = 27016 }; /* standard port # for a grid db */
- GridDatabase();
- ~GridDatabase();
- string toString() {
- return conn->toString();
- }
-
- /* call at startup, this will initiate connection to the grid db */
- void init();
-};
-extern GridDatabase gridDatabase;
+ enum { Port = 27016 }; /* standard port # for a grid db */
+ GridDatabase();
+ ~GridDatabase();
+ string toString() {
+ return conn->toString();
+ }
+
+ /* call at startup, this will initiate connection to the grid db */
+ void init();
+ };
+ extern GridDatabase gridDatabase;
} // namespace mongo
diff --git a/dbgrid/request.cpp b/dbgrid/request.cpp
index 3b13ec2a59e..eb7af085c59 100644
--- a/dbgrid/request.cpp
+++ b/dbgrid/request.cpp
@@ -43,93 +43,93 @@
namespace mongo {
-const char *tempHost = "localhost:27018";
-
-void getMore(Message& m, MessagingPort& p) {
- DbMessage d(m);
- const char *ns = d.getns();
-
- cout << "TEMP: getmore: " << ns << endl;
-
- ScopedDbConnection dbcon(tempHost);
- DBClientConnection &c = dbcon.conn();
-
- Message response;
- bool ok = c.port().call(m, response);
- uassert("dbgrid: getmore: error calling db", ok);
- p.reply(m, response, m.data->id);
-
- dbcon.done();
-}
-
-/* got query operation from a database */
-void queryOp(Message& m, MessagingPort& p) {
- DbMessage d(m);
- QueryMessage q(d);
- bool lateAssert = false;
- try {
- if ( q.ntoreturn == -1 && strstr(q.ns, ".$cmd") ) {
- BSONObjBuilder builder;
- cout << q.query.toString() << endl;
- bool ok = runCommandAgainstRegistered(q.ns, q.query, builder);
- if ( ok ) {
- BSONObj x = builder.done();
- replyToQuery(0, p, m, x);
- return;
- }
- }
+ const char *tempHost = "localhost:27018";
+
+ void getMore(Message& m, MessagingPort& p) {
+ DbMessage d(m);
+ const char *ns = d.getns();
+
+ out() << "TEMP: getmore: " << ns << endl;
ScopedDbConnection dbcon(tempHost);
DBClientConnection &c = dbcon.conn();
+
Message response;
bool ok = c.port().call(m, response);
- uassert("dbgrid: error calling db", ok);
- lateAssert = true;
+ uassert("dbgrid: getmore: error calling db", ok);
p.reply(m, response, m.data->id);
+
dbcon.done();
}
- catch ( AssertionException& e ) {
- assert( !lateAssert );
- BSONObjBuilder err;
- err.append("$err", string("dbgrid ") + (e.msg.empty() ? "dbgrid assertion during query" : e.msg));
- BSONObj errObj = err.done();
- replyToQuery(QueryResult::ResultFlag_ErrSet, p, m, errObj);
- return;
+
+ /* got query operation from a database */
+ void queryOp(Message& m, MessagingPort& p) {
+ DbMessage d(m);
+ QueryMessage q(d);
+ bool lateAssert = false;
+ try {
+ if ( q.ntoreturn == -1 && strstr(q.ns, ".$cmd") ) {
+ BSONObjBuilder builder;
+ out() << q.query.toString() << endl;
+ bool ok = runCommandAgainstRegistered(q.ns, q.query, builder);
+ if ( ok ) {
+ BSONObj x = builder.done();
+ replyToQuery(0, p, m, x);
+ return;
+ }
+ }
+
+ ScopedDbConnection dbcon(tempHost);
+ DBClientConnection &c = dbcon.conn();
+ Message response;
+ bool ok = c.port().call(m, response);
+ uassert("dbgrid: error calling db", ok);
+ lateAssert = true;
+ p.reply(m, response, m.data->id);
+ dbcon.done();
+ }
+ catch ( AssertionException& e ) {
+ assert( !lateAssert );
+ BSONObjBuilder err;
+ err.append("$err", string("dbgrid ") + (e.msg.empty() ? "dbgrid assertion during query" : e.msg));
+ BSONObj errObj = err.done();
+ replyToQuery(QueryResult::ResultFlag_ErrSet, p, m, errObj);
+ return;
+ }
}
-}
-void writeOp(int op, Message& m, MessagingPort& p) {
- DbMessage d(m);
- const char *ns = d.getns();
- assert( *ns );
+ void writeOp(int op, Message& m, MessagingPort& p) {
+ DbMessage d(m);
+ const char *ns = d.getns();
+ assert( *ns );
- ScopedDbConnection dbcon(tempHost);
- DBClientConnection &c = dbcon.conn();
+ ScopedDbConnection dbcon(tempHost);
+ DBClientConnection &c = dbcon.conn();
- c.port().say(m);
+ c.port().say(m);
- dbcon.done();
- /*
- while( d.moreJSObjs() ) {
- BSONObj js = d.nextJsObj();
- const char *ns = d.getns();
- assert(*ns);
- }
- */
-}
-
-void processRequest(Message& m, MessagingPort& p) {
- int op = m.data->operation();
- assert( op > dbMsg );
- if ( op == dbQuery ) {
- queryOp(m,p);
- }
- else if ( op == dbGetMore ) {
- getMore(m,p);
+ dbcon.done();
+ /*
+ while( d.moreJSObjs() ) {
+ BSONObj js = d.nextJsObj();
+ const char *ns = d.getns();
+ assert(*ns);
+ }
+ */
}
- else {
- writeOp(op, m, p);
+
+ void processRequest(Message& m, MessagingPort& p) {
+ int op = m.data->operation();
+ assert( op > dbMsg );
+ if ( op == dbQuery ) {
+ queryOp(m,p);
+ }
+ else if ( op == dbGetMore ) {
+ getMore(m,p);
+ }
+ else {
+ writeOp(op, m, p);
+ }
}
-}
} // namespace mongo
diff --git a/dbgrid/shard.cpp b/dbgrid/shard.cpp
index 97aed9e4b20..36ae66bb384 100644
--- a/dbgrid/shard.cpp
+++ b/dbgrid/shard.cpp
@@ -22,17 +22,17 @@
namespace mongo {
-DBClientWithCommands* Shard::conn() {
- return gridDatabase.conn;
-}
+ DBClientWithCommands* Shard::conn() {
+ return gridDatabase.conn;
+ }
-void Shard::serialize(BSONObjBuilder& to) {
- to.append("name", name);
-}
+ void Shard::serialize(BSONObjBuilder& to) {
+ to.append("name", name);
+ }
-void Shard::unserialize(BSONObj& from) {
- name = from.getStringField("name");
- uassert("bad grid.shards.name", !name.empty());
-}
+ void Shard::unserialize(BSONObj& from) {
+ name = from.getStringField("name");
+ uassert("bad grid.shards.name", !name.empty());
+ }
} // namespace mongo
diff --git a/dbgrid/shard.h b/dbgrid/shard.h
index 00467322452..f4967be2638 100644
--- a/dbgrid/shard.h
+++ b/dbgrid/shard.h
@@ -27,20 +27,20 @@
namespace mongo {
-/* grid.shards
- { name: 'hostname'
- }
-*/
-class Shard : public Model {
-public:
- string name; // hostname (less -l, -r)
-
- virtual const char * getNS() {
- return "grid.shards";
- }
- virtual void serialize(BSONObjBuilder& to);
- virtual void unserialize(BSONObj& from);
- virtual DBClientWithCommands* conn();
-};
+ /* grid.shards
+ { name: 'hostname'
+ }
+ */
+ class Shard : public Model {
+ public:
+ string name; // hostname (less -l, -r)
+
+ virtual const char * getNS() {
+ return "grid.shards";
+ }
+ virtual void serialize(BSONObjBuilder& to);
+ virtual void unserialize(BSONObj& from);
+ virtual DBClientWithCommands* conn();
+ };
} // namespace mongo
diff --git a/dbtests/btreetests.cpp b/dbtests/btreetests.cpp
index f7bca1953e1..f0d02d66f45 100644
--- a/dbtests/btreetests.cpp
+++ b/dbtests/btreetests.cpp
@@ -24,206 +24,206 @@
namespace BtreeTests {
-class Base {
-public:
- Base() {
- {
- dblock lk;
- setClient( ns() );
- }
- BSONObjBuilder builder;
- builder.append( "ns", ns() );
- builder.append( "name", "testIndex" );
- BSONObj bobj = builder.done();
- idx_.info =
- theDataFileMgr.insert( ns(), bobj.objdata(), bobj.objsize() );
- idx_.head = BtreeBucket::addHead( idx_ );
- }
- ~Base() {
- // FIXME cleanup all btree buckets.
- theDataFileMgr.deleteRecord( ns(), idx_.info.rec(), idx_.info );
- ASSERT( theDataFileMgr.findAll( ns() )->eof() );
- }
-protected:
- BtreeBucket* bt() const {
- return idx_.head.btree();
- }
- DiskLoc dl() const {
- return idx_.head;
- }
- IndexDetails& id() {
- return idx_;
- }
- static const char* ns() {
- return "sys.unittest.btreetests";
- }
- // dummy, valid record loc
- static DiskLoc recordLoc() {
- return DiskLoc( 0, 2 );
- }
- void checkValid( int nKeys ) const {
- ASSERT( bt() );
- ASSERT( bt()->isHead() );
- bt()->assertValid( order(), true );
- ASSERT_EQUALS( nKeys, bt()->fullValidate( dl(), order() ) );
- }
- void insert( BSONObj &key ) {
- bt()->insert( dl(), recordLoc(), key, order(), true, id(), true );
- }
- void unindex( BSONObj &key ) {
- bt()->unindex( dl(), id(), key, recordLoc() );
- }
- static BSONObj simpleKey( char c, int n = 1 ) {
- BSONObjBuilder builder;
- string val( n, c );
- builder.append( "a", val );
- return builder.doneAndDecouple();
- }
- void locate( BSONObj &key, int expectedPos,
- bool expectedFound, const DiskLoc &expectedLocation,
- int direction = 1 ) {
- int pos;
- bool found;
- DiskLoc location =
- bt()->locate( dl(), key, order(), pos, found, recordLoc(), direction );
- ASSERT_EQUALS( expectedFound, found );
- ASSERT( location == expectedLocation );
- ASSERT_EQUALS( expectedPos, pos );
- }
- BSONObj order() const {
- return idx_.keyPattern();
- }
-private:
- IndexDetails idx_;
-};
-
-class Create : public Base {
-public:
- void run() {
- checkValid( 0 );
- }
-};
-
-class SimpleInsertDelete : public Base {
-public:
- void run() {
- BSONObj key = simpleKey( 'z' );
- insert( key );
-
- checkValid( 1 );
- locate( key, 0, true, dl() );
-
- unindex( key );
-
- checkValid( 0 );
- locate( key, 0, false, DiskLoc() );
- }
-};
-
-class SplitUnevenBucketBase : public Base {
-public:
- void run() {
- for ( int i = 0; i < 10; ++i ) {
- BSONObj shortKey = simpleKey( shortToken( i ), 1 );
- insert( shortKey );
- BSONObj longKey = simpleKey( longToken( i ), 800 );
- insert( longKey );
- }
- checkValid( 20 );
- }
-protected:
- virtual char shortToken( int i ) const = 0;
- virtual char longToken( int i ) const = 0;
- static char leftToken( int i ) {
- return 'a' + i;
- }
- static char rightToken( int i ) {
- return 'z' - i;
- }
-};
-
-class SplitRightHeavyBucket : public SplitUnevenBucketBase {
-private:
- virtual char shortToken( int i ) const {
- return leftToken( i );
- }
- virtual char longToken( int i ) const {
- return rightToken( i );
- }
-};
-
-class SplitLeftHeavyBucket : public SplitUnevenBucketBase {
-private:
- virtual char shortToken( int i ) const {
- return rightToken( i );
- }
- virtual char longToken( int i ) const {
- return leftToken( i );
- }
-};
-
-class MissingLocate : public Base {
-public:
- void run() {
- for ( int i = 0; i < 3; ++i ) {
- BSONObj k = simpleKey( 'b' + 2 * i );
- insert( k );
- }
-
- locate( 1, 'a', 'b', dl() );
- locate( 1, 'c', 'd', dl() );
- locate( 1, 'e', 'f', dl() );
- locate( 1, 'g', 'g' + 1, DiskLoc() ); // of course, 'h' isn't in the index.
-
- // old behavior
- // locate( -1, 'a', 'b', dl() );
- // locate( -1, 'c', 'd', dl() );
- // locate( -1, 'e', 'f', dl() );
- // locate( -1, 'g', 'f', dl() );
-
- locate( -1, 'a', 'a' - 1, DiskLoc() ); // of course, 'a' - 1 isn't in the index
- locate( -1, 'c', 'b', dl() );
- locate( -1, 'e', 'd', dl() );
- locate( -1, 'g', 'f', dl() );
- }
-private:
- void locate( int direction, char token, char expectedMatch,
- DiskLoc expectedLocation ) {
- BSONObj k = simpleKey( token );
- int expectedPos = ( expectedMatch - 'b' ) / 2;
- Base::locate( k, expectedPos, false, expectedLocation, direction );
- }
-};
-
-class MissingLocateMultiBucket : public Base {
-public:
- void run() {
- for ( int i = 0; i < 10; ++i ) {
- BSONObj k = key( 'b' + 2 * i );
- insert( k );
- }
- BSONObj straddle = key( 'i' );
- locate( straddle, 0, false, dl(), 1 );
- straddle = key( 'k' );
- locate( straddle, 0, false, dl(), -1 );
- }
-private:
- BSONObj key( char c ) {
- return simpleKey( c, 800 );
- }
-};
-
-class All : public UnitTest::Suite {
-public:
- All() {
- add< Create >();
- add< SimpleInsertDelete >();
- add< SplitRightHeavyBucket >();
- add< SplitLeftHeavyBucket >();
- add< MissingLocate >();
- add< MissingLocateMultiBucket >();
- }
-};
+ class Base {
+ public:
+ Base() {
+ {
+ dblock lk;
+ setClient( ns() );
+ }
+ BSONObjBuilder builder;
+ builder.append( "ns", ns() );
+ builder.append( "name", "testIndex" );
+ BSONObj bobj = builder.done();
+ idx_.info =
+ theDataFileMgr.insert( ns(), bobj.objdata(), bobj.objsize() );
+ idx_.head = BtreeBucket::addHead( idx_ );
+ }
+ ~Base() {
+ // FIXME cleanup all btree buckets.
+ theDataFileMgr.deleteRecord( ns(), idx_.info.rec(), idx_.info );
+ ASSERT( theDataFileMgr.findAll( ns() )->eof() );
+ }
+ protected:
+ BtreeBucket* bt() const {
+ return idx_.head.btree();
+ }
+ DiskLoc dl() const {
+ return idx_.head;
+ }
+ IndexDetails& id() {
+ return idx_;
+ }
+ static const char* ns() {
+ return "sys.unittest.btreetests";
+ }
+ // dummy, valid record loc
+ static DiskLoc recordLoc() {
+ return DiskLoc( 0, 2 );
+ }
+ void checkValid( int nKeys ) const {
+ ASSERT( bt() );
+ ASSERT( bt()->isHead() );
+ bt()->assertValid( order(), true );
+ ASSERT_EQUALS( nKeys, bt()->fullValidate( dl(), order() ) );
+ }
+ void insert( BSONObj &key ) {
+ bt()->insert( dl(), recordLoc(), key, order(), true, id(), true );
+ }
+ void unindex( BSONObj &key ) {
+ bt()->unindex( dl(), id(), key, recordLoc() );
+ }
+ static BSONObj simpleKey( char c, int n = 1 ) {
+ BSONObjBuilder builder;
+ string val( n, c );
+ builder.append( "a", val );
+ return builder.doneAndDecouple();
+ }
+ void locate( BSONObj &key, int expectedPos,
+ bool expectedFound, const DiskLoc &expectedLocation,
+ int direction = 1 ) {
+ int pos;
+ bool found;
+ DiskLoc location =
+ bt()->locate( dl(), key, order(), pos, found, recordLoc(), direction );
+ ASSERT_EQUALS( expectedFound, found );
+ ASSERT( location == expectedLocation );
+ ASSERT_EQUALS( expectedPos, pos );
+ }
+ BSONObj order() const {
+ return idx_.keyPattern();
+ }
+ private:
+ IndexDetails idx_;
+ };
+
+ class Create : public Base {
+ public:
+ void run() {
+ checkValid( 0 );
+ }
+ };
+
+ class SimpleInsertDelete : public Base {
+ public:
+ void run() {
+ BSONObj key = simpleKey( 'z' );
+ insert( key );
+
+ checkValid( 1 );
+ locate( key, 0, true, dl() );
+
+ unindex( key );
+
+ checkValid( 0 );
+ locate( key, 0, false, DiskLoc() );
+ }
+ };
+
+ class SplitUnevenBucketBase : public Base {
+ public:
+ void run() {
+ for ( int i = 0; i < 10; ++i ) {
+ BSONObj shortKey = simpleKey( shortToken( i ), 1 );
+ insert( shortKey );
+ BSONObj longKey = simpleKey( longToken( i ), 800 );
+ insert( longKey );
+ }
+ checkValid( 20 );
+ }
+ protected:
+ virtual char shortToken( int i ) const = 0;
+ virtual char longToken( int i ) const = 0;
+ static char leftToken( int i ) {
+ return 'a' + i;
+ }
+ static char rightToken( int i ) {
+ return 'z' - i;
+ }
+ };
+
+ class SplitRightHeavyBucket : public SplitUnevenBucketBase {
+ private:
+ virtual char shortToken( int i ) const {
+ return leftToken( i );
+ }
+ virtual char longToken( int i ) const {
+ return rightToken( i );
+ }
+ };
+
+ class SplitLeftHeavyBucket : public SplitUnevenBucketBase {
+ private:
+ virtual char shortToken( int i ) const {
+ return rightToken( i );
+ }
+ virtual char longToken( int i ) const {
+ return leftToken( i );
+ }
+ };
+
+ class MissingLocate : public Base {
+ public:
+ void run() {
+ for ( int i = 0; i < 3; ++i ) {
+ BSONObj k = simpleKey( 'b' + 2 * i );
+ insert( k );
+ }
+
+ locate( 1, 'a', 'b', dl() );
+ locate( 1, 'c', 'd', dl() );
+ locate( 1, 'e', 'f', dl() );
+ locate( 1, 'g', 'g' + 1, DiskLoc() ); // of course, 'h' isn't in the index.
+
+ // old behavior
+ // locate( -1, 'a', 'b', dl() );
+ // locate( -1, 'c', 'd', dl() );
+ // locate( -1, 'e', 'f', dl() );
+ // locate( -1, 'g', 'f', dl() );
+
+ locate( -1, 'a', 'a' - 1, DiskLoc() ); // of course, 'a' - 1 isn't in the index
+ locate( -1, 'c', 'b', dl() );
+ locate( -1, 'e', 'd', dl() );
+ locate( -1, 'g', 'f', dl() );
+ }
+ private:
+ void locate( int direction, char token, char expectedMatch,
+ DiskLoc expectedLocation ) {
+ BSONObj k = simpleKey( token );
+ int expectedPos = ( expectedMatch - 'b' ) / 2;
+ Base::locate( k, expectedPos, false, expectedLocation, direction );
+ }
+ };
+
+ class MissingLocateMultiBucket : public Base {
+ public:
+ void run() {
+ for ( int i = 0; i < 10; ++i ) {
+ BSONObj k = key( 'b' + 2 * i );
+ insert( k );
+ }
+ BSONObj straddle = key( 'i' );
+ locate( straddle, 0, false, dl(), 1 );
+ straddle = key( 'k' );
+ locate( straddle, 0, false, dl(), -1 );
+ }
+ private:
+ BSONObj key( char c ) {
+ return simpleKey( c, 800 );
+ }
+ };
+
+ class All : public UnitTest::Suite {
+ public:
+ All() {
+ add< Create >();
+ add< SimpleInsertDelete >();
+ add< SplitRightHeavyBucket >();
+ add< SplitLeftHeavyBucket >();
+ add< MissingLocate >();
+ add< MissingLocateMultiBucket >();
+ }
+ };
}
UnitTest::TestPtr btreeTests() {
diff --git a/dbtests/dbtests.cpp b/dbtests/dbtests.cpp
index c290a6b51b7..45e1f0d3456 100644
--- a/dbtests/dbtests.cpp
+++ b/dbtests/dbtests.cpp
@@ -26,7 +26,7 @@
using namespace std;
namespace mongo {
-extern const char* dbpath;
+ extern const char* dbpath;
} // namespace mongo
string dbpathSpec = "/tmp/unittest/";
@@ -39,7 +39,7 @@ void usage() {
" -debug run tests with verbose output\n"
" -list list available test suites\n"
" <suite> run the specified test suite only";
- cout << instructions << endl;
+ out() << instructions << endl;
}
int main( int argc, char** argv ) {
diff --git a/dbtests/jsobjtests.cpp b/dbtests/jsobjtests.cpp
index c3c0c2d6d7f..20be4bafd7a 100644
--- a/dbtests/jsobjtests.cpp
+++ b/dbtests/jsobjtests.cpp
@@ -25,934 +25,934 @@
#include <limits>
namespace JsobjTests {
-namespace BSONObjTests {
-class Create {
-public:
- void run() {
- BSONObj b;
- ASSERT_EQUALS( 0, b.nFields() );
- }
-};
-
-class Base {
-protected:
- static BSONObj basic( const char *name, int val ) {
- BSONObjBuilder b;
- b.appendInt( name, val );
- return b.doneAndDecouple();
- }
- static BSONObj basic( const char *name, vector< int > val ) {
- BSONObjBuilder b;
- b.appendIntArray( name, val );
- return b.doneAndDecouple();
- }
- template< class T >
- static BSONObj basic( const char *name, T val ) {
- BSONObjBuilder b;
- b.append( name, val );
- return b.doneAndDecouple();
- }
-};
-
-class WoCompareBasic : public Base {
-public:
- void run() {
- ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1 ) ) == 0 );
- ASSERT( basic( "a", 2 ).woCompare( basic( "a", 1 ) ) > 0 );
- ASSERT( basic( "a", 1 ).woCompare( basic( "a", 2 ) ) < 0 );
- // field name comparison
- ASSERT( basic( "a", 1 ).woCompare( basic( "b", 1 ) ) < 0 );
- }
-};
-
-class NumericCompareBasic : public Base {
-public:
- void run() {
- ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1.0 ) ) == 0 );
- }
-};
-
-class WoCompareEmbeddedObject : public Base {
-public:
- void run() {
- ASSERT( basic( "a", basic( "b", 1 ) ).woCompare
- ( basic( "a", basic( "b", 1.0 ) ) ) == 0 );
- ASSERT( basic( "a", basic( "b", 1 ) ).woCompare
- ( basic( "a", basic( "b", 2 ) ) ) < 0 );
- }
-};
-
-class WoCompareEmbeddedArray : public Base {
-public:
- void run() {
- vector< int > i;
- i.push_back( 1 );
- i.push_back( 2 );
- vector< double > d;
- d.push_back( 1 );
- d.push_back( 2 );
- ASSERT( basic( "a", i ).woCompare( basic( "a", d ) ) == 0 );
-
- vector< int > j;
- j.push_back( 1 );
- j.push_back( 3 );
- ASSERT( basic( "a", i ).woCompare( basic( "a", j ) ) < 0 );
- }
-};
-
-class WoCompareOrdered : public Base {
-public:
- void run() {
- ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1 ), basic( "a", 1 ) ) == 0 );
- ASSERT( basic( "a", 2 ).woCompare( basic( "a", 1 ), basic( "a", 1 ) ) > 0 );
- ASSERT( basic( "a", 1 ).woCompare( basic( "a", 2 ), basic( "a", 1 ) ) < 0 );
- ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1 ), basic( "a", -1 ) ) == 0 );
- ASSERT( basic( "a", 2 ).woCompare( basic( "a", 1 ), basic( "a", -1 ) ) < 0 );
- ASSERT( basic( "a", 1 ).woCompare( basic( "a", 2 ), basic( "a", -1 ) ) > 0 );
- }
-};
-
-namespace JsonStringTests {
-class Empty {
-public:
- void run() {
- BSONObjBuilder b;
- ASSERT_EQUALS( "{}", b.done().jsonString( Strict ) );
- }
-};
-
-class SingleStringMember {
-public:
- void run() {
- BSONObjBuilder b;
- b.append( "a", "b" );
- ASSERT_EQUALS( "{ \"a\" : \"b\" }", b.done().jsonString( Strict ) );
- }
-};
-
-class EscapedCharacters {
-public:
- void run() {
- BSONObjBuilder b;
- b.append( "a", "\" \\ / \b \f \n \r \t" );
- ASSERT_EQUALS( "{ \"a\" : \"\\\" \\\\ \\/ \\b \\f \\n \\r \\t\" }", b.done().jsonString( Strict ) );
- }
-};
+ namespace BSONObjTests {
+ class Create {
+ public:
+ void run() {
+ BSONObj b;
+ ASSERT_EQUALS( 0, b.nFields() );
+ }
+ };
+
+ class Base {
+ protected:
+ static BSONObj basic( const char *name, int val ) {
+ BSONObjBuilder b;
+ b.appendInt( name, val );
+ return b.doneAndDecouple();
+ }
+ static BSONObj basic( const char *name, vector< int > val ) {
+ BSONObjBuilder b;
+ b.appendIntArray( name, val );
+ return b.doneAndDecouple();
+ }
+ template< class T >
+ static BSONObj basic( const char *name, T val ) {
+ BSONObjBuilder b;
+ b.append( name, val );
+ return b.doneAndDecouple();
+ }
+ };
+
+ class WoCompareBasic : public Base {
+ public:
+ void run() {
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1 ) ) == 0 );
+ ASSERT( basic( "a", 2 ).woCompare( basic( "a", 1 ) ) > 0 );
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 2 ) ) < 0 );
+ // field name comparison
+ ASSERT( basic( "a", 1 ).woCompare( basic( "b", 1 ) ) < 0 );
+ }
+ };
+
+ class NumericCompareBasic : public Base {
+ public:
+ void run() {
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1.0 ) ) == 0 );
+ }
+ };
+
+ class WoCompareEmbeddedObject : public Base {
+ public:
+ void run() {
+ ASSERT( basic( "a", basic( "b", 1 ) ).woCompare
+ ( basic( "a", basic( "b", 1.0 ) ) ) == 0 );
+ ASSERT( basic( "a", basic( "b", 1 ) ).woCompare
+ ( basic( "a", basic( "b", 2 ) ) ) < 0 );
+ }
+ };
+
+ class WoCompareEmbeddedArray : public Base {
+ public:
+ void run() {
+ vector< int > i;
+ i.push_back( 1 );
+ i.push_back( 2 );
+ vector< double > d;
+ d.push_back( 1 );
+ d.push_back( 2 );
+ ASSERT( basic( "a", i ).woCompare( basic( "a", d ) ) == 0 );
+
+ vector< int > j;
+ j.push_back( 1 );
+ j.push_back( 3 );
+ ASSERT( basic( "a", i ).woCompare( basic( "a", j ) ) < 0 );
+ }
+ };
+
+ class WoCompareOrdered : public Base {
+ public:
+ void run() {
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1 ), basic( "a", 1 ) ) == 0 );
+ ASSERT( basic( "a", 2 ).woCompare( basic( "a", 1 ), basic( "a", 1 ) ) > 0 );
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 2 ), basic( "a", 1 ) ) < 0 );
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1 ), basic( "a", -1 ) ) == 0 );
+ ASSERT( basic( "a", 2 ).woCompare( basic( "a", 1 ), basic( "a", -1 ) ) < 0 );
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 2 ), basic( "a", -1 ) ) > 0 );
+ }
+ };
+
+ namespace JsonStringTests {
+ class Empty {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ ASSERT_EQUALS( "{}", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class SingleStringMember {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", "b" );
+ ASSERT_EQUALS( "{ \"a\" : \"b\" }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class EscapedCharacters {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", "\" \\ / \b \f \n \r \t" );
+ ASSERT_EQUALS( "{ \"a\" : \"\\\" \\\\ \\/ \\b \\f \\n \\r \\t\" }", b.done().jsonString( Strict ) );
+ }
+ };
// per http://www.ietf.org/rfc/rfc4627.txt, control characters are
// (U+0000 through U+001F). U+007F is not mentioned as a control character.
-class AdditionalControlCharacters {
-public:
- void run() {
- BSONObjBuilder b;
- b.append( "a", "\x1 \x1f" );
- ASSERT_EQUALS( "{ \"a\" : \"\\u0001 \\u001f\" }", b.done().jsonString( Strict ) );
- }
-};
-
-class ExtendedAscii {
-public:
- void run() {
- BSONObjBuilder b;
- b.append( "a", "\x80" );
- ASSERT_EQUALS( "{ \"a\" : \"\x80\" }", b.done().jsonString( Strict ) );
- }
-};
-
-class EscapeFieldName {
-public:
- void run() {
- BSONObjBuilder b;
- b.append( "\t", "b" );
- ASSERT_EQUALS( "{ \"\\t\" : \"b\" }", b.done().jsonString( Strict ) );
- }
-};
-
-class SingleIntMember {
-public:
- void run() {
- BSONObjBuilder b;
- b.appendInt( "a", 1 );
- ASSERT_EQUALS( "{ \"a\" : 1 }", b.done().jsonString( Strict ) );
- }
-};
-
-class SingleNumberMember {
-public:
- void run() {
- BSONObjBuilder b;
- b.append( "a", 1.5 );
- ASSERT_EQUALS( "{ \"a\" : 1.5 }", b.done().jsonString( Strict ) );
- }
-};
-
-class InvalidNumbers {
-public:
- void run() {
- BSONObjBuilder b;
- b.append( "a", numeric_limits< double >::infinity() );
- ASSERT_EXCEPTION( b.done().jsonString( Strict ), AssertionException );
-
- BSONObjBuilder c;
- c.append( "a", numeric_limits< double >::quiet_NaN() );
- ASSERT_EXCEPTION( c.done().jsonString( Strict ), AssertionException );
-
- BSONObjBuilder d;
- d.append( "a", numeric_limits< double >::signaling_NaN() );
- ASSERT_EXCEPTION( d.done().jsonString( Strict ), AssertionException );
- }
-};
-
-class NumberPrecision {
-public:
- void run() {
- BSONObjBuilder b;
- b.append( "a", 123456789 );
- ASSERT_EQUALS( "{ \"a\" : 123456789 }", b.done().jsonString( Strict ) );
- }
-};
-
-class NegativeNumber {
-public:
- void run() {
- BSONObjBuilder b;
- b.append( "a", -1 );
- ASSERT_EQUALS( "{ \"a\" : -1 }", b.done().jsonString( Strict ) );
- }
-};
-
-class SingleBoolMember {
-public:
- void run() {
- BSONObjBuilder b;
- b.appendBool( "a", true );
- ASSERT_EQUALS( "{ \"a\" : true }", b.done().jsonString( Strict ) );
-
- BSONObjBuilder c;
- c.appendBool( "a", false );
- ASSERT_EQUALS( "{ \"a\" : false }", c.done().jsonString( Strict ) );
- }
-};
-
-class SingleNullMember {
-public:
- void run() {
- BSONObjBuilder b;
- b.appendNull( "a" );
- ASSERT_EQUALS( "{ \"a\" : null }", b.done().jsonString( Strict ) );
- }
-};
-
-class SingleObjectMember {
-public:
- void run() {
- BSONObjBuilder b, c;
- b.append( "a", c.done() );
- ASSERT_EQUALS( "{ \"a\" : {} }", b.done().jsonString( Strict ) );
- }
-};
-
-class TwoMembers {
-public:
- void run() {
- BSONObjBuilder b;
- b.append( "a", 1 );
- b.append( "b", 2 );
- ASSERT_EQUALS( "{ \"a\" : 1, \"b\" : 2 }", b.done().jsonString( Strict ) );
- }
-};
-
-class EmptyArray {
-public:
- void run() {
- vector< int > arr;
- BSONObjBuilder b;
- b.append( "a", arr );
- ASSERT_EQUALS( "{ \"a\" : [] }", b.done().jsonString( Strict ) );
- }
-};
-
-class Array {
-public:
- void run() {
- vector< int > arr;
- arr.push_back( 1 );
- arr.push_back( 2 );
- BSONObjBuilder b;
- b.append( "a", arr );
- ASSERT_EQUALS( "{ \"a\" : [ 1, 2 ] }", b.done().jsonString( Strict ) );
- }
-};
-
-class DBRef {
-public:
- void run() {
- OID oid;
- memset( &oid, 0xff, 12 );
- BSONObjBuilder b;
- b.appendDBRef( "a", "namespace", oid );
- ASSERT_EQUALS( "{ \"a\" : { \"$ns\" : \"namespace\", \"$id\" : \"ffffffffffffffffffffffff\" } }",
- b.done().jsonString( Strict ) );
- ASSERT_EQUALS( "{ \"a\" : { \"$ns\" : \"namespace\", \"$id\" : \"ffffffffffffffffffffffff\" } }",
- b.done().jsonString( JS ) );
- ASSERT_EQUALS( "{ \"a\" : Dbref( \"namespace\", \"ffffffffffffffffffffffff\" ) }",
- b.done().jsonString( TenGen ) );
- }
-};
-
-class DBRefZero {
-public:
- void run() {
- OID oid;
- memset( &oid, 0, 12 );
- BSONObjBuilder b;
- b.appendDBRef( "a", "namespace", oid );
- ASSERT_EQUALS( "{ \"a\" : { \"$ns\" : \"namespace\", \"$id\" : \"000000000000000000000000\" } }",
- b.done().jsonString( Strict ) );
- }
-};
-
-class ObjectId {
-public:
- void run() {
- OID oid;
- memset( &oid, 0xff, 12 );
- BSONObjBuilder b;
- b.appendOID( "a", &oid );
- ASSERT_EQUALS( "{ \"a\" : \"ffffffffffffffffffffffff\" }",
- b.done().jsonString( Strict ) );
- ASSERT_EQUALS( "{ \"a\" : ObjectId( \"ffffffffffffffffffffffff\" ) }",
- b.done().jsonString( TenGen ) );
- }
-};
-
-class BinData {
-public:
- void run() {
- char z[ 3 ];
- z[ 0 ] = 'a';
- z[ 1 ] = 'b';
- z[ 2 ] = 'c';
- BSONObjBuilder b;
- b.appendBinData( "a", 3, ByteArray, z );
- ASSERT_EQUALS( "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"02\" } }",
- b.done().jsonString( Strict ) );
-
- BSONObjBuilder c;
- c.appendBinData( "a", 2, ByteArray, z );
- ASSERT_EQUALS( "{ \"a\" : { \"$binary\" : \"YWI=\", \"$type\" : \"02\" } }",
- c.done().jsonString( Strict ) );
-
- BSONObjBuilder d;
- d.appendBinData( "a", 1, ByteArray, z );
- ASSERT_EQUALS( "{ \"a\" : { \"$binary\" : \"YQ==\", \"$type\" : \"02\" } }",
- d.done().jsonString( Strict ) );
- }
-};
-
-class Symbol {
-public:
- void run() {
- BSONObjBuilder b;
- b.appendSymbol( "a", "b" );
- ASSERT_EQUALS( "{ \"a\" : \"b\" }", b.done().jsonString( Strict ) );
- }
-};
-
-class Date {
-public:
- void run() {
- BSONObjBuilder b;
- b.appendDate( "a", 0 );
- ASSERT_EQUALS( "{ \"a\" : { \"$date\" : 0 } }", b.done().jsonString( Strict ) );
- ASSERT_EQUALS( "{ \"a\" : Date( 0 ) }", b.done().jsonString( TenGen ) );
- ASSERT_EQUALS( "{ \"a\" : Date( 0 ) }", b.done().jsonString( JS ) );
- }
-};
-
-class Regex {
-public:
- void run() {
- BSONObjBuilder b;
- b.appendRegex( "a", "abc", "i" );
- ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"abc\", \"$options\" : \"i\" } }",
- b.done().jsonString( Strict ) );
- ASSERT_EQUALS( "{ \"a\" : /abc/i }", b.done().jsonString( TenGen ) );
- ASSERT_EQUALS( "{ \"a\" : /abc/i }", b.done().jsonString( JS ) );
- }
-};
-
-class RegexEscape {
-public:
- void run() {
- BSONObjBuilder b;
- b.appendRegex( "a", "/\"", "i" );
- ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"\\/\\\"\", \"$options\" : \"i\" } }",
- b.done().jsonString( Strict ) );
- ASSERT_EQUALS( "{ \"a\" : /\\/\\\"/i }", b.done().jsonString( TenGen ) );
- ASSERT_EQUALS( "{ \"a\" : /\\/\\\"/i }", b.done().jsonString( JS ) );
- }
-};
-
-class RegexManyOptions {
-public:
- void run() {
- BSONObjBuilder b;
- b.appendRegex( "a", "z", "abcgimx" );
- ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"z\", \"$options\" : \"abcgimx\" } }",
- b.done().jsonString( Strict ) );
- ASSERT_EQUALS( "{ \"a\" : /z/gim }", b.done().jsonString( TenGen ) );
- ASSERT_EQUALS( "{ \"a\" : /z/gim }", b.done().jsonString( JS ) );
- }
-};
-
-} // namespace JsonStringTests
-} // namespace BSONObjTests
-
-
-namespace FromJsonTests {
-
-class Base {
-public:
- void run() {
- assertEquals( bson(), fromjson( json() ) );
- assertEquals( bson(), fromjson( bson().jsonString( Strict ) ) );
- assertEquals( bson(), fromjson( bson().jsonString( TenGen ) ) );
- assertEquals( bson(), fromjson( bson().jsonString( JS ) ) );
- }
-protected:
- virtual BSONObj bson() const = 0;
- virtual string json() const = 0;
-private:
- static void assertEquals( const BSONObj &expected, const BSONObj &actual ) {
- if ( expected.woCompare( actual ) ) {
- cout << "Expected: " << expected.toString()
- << ", got: " << actual.toString();
+ class AdditionalControlCharacters {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", "\x1 \x1f" );
+ ASSERT_EQUALS( "{ \"a\" : \"\\u0001 \\u001f\" }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class ExtendedAscii {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", "\x80" );
+ ASSERT_EQUALS( "{ \"a\" : \"\x80\" }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class EscapeFieldName {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "\t", "b" );
+ ASSERT_EQUALS( "{ \"\\t\" : \"b\" }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class SingleIntMember {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendInt( "a", 1 );
+ ASSERT_EQUALS( "{ \"a\" : 1 }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class SingleNumberMember {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", 1.5 );
+ ASSERT_EQUALS( "{ \"a\" : 1.5 }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class InvalidNumbers {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", numeric_limits< double >::infinity() );
+ ASSERT_EXCEPTION( b.done().jsonString( Strict ), AssertionException );
+
+ BSONObjBuilder c;
+ c.append( "a", numeric_limits< double >::quiet_NaN() );
+ ASSERT_EXCEPTION( c.done().jsonString( Strict ), AssertionException );
+
+ BSONObjBuilder d;
+ d.append( "a", numeric_limits< double >::signaling_NaN() );
+ ASSERT_EXCEPTION( d.done().jsonString( Strict ), AssertionException );
+ }
+ };
+
+ class NumberPrecision {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", 123456789 );
+ ASSERT_EQUALS( "{ \"a\" : 123456789 }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class NegativeNumber {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", -1 );
+ ASSERT_EQUALS( "{ \"a\" : -1 }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class SingleBoolMember {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendBool( "a", true );
+ ASSERT_EQUALS( "{ \"a\" : true }", b.done().jsonString( Strict ) );
+
+ BSONObjBuilder c;
+ c.appendBool( "a", false );
+ ASSERT_EQUALS( "{ \"a\" : false }", c.done().jsonString( Strict ) );
+ }
+ };
+
+ class SingleNullMember {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendNull( "a" );
+ ASSERT_EQUALS( "{ \"a\" : null }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class SingleObjectMember {
+ public:
+ void run() {
+ BSONObjBuilder b, c;
+ b.append( "a", c.done() );
+ ASSERT_EQUALS( "{ \"a\" : {} }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class TwoMembers {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", 1 );
+ b.append( "b", 2 );
+ ASSERT_EQUALS( "{ \"a\" : 1, \"b\" : 2 }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class EmptyArray {
+ public:
+ void run() {
+ vector< int > arr;
+ BSONObjBuilder b;
+ b.append( "a", arr );
+ ASSERT_EQUALS( "{ \"a\" : [] }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class Array {
+ public:
+ void run() {
+ vector< int > arr;
+ arr.push_back( 1 );
+ arr.push_back( 2 );
+ BSONObjBuilder b;
+ b.append( "a", arr );
+ ASSERT_EQUALS( "{ \"a\" : [ 1, 2 ] }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class DBRef {
+ public:
+ void run() {
+ OID oid;
+ memset( &oid, 0xff, 12 );
+ BSONObjBuilder b;
+ b.appendDBRef( "a", "namespace", oid );
+ ASSERT_EQUALS( "{ \"a\" : { \"$ns\" : \"namespace\", \"$id\" : \"ffffffffffffffffffffffff\" } }",
+ b.done().jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : { \"$ns\" : \"namespace\", \"$id\" : \"ffffffffffffffffffffffff\" } }",
+ b.done().jsonString( JS ) );
+ ASSERT_EQUALS( "{ \"a\" : Dbref( \"namespace\", \"ffffffffffffffffffffffff\" ) }",
+ b.done().jsonString( TenGen ) );
+ }
+ };
+
+ class DBRefZero {
+ public:
+ void run() {
+ OID oid;
+ memset( &oid, 0, 12 );
+ BSONObjBuilder b;
+ b.appendDBRef( "a", "namespace", oid );
+ ASSERT_EQUALS( "{ \"a\" : { \"$ns\" : \"namespace\", \"$id\" : \"000000000000000000000000\" } }",
+ b.done().jsonString( Strict ) );
+ }
+ };
+
+ class ObjectId {
+ public:
+ void run() {
+ OID oid;
+ memset( &oid, 0xff, 12 );
+ BSONObjBuilder b;
+ b.appendOID( "a", &oid );
+ ASSERT_EQUALS( "{ \"a\" : \"ffffffffffffffffffffffff\" }",
+ b.done().jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : ObjectId( \"ffffffffffffffffffffffff\" ) }",
+ b.done().jsonString( TenGen ) );
+ }
+ };
+
+ class BinData {
+ public:
+ void run() {
+ char z[ 3 ];
+ z[ 0 ] = 'a';
+ z[ 1 ] = 'b';
+ z[ 2 ] = 'c';
+ BSONObjBuilder b;
+ b.appendBinData( "a", 3, ByteArray, z );
+ ASSERT_EQUALS( "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"02\" } }",
+ b.done().jsonString( Strict ) );
+
+ BSONObjBuilder c;
+ c.appendBinData( "a", 2, ByteArray, z );
+ ASSERT_EQUALS( "{ \"a\" : { \"$binary\" : \"YWI=\", \"$type\" : \"02\" } }",
+ c.done().jsonString( Strict ) );
+
+ BSONObjBuilder d;
+ d.appendBinData( "a", 1, ByteArray, z );
+ ASSERT_EQUALS( "{ \"a\" : { \"$binary\" : \"YQ==\", \"$type\" : \"02\" } }",
+ d.done().jsonString( Strict ) );
+ }
+ };
+
+ class Symbol {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendSymbol( "a", "b" );
+ ASSERT_EQUALS( "{ \"a\" : \"b\" }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class Date {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendDate( "a", 0 );
+ ASSERT_EQUALS( "{ \"a\" : { \"$date\" : 0 } }", b.done().jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : Date( 0 ) }", b.done().jsonString( TenGen ) );
+ ASSERT_EQUALS( "{ \"a\" : Date( 0 ) }", b.done().jsonString( JS ) );
+ }
+ };
+
+ class Regex {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "abc", "i" );
+ ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"abc\", \"$options\" : \"i\" } }",
+ b.done().jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : /abc/i }", b.done().jsonString( TenGen ) );
+ ASSERT_EQUALS( "{ \"a\" : /abc/i }", b.done().jsonString( JS ) );
+ }
+ };
+
+ class RegexEscape {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "/\"", "i" );
+ ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"\\/\\\"\", \"$options\" : \"i\" } }",
+ b.done().jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : /\\/\\\"/i }", b.done().jsonString( TenGen ) );
+ ASSERT_EQUALS( "{ \"a\" : /\\/\\\"/i }", b.done().jsonString( JS ) );
+ }
+ };
+
+ class RegexManyOptions {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "z", "abcgimx" );
+ ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"z\", \"$options\" : \"abcgimx\" } }",
+ b.done().jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : /z/gim }", b.done().jsonString( TenGen ) );
+ ASSERT_EQUALS( "{ \"a\" : /z/gim }", b.done().jsonString( JS ) );
+ }
+ };
+
+ } // namespace JsonStringTests
+ } // namespace BSONObjTests
+
+
+ namespace FromJsonTests {
+
+ class Base {
+ public:
+ void run() {
+ assertEquals( bson(), fromjson( json() ) );
+ assertEquals( bson(), fromjson( bson().jsonString( Strict ) ) );
+ assertEquals( bson(), fromjson( bson().jsonString( TenGen ) ) );
+ assertEquals( bson(), fromjson( bson().jsonString( JS ) ) );
+ }
+ protected:
+ virtual BSONObj bson() const = 0;
+ virtual string json() const = 0;
+ private:
+ static void assertEquals( const BSONObj &expected, const BSONObj &actual ) {
+ if ( expected.woCompare( actual ) ) {
+ out() << "Expected: " << expected.toString()
+ << ", got: " << actual.toString();
+ }
+ ASSERT( !expected.woCompare( actual ) );
+ }
+ };
+
+ class Bad {
+ public:
+ void run() {
+ ASSERT_EXCEPTION( fromjson( json() ), MsgAssertionException );
+ }
+ protected:
+ virtual string json() const = 0;
+ };
+
+ class Empty : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{}";
+ }
+ };
+
+ class EmptyWithSpace : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ }";
+ }
+ };
+
+ class SingleString : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", "b" );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"b\" }";
+ }
+ };
+
+ class EmptyStrings : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "", "" );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"\" : \"\" }";
+ }
+ };
+
+ class ReservedFieldName : public Bad {
+ virtual string json() const {
+ return "{ \"$ns\" : \"b\" }";
+ }
+ };
+
+ class OkDollarFieldName : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "$where", 1 );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"$where\" : 1 }";
+ }
+ };
+
+ class SingleNumber : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", 1 );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : 1 }";
+ }
+ };
+
+ class FancyNumber {
+ public:
+ void run() {
+ ASSERT_EQUALS( bson().firstElement().number(),
+ fromjson( json() ).firstElement().number() );
+ }
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", -4.4433e-2 );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : -4.4433e-2 }";
+ }
+ };
+
+ class TwoElements : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", 1 );
+ b.append( "b", "foo" );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : 1, \"b\" : \"foo\" }";
+ }
+ };
+
+ class Subobject : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", 1 );
+ BSONObjBuilder c;
+ c.append( "z", b.done() );
+ return c.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"z\" : { \"a\" : 1 } }";
+ }
+ };
+
+ class ArrayEmpty : public Base {
+ virtual BSONObj bson() const {
+ vector< int > arr;
+ BSONObjBuilder b;
+ b.append( "a", arr );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : [] }";
+ }
+ };
+
+ class Array : public Base {
+ virtual BSONObj bson() const {
+ vector< int > arr;
+ arr.push_back( 1 );
+ arr.push_back( 2 );
+ arr.push_back( 3 );
+ BSONObjBuilder b;
+ b.append( "a", arr );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : [ 1, 2, 3 ] }";
+ }
+ };
+
+ class True : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendBool( "a", true );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : true }";
+ }
+ };
+
+ class False : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendBool( "a", false );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : false }";
+ }
+ };
+
+ class Null : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendNull( "a" );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : null }";
+ }
+ };
+
+ class EscapedCharacters : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", "\" \\ / \b \f \n \r \t" );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\\" \\\\ \\/ \\b \\f \\n \\r \\t\" }";
+ }
+ };
+
+ class AllowedControlCharacter : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", "\x7f" );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\x7f\" }";
+ }
+ };
+
+ class EscapeFieldName : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "\n", "b" );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"\\n\" : \"b\" }";
+ }
+ };
+
+ class EscapedUnicodeToUtf8 : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ char u[ 7 ];
+ u[ 0 ] = 0xe0 | 0x0a;
+ u[ 1 ] = 0x80;
+ u[ 2 ] = 0x80;
+ u[ 3 ] = 0xe0 | 0x0a;
+ u[ 4 ] = 0x80;
+ u[ 5 ] = 0x80;
+ u[ 6 ] = 0;
+ b.append( "a", u );
+ ASSERT_EQUALS( string( u ), b.done().firstElement().valuestr() );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\ua000\\uA000\" }";
+ }
+ };
+
+ class Utf8AllOnes : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ char u[ 8 ];
+ u[ 0 ] = 0x01;
+
+ u[ 1 ] = 0x7f;
+
+ u[ 2 ] = 0xdf;
+ u[ 3 ] = 0xbf;
+
+ u[ 4 ] = 0xef;
+ u[ 5 ] = 0xbf;
+ u[ 6 ] = 0xbf;
+
+ u[ 7 ] = 0;
+
+ b.append( "a", u );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\u0001\\u007f\\u07ff\\uffff\" }";
+ }
+ };
+
+ class Utf8FirstByteOnes : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ char u[ 6 ];
+ u[ 0 ] = 0xdc;
+ u[ 1 ] = 0x80;
+
+ u[ 2 ] = 0xef;
+ u[ 3 ] = 0xbc;
+ u[ 4 ] = 0x80;
+
+ u[ 5 ] = 0;
+
+ b.append( "a", u );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\u0700\\uff00\" }";
+ }
+ };
+
+ class DBRef : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ OID o;
+ memset( &o, 0, 12 );
+ b.appendDBRef( "a", "foo", o );
+ return b.doneAndDecouple();
+ }
+ // NOTE Testing other formats handled by the Base class.
+ virtual string json() const {
+ return "{ \"a\" : { \"$ns\" : \"foo\", \"$id\" : \"000000000000000000000000\" } }";
+ }
+ };
+
+ class Oid : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendOID( "_id" );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"_id\" : \"000000000000000000000000\" }";
+ }
+ };
+
+ class BinData : public Base {
+ virtual BSONObj bson() const {
+ char z[ 3 ];
+ z[ 0 ] = 'a';
+ z[ 1 ] = 'b';
+ z[ 2 ] = 'c';
+ BSONObjBuilder b;
+ b.appendBinData( "a", 3, ByteArray, z );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"02\" } }";
+ }
+ };
+
+ class BinDataPaddedSingle : public Base {
+ virtual BSONObj bson() const {
+ char z[ 2 ];
+ z[ 0 ] = 'a';
+ z[ 1 ] = 'b';
+ BSONObjBuilder b;
+ b.appendBinData( "a", 2, ByteArray, z );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YWI=\", \"$type\" : \"02\" } }";
+ }
+ };
+
+ class BinDataPaddedDouble : public Base {
+ virtual BSONObj bson() const {
+ char z[ 1 ];
+ z[ 0 ] = 'a';
+ BSONObjBuilder b;
+ b.appendBinData( "a", 1, ByteArray, z );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YQ==\", \"$type\" : \"02\" } }";
+ }
+ };
+
+ class BinDataAllChars : public Base {
+ virtual BSONObj bson() const {
+ char z[] = {
+ 0x00, 0x10, 0x83, 0x10, 0x51, 0x87, 0x20, 0x92, 0x8B, 0x30,
+ 0xD3, 0x8F, 0x41, 0x14, 0x93, 0x51, 0x55, 0x97, 0x61, 0x96,
+ 0x9B, 0x71, 0xD7, 0x9F, 0x82, 0x18, 0xA3, 0x92, 0x59, 0xA7,
+ 0xA2, 0x9A, 0xAB, 0xB2, 0xDB, 0xAF, 0xC3, 0x1C, 0xB3, 0xD3,
+ 0x5D, 0xB7, 0xE3, 0x9E, 0xBB, 0xF3, 0xDF, 0xBF
+ };
+ BSONObjBuilder b;
+ b.appendBinData( "a", 48, ByteArray, z );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\", \"$type\" : \"02\" } }";
+ }
+ };
+
+ class Date : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendDate( "a", 0 );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$date\" : 0 } }";
+ }
+ };
+
+ class DateNonzero : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendDate( "a", 100 );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$date\" : 100 } }";
+ }
+ };
+
+ class DateTooLong : public Bad {
+ virtual string json() const {
+ stringstream ss;
+ ss << "{ \"a\" : { \"$date\" : " << ~(0LL) << "0" << " } }";
+ return ss.str();
+ }
+ };
+
+ class Regex : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "b", "i" );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$regex\" : \"b\", \"$options\" : \"i\" } }";
+ }
+ };
+
+ class RegexEscape : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "\t", "i" );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$regex\" : \"\\t\", \"$options\" : \"i\" } }";
+ }
+ };
+
+ class RegexWithQuotes : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "\"", "" );
+ return b.doneAndDecouple();
+ }
+ virtual string json() const {
+ return "{ \"a\" : /\"/ }";
+ }
+ };
+
+ class RegexInvalidOption : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$regex\" : \"b\", \"$options\" : \"1\" } }";
+ }
+ };
+
+ class RegexInvalidOption2 : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : /b/c }";
+ }
+ };
+
+ class Malformed : public Bad {
+ string json() const {
+ return "{";
+ }
+ };
+
+ } // namespace FromJsonTests
+
+ class All : public UnitTest::Suite {
+ public:
+ All() {
+ add< BSONObjTests::Create >();
+ add< BSONObjTests::WoCompareBasic >();
+ add< BSONObjTests::NumericCompareBasic >();
+ add< BSONObjTests::WoCompareEmbeddedObject >();
+ add< BSONObjTests::WoCompareEmbeddedArray >();
+ add< BSONObjTests::WoCompareOrdered >();
+ add< BSONObjTests::JsonStringTests::Empty >();
+ add< BSONObjTests::JsonStringTests::SingleStringMember >();
+ add< BSONObjTests::JsonStringTests::EscapedCharacters >();
+ add< BSONObjTests::JsonStringTests::AdditionalControlCharacters >();
+ add< BSONObjTests::JsonStringTests::ExtendedAscii >();
+ add< BSONObjTests::JsonStringTests::EscapeFieldName >();
+ add< BSONObjTests::JsonStringTests::SingleIntMember >();
+ add< BSONObjTests::JsonStringTests::SingleNumberMember >();
+ add< BSONObjTests::JsonStringTests::InvalidNumbers >();
+ add< BSONObjTests::JsonStringTests::NumberPrecision >();
+ add< BSONObjTests::JsonStringTests::NegativeNumber >();
+ add< BSONObjTests::JsonStringTests::SingleBoolMember >();
+ add< BSONObjTests::JsonStringTests::SingleNullMember >();
+ add< BSONObjTests::JsonStringTests::SingleObjectMember >();
+ add< BSONObjTests::JsonStringTests::TwoMembers >();
+ add< BSONObjTests::JsonStringTests::EmptyArray >();
+ add< BSONObjTests::JsonStringTests::Array >();
+ add< BSONObjTests::JsonStringTests::DBRef >();
+ add< BSONObjTests::JsonStringTests::DBRefZero >();
+ add< BSONObjTests::JsonStringTests::ObjectId >();
+ add< BSONObjTests::JsonStringTests::BinData >();
+ add< BSONObjTests::JsonStringTests::Symbol >();
+ add< BSONObjTests::JsonStringTests::Date >();
+ add< BSONObjTests::JsonStringTests::Regex >();
+ add< BSONObjTests::JsonStringTests::RegexEscape >();
+ add< BSONObjTests::JsonStringTests::RegexManyOptions >();
+ add< FromJsonTests::Empty >();
+ add< FromJsonTests::EmptyWithSpace >();
+ add< FromJsonTests::SingleString >();
+ add< FromJsonTests::EmptyStrings >();
+ add< FromJsonTests::ReservedFieldName >();
+ add< FromJsonTests::OkDollarFieldName >();
+ add< FromJsonTests::SingleNumber >();
+ add< FromJsonTests::FancyNumber >();
+ add< FromJsonTests::TwoElements >();
+ add< FromJsonTests::Subobject >();
+ add< FromJsonTests::ArrayEmpty >();
+ add< FromJsonTests::Array >();
+ add< FromJsonTests::True >();
+ add< FromJsonTests::False >();
+ add< FromJsonTests::Null >();
+ add< FromJsonTests::EscapedCharacters >();
+ add< FromJsonTests::AllowedControlCharacter >();
+ add< FromJsonTests::EscapeFieldName >();
+ add< FromJsonTests::EscapedUnicodeToUtf8 >();
+ add< FromJsonTests::Utf8AllOnes >();
+ add< FromJsonTests::Utf8FirstByteOnes >();
+ add< FromJsonTests::DBRef >();
+ add< FromJsonTests::Oid >();
+ add< FromJsonTests::BinData >();
+ add< FromJsonTests::BinDataPaddedSingle >();
+ add< FromJsonTests::BinDataPaddedDouble >();
+ add< FromJsonTests::BinDataAllChars >();
+ add< FromJsonTests::Date >();
+ add< FromJsonTests::DateNonzero >();
+ add< FromJsonTests::DateTooLong >();
+ add< FromJsonTests::Regex >();
+ add< FromJsonTests::RegexEscape >();
+ add< FromJsonTests::RegexWithQuotes >();
+ add< FromJsonTests::RegexInvalidOption >();
+ add< FromJsonTests::RegexInvalidOption2 >();
+ add< FromJsonTests::Malformed >();
}
- ASSERT( !expected.woCompare( actual ) );
- }
-};
-
-class Bad {
-public:
- void run() {
- ASSERT_EXCEPTION( fromjson( json() ), MsgAssertionException );
- }
-protected:
- virtual string json() const = 0;
-};
-
-class Empty : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{}";
- }
-};
-
-class EmptyWithSpace : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ }";
- }
-};
-
-class SingleString : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", "b" );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : \"b\" }";
- }
-};
-
-class EmptyStrings : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "", "" );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"\" : \"\" }";
- }
-};
-
-class ReservedFieldName : public Bad {
- virtual string json() const {
- return "{ \"$ns\" : \"b\" }";
- }
-};
-
-class OkDollarFieldName : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "$where", 1 );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"$where\" : 1 }";
- }
-};
-
-class SingleNumber : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", 1 );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : 1 }";
- }
-};
-
-class FancyNumber {
-public:
- void run() {
- ASSERT_EQUALS( bson().firstElement().number(),
- fromjson( json() ).firstElement().number() );
- }
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", -4.4433e-2 );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : -4.4433e-2 }";
- }
-};
-
-class TwoElements : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", 1 );
- b.append( "b", "foo" );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : 1, \"b\" : \"foo\" }";
- }
-};
-
-class Subobject : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", 1 );
- BSONObjBuilder c;
- c.append( "z", b.done() );
- return c.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"z\" : { \"a\" : 1 } }";
- }
-};
-
-class ArrayEmpty : public Base {
- virtual BSONObj bson() const {
- vector< int > arr;
- BSONObjBuilder b;
- b.append( "a", arr );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : [] }";
- }
-};
-
-class Array : public Base {
- virtual BSONObj bson() const {
- vector< int > arr;
- arr.push_back( 1 );
- arr.push_back( 2 );
- arr.push_back( 3 );
- BSONObjBuilder b;
- b.append( "a", arr );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : [ 1, 2, 3 ] }";
- }
-};
-
-class True : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendBool( "a", true );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : true }";
- }
-};
-
-class False : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendBool( "a", false );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : false }";
- }
-};
-
-class Null : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendNull( "a" );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : null }";
- }
-};
-
-class EscapedCharacters : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", "\" \\ / \b \f \n \r \t" );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : \"\\\" \\\\ \\/ \\b \\f \\n \\r \\t\" }";
- }
-};
-
-class AllowedControlCharacter : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "a", "\x7f" );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : \"\x7f\" }";
- }
-};
-
-class EscapeFieldName : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.append( "\n", "b" );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"\\n\" : \"b\" }";
- }
-};
-
-class EscapedUnicodeToUtf8 : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- char u[ 7 ];
- u[ 0 ] = 0xe0 | 0x0a;
- u[ 1 ] = 0x80;
- u[ 2 ] = 0x80;
- u[ 3 ] = 0xe0 | 0x0a;
- u[ 4 ] = 0x80;
- u[ 5 ] = 0x80;
- u[ 6 ] = 0;
- b.append( "a", u );
- ASSERT_EQUALS( string( u ), b.done().firstElement().valuestr() );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : \"\\ua000\\uA000\" }";
- }
-};
-
-class Utf8AllOnes : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- char u[ 8 ];
- u[ 0 ] = 0x01;
-
- u[ 1 ] = 0x7f;
-
- u[ 2 ] = 0xdf;
- u[ 3 ] = 0xbf;
-
- u[ 4 ] = 0xef;
- u[ 5 ] = 0xbf;
- u[ 6 ] = 0xbf;
-
- u[ 7 ] = 0;
-
- b.append( "a", u );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : \"\\u0001\\u007f\\u07ff\\uffff\" }";
- }
-};
-
-class Utf8FirstByteOnes : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- char u[ 6 ];
- u[ 0 ] = 0xdc;
- u[ 1 ] = 0x80;
-
- u[ 2 ] = 0xef;
- u[ 3 ] = 0xbc;
- u[ 4 ] = 0x80;
-
- u[ 5 ] = 0;
-
- b.append( "a", u );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : \"\\u0700\\uff00\" }";
- }
-};
-
-class DBRef : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- OID o;
- memset( &o, 0, 12 );
- b.appendDBRef( "a", "foo", o );
- return b.doneAndDecouple();
- }
- // NOTE Testing other formats handled by by Base class.
- virtual string json() const {
- return "{ \"a\" : { \"$ns\" : \"foo\", \"$id\" : \"000000000000000000000000\" } }";
- }
-};
-
-class Oid : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendOID( "_id" );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"_id\" : \"000000000000000000000000\" }";
- }
-};
-
-class BinData : public Base {
- virtual BSONObj bson() const {
- char z[ 3 ];
- z[ 0 ] = 'a';
- z[ 1 ] = 'b';
- z[ 2 ] = 'c';
- BSONObjBuilder b;
- b.appendBinData( "a", 3, ByteArray, z );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"02\" } }";
- }
-};
-
-class BinDataPaddedSingle : public Base {
- virtual BSONObj bson() const {
- char z[ 2 ];
- z[ 0 ] = 'a';
- z[ 1 ] = 'b';
- BSONObjBuilder b;
- b.appendBinData( "a", 2, ByteArray, z );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"YWI=\", \"$type\" : \"02\" } }";
- }
-};
-
-class BinDataPaddedDouble : public Base {
- virtual BSONObj bson() const {
- char z[ 1 ];
- z[ 0 ] = 'a';
- BSONObjBuilder b;
- b.appendBinData( "a", 1, ByteArray, z );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"YQ==\", \"$type\" : \"02\" } }";
- }
-};
-
-class BinDataAllChars : public Base {
- virtual BSONObj bson() const {
- char z[] = {
- 0x00, 0x10, 0x83, 0x10, 0x51, 0x87, 0x20, 0x92, 0x8B, 0x30,
- 0xD3, 0x8F, 0x41, 0x14, 0x93, 0x51, 0x55, 0x97, 0x61, 0x96,
- 0x9B, 0x71, 0xD7, 0x9F, 0x82, 0x18, 0xA3, 0x92, 0x59, 0xA7,
- 0xA2, 0x9A, 0xAB, 0xB2, 0xDB, 0xAF, 0xC3, 0x1C, 0xB3, 0xD3,
- 0x5D, 0xB7, 0xE3, 0x9E, 0xBB, 0xF3, 0xDF, 0xBF
- };
- BSONObjBuilder b;
- b.appendBinData( "a", 48, ByteArray, z );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$binary\" : \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\", \"$type\" : \"02\" } }";
- }
-};
-
-class Date : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendDate( "a", 0 );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$date\" : 0 } }";
- }
-};
-
-class DateNonzero : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendDate( "a", 100 );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$date\" : 100 } }";
- }
-};
-
-class DateTooLong : public Bad {
- virtual string json() const {
- stringstream ss;
- ss << "{ \"a\" : { \"$date\" : " << ~(0LL) << "0" << " } }";
- return ss.str();
- }
-};
-
-class Regex : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendRegex( "a", "b", "i" );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$regex\" : \"b\", \"$options\" : \"i\" } }";
- }
-};
-
-class RegexEscape : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendRegex( "a", "\t", "i" );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : { \"$regex\" : \"\\t\", \"$options\" : \"i\" } }";
- }
-};
-
-class RegexWithQuotes : public Base {
- virtual BSONObj bson() const {
- BSONObjBuilder b;
- b.appendRegex( "a", "\"", "" );
- return b.doneAndDecouple();
- }
- virtual string json() const {
- return "{ \"a\" : /\"/ }";
- }
-};
-
-class RegexInvalidOption : public Bad {
- virtual string json() const {
- return "{ \"a\" : { \"$regex\" : \"b\", \"$options\" : \"1\" } }";
- }
-};
-
-class RegexInvalidOption2 : public Bad {
- virtual string json() const {
- return "{ \"a\" : /b/c }";
- }
-};
-
-class Malformed : public Bad {
- string json() const {
- return "{";
- }
-};
-
-} // namespace FromJsonTests
-
-class All : public UnitTest::Suite {
-public:
- All() {
- add< BSONObjTests::Create >();
- add< BSONObjTests::WoCompareBasic >();
- add< BSONObjTests::NumericCompareBasic >();
- add< BSONObjTests::WoCompareEmbeddedObject >();
- add< BSONObjTests::WoCompareEmbeddedArray >();
- add< BSONObjTests::WoCompareOrdered >();
- add< BSONObjTests::JsonStringTests::Empty >();
- add< BSONObjTests::JsonStringTests::SingleStringMember >();
- add< BSONObjTests::JsonStringTests::EscapedCharacters >();
- add< BSONObjTests::JsonStringTests::AdditionalControlCharacters >();
- add< BSONObjTests::JsonStringTests::ExtendedAscii >();
- add< BSONObjTests::JsonStringTests::EscapeFieldName >();
- add< BSONObjTests::JsonStringTests::SingleIntMember >();
- add< BSONObjTests::JsonStringTests::SingleNumberMember >();
- add< BSONObjTests::JsonStringTests::InvalidNumbers >();
- add< BSONObjTests::JsonStringTests::NumberPrecision >();
- add< BSONObjTests::JsonStringTests::NegativeNumber >();
- add< BSONObjTests::JsonStringTests::SingleBoolMember >();
- add< BSONObjTests::JsonStringTests::SingleNullMember >();
- add< BSONObjTests::JsonStringTests::SingleObjectMember >();
- add< BSONObjTests::JsonStringTests::TwoMembers >();
- add< BSONObjTests::JsonStringTests::EmptyArray >();
- add< BSONObjTests::JsonStringTests::Array >();
- add< BSONObjTests::JsonStringTests::DBRef >();
- add< BSONObjTests::JsonStringTests::DBRefZero >();
- add< BSONObjTests::JsonStringTests::ObjectId >();
- add< BSONObjTests::JsonStringTests::BinData >();
- add< BSONObjTests::JsonStringTests::Symbol >();
- add< BSONObjTests::JsonStringTests::Date >();
- add< BSONObjTests::JsonStringTests::Regex >();
- add< BSONObjTests::JsonStringTests::RegexEscape >();
- add< BSONObjTests::JsonStringTests::RegexManyOptions >();
- add< FromJsonTests::Empty >();
- add< FromJsonTests::EmptyWithSpace >();
- add< FromJsonTests::SingleString >();
- add< FromJsonTests::EmptyStrings >();
- add< FromJsonTests::ReservedFieldName >();
- add< FromJsonTests::OkDollarFieldName >();
- add< FromJsonTests::SingleNumber >();
- add< FromJsonTests::FancyNumber >();
- add< FromJsonTests::TwoElements >();
- add< FromJsonTests::Subobject >();
- add< FromJsonTests::ArrayEmpty >();
- add< FromJsonTests::Array >();
- add< FromJsonTests::True >();
- add< FromJsonTests::False >();
- add< FromJsonTests::Null >();
- add< FromJsonTests::EscapedCharacters >();
- add< FromJsonTests::AllowedControlCharacter >();
- add< FromJsonTests::EscapeFieldName >();
- add< FromJsonTests::EscapedUnicodeToUtf8 >();
- add< FromJsonTests::Utf8AllOnes >();
- add< FromJsonTests::Utf8FirstByteOnes >();
- add< FromJsonTests::DBRef >();
- add< FromJsonTests::Oid >();
- add< FromJsonTests::BinData >();
- add< FromJsonTests::BinDataPaddedSingle >();
- add< FromJsonTests::BinDataPaddedDouble >();
- add< FromJsonTests::BinDataAllChars >();
- add< FromJsonTests::Date >();
- add< FromJsonTests::DateNonzero >();
- add< FromJsonTests::DateTooLong >();
- add< FromJsonTests::Regex >();
- add< FromJsonTests::RegexEscape >();
- add< FromJsonTests::RegexWithQuotes >();
- add< FromJsonTests::RegexInvalidOption >();
- add< FromJsonTests::RegexInvalidOption2 >();
- add< FromJsonTests::Malformed >();
- }
-};
+ };
} // namespace JsobjTests
diff --git a/dbtests/namespacetests.cpp b/dbtests/namespacetests.cpp
index 6bfbdaa77f6..1dffd7f4bc3 100644
--- a/dbtests/namespacetests.cpp
+++ b/dbtests/namespacetests.cpp
@@ -26,543 +26,543 @@
#include "dbtests.h"
namespace NamespaceTests {
-namespace IndexDetailsTests {
-class Base {
-public:
- Base() {
- dblock lk;
- setClient( ns() );
- }
- ~Base() {
- if ( id_.info.isNull() )
- return;
- theDataFileMgr.deleteRecord( ns(), id_.info.rec(), id_.info );
- ASSERT( theDataFileMgr.findAll( ns() )->eof() );
- }
-protected:
- void create() {
- BSONObjBuilder builder;
- builder.append( "ns", ns() );
- builder.append( "name", "testIndex" );
- builder.append( "key", key() );
- BSONObj bobj = builder.done();
- id_.info = theDataFileMgr.insert( ns(), bobj.objdata(), bobj.objsize() );
- // head not needed for current tests
- // idx_.head = BtreeBucket::addHead( id_ );
- }
- static const char* ns() {
- return "sys.unittest.indexdetailstests";
- }
- const IndexDetails& id() {
- return id_;
- }
- virtual BSONObj key() const {
- BSONObjBuilder k;
- k.append( "a", 1 );
- return k.doneAndDecouple();
- }
- BSONObj aDotB() const {
- BSONObjBuilder k;
- k.append( "a.b", 1 );
- return k.doneAndDecouple();
- }
- BSONObj aAndB() const {
- BSONObjBuilder k;
- k.append( "a", 1 );
- k.append( "b", 1 );
- return k.doneAndDecouple();
- }
- static vector< int > shortArray() {
- vector< int > a;
- a.push_back( 1 );
- a.push_back( 2 );
- a.push_back( 3 );
- return a;
- }
- static BSONObj simpleBC( int i ) {
- BSONObjBuilder b;
- b.append( "b", i );
- b.append( "c", 4 );
- return b.doneAndDecouple();
- }
- static void checkSize( int expected, const BSONObjSetDefaultOrder &objs ) {
- ASSERT_EQUALS( BSONObjSetDefaultOrder::size_type( expected ), objs.size() );
- }
- static void assertEquals( const BSONObj &a, const BSONObj &b ) {
- if ( a.woCompare( b ) != 0 ) {
- cout << "expected: " << a.toString()
- << ", got: " << b.toString() << endl;
- }
- ASSERT( a.woCompare( b ) == 0 );
- }
-private:
- IndexDetails id_;
-};
-
-class Create : public Base {
-public:
- void run() {
- create();
- ASSERT_EQUALS( "testIndex", id().indexName() );
- ASSERT_EQUALS( ns(), id().parentNS() );
- assertEquals( key(), id().keyPattern() );
- }
-};
-
-class GetKeysFromObjectSimple : public Base {
-public:
- void run() {
- create();
- BSONObjBuilder b, e;
- b.append( "b", 4 );
- b.append( "a", 5 );
- e.append( "", 5 );
- BSONObjSetDefaultOrder keys;
- id().getKeysFromObject( b.done(), keys );
- checkSize( 1, keys );
- assertEquals( e.doneAndDecouple(), *keys.begin() );
- }
-};
-
-class GetKeysFromObjectDotted : public Base {
-public:
- void run() {
- create();
- BSONObjBuilder a, e, b;
- b.append( "b", 4 );
- a.append( "a", b.done() );
- a.append( "c", "foo" );
- e.append( "", 4 );
- BSONObjSetDefaultOrder keys;
- id().getKeysFromObject( a.done(), keys );
- checkSize( 1, keys );
- assertEquals( e.doneAndDecouple(), *keys.begin() );
- }
-private:
- virtual BSONObj key() const {
- return aDotB();
- }
-};
-
-class GetKeysFromArraySimple : public Base {
-public:
- void run() {
- create();
- BSONObjBuilder b;
- b.append( "a", shortArray()) ;
-
- BSONObjSetDefaultOrder keys;
- id().getKeysFromObject( b.done(), keys );
- checkSize( 3, keys );
- int j = 1;
- for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
- BSONObjBuilder b;
- b.append( "", j );
- assertEquals( b.doneAndDecouple(), *i );
- }
- }
-};
-
-class GetKeysFromArrayFirstElement : public Base {
-public:
- void run() {
- create();
- BSONObjBuilder b;
- b.append( "a", shortArray() );
- b.append( "b", 2 );
-
- BSONObjSetDefaultOrder keys;
- id().getKeysFromObject( b.done(), keys );
- checkSize( 3, keys );
- int j = 1;
- for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
- BSONObjBuilder b;
- b.append( "", j );
- b.append( "", 2 );
- assertEquals( b.doneAndDecouple(), *i );
- }
- }
-private:
- virtual BSONObj key() const {
- return aAndB();
- }
-};
-
-class GetKeysFromArraySecondElement : public Base {
-public:
- void run() {
- create();
- BSONObjBuilder b;
- b.append( "first", 5 );
- b.append( "a", shortArray()) ;
-
- BSONObjSetDefaultOrder keys;
- id().getKeysFromObject( b.done(), keys );
- checkSize( 3, keys );
- int j = 1;
- for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
- BSONObjBuilder b;
- b.append( "", 5 );
- b.append( "", j );
- assertEquals( b.doneAndDecouple(), *i );
- }
- }
-private:
- virtual BSONObj key() const {
- BSONObjBuilder k;
- k.append( "first", 1 );
- k.append( "a", 1 );
- return k.doneAndDecouple();
- }
-};
-
-class GetKeysFromSecondLevelArray : public Base {
-public:
- void run() {
- create();
- BSONObjBuilder b;
- b.append( "b", shortArray() );
- BSONObjBuilder a;
- a.append( "a", b.done() );
-
- BSONObjSetDefaultOrder keys;
- id().getKeysFromObject( a.done(), keys );
- checkSize( 3, keys );
- int j = 1;
- for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
- BSONObjBuilder b;
- b.append( "", j );
- assertEquals( b.doneAndDecouple(), *i );
- }
- }
-private:
- virtual BSONObj key() const {
- return aDotB();
- }
-};
-
-class ParallelArraysBasic : public Base {
-public:
- void run() {
- create();
- BSONObjBuilder b;
- b.append( "a", shortArray() );
- b.append( "b", shortArray() );
-
- BSONObjSetDefaultOrder keys;
- ASSERT_EXCEPTION( id().getKeysFromObject( b.done(), keys ),
- UserAssertionException );
- }
-private:
- virtual BSONObj key() const {
- return aAndB();
- }
-};
-
-class ArraySubobjectBasic : public Base {
-public:
- void run() {
- create();
- vector< BSONObj > elts;
- for ( int i = 1; i < 4; ++i )
- elts.push_back( simpleBC( i ) );
- BSONObjBuilder b;
- b.append( "a", elts );
-
- BSONObjSetDefaultOrder keys;
- id().getKeysFromObject( b.done(), keys );
- checkSize( 3, keys );
- int j = 1;
- for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
- BSONObjBuilder b;
- b.append( "", j );
- assertEquals( b.doneAndDecouple(), *i );
- }
- }
-private:
- virtual BSONObj key() const {
- return aDotB();
- }
-};
-
-class ArraySubobjectMultiFieldIndex : public Base {
-public:
- void run() {
- create();
- vector< BSONObj > elts;
- for ( int i = 1; i < 4; ++i )
- elts.push_back( simpleBC( i ) );
- BSONObjBuilder b;
- b.append( "a", elts );
- b.append( "d", 99 );
-
- BSONObjSetDefaultOrder keys;
- id().getKeysFromObject( b.done(), keys );
- checkSize( 3, keys );
- int j = 1;
- for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
- BSONObjBuilder c;
- c.append( "", j );
- c.append( "", 99 );
- assertEquals( c.doneAndDecouple(), *i );
- }
- }
-private:
- virtual BSONObj key() const {
- BSONObjBuilder k;
- k.append( "a.b", 1 );
- k.append( "d", 1 );
- return k.doneAndDecouple();
- }
-};
-
-class ArraySubobjectSingleMissing : public Base {
-public:
- void run() {
- create();
- vector< BSONObj > elts;
- BSONObjBuilder s;
- s.append( "foo", 41 );
- elts.push_back( s.doneAndDecouple() );
- for ( int i = 1; i < 4; ++i )
- elts.push_back( simpleBC( i ) );
- BSONObjBuilder b;
- b.append( "a", elts );
-
- BSONObjSetDefaultOrder keys;
- id().getKeysFromObject( b.done(), keys );
- checkSize( 3, keys );
- int j = 1;
- for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
- BSONObjBuilder b;
- b.append( "", j );
- assertEquals( b.doneAndDecouple(), *i );
- }
- }
-private:
- virtual BSONObj key() const {
- return aDotB();
- }
-};
-
-class ArraySubobjectMissing : public Base {
-public:
- void run() {
- create();
- vector< BSONObj > elts;
- BSONObjBuilder s;
- s.append( "foo", 41 );
- for ( int i = 1; i < 4; ++i )
- elts.push_back( s.done() );
- BSONObjBuilder b;
- b.append( "a", elts );
-
- BSONObjSetDefaultOrder keys;
- id().getKeysFromObject( b.done(), keys );
- checkSize( 0, keys );
- }
-private:
- virtual BSONObj key() const {
- return aDotB();
- }
-};
+ namespace IndexDetailsTests {
+ class Base {
+ public:
+ Base() {
+ dblock lk;
+ setClient( ns() );
+ }
+ ~Base() {
+ if ( id_.info.isNull() )
+ return;
+ theDataFileMgr.deleteRecord( ns(), id_.info.rec(), id_.info );
+ ASSERT( theDataFileMgr.findAll( ns() )->eof() );
+ }
+ protected:
+ void create() {
+ BSONObjBuilder builder;
+ builder.append( "ns", ns() );
+ builder.append( "name", "testIndex" );
+ builder.append( "key", key() );
+ BSONObj bobj = builder.done();
+ id_.info = theDataFileMgr.insert( ns(), bobj.objdata(), bobj.objsize() );
+ // head not needed for current tests
+ // idx_.head = BtreeBucket::addHead( id_ );
+ }
+ static const char* ns() {
+ return "sys.unittest.indexdetailstests";
+ }
+ const IndexDetails& id() {
+ return id_;
+ }
+ virtual BSONObj key() const {
+ BSONObjBuilder k;
+ k.append( "a", 1 );
+ return k.doneAndDecouple();
+ }
+ BSONObj aDotB() const {
+ BSONObjBuilder k;
+ k.append( "a.b", 1 );
+ return k.doneAndDecouple();
+ }
+ BSONObj aAndB() const {
+ BSONObjBuilder k;
+ k.append( "a", 1 );
+ k.append( "b", 1 );
+ return k.doneAndDecouple();
+ }
+ static vector< int > shortArray() {
+ vector< int > a;
+ a.push_back( 1 );
+ a.push_back( 2 );
+ a.push_back( 3 );
+ return a;
+ }
+ static BSONObj simpleBC( int i ) {
+ BSONObjBuilder b;
+ b.append( "b", i );
+ b.append( "c", 4 );
+ return b.doneAndDecouple();
+ }
+ static void checkSize( int expected, const BSONObjSetDefaultOrder &objs ) {
+ ASSERT_EQUALS( BSONObjSetDefaultOrder::size_type( expected ), objs.size() );
+ }
+ static void assertEquals( const BSONObj &a, const BSONObj &b ) {
+ if ( a.woCompare( b ) != 0 ) {
+ out() << "expected: " << a.toString()
+ << ", got: " << b.toString() << endl;
+ }
+ ASSERT( a.woCompare( b ) == 0 );
+ }
+ private:
+ IndexDetails id_;
+ };
+
+ class Create : public Base {
+ public:
+ void run() {
+ create();
+ ASSERT_EQUALS( "testIndex", id().indexName() );
+ ASSERT_EQUALS( ns(), id().parentNS() );
+ assertEquals( key(), id().keyPattern() );
+ }
+ };
+
+ class GetKeysFromObjectSimple : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder b, e;
+ b.append( "b", 4 );
+ b.append( "a", 5 );
+ e.append( "", 5 );
+ BSONObjSetDefaultOrder keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 1, keys );
+ assertEquals( e.doneAndDecouple(), *keys.begin() );
+ }
+ };
+
+ class GetKeysFromObjectDotted : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder a, e, b;
+ b.append( "b", 4 );
+ a.append( "a", b.done() );
+ a.append( "c", "foo" );
+ e.append( "", 4 );
+ BSONObjSetDefaultOrder keys;
+ id().getKeysFromObject( a.done(), keys );
+ checkSize( 1, keys );
+ assertEquals( e.doneAndDecouple(), *keys.begin() );
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class GetKeysFromArraySimple : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder b;
+ b.append( "a", shortArray()) ;
+
+ BSONObjSetDefaultOrder keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder b;
+ b.append( "", j );
+ assertEquals( b.doneAndDecouple(), *i );
+ }
+ }
+ };
+
+ class GetKeysFromArrayFirstElement : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder b;
+ b.append( "a", shortArray() );
+ b.append( "b", 2 );
+
+ BSONObjSetDefaultOrder keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder b;
+ b.append( "", j );
+ b.append( "", 2 );
+ assertEquals( b.doneAndDecouple(), *i );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ return aAndB();
+ }
+ };
+
+ class GetKeysFromArraySecondElement : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder b;
+ b.append( "first", 5 );
+ b.append( "a", shortArray()) ;
+
+ BSONObjSetDefaultOrder keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder b;
+ b.append( "", 5 );
+ b.append( "", j );
+ assertEquals( b.doneAndDecouple(), *i );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ BSONObjBuilder k;
+ k.append( "first", 1 );
+ k.append( "a", 1 );
+ return k.doneAndDecouple();
+ }
+ };
+
+ class GetKeysFromSecondLevelArray : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder b;
+ b.append( "b", shortArray() );
+ BSONObjBuilder a;
+ a.append( "a", b.done() );
+
+ BSONObjSetDefaultOrder keys;
+ id().getKeysFromObject( a.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder b;
+ b.append( "", j );
+ assertEquals( b.doneAndDecouple(), *i );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class ParallelArraysBasic : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder b;
+ b.append( "a", shortArray() );
+ b.append( "b", shortArray() );
+
+ BSONObjSetDefaultOrder keys;
+ ASSERT_EXCEPTION( id().getKeysFromObject( b.done(), keys ),
+ UserAssertionException );
+ }
+ private:
+ virtual BSONObj key() const {
+ return aAndB();
+ }
+ };
+
+ class ArraySubobjectBasic : public Base {
+ public:
+ void run() {
+ create();
+ vector< BSONObj > elts;
+ for ( int i = 1; i < 4; ++i )
+ elts.push_back( simpleBC( i ) );
+ BSONObjBuilder b;
+ b.append( "a", elts );
+
+ BSONObjSetDefaultOrder keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder b;
+ b.append( "", j );
+ assertEquals( b.doneAndDecouple(), *i );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class ArraySubobjectMultiFieldIndex : public Base {
+ public:
+ void run() {
+ create();
+ vector< BSONObj > elts;
+ for ( int i = 1; i < 4; ++i )
+ elts.push_back( simpleBC( i ) );
+ BSONObjBuilder b;
+ b.append( "a", elts );
+ b.append( "d", 99 );
+
+ BSONObjSetDefaultOrder keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder c;
+ c.append( "", j );
+ c.append( "", 99 );
+ assertEquals( c.doneAndDecouple(), *i );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ BSONObjBuilder k;
+ k.append( "a.b", 1 );
+ k.append( "d", 1 );
+ return k.doneAndDecouple();
+ }
+ };
+
+ class ArraySubobjectSingleMissing : public Base {
+ public:
+ void run() {
+ create();
+ vector< BSONObj > elts;
+ BSONObjBuilder s;
+ s.append( "foo", 41 );
+ elts.push_back( s.doneAndDecouple() );
+ for ( int i = 1; i < 4; ++i )
+ elts.push_back( simpleBC( i ) );
+ BSONObjBuilder b;
+ b.append( "a", elts );
+
+ BSONObjSetDefaultOrder keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSetDefaultOrder::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder b;
+ b.append( "", j );
+ assertEquals( b.doneAndDecouple(), *i );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class ArraySubobjectMissing : public Base {
+ public:
+ void run() {
+ create();
+ vector< BSONObj > elts;
+ BSONObjBuilder s;
+ s.append( "foo", 41 );
+ for ( int i = 1; i < 4; ++i )
+ elts.push_back( s.done() );
+ BSONObjBuilder b;
+ b.append( "a", elts );
+
+ BSONObjSetDefaultOrder keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 0, keys );
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
// TODO
// array subelement complex
// parallel arrays complex
// allowed multi array indexes
-} // namespace IndexDetailsTests
-
-namespace NamespaceDetailsTests {
-
-class Base {
-public:
- Base( const char *ns = "foo" ) : ns_( ns ) {}
- ~Base() {
- if ( !nsd() )
- return;
- string s( ns() );
- dropNS( s );
- }
-protected:
- void create() {
- dblock lk;
- setClient( ns() );
- string err;
- ASSERT( userCreateNS( ns(), fromjson( spec() ), err, false ) );
- }
- virtual string spec() const {
- return "{\"capped\":true,\"size\":512}";
- }
- int nRecords() const {
- int count = 0;
- for ( DiskLoc i = nsd()->firstExtent; !i.isNull(); i = i.ext()->xnext )
- for ( DiskLoc j = i.ext()->firstRecord; !j.isNull();
- j.setOfs( j.a(), j.rec()->nextOfs ) ) {
- ++count;
- }
- ASSERT_EQUALS( count, nsd()->nrecords );
- return count;
- }
- int nExtents() const {
- int count = 0;
- for ( DiskLoc i = nsd()->firstExtent; !i.isNull(); i = i.ext()->xnext )
- ++count;
- return count;
- }
- static int min( int a, int b ) {
- return a < b ? a : b;
- }
- const char *ns() const {
- return ns_;
- }
- NamespaceDetails *nsd() const {
- return nsdetails( ns() );
- }
-private:
- const char *ns_;
-};
-
-class Create : public Base {
-public:
- void run() {
- create();
- ASSERT( nsd() );
- ASSERT_EQUALS( 0, nRecords() );
- ASSERT( nsd()->firstExtent == nsd()->capExtent );
- DiskLoc initial = DiskLoc();
- initial.setInvalid();
- ASSERT( initial == nsd()->capFirstNewRecord );
- }
-};
-
-class SingleAlloc : public Base {
-public:
- void run() {
- create();
- char ch[ 200 ];
- memset( ch, 0, 200 );
- ASSERT( !theDataFileMgr.insert( ns(), ch, 200 ).isNull() );
- ASSERT_EQUALS( 1, nRecords() );
- }
-};
-
-class Realloc : public Base {
-public:
- void run() {
- create();
- char ch[ 200 ];
-
- DiskLoc l[ 6 ];
- for ( int i = 0; i < 6; ++i ) {
- l[ i ] = theDataFileMgr.insert( ns(), ch, 200 );
- ASSERT( !l[ i ].isNull() );
- ASSERT_EQUALS( 1 + i % 2, nRecords() );
- if ( i > 1 )
- ASSERT( l[ i ] == l[ i - 2 ] );
- }
- }
-};
-
-class TwoExtent : public Base {
-public:
- void run() {
- create();
- ASSERT_EQUALS( 2, nExtents() );
- char ch[ 200 ];
-
- DiskLoc l[ 8 ];
- for ( int i = 0; i < 8; ++i ) {
- l[ i ] = theDataFileMgr.insert( ns(), ch, 200 );
- ASSERT( !l[ i ].isNull() );
- ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
- if ( i > 3 )
- ASSERT( l[ i ] == l[ i - 4 ] );
- }
+ } // namespace IndexDetailsTests
+
+ namespace NamespaceDetailsTests {
- // Too big
- char ch2[ 800 ];
- ASSERT( theDataFileMgr.insert( ns(), ch2, 800 ).isNull() );
- ASSERT_EQUALS( 0, nRecords() );
- }
-private:
- virtual string spec() const {
- return "{\"capped\":true,\"size\":512,\"$nExtents\":2}";
- }
-};
-
-class Migrate : public Base {
-public:
- void run() {
- create();
- nsd()->deletedList[ 2 ] = nsd()->deletedList[ 0 ].drec()->nextDeleted.drec()->nextDeleted;
- nsd()->deletedList[ 0 ].drec()->nextDeleted.drec()->nextDeleted = DiskLoc();
- NamespaceDetails *d = nsd();
- zero( &d->capExtent );
- zero( &d->capFirstNewRecord );
-
- nsd();
-
- ASSERT( nsd()->firstExtent == nsd()->capExtent );
- ASSERT( nsd()->capExtent.getOfs() != 0 );
- ASSERT( !nsd()->capFirstNewRecord.isValid() );
- int nDeleted = 0;
- for ( DiskLoc i = nsd()->deletedList[ 0 ]; !i.isNull(); i = i.drec()->nextDeleted, ++nDeleted );
- ASSERT_EQUALS( 10, nDeleted );
- ASSERT( nsd()->deletedList[ 1 ].isNull() );
- }
-private:
- static void zero( DiskLoc *d ) {
- memset( d, 0, sizeof( DiskLoc ) );
- }
- virtual string spec() const {
- return "{\"capped\":true,\"size\":512,\"$nExtents\":10}";
- }
-};
-
-class BigCollection : public Base {
-public:
- BigCollection() : Base( "NamespaceDetailsTests_BigCollection" ) {}
- void run() {
- create();
- ASSERT_EQUALS( 2, nExtents() );
- }
-private:
- virtual string spec() const {
- // NOTE 256 added to size in _userCreateNS()
- long long big = PhysicalDataFile::maxSize() - PDFHeader::headerSize();
- stringstream ss;
- ss << "{\"capped\":true,\"size\":" << big << "}";
- return ss.str();
- }
-};
-
-} // namespace NamespaceDetailsTests
-
-class All : public UnitTest::Suite {
-public:
- All() {
- add< IndexDetailsTests::Create >();
- add< IndexDetailsTests::GetKeysFromObjectSimple >();
- add< IndexDetailsTests::GetKeysFromObjectDotted >();
- add< IndexDetailsTests::GetKeysFromArraySimple >();
- add< IndexDetailsTests::GetKeysFromArrayFirstElement >();
- add< IndexDetailsTests::GetKeysFromArraySecondElement >();
- add< IndexDetailsTests::GetKeysFromSecondLevelArray >();
- add< IndexDetailsTests::ParallelArraysBasic >();
- add< IndexDetailsTests::ArraySubobjectBasic >();
- add< IndexDetailsTests::ArraySubobjectMultiFieldIndex >();
- add< IndexDetailsTests::ArraySubobjectSingleMissing >();
- add< IndexDetailsTests::ArraySubobjectMissing >();
- add< NamespaceDetailsTests::Create >();
- add< NamespaceDetailsTests::SingleAlloc >();
- add< NamespaceDetailsTests::Realloc >();
- add< NamespaceDetailsTests::TwoExtent >();
- add< NamespaceDetailsTests::Migrate >();
- add< NamespaceDetailsTests::BigCollection >();
- }
-};
+ class Base {
+ public:
+ Base( const char *ns = "foo" ) : ns_( ns ) {}
+ ~Base() {
+ if ( !nsd() )
+ return;
+ string s( ns() );
+ dropNS( s );
+ }
+ protected:
+ void create() {
+ dblock lk;
+ setClient( ns() );
+ string err;
+ ASSERT( userCreateNS( ns(), fromjson( spec() ), err, false ) );
+ }
+ virtual string spec() const {
+ return "{\"capped\":true,\"size\":512}";
+ }
+ int nRecords() const {
+ int count = 0;
+ for ( DiskLoc i = nsd()->firstExtent; !i.isNull(); i = i.ext()->xnext )
+ for ( DiskLoc j = i.ext()->firstRecord; !j.isNull();
+ j.setOfs( j.a(), j.rec()->nextOfs ) ) {
+ ++count;
+ }
+ ASSERT_EQUALS( count, nsd()->nrecords );
+ return count;
+ }
+ int nExtents() const {
+ int count = 0;
+ for ( DiskLoc i = nsd()->firstExtent; !i.isNull(); i = i.ext()->xnext )
+ ++count;
+ return count;
+ }
+ static int min( int a, int b ) {
+ return a < b ? a : b;
+ }
+ const char *ns() const {
+ return ns_;
+ }
+ NamespaceDetails *nsd() const {
+ return nsdetails( ns() );
+ }
+ private:
+ const char *ns_;
+ };
+
+ class Create : public Base {
+ public:
+ void run() {
+ create();
+ ASSERT( nsd() );
+ ASSERT_EQUALS( 0, nRecords() );
+ ASSERT( nsd()->firstExtent == nsd()->capExtent );
+ DiskLoc initial = DiskLoc();
+ initial.setInvalid();
+ ASSERT( initial == nsd()->capFirstNewRecord );
+ }
+ };
+
+ class SingleAlloc : public Base {
+ public:
+ void run() {
+ create();
+ char ch[ 200 ];
+ memset( ch, 0, 200 );
+ ASSERT( !theDataFileMgr.insert( ns(), ch, 200 ).isNull() );
+ ASSERT_EQUALS( 1, nRecords() );
+ }
+ };
+
+ class Realloc : public Base {
+ public:
+ void run() {
+ create();
+ char ch[ 200 ];
+
+ DiskLoc l[ 6 ];
+ for ( int i = 0; i < 6; ++i ) {
+ l[ i ] = theDataFileMgr.insert( ns(), ch, 200 );
+ ASSERT( !l[ i ].isNull() );
+ ASSERT_EQUALS( 1 + i % 2, nRecords() );
+ if ( i > 1 )
+ ASSERT( l[ i ] == l[ i - 2 ] );
+ }
+ }
+ };
+
+ class TwoExtent : public Base {
+ public:
+ void run() {
+ create();
+ ASSERT_EQUALS( 2, nExtents() );
+ char ch[ 200 ];
+
+ DiskLoc l[ 8 ];
+ for ( int i = 0; i < 8; ++i ) {
+ l[ i ] = theDataFileMgr.insert( ns(), ch, 200 );
+ ASSERT( !l[ i ].isNull() );
+ ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
+ if ( i > 3 )
+ ASSERT( l[ i ] == l[ i - 4 ] );
+ }
+
+ // Too big
+ char ch2[ 800 ];
+ ASSERT( theDataFileMgr.insert( ns(), ch2, 800 ).isNull() );
+ ASSERT_EQUALS( 0, nRecords() );
+ }
+ private:
+ virtual string spec() const {
+ return "{\"capped\":true,\"size\":512,\"$nExtents\":2}";
+ }
+ };
+
+ class Migrate : public Base {
+ public:
+ void run() {
+ create();
+ nsd()->deletedList[ 2 ] = nsd()->deletedList[ 0 ].drec()->nextDeleted.drec()->nextDeleted;
+ nsd()->deletedList[ 0 ].drec()->nextDeleted.drec()->nextDeleted = DiskLoc();
+ NamespaceDetails *d = nsd();
+ zero( &d->capExtent );
+ zero( &d->capFirstNewRecord );
+
+ nsd();
+
+ ASSERT( nsd()->firstExtent == nsd()->capExtent );
+ ASSERT( nsd()->capExtent.getOfs() != 0 );
+ ASSERT( !nsd()->capFirstNewRecord.isValid() );
+ int nDeleted = 0;
+ for ( DiskLoc i = nsd()->deletedList[ 0 ]; !i.isNull(); i = i.drec()->nextDeleted, ++nDeleted );
+ ASSERT_EQUALS( 10, nDeleted );
+ ASSERT( nsd()->deletedList[ 1 ].isNull() );
+ }
+ private:
+ static void zero( DiskLoc *d ) {
+ memset( d, 0, sizeof( DiskLoc ) );
+ }
+ virtual string spec() const {
+ return "{\"capped\":true,\"size\":512,\"$nExtents\":10}";
+ }
+ };
+
+ class BigCollection : public Base {
+ public:
+ BigCollection() : Base( "NamespaceDetailsTests_BigCollection" ) {}
+ void run() {
+ create();
+ ASSERT_EQUALS( 2, nExtents() );
+ }
+ private:
+ virtual string spec() const {
+ // NOTE 256 added to size in _userCreateNS()
+ long long big = PhysicalDataFile::maxSize() - PDFHeader::headerSize();
+ stringstream ss;
+ ss << "{\"capped\":true,\"size\":" << big << "}";
+ return ss.str();
+ }
+ };
+
+ } // namespace NamespaceDetailsTests
+
+ class All : public UnitTest::Suite {
+ public:
+ All() {
+ add< IndexDetailsTests::Create >();
+ add< IndexDetailsTests::GetKeysFromObjectSimple >();
+ add< IndexDetailsTests::GetKeysFromObjectDotted >();
+ add< IndexDetailsTests::GetKeysFromArraySimple >();
+ add< IndexDetailsTests::GetKeysFromArrayFirstElement >();
+ add< IndexDetailsTests::GetKeysFromArraySecondElement >();
+ add< IndexDetailsTests::GetKeysFromSecondLevelArray >();
+ add< IndexDetailsTests::ParallelArraysBasic >();
+ add< IndexDetailsTests::ArraySubobjectBasic >();
+ add< IndexDetailsTests::ArraySubobjectMultiFieldIndex >();
+ add< IndexDetailsTests::ArraySubobjectSingleMissing >();
+ add< IndexDetailsTests::ArraySubobjectMissing >();
+ add< NamespaceDetailsTests::Create >();
+ add< NamespaceDetailsTests::SingleAlloc >();
+ add< NamespaceDetailsTests::Realloc >();
+ add< NamespaceDetailsTests::TwoExtent >();
+ add< NamespaceDetailsTests::Migrate >();
+ add< NamespaceDetailsTests::BigCollection >();
+ }
+ };
} // namespace NamespaceTests
UnitTest::TestPtr namespaceTests() {
diff --git a/dbtests/pairingtests.cpp b/dbtests/pairingtests.cpp
index 1943b5245be..4c325eee81c 100644
--- a/dbtests/pairingtests.cpp
+++ b/dbtests/pairingtests.cpp
@@ -23,334 +23,334 @@
#include "mockdbclient.h"
namespace mongo {
-extern PairSync *pairSync;
+ extern PairSync *pairSync;
} // namespace mongo
namespace PairingTests {
-class Base {
-protected:
- Base() {
- backup = pairSync;
- setSynced();
- }
- ~Base() {
- pairSync = backup;
- dblock lk;
- emptyCollection( "local.pair.sync" );
- if ( pairSync->initialSyncCompleted() ) {
- // save to db
- pairSync->setInitialSyncCompleted();
- }
- }
- static void setSynced() {
- init();
- pairSync = synced;
- pairSync->setInitialSyncCompletedLocking();
- ASSERT( pairSync->initialSyncCompleted() );
- }
- static void setNotSynced() {
- init();
- pairSync = notSynced;
- ASSERT( !pairSync->initialSyncCompleted() );
- }
- static void flipSync() {
- if ( pairSync->initialSyncCompleted() )
- setNotSynced();
- else
+ class Base {
+ protected:
+ Base() {
+ backup = pairSync;
setSynced();
- }
-private:
- static void init() {
- dblock lk;
- emptyCollection( "local.pair.sync" );
- if ( synced != 0 && notSynced != 0 )
- return;
- notSynced = new PairSync();
- notSynced->init();
- synced = new PairSync();
- synced->init();
- synced->setInitialSyncCompleted();
- emptyCollection( "local.pair.sync" );
- }
- PairSync *backup;
- static PairSync *synced;
- static PairSync *notSynced;
-};
-PairSync *Base::synced = 0;
-PairSync *Base::notSynced = 0;
-
-namespace ReplPairTests {
-class Create : public Base {
-public:
- void run() {
- ReplPair rp1( "foo", "bar" );
- checkFields( rp1, "foo", "foo", DBPort, "bar" );
-
- ReplPair rp2( "foo:1", "bar" );
- checkFields( rp2, "foo:1", "foo", 1, "bar" );
-
- // FIXME Should we accept this input?
- ReplPair rp3( "", "bar" );
- checkFields( rp3, "", "", DBPort, "bar" );
-
- ASSERT_EXCEPTION( ReplPair( "foo:", "bar" ),
- UserAssertionException );
-
- ASSERT_EXCEPTION( ReplPair( "foo:0", "bar" ),
- UserAssertionException );
-
- ASSERT_EXCEPTION( ReplPair( "foo:10000000", "bar" ),
- UserAssertionException );
-
- ASSERT_EXCEPTION( ReplPair( "foo", "" ),
- UserAssertionException );
- }
-private:
- void checkFields( const ReplPair &rp,
- const string &remote,
- const string &remoteHost,
- int remotePort,
- const string &arbHost ) {
- ASSERT( rp.state == ReplPair::State_Negotiating );
- ASSERT_EQUALS( remote, rp.remote );
- ASSERT_EQUALS( remoteHost, rp.remoteHost );
- ASSERT_EQUALS( remotePort, rp.remotePort );
- ASSERT_EQUALS( arbHost, rp.arbHost );
- }
-};
-
-class Dominant : public Base {
-public:
- Dominant() : oldPort_( port ) {
- port = 10;
- }
- ~Dominant() {
- port = oldPort_;
- }
- void run() {
- ASSERT( ReplPair( "b:9", "-" ).dominant( "b" ) );
- ASSERT( !ReplPair( "b:10", "-" ).dominant( "b" ) );
- ASSERT( ReplPair( "b", "-" ).dominant( "c" ) );
- ASSERT( !ReplPair( "b", "-" ).dominant( "a" ) );
- }
-private:
- int oldPort_;
-};
-
-class SetMaster {
-public:
- void run() {
- ReplPair rp( "a", "b" );
- rp.setMaster( ReplPair::State_CantArb, "foo" );
- ASSERT( rp.state == ReplPair::State_CantArb );
- ASSERT_EQUALS( "foo", rp.info );
- rp.setMaster( ReplPair::State_Confused, "foo" );
- ASSERT( rp.state == ReplPair::State_Confused );
- }
-};
-
-class Negotiate : public Base {
-public:
- void run() {
- ReplPair rp( "a", "b" );
- MockDBClientConnection cc;
-
- cc.one( res( 0, 0 ) );
- rp.negotiate( &cc );
- ASSERT( rp.state == ReplPair::State_Confused );
-
- rp.state = ReplPair::State_Negotiating;
- cc.one( res( 1, 2 ) );
- rp.negotiate( &cc );
- ASSERT( rp.state == ReplPair::State_Negotiating );
-
- cc.one( res( 1, ReplPair::State_Slave ) );
- rp.negotiate( &cc );
- ASSERT( rp.state == ReplPair::State_Slave );
-
- cc.one( res( 1, ReplPair::State_Master ) );
- rp.negotiate( &cc );
- ASSERT( rp.state == ReplPair::State_Master );
- }
-private:
- BSONObj res( int ok, int youAre ) {
- BSONObjBuilder b;
- b.appendInt( "ok", ok );
- b.appendInt( "you_are", youAre );
- return b.doneAndDecouple();
- }
-};
-
-class Arbitrate : public Base {
-public:
- void run() {
- ReplPair rp1( "a", "-" );
- rp1.arbitrate();
- ASSERT( rp1.state == ReplPair::State_Master );
-
- TestableReplPair rp2( false, false );
- rp2.arbitrate();
- ASSERT( rp2.state == ReplPair::State_CantArb );
-
- BSONObjBuilder b;
- b.append( "foo", 1 );
- TestableReplPair rp3( true, true );
- rp3.arbitrate();
- ASSERT( rp3.state == ReplPair::State_Master );
- }
-private:
- class TestableReplPair : public ReplPair {
- public:
- TestableReplPair( bool connect, bool isMaster ) :
- ReplPair( "a", "z" ),
- connect_( connect ),
- isMaster_( isMaster ) {
}
- virtual
- DBClientConnection *newClientConnection() const {
- MockDBClientConnection * c = new MockDBClientConnection();
- c->connect( connect_ );
- c->setIsMaster( isMaster_ );
- return c;
+ ~Base() {
+ pairSync = backup;
+ dblock lk;
+ emptyCollection( "local.pair.sync" );
+ if ( pairSync->initialSyncCompleted() ) {
+ // save to db
+ pairSync->setInitialSyncCompleted();
+ }
+ }
+ static void setSynced() {
+ init();
+ pairSync = synced;
+ pairSync->setInitialSyncCompletedLocking();
+ ASSERT( pairSync->initialSyncCompleted() );
+ }
+ static void setNotSynced() {
+ init();
+ pairSync = notSynced;
+ ASSERT( !pairSync->initialSyncCompleted() );
+ }
+ static void flipSync() {
+ if ( pairSync->initialSyncCompleted() )
+ setNotSynced();
+ else
+ setSynced();
}
private:
- bool connect_;
- bool isMaster_;
+ static void init() {
+ dblock lk;
+ emptyCollection( "local.pair.sync" );
+ if ( synced != 0 && notSynced != 0 )
+ return;
+ notSynced = new PairSync();
+ notSynced->init();
+ synced = new PairSync();
+ synced->init();
+ synced->setInitialSyncCompleted();
+ emptyCollection( "local.pair.sync" );
+ }
+ PairSync *backup;
+ static PairSync *synced;
+ static PairSync *notSynced;
};
-};
-} // namespace ReplPairTests
-
-class DirectConnectBase : public Base {
-protected:
- void negotiate( ReplPair &a, ReplPair &b ) {
- auto_ptr< DBClientConnection > c( new DirectDBClientConnection( &b, cc() ) );
- a.negotiate( c.get() );
- }
- class DirectConnectionReplPair : public ReplPair {
+ PairSync *Base::synced = 0;
+ PairSync *Base::notSynced = 0;
+
+ namespace ReplPairTests {
+ class Create : public Base {
+ public:
+ void run() {
+ ReplPair rp1( "foo", "bar" );
+ checkFields( rp1, "foo", "foo", DBPort, "bar" );
+
+ ReplPair rp2( "foo:1", "bar" );
+ checkFields( rp2, "foo:1", "foo", 1, "bar" );
+
+ // FIXME Should we accept this input?
+ ReplPair rp3( "", "bar" );
+ checkFields( rp3, "", "", DBPort, "bar" );
+
+ ASSERT_EXCEPTION( ReplPair( "foo:", "bar" ),
+ UserAssertionException );
+
+ ASSERT_EXCEPTION( ReplPair( "foo:0", "bar" ),
+ UserAssertionException );
+
+ ASSERT_EXCEPTION( ReplPair( "foo:10000000", "bar" ),
+ UserAssertionException );
+
+ ASSERT_EXCEPTION( ReplPair( "foo", "" ),
+ UserAssertionException );
+ }
+ private:
+ void checkFields( const ReplPair &rp,
+ const string &remote,
+ const string &remoteHost,
+ int remotePort,
+ const string &arbHost ) {
+ ASSERT( rp.state == ReplPair::State_Negotiating );
+ ASSERT_EQUALS( remote, rp.remote );
+ ASSERT_EQUALS( remoteHost, rp.remoteHost );
+ ASSERT_EQUALS( remotePort, rp.remotePort );
+ ASSERT_EQUALS( arbHost, rp.arbHost );
+ }
+ };
+
+ class Dominant : public Base {
+ public:
+ Dominant() : oldPort_( port ) {
+ port = 10;
+ }
+ ~Dominant() {
+ port = oldPort_;
+ }
+ void run() {
+ ASSERT( ReplPair( "b:9", "-" ).dominant( "b" ) );
+ ASSERT( !ReplPair( "b:10", "-" ).dominant( "b" ) );
+ ASSERT( ReplPair( "b", "-" ).dominant( "c" ) );
+ ASSERT( !ReplPair( "b", "-" ).dominant( "a" ) );
+ }
+ private:
+ int oldPort_;
+ };
+
+ class SetMaster {
+ public:
+ void run() {
+ ReplPair rp( "a", "b" );
+ rp.setMaster( ReplPair::State_CantArb, "foo" );
+ ASSERT( rp.state == ReplPair::State_CantArb );
+ ASSERT_EQUALS( "foo", rp.info );
+ rp.setMaster( ReplPair::State_Confused, "foo" );
+ ASSERT( rp.state == ReplPair::State_Confused );
+ }
+ };
+
+ class Negotiate : public Base {
+ public:
+ void run() {
+ ReplPair rp( "a", "b" );
+ MockDBClientConnection cc;
+
+ cc.one( res( 0, 0 ) );
+ rp.negotiate( &cc );
+ ASSERT( rp.state == ReplPair::State_Confused );
+
+ rp.state = ReplPair::State_Negotiating;
+ cc.one( res( 1, 2 ) );
+ rp.negotiate( &cc );
+ ASSERT( rp.state == ReplPair::State_Negotiating );
+
+ cc.one( res( 1, ReplPair::State_Slave ) );
+ rp.negotiate( &cc );
+ ASSERT( rp.state == ReplPair::State_Slave );
+
+ cc.one( res( 1, ReplPair::State_Master ) );
+ rp.negotiate( &cc );
+ ASSERT( rp.state == ReplPair::State_Master );
+ }
+ private:
+ BSONObj res( int ok, int youAre ) {
+ BSONObjBuilder b;
+ b.appendInt( "ok", ok );
+ b.appendInt( "you_are", youAre );
+ return b.doneAndDecouple();
+ }
+ };
+
+ class Arbitrate : public Base {
+ public:
+ void run() {
+ ReplPair rp1( "a", "-" );
+ rp1.arbitrate();
+ ASSERT( rp1.state == ReplPair::State_Master );
+
+ TestableReplPair rp2( false, false );
+ rp2.arbitrate();
+ ASSERT( rp2.state == ReplPair::State_CantArb );
+
+ BSONObjBuilder b;
+ b.append( "foo", 1 );
+ TestableReplPair rp3( true, true );
+ rp3.arbitrate();
+ ASSERT( rp3.state == ReplPair::State_Master );
+ }
+ private:
+ class TestableReplPair : public ReplPair {
+ public:
+ TestableReplPair( bool connect, bool isMaster ) :
+ ReplPair( "a", "z" ),
+ connect_( connect ),
+ isMaster_( isMaster ) {
+ }
+ virtual
+ DBClientConnection *newClientConnection() const {
+ MockDBClientConnection * c = new MockDBClientConnection();
+ c->connect( connect_ );
+ c->setIsMaster( isMaster_ );
+ return c;
+ }
+ private:
+ bool connect_;
+ bool isMaster_;
+ };
+ };
+ } // namespace ReplPairTests
+
+ class DirectConnectBase : public Base {
+ protected:
+ void negotiate( ReplPair &a, ReplPair &b ) {
+ auto_ptr< DBClientConnection > c( new DirectDBClientConnection( &b, cc() ) );
+ a.negotiate( c.get() );
+ }
+ class DirectConnectionReplPair : public ReplPair {
+ public:
+ DirectConnectionReplPair( ReplPair *dest ) :
+ ReplPair( "a", "c" ),
+ dest_( dest ) {
+ }
+ virtual DBClientConnection *newClientConnection() const {
+ return new DirectDBClientConnection( dest_ );
+ }
+ private:
+ ReplPair *dest_;
+ };
+ virtual DirectDBClientConnection::ConnectionCallback *cc() {
+ return 0;
+ }
+ void checkNegotiation( const char *host1, const char *arb1, int state1, int newState1,
+ const char *host2, const char *arb2, int state2, int newState2 ) {
+ ReplPair one( host1, arb1 );
+ one.state = state1;
+ ReplPair two( host2, arb2 );
+ two.state = state2;
+ negotiate( one, two );
+ ASSERT( one.state == newState1 );
+ ASSERT( two.state == newState2 );
+ }
+ };
+
+ class Negotiate : public DirectConnectBase {
public:
- DirectConnectionReplPair( ReplPair *dest ) :
- ReplPair( "a", "c" ),
- dest_( dest ) {
+ void run() {
+ checkNegotiation( "a", "-", ReplPair::State_Negotiating, ReplPair::State_Negotiating,
+ "b", "-", ReplPair::State_Negotiating, ReplPair::State_Negotiating );
+ checkNegotiation( "b", "-", ReplPair::State_Negotiating, ReplPair::State_Slave,
+ "a", "-", ReplPair::State_Negotiating, ReplPair::State_Master );
+
+ checkNegotiation( "b", "-", ReplPair::State_Master, ReplPair::State_Master,
+ "a", "-", ReplPair::State_Negotiating, ReplPair::State_Slave );
+
+ // No change when negotiate() called on a.
+ checkNegotiation( "a", "-", ReplPair::State_Master, ReplPair::State_Master,
+ "b", "-", ReplPair::State_Master, ReplPair::State_Master );
+ // Resolve Master - Master.
+ checkNegotiation( "b", "-", ReplPair::State_Master, ReplPair::State_Slave,
+ "a", "-", ReplPair::State_Master, ReplPair::State_Master );
+
+ // FIXME Move from negotiating to master?
+ checkNegotiation( "b", "-", ReplPair::State_Slave, ReplPair::State_Slave,
+ "a", "-", ReplPair::State_Negotiating, ReplPair::State_Master );
}
- virtual DBClientConnection *newClientConnection() const {
- return new DirectDBClientConnection( dest_ );
+ };
+
+ class NegotiateWithCatchup : public DirectConnectBase {
+ public:
+ void run() {
+ // a caught up, b not
+ setNotSynced();
+ checkNegotiation( "b", "-", ReplPair::State_Negotiating, ReplPair::State_Slave,
+ "a", "-", ReplPair::State_Negotiating, ReplPair::State_Master );
+ // b caught up, a not
+ setSynced();
+ checkNegotiation( "b", "-", ReplPair::State_Negotiating, ReplPair::State_Master,
+ "a", "-", ReplPair::State_Negotiating, ReplPair::State_Slave );
+
+ // a caught up, b not
+ setNotSynced();
+ checkNegotiation( "b", "-", ReplPair::State_Slave, ReplPair::State_Slave,
+ "a", "-", ReplPair::State_Negotiating, ReplPair::State_Master );
+ // b caught up, a not
+ setSynced();
+ checkNegotiation( "b", "-", ReplPair::State_Slave, ReplPair::State_Master,
+ "a", "-", ReplPair::State_Negotiating, ReplPair::State_Slave );
}
private:
- ReplPair *dest_;
+ class NegateCatchup : public DirectDBClientConnection::ConnectionCallback {
+ virtual void beforeCommand() {
+ Base::flipSync();
+ }
+ virtual void afterCommand() {
+ Base::flipSync();
+ }
+ };
+ virtual DirectDBClientConnection::ConnectionCallback *cc() {
+ return &cc_;
+ }
+ NegateCatchup cc_;
+ };
+
+ class NobodyCaughtUp : public DirectConnectBase {
+ public:
+ void run() {
+ setNotSynced();
+ checkNegotiation( "b", "-", ReplPair::State_Negotiating, ReplPair::State_Negotiating,
+ "a", "-", ReplPair::State_Negotiating, ReplPair::State_Slave );
+ }
};
- virtual DirectDBClientConnection::ConnectionCallback *cc() {
- return 0;
- }
- void checkNegotiation( const char *host1, const char *arb1, int state1, int newState1,
- const char *host2, const char *arb2, int state2, int newState2 ) {
- ReplPair one( host1, arb1 );
- one.state = state1;
- ReplPair two( host2, arb2 );
- two.state = state2;
- negotiate( one, two );
- ASSERT( one.state == newState1 );
- ASSERT( two.state == newState2 );
- }
-};
-
-class Negotiate : public DirectConnectBase {
-public:
- void run() {
- checkNegotiation( "a", "-", ReplPair::State_Negotiating, ReplPair::State_Negotiating,
- "b", "-", ReplPair::State_Negotiating, ReplPair::State_Negotiating );
- checkNegotiation( "b", "-", ReplPair::State_Negotiating, ReplPair::State_Slave,
- "a", "-", ReplPair::State_Negotiating, ReplPair::State_Master );
-
- checkNegotiation( "b", "-", ReplPair::State_Master, ReplPair::State_Master,
- "a", "-", ReplPair::State_Negotiating, ReplPair::State_Slave );
-
- // No change when negotiate() called on a.
- checkNegotiation( "a", "-", ReplPair::State_Master, ReplPair::State_Master,
- "b", "-", ReplPair::State_Master, ReplPair::State_Master );
- // Resolve Master - Master.
- checkNegotiation( "b", "-", ReplPair::State_Master, ReplPair::State_Slave,
- "a", "-", ReplPair::State_Master, ReplPair::State_Master );
-
- // FIXME Move from negotiating to master?
- checkNegotiation( "b", "-", ReplPair::State_Slave, ReplPair::State_Slave,
- "a", "-", ReplPair::State_Negotiating, ReplPair::State_Master );
- }
-};
-
-class NegotiateWithCatchup : public DirectConnectBase {
-public:
- void run() {
- // a caught up, b not
- setNotSynced();
- checkNegotiation( "b", "-", ReplPair::State_Negotiating, ReplPair::State_Slave,
- "a", "-", ReplPair::State_Negotiating, ReplPair::State_Master );
- // b caught up, a not
- setSynced();
- checkNegotiation( "b", "-", ReplPair::State_Negotiating, ReplPair::State_Master,
- "a", "-", ReplPair::State_Negotiating, ReplPair::State_Slave );
-
- // a caught up, b not
- setNotSynced();
- checkNegotiation( "b", "-", ReplPair::State_Slave, ReplPair::State_Slave,
- "a", "-", ReplPair::State_Negotiating, ReplPair::State_Master );
- // b caught up, a not
- setSynced();
- checkNegotiation( "b", "-", ReplPair::State_Slave, ReplPair::State_Master,
- "a", "-", ReplPair::State_Negotiating, ReplPair::State_Slave );
- }
-private:
- class NegateCatchup : public DirectDBClientConnection::ConnectionCallback {
- virtual void beforeCommand() {
- Base::flipSync();
+
+ class Arbitrate : public DirectConnectBase {
+ public:
+ void run() {
+ ReplPair arb( "c", "-" );
+ DirectConnectionReplPair m( &arb );
+ m.arbitrate();
+ ASSERT( m.state == ReplPair::State_Master );
+
+ setNotSynced();
+ m.state = ReplPair::State_Negotiating;
+ m.arbitrate();
+ ASSERT( m.state == ReplPair::State_Negotiating );
}
- virtual void afterCommand() {
- Base::flipSync();
+ };
+
+ class All : public UnitTest::Suite {
+ public:
+ All() {
+ add< ReplPairTests::Create >();
+ add< ReplPairTests::Dominant >();
+ add< ReplPairTests::SetMaster >();
+ add< ReplPairTests::Negotiate >();
+ add< ReplPairTests::Arbitrate >();
+ add< Negotiate >();
+ add< NegotiateWithCatchup >();
+ add< NobodyCaughtUp >();
+ add< Arbitrate >();
}
};
- virtual DirectDBClientConnection::ConnectionCallback *cc() {
- return &cc_;
- }
- NegateCatchup cc_;
-};
-
-class NobodyCaughtUp : public DirectConnectBase {
-public:
- void run() {
- setNotSynced();
- checkNegotiation( "b", "-", ReplPair::State_Negotiating, ReplPair::State_Negotiating,
- "a", "-", ReplPair::State_Negotiating, ReplPair::State_Slave );
- }
-};
-
-class Arbitrate : public DirectConnectBase {
-public:
- void run() {
- ReplPair arb( "c", "-" );
- DirectConnectionReplPair m( &arb );
- m.arbitrate();
- ASSERT( m.state == ReplPair::State_Master );
-
- setNotSynced();
- m.state = ReplPair::State_Negotiating;
- m.arbitrate();
- ASSERT( m.state == ReplPair::State_Negotiating );
- }
-};
-
-class All : public UnitTest::Suite {
-public:
- All() {
- add< ReplPairTests::Create >();
- add< ReplPairTests::Dominant >();
- add< ReplPairTests::SetMaster >();
- add< ReplPairTests::Negotiate >();
- add< ReplPairTests::Arbitrate >();
- add< Negotiate >();
- add< NegotiateWithCatchup >();
- add< NobodyCaughtUp >();
- add< Arbitrate >();
- }
-};
} // namespace PairingTests
UnitTest::TestPtr pairingTests() {
diff --git a/dbtests/pdfiletests.cpp b/dbtests/pdfiletests.cpp
index 8739d54f7aa..8968844e228 100644
--- a/dbtests/pdfiletests.cpp
+++ b/dbtests/pdfiletests.cpp
@@ -26,262 +26,262 @@
namespace PdfileTests {
-namespace ScanCapped {
+ namespace ScanCapped {
-class Base {
-public:
- Base() {
- dblock lk;
- setClient( ns() );
- }
- ~Base() {
- if ( !nsd() )
- return;
- string n( ns() );
- dropNS( n );
- }
- void run() {
- stringstream spec;
- spec << "{\"capped\":true,\"size\":2000,\"$nExtents\":" << nExtents() << "}";
- string err;
- ASSERT( userCreateNS( ns(), fromjson( spec.str() ), err, false ) );
- prepare();
- int j = 0;
- for ( auto_ptr< Cursor > i = theDataFileMgr.findAll( ns() );
- i->ok(); i->advance(), ++j )
- ASSERT_EQUALS( j, i->current().firstElement().number() );
- ASSERT_EQUALS( count(), j );
+ class Base {
+ public:
+ Base() {
+ dblock lk;
+ setClient( ns() );
+ }
+ ~Base() {
+ if ( !nsd() )
+ return;
+ string n( ns() );
+ dropNS( n );
+ }
+ void run() {
+ stringstream spec;
+ spec << "{\"capped\":true,\"size\":2000,\"$nExtents\":" << nExtents() << "}";
+ string err;
+ ASSERT( userCreateNS( ns(), fromjson( spec.str() ), err, false ) );
+ prepare();
+ int j = 0;
+ for ( auto_ptr< Cursor > i = theDataFileMgr.findAll( ns() );
+ i->ok(); i->advance(), ++j )
+ ASSERT_EQUALS( j, i->current().firstElement().number() );
+ ASSERT_EQUALS( count(), j );
- j = count() - 1;
- for ( auto_ptr< Cursor > i =
- findTableScan( ns(), fromjson( "{\"$natural\":-1}" ) );
- i->ok(); i->advance(), --j )
- ASSERT_EQUALS( j, i->current().firstElement().number() );
- ASSERT_EQUALS( -1, j );
- }
-protected:
- virtual void prepare() = 0;
- virtual int count() const = 0;
- virtual int nExtents() const {
- return 0;
- }
- // bypass standard alloc/insert routines to use the extent we want.
- static DiskLoc insert( DiskLoc ext, int i ) {
- BSONObjBuilder b;
- b.append( "a", i );
- BSONObj o = b.done();
- int len = o.objsize();
- Extent *e = ext.ext();
- int ofs;
- if ( e->lastRecord.isNull() )
- ofs = ext.getOfs() + ( e->extentData - (char *)e );
- else
- ofs = e->lastRecord.getOfs() + e->lastRecord.rec()->lengthWithHeaders;
- DiskLoc dl( ext.a(), ofs );
- Record *r = dl.rec();
- r->lengthWithHeaders = Record::HeaderSize + len;
- r->extentOfs = e->myLoc.getOfs();
- r->nextOfs = DiskLoc::NullOfs;
- r->prevOfs = e->lastRecord.getOfs();
- memcpy( r->data, o.objdata(), len );
- if ( e->firstRecord.isNull() )
- e->firstRecord = dl;
- else
- e->lastRecord.rec()->nextOfs = ofs;
- e->lastRecord = dl;
- return dl;
- }
- static const char *ns() {
- return "ScanCapped";
- }
- static NamespaceDetails *nsd() {
- return nsdetails( ns() );
- }
-};
+ j = count() - 1;
+ for ( auto_ptr< Cursor > i =
+ findTableScan( ns(), fromjson( "{\"$natural\":-1}" ) );
+ i->ok(); i->advance(), --j )
+ ASSERT_EQUALS( j, i->current().firstElement().number() );
+ ASSERT_EQUALS( -1, j );
+ }
+ protected:
+ virtual void prepare() = 0;
+ virtual int count() const = 0;
+ virtual int nExtents() const {
+ return 0;
+ }
+ // bypass standard alloc/insert routines to use the extent we want.
+ static DiskLoc insert( DiskLoc ext, int i ) {
+ BSONObjBuilder b;
+ b.append( "a", i );
+ BSONObj o = b.done();
+ int len = o.objsize();
+ Extent *e = ext.ext();
+ int ofs;
+ if ( e->lastRecord.isNull() )
+ ofs = ext.getOfs() + ( e->extentData - (char *)e );
+ else
+ ofs = e->lastRecord.getOfs() + e->lastRecord.rec()->lengthWithHeaders;
+ DiskLoc dl( ext.a(), ofs );
+ Record *r = dl.rec();
+ r->lengthWithHeaders = Record::HeaderSize + len;
+ r->extentOfs = e->myLoc.getOfs();
+ r->nextOfs = DiskLoc::NullOfs;
+ r->prevOfs = e->lastRecord.getOfs();
+ memcpy( r->data, o.objdata(), len );
+ if ( e->firstRecord.isNull() )
+ e->firstRecord = dl;
+ else
+ e->lastRecord.rec()->nextOfs = ofs;
+ e->lastRecord = dl;
+ return dl;
+ }
+ static const char *ns() {
+ return "ScanCapped";
+ }
+ static NamespaceDetails *nsd() {
+ return nsdetails( ns() );
+ }
+ };
-class Empty : public Base {
- virtual void prepare() {}
- virtual int count() const {
- return 0;
- }
-};
+ class Empty : public Base {
+ virtual void prepare() {}
+ virtual int count() const {
+ return 0;
+ }
+ };
-class EmptyLooped : public Base {
- virtual void prepare() {
- nsd()->capFirstNewRecord = DiskLoc();
- }
- virtual int count() const {
- return 0;
- }
-};
+ class EmptyLooped : public Base {
+ virtual void prepare() {
+ nsd()->capFirstNewRecord = DiskLoc();
+ }
+ virtual int count() const {
+ return 0;
+ }
+ };
-class EmptyMultiExtentLooped : public Base {
- virtual void prepare() {
- nsd()->capFirstNewRecord = DiskLoc();
- }
- virtual int count() const {
- return 0;
- }
- virtual int nExtents() const {
- return 3;
- }
-};
+ class EmptyMultiExtentLooped : public Base {
+ virtual void prepare() {
+ nsd()->capFirstNewRecord = DiskLoc();
+ }
+ virtual int count() const {
+ return 0;
+ }
+ virtual int nExtents() const {
+ return 3;
+ }
+ };
-class Single : public Base {
- virtual void prepare() {
- nsd()->capFirstNewRecord = insert( nsd()->capExtent, 0 );
- }
- virtual int count() const {
- return 1;
- }
-};
+ class Single : public Base {
+ virtual void prepare() {
+ nsd()->capFirstNewRecord = insert( nsd()->capExtent, 0 );
+ }
+ virtual int count() const {
+ return 1;
+ }
+ };
-class NewCapFirst : public Base {
- virtual void prepare() {
- nsd()->capFirstNewRecord = insert( nsd()->capExtent, 0 );
- insert( nsd()->capExtent, 1 );
- }
- virtual int count() const {
- return 2;
- }
-};
+ class NewCapFirst : public Base {
+ virtual void prepare() {
+ nsd()->capFirstNewRecord = insert( nsd()->capExtent, 0 );
+ insert( nsd()->capExtent, 1 );
+ }
+ virtual int count() const {
+ return 2;
+ }
+ };
-class NewCapLast : public Base {
- virtual void prepare() {
- insert( nsd()->capExtent, 0 );
- nsd()->capFirstNewRecord = insert( nsd()->capExtent, 1 );
- }
- virtual int count() const {
- return 2;
- }
-};
+ class NewCapLast : public Base {
+ virtual void prepare() {
+ insert( nsd()->capExtent, 0 );
+ nsd()->capFirstNewRecord = insert( nsd()->capExtent, 1 );
+ }
+ virtual int count() const {
+ return 2;
+ }
+ };
-class NewCapMiddle : public Base {
- virtual void prepare() {
- insert( nsd()->capExtent, 0 );
- nsd()->capFirstNewRecord = insert( nsd()->capExtent, 1 );
- insert( nsd()->capExtent, 2 );
- }
- virtual int count() const {
- return 3;
- }
-};
+ class NewCapMiddle : public Base {
+ virtual void prepare() {
+ insert( nsd()->capExtent, 0 );
+ nsd()->capFirstNewRecord = insert( nsd()->capExtent, 1 );
+ insert( nsd()->capExtent, 2 );
+ }
+ virtual int count() const {
+ return 3;
+ }
+ };
-class FirstExtent : public Base {
- virtual void prepare() {
- insert( nsd()->capExtent, 0 );
- insert( nsd()->lastExtent, 1 );
- nsd()->capFirstNewRecord = insert( nsd()->capExtent, 2 );
- insert( nsd()->capExtent, 3 );
- }
- virtual int count() const {
- return 4;
- }
- virtual int nExtents() const {
- return 2;
- }
-};
+ class FirstExtent : public Base {
+ virtual void prepare() {
+ insert( nsd()->capExtent, 0 );
+ insert( nsd()->lastExtent, 1 );
+ nsd()->capFirstNewRecord = insert( nsd()->capExtent, 2 );
+ insert( nsd()->capExtent, 3 );
+ }
+ virtual int count() const {
+ return 4;
+ }
+ virtual int nExtents() const {
+ return 2;
+ }
+ };
-class LastExtent : public Base {
- virtual void prepare() {
- nsd()->capExtent = nsd()->lastExtent;
- insert( nsd()->capExtent, 0 );
- insert( nsd()->firstExtent, 1 );
- nsd()->capFirstNewRecord = insert( nsd()->capExtent, 2 );
- insert( nsd()->capExtent, 3 );
- }
- virtual int count() const {
- return 4;
- }
- virtual int nExtents() const {
- return 2;
- }
-};
+ class LastExtent : public Base {
+ virtual void prepare() {
+ nsd()->capExtent = nsd()->lastExtent;
+ insert( nsd()->capExtent, 0 );
+ insert( nsd()->firstExtent, 1 );
+ nsd()->capFirstNewRecord = insert( nsd()->capExtent, 2 );
+ insert( nsd()->capExtent, 3 );
+ }
+ virtual int count() const {
+ return 4;
+ }
+ virtual int nExtents() const {
+ return 2;
+ }
+ };
-class MidExtent : public Base {
- virtual void prepare() {
- nsd()->capExtent = nsd()->firstExtent.ext()->xnext;
- insert( nsd()->capExtent, 0 );
- insert( nsd()->lastExtent, 1 );
- insert( nsd()->firstExtent, 2 );
- nsd()->capFirstNewRecord = insert( nsd()->capExtent, 3 );
- insert( nsd()->capExtent, 4 );
- }
- virtual int count() const {
- return 5;
- }
- virtual int nExtents() const {
- return 3;
- }
-};
+ class MidExtent : public Base {
+ virtual void prepare() {
+ nsd()->capExtent = nsd()->firstExtent.ext()->xnext;
+ insert( nsd()->capExtent, 0 );
+ insert( nsd()->lastExtent, 1 );
+ insert( nsd()->firstExtent, 2 );
+ nsd()->capFirstNewRecord = insert( nsd()->capExtent, 3 );
+ insert( nsd()->capExtent, 4 );
+ }
+ virtual int count() const {
+ return 5;
+ }
+ virtual int nExtents() const {
+ return 3;
+ }
+ };
-class AloneInExtent : public Base {
- virtual void prepare() {
- nsd()->capExtent = nsd()->firstExtent.ext()->xnext;
- insert( nsd()->lastExtent, 0 );
- insert( nsd()->firstExtent, 1 );
- nsd()->capFirstNewRecord = insert( nsd()->capExtent, 2 );
- }
- virtual int count() const {
- return 3;
- }
- virtual int nExtents() const {
- return 3;
- }
-};
+ class AloneInExtent : public Base {
+ virtual void prepare() {
+ nsd()->capExtent = nsd()->firstExtent.ext()->xnext;
+ insert( nsd()->lastExtent, 0 );
+ insert( nsd()->firstExtent, 1 );
+ nsd()->capFirstNewRecord = insert( nsd()->capExtent, 2 );
+ }
+ virtual int count() const {
+ return 3;
+ }
+ virtual int nExtents() const {
+ return 3;
+ }
+ };
-class FirstInExtent : public Base {
- virtual void prepare() {
- nsd()->capExtent = nsd()->firstExtent.ext()->xnext;
- insert( nsd()->lastExtent, 0 );
- insert( nsd()->firstExtent, 1 );
- nsd()->capFirstNewRecord = insert( nsd()->capExtent, 2 );
- insert( nsd()->capExtent, 3 );
- }
- virtual int count() const {
- return 4;
- }
- virtual int nExtents() const {
- return 3;
- }
-};
+ class FirstInExtent : public Base {
+ virtual void prepare() {
+ nsd()->capExtent = nsd()->firstExtent.ext()->xnext;
+ insert( nsd()->lastExtent, 0 );
+ insert( nsd()->firstExtent, 1 );
+ nsd()->capFirstNewRecord = insert( nsd()->capExtent, 2 );
+ insert( nsd()->capExtent, 3 );
+ }
+ virtual int count() const {
+ return 4;
+ }
+ virtual int nExtents() const {
+ return 3;
+ }
+ };
-class LastInExtent : public Base {
- virtual void prepare() {
- nsd()->capExtent = nsd()->firstExtent.ext()->xnext;
- insert( nsd()->capExtent, 0 );
- insert( nsd()->lastExtent, 1 );
- insert( nsd()->firstExtent, 2 );
- nsd()->capFirstNewRecord = insert( nsd()->capExtent, 3 );
- }
- virtual int count() const {
- return 4;
- }
- virtual int nExtents() const {
- return 3;
- }
-};
+ class LastInExtent : public Base {
+ virtual void prepare() {
+ nsd()->capExtent = nsd()->firstExtent.ext()->xnext;
+ insert( nsd()->capExtent, 0 );
+ insert( nsd()->lastExtent, 1 );
+ insert( nsd()->firstExtent, 2 );
+ nsd()->capFirstNewRecord = insert( nsd()->capExtent, 3 );
+ }
+ virtual int count() const {
+ return 4;
+ }
+ virtual int nExtents() const {
+ return 3;
+ }
+ };
-} // namespace ScanCapped
+ } // namespace ScanCapped
-class All : public UnitTest::Suite {
-public:
- All() {
- add< ScanCapped::Empty >();
- add< ScanCapped::EmptyLooped >();
- add< ScanCapped::EmptyMultiExtentLooped >();
- add< ScanCapped::Single >();
- add< ScanCapped::NewCapFirst >();
- add< ScanCapped::NewCapLast >();
- add< ScanCapped::NewCapMiddle >();
- add< ScanCapped::FirstExtent >();
- add< ScanCapped::LastExtent >();
- add< ScanCapped::MidExtent >();
- add< ScanCapped::AloneInExtent >();
- add< ScanCapped::FirstInExtent >();
- add< ScanCapped::LastInExtent >();
- }
-};
+ class All : public UnitTest::Suite {
+ public:
+ All() {
+ add< ScanCapped::Empty >();
+ add< ScanCapped::EmptyLooped >();
+ add< ScanCapped::EmptyMultiExtentLooped >();
+ add< ScanCapped::Single >();
+ add< ScanCapped::NewCapFirst >();
+ add< ScanCapped::NewCapLast >();
+ add< ScanCapped::NewCapMiddle >();
+ add< ScanCapped::FirstExtent >();
+ add< ScanCapped::LastExtent >();
+ add< ScanCapped::MidExtent >();
+ add< ScanCapped::AloneInExtent >();
+ add< ScanCapped::FirstInExtent >();
+ add< ScanCapped::LastInExtent >();
+ }
+ };
} // namespace PdfileTests
diff --git a/grid/message.cpp b/grid/message.cpp
index 543f5511932..fd48c9443b4 100644
--- a/grid/message.cpp
+++ b/grid/message.cpp
@@ -30,129 +30,129 @@ namespace mongo {
// if you want trace output:
#define mmm(x)
-/* listener ------------------------------------------------------------------- */
-
-void Listener::listen() {
- SockAddr me(port);
- int sock = socket(AF_INET, SOCK_STREAM, 0);
- if ( sock == INVALID_SOCKET ) {
- log() << "ERROR: listen(): invalid socket? " << errno << endl;
- return;
- }
- prebindOptions( sock );
- if ( ::bind(sock, (sockaddr *) &me.sa, me.addressSize) != 0 ) {
- log() << "listen(): bind() failed errno:" << errno << endl;
- if ( errno == 98 )
- log() << "98 == addr already in use" << endl;
- closesocket(sock);
- return;
- }
+ /* listener ------------------------------------------------------------------- */
+
+ void Listener::listen() {
+ SockAddr me(port);
+ int sock = socket(AF_INET, SOCK_STREAM, 0);
+ if ( sock == INVALID_SOCKET ) {
+ log() << "ERROR: listen(): invalid socket? " << errno << endl;
+ return;
+ }
+ prebindOptions( sock );
+ if ( ::bind(sock, (sockaddr *) &me.sa, me.addressSize) != 0 ) {
+ log() << "listen(): bind() failed errno:" << errno << endl;
+ if ( errno == 98 )
+ log() << "98 == addr already in use" << endl;
+ closesocket(sock);
+ return;
+ }
- if ( ::listen(sock, 128) != 0 ) {
- log() << "listen(): listen() failed " << errno << endl;
- closesocket(sock);
- return;
- }
+ if ( ::listen(sock, 128) != 0 ) {
+ log() << "listen(): listen() failed " << errno << endl;
+ closesocket(sock);
+ return;
+ }
- SockAddr from;
- while ( 1 ) {
- int s = accept(sock, (sockaddr *) &from.sa, &from.addressSize);
- if ( s < 0 ) {
- log() << "Listener: accept() returns " << s << " errno:" << errno << endl;
- continue;
+ SockAddr from;
+ while ( 1 ) {
+ int s = accept(sock, (sockaddr *) &from.sa, &from.addressSize);
+ if ( s < 0 ) {
+ log() << "Listener: accept() returns " << s << " errno:" << errno << endl;
+ continue;
+ }
+ disableNagle(s);
+ log() << "connection accepted from " << from.toString() << endl;
+ accepted( new MessagingPort(s, from) );
}
- disableNagle(s);
- log() << "connection accepted from " << from.toString() << endl;
- accepted( new MessagingPort(s, from) );
}
-}
-/* messagingport -------------------------------------------------------------- */
+ /* messagingport -------------------------------------------------------------- */
-class PiggyBackData {
-public:
- PiggyBackData( MessagingPort * port ) {
- _port = port;
- _buf = new char[1300];
- _cur = _buf;
- }
+ class PiggyBackData {
+ public:
+ PiggyBackData( MessagingPort * port ) {
+ _port = port;
+ _buf = new char[1300];
+ _cur = _buf;
+ }
- ~PiggyBackData() {
- flush();
- delete( _cur );
- }
+ ~PiggyBackData() {
+ flush();
+ delete( _cur );
+ }
- void append( Message& m ) {
- assert( m.data->len <= 1300 );
+ void append( Message& m ) {
+ assert( m.data->len <= 1300 );
- if ( len() + m.data->len > 1300 )
- flush();
+ if ( len() + m.data->len > 1300 )
+ flush();
- memcpy( _cur , m.data , m.data->len );
- _cur += m.data->len;
- }
+ memcpy( _cur , m.data , m.data->len );
+ _cur += m.data->len;
+ }
- int flush() {
- if ( _buf == _cur )
- return 0;
+ int flush() {
+ if ( _buf == _cur )
+ return 0;
- int x = ::send( _port->sock , _buf , len() , 0 );
- _cur = _buf;
- return x;
- }
+ int x = ::send( _port->sock , _buf , len() , 0 );
+ _cur = _buf;
+ return x;
+ }
- int len() {
- return _cur - _buf;
- }
+ int len() {
+ return _cur - _buf;
+ }
-private:
+ private:
- MessagingPort* _port;
+ MessagingPort* _port;
- char * _buf;
- char * _cur;
-};
+ char * _buf;
+ char * _cur;
+ };
-MSGID NextMsgId;
-struct MsgStart {
- MsgStart() {
- NextMsgId = (((unsigned) time(0)) << 16) ^ curTimeMillis();
- assert(MsgDataHeaderSize == 16);
- }
-} msgstart;
+ MSGID NextMsgId;
+ struct MsgStart {
+ MsgStart() {
+ NextMsgId = (((unsigned) time(0)) << 16) ^ curTimeMillis();
+ assert(MsgDataHeaderSize == 16);
+ }
+ } msgstart;
// we "new" this so it guaranteed to still be around when other automatic global vars
// are being destructed during termination.
-set<MessagingPort*>& ports = *(new set<MessagingPort*>());
-
-void closeAllSockets() {
- for ( set<MessagingPort*>::iterator i = ports.begin(); i != ports.end(); i++ )
- (*i)->shutdown();
-}
-
-MessagingPort::MessagingPort(int _sock, SockAddr& _far) : sock(_sock), piggyBackData(0), farEnd(_far) {
- ports.insert(this);
-}
-
-MessagingPort::MessagingPort() {
- ports.insert(this);
- sock = -1;
- piggyBackData = 0;
-}
-
-void MessagingPort::shutdown() {
- if ( sock >= 0 ) {
- closesocket(sock);
+ set<MessagingPort*>& ports = *(new set<MessagingPort*>());
+
+ void closeAllSockets() {
+ for ( set<MessagingPort*>::iterator i = ports.begin(); i != ports.end(); i++ )
+ (*i)->shutdown();
+ }
+
+ MessagingPort::MessagingPort(int _sock, SockAddr& _far) : sock(_sock), piggyBackData(0), farEnd(_far) {
+ ports.insert(this);
+ }
+
+ MessagingPort::MessagingPort() {
+ ports.insert(this);
sock = -1;
+ piggyBackData = 0;
}
-}
-MessagingPort::~MessagingPort() {
- if ( piggyBackData )
- delete( piggyBackData );
- shutdown();
- ports.erase(this);
-}
+ void MessagingPort::shutdown() {
+ if ( sock >= 0 ) {
+ closesocket(sock);
+ sock = -1;
+ }
+ }
+
+ MessagingPort::~MessagingPort() {
+ if ( piggyBackData )
+ delete( piggyBackData );
+ shutdown();
+ ports.erase(this);
+ }
} // namespace mongo
@@ -160,222 +160,222 @@ MessagingPort::~MessagingPort() {
namespace mongo {
-class ConnectBG : public BackgroundJob {
-public:
- int sock;
- int res;
- SockAddr farEnd;
- void run() {
- res = ::connect(sock, (sockaddr *) &farEnd.sa, farEnd.addressSize);
- }
-};
+ class ConnectBG : public BackgroundJob {
+ public:
+ int sock;
+ int res;
+ SockAddr farEnd;
+ void run() {
+ res = ::connect(sock, (sockaddr *) &farEnd.sa, farEnd.addressSize);
+ }
+ };
-bool MessagingPort::connect(SockAddr& _far)
-{
- farEnd = _far;
+ bool MessagingPort::connect(SockAddr& _far)
+ {
+ farEnd = _far;
- sock = socket(AF_INET, SOCK_STREAM, 0);
- if ( sock == INVALID_SOCKET ) {
- log() << "ERROR: connect(): invalid socket? " << errno << endl;
- return false;
- }
+ sock = socket(AF_INET, SOCK_STREAM, 0);
+ if ( sock == INVALID_SOCKET ) {
+ log() << "ERROR: connect(): invalid socket? " << errno << endl;
+ return false;
+ }
#if 0
- long fl = fcntl(sock, F_GETFL, 0);
- assert( fl >= 0 );
- fl |= O_NONBLOCK;
- fcntl(sock, F_SETFL, fl);
-
- int res = ::connect(sock, (sockaddr *) &farEnd.sa, farEnd.addressSize);
- if ( res ) {
- if ( errno == EINPROGRESS )
- //log() << "connect(): failed errno:" << errno << ' ' << farEnd.getPort() << endl;
- closesocket(sock);
- sock = -1;
- return false;
- }
+ long fl = fcntl(sock, F_GETFL, 0);
+ assert( fl >= 0 );
+ fl |= O_NONBLOCK;
+ fcntl(sock, F_SETFL, fl);
+
+ int res = ::connect(sock, (sockaddr *) &farEnd.sa, farEnd.addressSize);
+ if ( res ) {
+ if ( errno == EINPROGRESS )
+ //log() << "connect(): failed errno:" << errno << ' ' << farEnd.getPort() << endl;
+ closesocket(sock);
+ sock = -1;
+ return false;
+ }
#endif
- ConnectBG bg;
- bg.sock = sock;
- bg.farEnd = farEnd;
- bg.go();
+ ConnectBG bg;
+ bg.sock = sock;
+ bg.farEnd = farEnd;
+ bg.go();
- // int res = ::connect(sock, (sockaddr *) &farEnd.sa, farEnd.addressSize);
- if ( bg.wait(5000) ) {
- if ( bg.res ) {
+ // int res = ::connect(sock, (sockaddr *) &farEnd.sa, farEnd.addressSize);
+ if ( bg.wait(5000) ) {
+ if ( bg.res ) {
+ closesocket(sock);
+ sock = -1;
+ return false;
+ }
+ }
+ else {
+ // time out the connect
closesocket(sock);
sock = -1;
+ bg.wait(); // so bg stays in scope until bg thread terminates
return false;
}
- }
- else {
- // time out the connect
- closesocket(sock);
- sock = -1;
- bg.wait(); // so bg stays in scope until bg thread terminates
- return false;
- }
- disableNagle(sock);
- return true;
-}
+ disableNagle(sock);
+ return true;
+ }
-bool MessagingPort::recv(Message& m) {
+ bool MessagingPort::recv(Message& m) {
again:
- mmm( cout << "* recv() sock:" << this->sock << endl; )
- int len = -1;
-
- char *lenbuf = (char *) &len;
- int lft = 4;
- while ( 1 ) {
- int x = ::recv(sock, lenbuf, lft, 0);
- if ( x == 0 ) {
- DEV cout << "MessagingPort recv() conn closed? " << farEnd.toString() << endl;
- m.reset();
+ mmm( out() << "* recv() sock:" << this->sock << endl; )
+ int len = -1;
+
+ char *lenbuf = (char *) &len;
+ int lft = 4;
+ while ( 1 ) {
+ int x = ::recv(sock, lenbuf, lft, 0);
+ if ( x == 0 ) {
+ DEV out() << "MessagingPort recv() conn closed? " << farEnd.toString() << endl;
+ m.reset();
+ return false;
+ }
+ if ( x < 0 ) {
+ log() << "MessagingPort recv() error " << errno << ' ' << farEnd.toString()<<endl;
+ m.reset();
+ return false;
+ }
+ lft -= x;
+ if ( lft == 0 )
+ break;
+ lenbuf += x;
+ log() << "MessagingPort recv() got " << x << " bytes wanted 4, lft=" << lft << endl;
+ assert( lft > 0 );
+ }
+
+ if ( len < 0 || len > 16000000 ) {
+ if ( len == -1 ) {
+ // Endian check from the database, after connecting, to see what mode server is running in.
+ unsigned foo = 0x10203040;
+ int x = ::send(sock, (char *) &foo, 4, 0);
+ if ( x <= 0 ) {
+ log() << "MessagingPort endian send() error " << errno << ' ' << farEnd.toString() << endl;
+ return false;
+ }
+ goto again;
+ }
+ log() << "bad recv() len: " << len << '\n';
return false;
}
- if ( x < 0 ) {
- log() << "MessagingPort recv() error " << errno << ' ' << farEnd.toString()<<endl;
- m.reset();
+
+ int z = (len+1023)&0xfffffc00;
+ assert(z>=len);
+ MsgData *md = (MsgData *) malloc(z);
+ md->len = len;
+
+ if ( len <= 0 ) {
+ out() << "got a length of " << len << ", something is wrong" << endl;
return false;
}
- lft -= x;
- if ( lft == 0 )
- break;
- lenbuf += x;
- log() << "MessagingPort recv() got " << x << " bytes wanted 4, lft=" << lft << endl;
- assert( lft > 0 );
- }
- if ( len < 0 || len > 16000000 ) {
- if ( len == -1 ) {
- // Endian check from the database, after connecting, to see what mode server is running in.
- unsigned foo = 0x10203040;
- int x = ::send(sock, (char *) &foo, 4, 0);
- if ( x <= 0 ) {
- log() << "MessagingPort endian send() error " << errno << ' ' << farEnd.toString() << endl;
+ char *p = (char *) &md->id;
+ int left = len -4;
+ while ( 1 ) {
+ int x = ::recv(sock, p, left, 0);
+ if ( x == 0 ) {
+ DEV out() << "MessagingPort::recv(): conn closed? " << farEnd.toString() << endl;
+ m.reset();
+ return false;
+ }
+ if ( x < 0 ) {
+ log() << "MessagingPort recv() error " << errno << ' ' << farEnd.toString() << endl;
+ m.reset();
return false;
}
- goto again;
+ left -= x;
+ p += x;
+ if ( left <= 0 )
+ break;
}
- log() << "bad recv() len: " << len << '\n';
- return false;
- }
-
- int z = (len+1023)&0xfffffc00;
- assert(z>=len);
- MsgData *md = (MsgData *) malloc(z);
- md->len = len;
- if ( len <= 0 ) {
- cout << "got a length of " << len << ", something is wrong" << endl;
- return false;
+ m.setData(md, true);
+ return true;
}
- char *p = (char *) &md->id;
- int left = len -4;
- while ( 1 ) {
- int x = ::recv(sock, p, left, 0);
- if ( x == 0 ) {
- DEV cout << "MessagingPort::recv(): conn closed? " << farEnd.toString() << endl;
- m.reset();
- return false;
- }
- if ( x < 0 ) {
- log() << "MessagingPort recv() error " << errno << ' ' << farEnd.toString() << endl;
- m.reset();
- return false;
- }
- left -= x;
- p += x;
- if ( left <= 0 )
- break;
+ void MessagingPort::reply(Message& received, Message& response) {
+ say(/*received.from, */response, received.data->id);
}
- m.setData(md, true);
- return true;
-}
-
-void MessagingPort::reply(Message& received, Message& response) {
- say(/*received.from, */response, received.data->id);
-}
-
-void MessagingPort::reply(Message& received, Message& response, MSGID responseTo) {
- say(/*received.from, */response, responseTo);
-}
-
-bool MessagingPort::call(Message& toSend, Message& response) {
- mmm( cout << "*call()" << endl; )
- MSGID old = toSend.data->id;
- say(/*to,*/ toSend);
- while ( 1 ) {
- bool ok = recv(response);
- if ( !ok )
- return false;
- //cout << "got response: " << response.data->responseTo << endl;
- if ( response.data->responseTo == toSend.data->id )
- break;
- cout << "********************" << endl;
- cout << "ERROR: MessagingPort::call() wrong id got:" << response.data->responseTo << " expect:" << toSend.data->id << endl;
- cout << " old:" << old << endl;
- cout << " response msgid:" << response.data->id << endl;
- cout << " response len: " << response.data->len << endl;
- assert(false);
- response.reset();
+ void MessagingPort::reply(Message& received, Message& response, MSGID responseTo) {
+ say(/*received.from, */response, responseTo);
}
- mmm( cout << "*call() end" << endl; )
- return true;
-}
-
-void MessagingPort::say(Message& toSend, int responseTo) {
- mmm( cout << "* say() sock:" << this->sock << " thr:" << GetCurrentThreadId() << endl; )
- MSGID msgid = NextMsgId;
- ++NextMsgId;
- toSend.data->id = msgid;
- toSend.data->responseTo = responseTo;
-
- int x = -100;
-
- if ( piggyBackData && piggyBackData->len() ) {
- if ( ( piggyBackData->len() + toSend.data->len ) > 1300 ) {
- // won't fit in a packet - so just send it off
- piggyBackData->flush();
- }
- else {
- piggyBackData->append( toSend );
- x = piggyBackData->flush();
+
+ bool MessagingPort::call(Message& toSend, Message& response) {
+ mmm( out() << "*call()" << endl; )
+ MSGID old = toSend.data->id;
+ say(/*to,*/ toSend);
+ while ( 1 ) {
+ bool ok = recv(response);
+ if ( !ok )
+ return false;
+ //out() << "got response: " << response.data->responseTo << endl;
+ if ( response.data->responseTo == toSend.data->id )
+ break;
+ out() << "********************" << endl;
+ out() << "ERROR: MessagingPort::call() wrong id got:" << response.data->responseTo << " expect:" << toSend.data->id << endl;
+ out() << " old:" << old << endl;
+ out() << " response msgid:" << response.data->id << endl;
+ out() << " response len: " << response.data->len << endl;
+ assert(false);
+ response.reset();
}
+ mmm( out() << "*call() end" << endl; )
+ return true;
}
- if ( x == -100 )
- x = ::send(sock, (char*)toSend.data, toSend.data->len , 0);
+ void MessagingPort::say(Message& toSend, int responseTo) {
+ mmm( out() << "* say() sock:" << this->sock << " thr:" << GetCurrentThreadId() << endl; )
+ MSGID msgid = NextMsgId;
+ ++NextMsgId;
+ toSend.data->id = msgid;
+ toSend.data->responseTo = responseTo;
- if ( x <= 0 ) {
- log() << "MessagingPort say send() error " << errno << ' ' << farEnd.toString() << endl;
- }
+ int x = -100;
+
+ if ( piggyBackData && piggyBackData->len() ) {
+ if ( ( piggyBackData->len() + toSend.data->len ) > 1300 ) {
+ // won't fit in a packet - so just send it off
+ piggyBackData->flush();
+ }
+ else {
+ piggyBackData->append( toSend );
+ x = piggyBackData->flush();
+ }
+ }
-}
+ if ( x == -100 )
+ x = ::send(sock, (char*)toSend.data, toSend.data->len , 0);
-void MessagingPort::piggyBack( Message& toSend , int responseTo ) {
+ if ( x <= 0 ) {
+ log() << "MessagingPort say send() error " << errno << ' ' << farEnd.toString() << endl;
+ }
- if ( toSend.data->len > 1300 ) {
- // not worth saving because its almost an entire packet
- say( toSend );
- return;
}
- // we're going to be storing this, so need to set it up
- MSGID msgid = NextMsgId;
- ++NextMsgId;
- toSend.data->id = msgid;
- toSend.data->responseTo = responseTo;
+ void MessagingPort::piggyBack( Message& toSend , int responseTo ) {
+
+ if ( toSend.data->len > 1300 ) {
+ // not worth saving because its almost an entire packet
+ say( toSend );
+ return;
+ }
- if ( ! piggyBackData )
- piggyBackData = new PiggyBackData( this );
+ // we're going to be storing this, so need to set it up
+ MSGID msgid = NextMsgId;
+ ++NextMsgId;
+ toSend.data->id = msgid;
+ toSend.data->responseTo = responseTo;
- piggyBackData->append( toSend );
-}
+ if ( ! piggyBackData )
+ piggyBackData = new PiggyBackData( this );
+
+ piggyBackData->append( toSend );
+ }
} // namespace mongo
diff --git a/grid/message.h b/grid/message.h
index 3dc27408671..06119df711a 100644
--- a/grid/message.h
+++ b/grid/message.h
@@ -22,159 +22,159 @@
namespace mongo {
-class Message;
-class MessagingPort;
-class PiggyBackData;
-typedef WrappingInt MSGID;
-const int DBPort = 27017;
-
-class Listener {
-public:
- Listener(int p) : port(p) { }
- void listen(); // never returns (start a thread)
-
- /* spawn a thread, etc., then return */
- virtual void accepted(MessagingPort *mp) = 0;
-private:
- int port;
-};
-
-class AbstractMessagingPort {
-public:
- virtual void reply(Message& received, Message& response, MSGID responseTo) = 0; // like the reply below, but doesn't rely on received.data still being available
- virtual void reply(Message& received, Message& response) = 0;
-};
-
-class MessagingPort : public AbstractMessagingPort {
-public:
- MessagingPort(int sock, SockAddr& farEnd);
- MessagingPort();
- ~MessagingPort();
-
- void shutdown();
-
- bool connect(SockAddr& farEnd);
-
- /* it's assumed if you reuse a message object, that it doesn't cross MessagingPort's.
- also, the Message data will go out of scope on the subsequent recv call.
- */
- bool recv(Message& m);
- void reply(Message& received, Message& response, MSGID responseTo);
- void reply(Message& received, Message& response);
- bool call(Message& toSend, Message& response);
- void say(Message& toSend, int responseTo = -1);
-
- void piggyBack( Message& toSend , int responseTo = -1 );
-
-private:
- int sock;
- PiggyBackData * piggyBackData;
-public:
- SockAddr farEnd;
-
- friend class PiggyBackData;
-};
+ class Message;
+ class MessagingPort;
+ class PiggyBackData;
+ typedef WrappingInt MSGID;
+ const int DBPort = 27017;
+
+ class Listener {
+ public:
+ Listener(int p) : port(p) { }
+ void listen(); // never returns (start a thread)
+
+ /* spawn a thread, etc., then return */
+ virtual void accepted(MessagingPort *mp) = 0;
+ private:
+ int port;
+ };
-#pragma pack(push)
-#pragma pack(1)
+ class AbstractMessagingPort {
+ public:
+ virtual void reply(Message& received, Message& response, MSGID responseTo) = 0; // like the reply below, but doesn't rely on received.data still being available
+ virtual void reply(Message& received, Message& response) = 0;
+ };
-enum Operations {
- opReply = 1, /* reply. responseTo is set. */
- dbMsg = 1000, /* generic msg command followed by a string */
- dbUpdate = 2001, /* update object */
- dbInsert = 2002,
- //dbGetByOID = 2003,
- dbQuery = 2004,
- dbGetMore = 2005,
- dbDelete = 2006,
- dbKillCursors = 2007
-};
-
-struct MsgData {
- int len; /* len of the msg, including this field */
- MSGID id; /* request/reply id's match... */
- MSGID responseTo; /* id of the message we are responding to */
- int _operation;
- int operation() const {
- return _operation;
- }
- void setOperation(int o) {
- _operation = o;
- }
- char _data[4];
+ class MessagingPort : public AbstractMessagingPort {
+ public:
+ MessagingPort(int sock, SockAddr& farEnd);
+ MessagingPort();
+ ~MessagingPort();
- int& dataAsInt() {
- return *((int *) _data);
- }
+ void shutdown();
- int dataLen(); // len without header
-};
-const int MsgDataHeaderSize = sizeof(MsgData) - 4;
-inline int MsgData::dataLen() {
- return len - MsgDataHeaderSize;
-}
+ bool connect(SockAddr& farEnd);
-#pragma pack(pop)
+ /* it's assumed if you reuse a message object, that it doesn't cross MessagingPort's.
+ also, the Message data will go out of scope on the subsequent recv call.
+ */
+ bool recv(Message& m);
+ void reply(Message& received, Message& response, MSGID responseTo);
+ void reply(Message& received, Message& response);
+ bool call(Message& toSend, Message& response);
+ void say(Message& toSend, int responseTo = -1);
-class Message {
-public:
- Message() {
- data = 0;
- freeIt = false;
- }
- Message( void * _data , bool _freeIt ) {
- data = (MsgData*)_data;
- freeIt = _freeIt;
+ void piggyBack( Message& toSend , int responseTo = -1 );
+
+ private:
+ int sock;
+ PiggyBackData * piggyBackData;
+ public:
+ SockAddr farEnd;
+
+ friend class PiggyBackData;
};
- ~Message() {
- reset();
- }
- SockAddr from;
- MsgData *data;
-
- Message& operator=(Message& r) {
- assert( data == 0 );
- data = r.data;
- assert( r.freeIt );
- r.freeIt = false;
- r.data = 0;
- freeIt = true;
- return *this;
- }
+#pragma pack(push)
+#pragma pack(1)
- void reset() {
- if ( freeIt && data )
- free(data);
- data = 0;
- freeIt = false;
- }
+ enum Operations {
+ opReply = 1, /* reply. responseTo is set. */
+ dbMsg = 1000, /* generic msg command followed by a string */
+ dbUpdate = 2001, /* update object */
+ dbInsert = 2002,
+ //dbGetByOID = 2003,
+ dbQuery = 2004,
+ dbGetMore = 2005,
+ dbDelete = 2006,
+ dbKillCursors = 2007
+ };
- void setData(MsgData *d, bool _freeIt) {
- assert( data == 0 );
- freeIt = _freeIt;
- data = d;
- }
- void setData(int operation, const char *msgtxt) {
- setData(operation, msgtxt, strlen(msgtxt)+1);
- }
- void setData(int operation, const char *msgdata, int len) {
- assert(data == 0);
- int dataLen = len + sizeof(MsgData) - 4;
- MsgData *d = (MsgData *) malloc(dataLen);
- memcpy(d->_data, msgdata, len);
- d->len = fixEndian(dataLen);
- d->setOperation(operation);
- freeIt= true;
- data = d;
+ struct MsgData {
+ int len; /* len of the msg, including this field */
+ MSGID id; /* request/reply id's match... */
+ MSGID responseTo; /* id of the message we are responding to */
+ int _operation;
+ int operation() const {
+ return _operation;
+ }
+ void setOperation(int o) {
+ _operation = o;
+ }
+ char _data[4];
+
+ int& dataAsInt() {
+ return *((int *) _data);
+ }
+
+ int dataLen(); // len without header
+ };
+ const int MsgDataHeaderSize = sizeof(MsgData) - 4;
+ inline int MsgData::dataLen() {
+ return len - MsgDataHeaderSize;
}
- bool doIFreeIt() {
- return freeIt;
- }
+#pragma pack(pop)
-private:
- bool freeIt;
-};
+ class Message {
+ public:
+ Message() {
+ data = 0;
+ freeIt = false;
+ }
+ Message( void * _data , bool _freeIt ) {
+ data = (MsgData*)_data;
+ freeIt = _freeIt;
+ };
+ ~Message() {
+ reset();
+ }
+
+ SockAddr from;
+ MsgData *data;
+
+ Message& operator=(Message& r) {
+ assert( data == 0 );
+ data = r.data;
+ assert( r.freeIt );
+ r.freeIt = false;
+ r.data = 0;
+ freeIt = true;
+ return *this;
+ }
+
+ void reset() {
+ if ( freeIt && data )
+ free(data);
+ data = 0;
+ freeIt = false;
+ }
+
+ void setData(MsgData *d, bool _freeIt) {
+ assert( data == 0 );
+ freeIt = _freeIt;
+ data = d;
+ }
+ void setData(int operation, const char *msgtxt) {
+ setData(operation, msgtxt, strlen(msgtxt)+1);
+ }
+ void setData(int operation, const char *msgdata, int len) {
+ assert(data == 0);
+ int dataLen = len + sizeof(MsgData) - 4;
+ MsgData *d = (MsgData *) malloc(dataLen);
+ memcpy(d->_data, msgdata, len);
+ d->len = fixEndian(dataLen);
+ d->setOperation(operation);
+ freeIt= true;
+ data = d;
+ }
+
+ bool doIFreeIt() {
+ return freeIt;
+ }
+
+ private:
+ bool freeIt;
+ };
} // namespace mongo
diff --git a/stdafx.cpp b/stdafx.cpp
index b4485e281f5..1985bb94f79 100644
--- a/stdafx.cpp
+++ b/stdafx.cpp
@@ -25,7 +25,7 @@ namespace mongo {
// TODO: reference any additional headers you need in STDAFX.H
// and not in this file
-Assertion lastAssert[4];
+ Assertion lastAssert[4];
#undef assert
@@ -38,64 +38,64 @@ Assertion lastAssert[4];
namespace mongo {
-string getDbContext();
-
-/* "warning" assert -- safe to continue, so we don't throw exception. */
-void wasserted(const char *msg, const char *file, unsigned line) {
- problem() << "Assertion failure " << msg << ' ' << file << ' ' << dec << line << endl;
- sayDbContext();
- raiseError(msg && *msg ? msg : "wassertion failure");
- lastAssert[1].set(msg, getDbContext().c_str(), file, line);
-}
-
-void asserted(const char *msg, const char *file, unsigned line) {
- problem() << "Assertion failure " << msg << ' ' << file << ' ' << dec << line << endl;
- sayDbContext();
- raiseError(msg && *msg ? msg : "assertion failure");
- lastAssert[0].set(msg, getDbContext().c_str(), file, line);
- throw AssertionException();
-}
-
-int uacount = 0;
-void uasserted(const char *msg) {
- if ( ++uacount < 100 )
- problem() << "User Assertion " << msg << endl;
- else
- RARELY problem() << "User Assertion " << msg << endl;
- lastAssert[3].set(msg, getDbContext().c_str(), "", 0);
- raiseError(msg);
- throw UserAssertionException(msg);
-}
-
-void msgasserted(const char *msg) {
- log() << "Assertion: " << msg << '\n';
- lastAssert[2].set(msg, getDbContext().c_str(), "", 0);
- raiseError(msg && *msg ? msg : "massert failure");
- throw MsgAssertionException(msg);
-}
-
-string Assertion::toString() {
- if ( !isSet() )
- return "";
-
- stringstream ss;
- ss << msg << '\n';
- if ( *context )
- ss << context << '\n';
- if ( *file )
- ss << file << ' ' << line << '\n';
- return ss.str();
-}
-
-/* this is a good place to set a breakpoint when debugging, as lots of warning things
- (assert, wassert) call it.
-*/
-void sayDbContext(const char *errmsg) {
- if ( errmsg ) {
- problem() << errmsg << endl;
+ string getDbContext();
+
+ /* "warning" assert -- safe to continue, so we don't throw exception. */
+ void wasserted(const char *msg, const char *file, unsigned line) {
+ problem() << "Assertion failure " << msg << ' ' << file << ' ' << dec << line << endl;
+ sayDbContext();
+ raiseError(msg && *msg ? msg : "wassertion failure");
+ lastAssert[1].set(msg, getDbContext().c_str(), file, line);
+ }
+
+ void asserted(const char *msg, const char *file, unsigned line) {
+ problem() << "Assertion failure " << msg << ' ' << file << ' ' << dec << line << endl;
+ sayDbContext();
+ raiseError(msg && *msg ? msg : "assertion failure");
+ lastAssert[0].set(msg, getDbContext().c_str(), file, line);
+ throw AssertionException();
+ }
+
+ int uacount = 0;
+ void uasserted(const char *msg) {
+ if ( ++uacount < 100 )
+ problem() << "User Assertion " << msg << endl;
+ else
+ RARELY problem() << "User Assertion " << msg << endl;
+ lastAssert[3].set(msg, getDbContext().c_str(), "", 0);
+ raiseError(msg);
+ throw UserAssertionException(msg);
+ }
+
+ void msgasserted(const char *msg) {
+ log() << "Assertion: " << msg << '\n';
+ lastAssert[2].set(msg, getDbContext().c_str(), "", 0);
+ raiseError(msg && *msg ? msg : "massert failure");
+ throw MsgAssertionException(msg);
+ }
+
+ string Assertion::toString() {
+ if ( !isSet() )
+ return "";
+
+ stringstream ss;
+ ss << msg << '\n';
+ if ( *context )
+ ss << context << '\n';
+ if ( *file )
+ ss << file << ' ' << line << '\n';
+ return ss.str();
+ }
+
+ /* this is a good place to set a breakpoint when debugging, as lots of warning things
+ (assert, wassert) call it.
+ */
+ void sayDbContext(const char *errmsg) {
+ if ( errmsg ) {
+ problem() << errmsg << endl;
+ }
+ printStackTrace();
}
- printStackTrace();
-}
} // namespace mongo
diff --git a/stdafx.h b/stdafx.h
index 68e1597285c..ca62e94e826 100644
--- a/stdafx.h
+++ b/stdafx.h
@@ -26,9 +26,9 @@ namespace mongo {
#define NOMINMAX
#if defined(_WIN32)
-const bool debug=true;
+ const bool debug=true;
#else
-const bool debug=false;
+ const bool debug=false;
#endif
} // namespace mongo
@@ -40,20 +40,20 @@ const bool debug=false;
namespace mongo {
-void sayDbContext(const char *msg = 0);
-void dbexit(int returnCode, const char *whyMsg = "");
+ void sayDbContext(const char *msg = 0);
+ void dbexit(int returnCode, const char *whyMsg = "");
-inline void * ourmalloc(size_t size) {
- void *x = malloc(size);
- if ( x == 0 ) dbexit(42, "malloc fails");
- return x;
-}
+ inline void * ourmalloc(size_t size) {
+ void *x = malloc(size);
+ if ( x == 0 ) dbexit(42, "malloc fails");
+ return x;
+ }
-inline void * ourrealloc(void *ptr, size_t size) {
- void *x = realloc(ptr, size);
- if ( x == 0 ) dbexit(43, "realloc fails");
- return x;
-}
+ inline void * ourrealloc(void *ptr, size_t size) {
+ void *x = realloc(ptr, size);
+ if ( x == 0 ) dbexit(43, "realloc fails");
+ return x;
+ }
#define malloc ourmalloc
#define realloc ourrealloc
@@ -69,96 +69,96 @@ using namespace std;
namespace mongo {
-/* these are manipulated outside of mutexes, so be careful */
-struct Assertion {
- Assertion() {
- msg[0] = msg[127] = 0;
- context[0] = context[127] = 0;
- file = "";
- line = 0;
- when = 0;
- }
- char msg[128];
- char context[128];
- const char *file;
- unsigned line;
- time_t when;
- void set(const char *m, const char *ctxt, const char *f, unsigned l) {
- strncpy(msg, m, 127);
- strncpy(context, ctxt, 127);
- file = f;
- line = l;
- when = time(0);
- }
- string toString();
- bool isSet() {
- return when != 0;
- }
-};
-
-enum {
- AssertRegular = 0,
- AssertW = 1,
- AssertMsg = 2,
- AssertUser = 3
-};
-
-/* last assert of diff types: regular, wassert, msgassert, uassert: */
-extern Assertion lastAssert[4];
+ /* these are manipulated outside of mutexes, so be careful */
+ struct Assertion {
+ Assertion() {
+ msg[0] = msg[127] = 0;
+ context[0] = context[127] = 0;
+ file = "";
+ line = 0;
+ when = 0;
+ }
+ char msg[128];
+ char context[128];
+ const char *file;
+ unsigned line;
+ time_t when;
+ void set(const char *m, const char *ctxt, const char *f, unsigned l) {
+ strncpy(msg, m, 127);
+ strncpy(context, ctxt, 127);
+ file = f;
+ line = l;
+ when = time(0);
+ }
+ string toString();
+ bool isSet() {
+ return when != 0;
+ }
+ };
+
+ enum {
+ AssertRegular = 0,
+ AssertW = 1,
+ AssertMsg = 2,
+ AssertUser = 3
+ };
+
+ /* last assert of diff types: regular, wassert, msgassert, uassert: */
+ extern Assertion lastAssert[4];
// you can catch these
-class AssertionException {
-public:
- string msg;
- AssertionException() { }
- virtual bool severe() {
- return true;
- }
- virtual bool isUserAssertion() {
- return false;
- }
- virtual string toString() {
- return msg;
- }
-};
-
-/* we use the same mechanism for bad things the user does -- which are really just errors */
-class UserAssertionException : public AssertionException {
-public:
- UserAssertionException(const char *_msg) {
- msg = _msg;
- }
- UserAssertionException(string _msg) {
- msg = _msg;
- }
- virtual bool severe() {
- return false;
- }
- virtual bool isUserAssertion() {
- return true;
- }
- virtual string toString() {
- return "userassert:" + msg;
- }
-};
-
-class MsgAssertionException : public AssertionException {
-public:
- MsgAssertionException(const char *_msg) {
- msg = _msg;
- }
- virtual bool severe() {
- return false;
- }
- virtual string toString() {
- return "massert:" + msg;
- }
-};
-
-void asserted(const char *msg, const char *file, unsigned line);
-void wasserted(const char *msg, const char *file, unsigned line);
-void uasserted(const char *msg);
-void msgasserted(const char *msg);
+ class AssertionException {
+ public:
+ string msg;
+ AssertionException() { }
+ virtual bool severe() {
+ return true;
+ }
+ virtual bool isUserAssertion() {
+ return false;
+ }
+ virtual string toString() {
+ return msg;
+ }
+ };
+
+ /* we use the same mechanism for bad things the user does -- which are really just errors */
+ class UserAssertionException : public AssertionException {
+ public:
+ UserAssertionException(const char *_msg) {
+ msg = _msg;
+ }
+ UserAssertionException(string _msg) {
+ msg = _msg;
+ }
+ virtual bool severe() {
+ return false;
+ }
+ virtual bool isUserAssertion() {
+ return true;
+ }
+ virtual string toString() {
+ return "userassert:" + msg;
+ }
+ };
+
+ class MsgAssertionException : public AssertionException {
+ public:
+ MsgAssertionException(const char *_msg) {
+ msg = _msg;
+ }
+ virtual bool severe() {
+ return false;
+ }
+ virtual string toString() {
+ return "massert:" + msg;
+ }
+ };
+
+ void asserted(const char *msg, const char *file, unsigned line);
+ void wasserted(const char *msg, const char *file, unsigned line);
+ void uasserted(const char *msg);
+ void msgasserted(const char *msg);
#ifdef assert
#undef assert
@@ -166,7 +166,7 @@ void msgasserted(const char *msg);
#define assert(_Expression) (void)( (!!(_Expression)) || (asserted(#_Expression, __FILE__, __LINE__), 0) )
-/* "user assert". if asserts, user did something wrong, not our code */
+ /* "user assert". if asserts, user did something wrong, not our code */
//#define uassert(_Expression) (void)( (!!(_Expression)) || (uasserted(#_Expression, __FILE__, __LINE__), 0) )
#define uassert(msg,_Expression) (void)( (!!(_Expression)) || (uasserted(msg), 0) )
@@ -174,19 +174,19 @@ void msgasserted(const char *msg);
#define yassert 1
-/* warning only - keeps going */
+ /* warning only - keeps going */
#define wassert(_Expression) (void)( (!!(_Expression)) || (wasserted(#_Expression, __FILE__, __LINE__), 0) )
-/* display a message, no context, and throw assertionexception
+ /* display a message, no context, and throw assertionexception
- easy way to throw an exception and log something without our stack trace
- display happening.
-*/
+ easy way to throw an exception and log something without our stack trace
+ display happening.
+ */
#define massert(msg,_Expression) (void)( (!!(_Expression)) || (msgasserted(msg), 0) )
-/* dassert is 'debug assert' -- might want to turn off for production as these
- could be slow.
-*/
+ /* dassert is 'debug assert' -- might want to turn off for production as these
+ could be slow.
+ */
#define dassert assert
} // namespace mongo
@@ -197,7 +197,7 @@ void msgasserted(const char *msg);
namespace mongo {
-typedef char _TCHAR;
+ typedef char _TCHAR;
} // namespace mongo
@@ -212,12 +212,12 @@ namespace mongo {
//using namespace std;
#if !defined(_WIN32)
-typedef int HANDLE;
-inline void strcpy_s(char *dst, unsigned len, const char *src) {
- strcpy(dst, src);
-}
+ typedef int HANDLE;
+ inline void strcpy_s(char *dst, unsigned len, const char *src) {
+ strcpy(dst, src);
+ }
#else
-typedef void *HANDLE;
+ typedef void *HANDLE;
#endif
//#if defined(CHAR)
@@ -240,28 +240,28 @@ typedef void *HANDLE;
namespace mongo {
// for debugging
-typedef struct _Ints {
- int i[100];
-} *Ints;
-typedef struct _Chars {
- char c[200];
-} *Chars;
-
-typedef char CHARS[400];
-
-typedef struct _OWS {
- int size;
- char type;
- char string[400];
-} *OWS;
-
-class Database;
+ typedef struct _Ints {
+ int i[100];
+ } *Ints;
+ typedef struct _Chars {
+ char c[200];
+ } *Chars;
+
+ typedef char CHARS[400];
+
+ typedef struct _OWS {
+ int size;
+ char type;
+ char string[400];
+ } *OWS;
+
+ class Database;
//extern Database *database;
-extern const char *curNs;
+ extern const char *curNs;
-/* for now, running on win32 means development not production --
- use this to log things just there.
-*/
+ /* for now, running on win32 means development not production --
+ use this to log things just there.
+ */
#if defined(_WIN32)
#define DEV if( 0 )
#define WIN if( 1 )
@@ -272,20 +272,20 @@ extern const char *curNs;
#define DEBUGGING if( 0 )
-extern unsigned occasion;
-extern unsigned once;
+ extern unsigned occasion;
+ extern unsigned once;
#define OCCASIONALLY if( ++occasion % 16 == 0 )
#define RARELY if( ++occasion % 128 == 0 )
#define ONCE if( ++once == 1 )
#if defined(_WIN32)
-inline void our_debug_free(void *p) {
- unsigned *u = (unsigned *) p;
- u[0] = 0xEEEEEEEE;
- u[1] = 0xEEEEEEEE;
- free(p);
-}
+ inline void our_debug_free(void *p) {
+ unsigned *u = (unsigned *) p;
+ u[0] = 0xEEEEEEEE;
+ u[1] = 0xEEEEEEEE;
+ free(p);
+ }
#define free our_debug_free
#endif
diff --git a/tools/dump.cpp b/tools/dump.cpp
index 60bd926f6ce..a8b4e1fb139 100644
--- a/tools/dump.cpp
+++ b/tools/dump.cpp
@@ -23,92 +23,92 @@
namespace mongo {
-namespace po = boost::program_options;
+ namespace po = boost::program_options;
-namespace dump {
+ namespace dump {
-void doCollection( DBClientConnection & conn , const char * coll , path outputFile ) {
- cout << "\t" << coll << " to " << outputFile.string() << endl;
+ void doCollection( DBClientConnection & conn , const char * coll , path outputFile ) {
+ mongo::out() << "\t" << coll << " to " << outputFile.string() << endl;
- int out = open( outputFile.string().c_str() , O_WRONLY | O_CREAT | O_TRUNC , 0666 );
- assert( out );
+ int out = open( outputFile.string().c_str() , O_WRONLY | O_CREAT | O_TRUNC , 0666 );
+ assert( out );
- BSONObjBuilder query;
- auto_ptr<DBClientCursor> cursor = conn.query( coll , query.doneAndDecouple() );
+ BSONObjBuilder query;
+ auto_ptr<DBClientCursor> cursor = conn.query( coll , query.doneAndDecouple() );
- int num = 0;
- while ( cursor->more() ) {
- BSONObj obj = cursor->next();
- write( out , obj.objdata() , obj.objsize() );
- num++;
- }
-
- cout << "\t\t " << num << " objects" << endl;
-
- close( out );
-}
-
-void go( DBClientConnection & conn , const char * db , const path outdir ) {
- cout << "DATABASE: " << db << endl;
-
- create_directories( outdir );
-
- string sns = db;
- sns += ".system.namespaces";
-
- BSONObjBuilder query;
- auto_ptr<DBClientCursor> cursor = conn.query( sns.c_str() , query.doneAndDecouple() );
- while ( cursor->more() ) {
- BSONObj obj = cursor->next();
- if ( obj.toString().find( ".$" ) != string::npos )
- continue;
+ int num = 0;
+ while ( cursor->more() ) {
+ BSONObj obj = cursor->next();
+ write( out , obj.objdata() , obj.objsize() );
+ num++;
+ }
- const string name = obj.getField( "name" ).valuestr();
- const string filename = name.substr( strlen( db ) + 1 );
+ mongo::out() << "\t\t " << num << " objects" << endl;
- doCollection( conn , name.c_str() , outdir / ( filename + ".bson" ) );
+ close( out );
+ }
- }
+ void go( DBClientConnection & conn , const char * db , const path outdir ) {
+ mongo::out() << "DATABASE: " << db << endl;
-}
+ create_directories( outdir );
-void go( const char * host , const char * db , const char * outdir ) {
- DBClientConnection conn;
- string errmsg;
- if ( ! conn.connect( host , errmsg ) ) {
- cout << "couldn't connect : " << errmsg << endl;
- throw -11;
- }
+ string sns = db;
+ sns += ".system.namespaces";
- path root(outdir);
+ BSONObjBuilder query;
+ auto_ptr<DBClientCursor> cursor = conn.query( sns.c_str() , query.doneAndDecouple() );
+ while ( cursor->more() ) {
+ BSONObj obj = cursor->next();
+ if ( obj.toString().find( ".$" ) != string::npos )
+ continue;
- if ( strlen( db ) == 1 && db[0] == '*' ) {
- cout << "all dbs" << endl;
+ const string name = obj.getField( "name" ).valuestr();
+ const string filename = name.substr( strlen( db ) + 1 );
- BSONObjBuilder query;
- query.appendBool( "listDatabases" , 1 );
+ doCollection( conn , name.c_str() , outdir / ( filename + ".bson" ) );
- BSONObj res = conn.findOne( "admin.$cmd" , query.doneAndDecouple() );
- BSONObj dbs = res.getField( "databases" ).embeddedObjectUserCheck();
- set<string> keys;
- dbs.getFieldNames( keys );
- for ( set<string>::iterator i = keys.begin() ; i != keys.end() ; i++ ) {
- string key = *i;
+ }
- BSONObj db = dbs.getField( key ).embeddedObjectUserCheck();
+ }
- const char * dbName = db.getField( "name" ).valuestr();
- if ( (string)dbName == "local" )
- continue;
- go ( conn , dbName , root / dbName );
+ void go( const char * host , const char * db , const char * outdir ) {
+ DBClientConnection conn;
+ string errmsg;
+ if ( ! conn.connect( host , errmsg ) ) {
+ mongo::out() << "couldn't connect : " << errmsg << endl;
+ throw -11;
+ }
+
+ path root(outdir);
+
+ if ( strlen( db ) == 1 && db[0] == '*' ) {
+ mongo::out() << "all dbs" << endl;
+
+ BSONObjBuilder query;
+ query.appendBool( "listDatabases" , 1 );
+
+ BSONObj res = conn.findOne( "admin.$cmd" , query.doneAndDecouple() );
+ BSONObj dbs = res.getField( "databases" ).embeddedObjectUserCheck();
+ set<string> keys;
+ dbs.getFieldNames( keys );
+ for ( set<string>::iterator i = keys.begin() ; i != keys.end() ; i++ ) {
+ string key = *i;
+
+ BSONObj db = dbs.getField( key ).embeddedObjectUserCheck();
+
+ const char * dbName = db.getField( "name" ).valuestr();
+ if ( (string)dbName == "local" )
+ continue;
+ go ( conn , dbName , root / dbName );
+ }
+ }
+ else {
+ go( conn , db , root / db );
+ }
}
- }
- else {
- go( conn , db , root / db );
- }
-}
-} // namespace dump
+ } // namespace dump
} // namespace mongo
@@ -148,10 +148,10 @@ int main( int argc , char ** argv ) {
if ( vm.count( "out" ) )
outdir = vm["out"].as<string>().c_str();
- cout << "mongo dump" << endl;
- cout << "\t host \t" << host << endl;
- cout << "\t db \t" << db << endl;
- cout << "\t output dir \t" << outdir << endl;
+ mongo::out() << "mongo dump" << endl;
+ mongo::out() << "\t host \t" << host << endl;
+ mongo::out() << "\t db \t" << db << endl;
+ mongo::out() << "\t output dir \t" << outdir << endl;
dump::go( host , db , outdir );
}
diff --git a/tools/import.cpp b/tools/import.cpp
index 69d3ffeaeae..3e11c101b2f 100644
--- a/tools/import.cpp
+++ b/tools/import.cpp
@@ -26,90 +26,90 @@
namespace mongo {
-namespace po = boost::program_options;
+ namespace po = boost::program_options;
-namespace import {
+ namespace import {
-void drillDown( DBClientConnection & conn , path root ) {
+ void drillDown( DBClientConnection & conn , path root ) {
- if ( is_directory( root ) ) {
- directory_iterator end;
- directory_iterator i(root);
- while ( i != end ) {
- path p = *i;
- drillDown( conn , p );
- i++;
- }
- return;
- }
+ if ( is_directory( root ) ) {
+ directory_iterator end;
+ directory_iterator i(root);
+ while ( i != end ) {
+ path p = *i;
+ drillDown( conn , p );
+ i++;
+ }
+ return;
+ }
- if ( ! ( endsWith( root.string().c_str() , ".bson" ) ||
- endsWith( root.string().c_str() , ".bin" ) ) ) {
- cerr << "don't know what to do with [" << root.string() << "]" << endl;
- return;
- }
+ if ( ! ( endsWith( root.string().c_str() , ".bson" ) ||
+ endsWith( root.string().c_str() , ".bin" ) ) ) {
+ cerr << "don't know what to do with [" << root.string() << "]" << endl;
+ return;
+ }
- cout << root.string() << endl;
+ out() << root.string() << endl;
- string ns;
- {
- string dir = root.branch_path().string();
- if ( dir.find( "/" ) == string::npos )
- ns += dir;
- else
- ns += dir.substr( dir.find_last_of( "/" ) + 1 );
- }
+ string ns;
+ {
+ string dir = root.branch_path().string();
+ if ( dir.find( "/" ) == string::npos )
+ ns += dir;
+ else
+ ns += dir.substr( dir.find_last_of( "/" ) + 1 );
+ }
- {
- string l = root.leaf();
- l = l.substr( 0 , l.find_last_of( "." ) );
- ns += "." + l;
- }
+ {
+ string l = root.leaf();
+ l = l.substr( 0 , l.find_last_of( "." ) );
+ ns += "." + l;
+ }
- cout << "\t going into namespace [" << ns << "]" << endl;
+ out() << "\t going into namespace [" << ns << "]" << endl;
- MemoryMappedFile mmf;
- assert( mmf.map( root.string().c_str() ) );
+ MemoryMappedFile mmf;
+ assert( mmf.map( root.string().c_str() ) );
- char * data = (char*)mmf.viewOfs();
- int read = 0;
+ char * data = (char*)mmf.viewOfs();
+ int read = 0;
- int num = 0;
+ int num = 0;
- while ( read < mmf.length() ) {
- if ( ! *data ) {
- cout << "\t ** got unexpected end of file ** continuing..." << endl;
- break;
- }
+ while ( read < mmf.length() ) {
+ if ( ! *data ) {
+ out() << "\t ** got unexpected end of file ** continuing..." << endl;
+ break;
+ }
- BSONObj o( data );
+ BSONObj o( data );
- conn.insert( ns.c_str() , o );
+ conn.insert( ns.c_str() , o );
- read += o.objsize();
- data += o.objsize();
+ read += o.objsize();
+ data += o.objsize();
- if ( ! ( ++num % 1000 ) )
- cout << "read " << read << "/" << mmf.length() << " bytes so far. " << num << " objects" << endl;
- }
+ if ( ! ( ++num % 1000 ) )
+ out() << "read " << read << "/" << mmf.length() << " bytes so far. " << num << " objects" << endl;
+ }
- cout << "\t " << num << " objects" << endl;
+ out() << "\t " << num << " objects" << endl;
-}
+ }
-void go( const char * dbHost , const char * dirRoot ) {
- DBClientConnection conn;
- string errmsg;
- if ( ! conn.connect( dbHost , errmsg ) ) {
- cout << "couldn't connect : " << errmsg << endl;
- throw -11;
- }
+ void go( const char * dbHost , const char * dirRoot ) {
+ DBClientConnection conn;
+ string errmsg;
+ if ( ! conn.connect( dbHost , errmsg ) ) {
+ out() << "couldn't connect : " << errmsg << endl;
+ throw -11;
+ }
- drillDown( conn , dirRoot );
-}
-} // namespace import
+ drillDown( conn , dirRoot );
+ }
+ } // namespace import
} // namespace mongo
@@ -150,9 +150,9 @@ int main( int argc , char ** argv ) {
if ( vm.count( "dir" ) )
dir = vm["dir"].as<string>().c_str();
- cout << "mongo dump" << endl;
- cout << "\t host \t" << host << endl;
- cout << "\t dir \t" << dir << endl;
+ out() << "mongo dump" << endl;
+ out() << "\t host \t" << host << endl;
+ out() << "\t dir \t" << dir << endl;
import::go( host , dir );
return 0;
diff --git a/util/background.cpp b/util/background.cpp
index a1413d3a5bf..99f5733f399 100644
--- a/util/background.cpp
+++ b/util/background.cpp
@@ -20,44 +20,44 @@
namespace mongo {
-BackgroundJob *BackgroundJob::grab = 0;
-boost::mutex BackgroundJob::mutex;
+ BackgroundJob *BackgroundJob::grab = 0;
+ boost::mutex BackgroundJob::mutex;
-/* static */
-void BackgroundJob::thr() {
- assert( grab );
- BackgroundJob *us = grab;
- assert( us->state == NotStarted );
- us->state = Running;
- grab = 0;
- us->run();
- us->state = Done;
- if ( us->deleteSelf )
- delete us;
-}
+ /* static */
+ void BackgroundJob::thr() {
+ assert( grab );
+ BackgroundJob *us = grab;
+ assert( us->state == NotStarted );
+ us->state = Running;
+ grab = 0;
+ us->run();
+ us->state = Done;
+ if ( us->deleteSelf )
+ delete us;
+ }
-BackgroundJob& BackgroundJob::go() {
- boostlock bl(mutex);
- assert( grab == 0 );
- grab = this;
- boost::thread t(thr);
- while ( grab )
- sleepmillis(2);
- return *this;
-}
+ BackgroundJob& BackgroundJob::go() {
+ boostlock bl(mutex);
+ assert( grab == 0 );
+ grab = this;
+ boost::thread t(thr);
+ while ( grab )
+ sleepmillis(2);
+ return *this;
+ }
-bool BackgroundJob::wait(int msMax) {
- assert( state != NotStarted );
- int ms = 1;
- unsigned long long start = jsTime();
- while ( state != Done ) {
- sleepmillis(ms);
- if ( ms < 1000 )
- ms = ms * 2;
- if ( msMax && ( int( jsTime() - start ) > msMax) )
- return false;
+ bool BackgroundJob::wait(int msMax) {
+ assert( state != NotStarted );
+ int ms = 1;
+ unsigned long long start = jsTime();
+ while ( state != Done ) {
+ sleepmillis(ms);
+ if ( ms < 1000 )
+ ms = ms * 2;
+ if ( msMax && ( int( jsTime() - start ) > msMax) )
+ return false;
+ }
+ return true;
}
- return true;
-}
} // namespace mongo
diff --git a/util/background.h b/util/background.h
index d4b9830c40f..605f524ec15 100644
--- a/util/background.h
+++ b/util/background.h
@@ -18,55 +18,55 @@
namespace mongo {
-/* object-orienty background thread dispatching.
+ /* object-orienty background thread dispatching.
- subclass and define run()
+ subclass and define run()
- It is ok to call go() more than once -- if the previous invocation
- has finished. Thus one pattern of use is to embed a backgroundjob
- in your object and reuse it (or same thing with inheritance).
-*/
+ It is ok to call go() more than once -- if the previous invocation
+ has finished. Thus one pattern of use is to embed a backgroundjob
+ in your object and reuse it (or same thing with inheritance).
+ */
-class BackgroundJob {
-protected:
- /* define this to do your work! */
- virtual void run() = 0;
+ class BackgroundJob {
+ protected:
+ /* define this to do your work! */
+ virtual void run() = 0;
-public:
- enum State {
- NotStarted,
- Running,
- Done
- };
- State getState() const {
- return state;
- }
- bool running() const {
- return state == Running;
- }
+ public:
+ enum State {
+ NotStarted,
+ Running,
+ Done
+ };
+ State getState() const {
+ return state;
+ }
+ bool running() const {
+ return state == Running;
+ }
- bool deleteSelf; // delete self when Done?
+ bool deleteSelf; // delete self when Done?
- BackgroundJob() {
- deleteSelf = false;
- state = NotStarted;
- }
- virtual ~BackgroundJob() { }
+ BackgroundJob() {
+ deleteSelf = false;
+ state = NotStarted;
+ }
+ virtual ~BackgroundJob() { }
- // start job. returns before it's finished.
- BackgroundJob& go();
+ // start job. returns before it's finished.
+ BackgroundJob& go();
- // wait for completion. this spins with sleep() so not terribly efficient.
- // returns true if did not time out.
- //
- // note you can call wait() more than once if the first call times out.
- bool wait(int msMax = 0);
+ // wait for completion. this spins with sleep() so not terribly efficient.
+ // returns true if did not time out.
+ //
+ // note you can call wait() more than once if the first call times out.
+ bool wait(int msMax = 0);
-private:
- static BackgroundJob *grab;
- static boost::mutex mutex;
- static void thr();
- volatile State state;
-};
+ private:
+ static BackgroundJob *grab;
+ static boost::mutex mutex;
+ static void thr();
+ volatile State state;
+ };
} // namespace mongo
diff --git a/util/builder.h b/util/builder.h
index e883ea2f1f2..419685e15d5 100644
--- a/util/builder.h
+++ b/util/builder.h
@@ -24,89 +24,89 @@
namespace mongo {
-class BufBuilder {
-public:
- BufBuilder(int initsize = 512) : size(initsize) {
- data = (char *) malloc(size);
- assert(data);
- l = 0;
- }
- ~BufBuilder() {
- kill();
- }
-
- void kill() {
- if ( data ) {
- free(data);
+ class BufBuilder {
+ public:
+ BufBuilder(int initsize = 512) : size(initsize) {
+ data = (char *) malloc(size);
+ assert(data);
+ l = 0;
+ }
+ ~BufBuilder() {
+ kill();
+ }
+
+ void kill() {
+ if ( data ) {
+ free(data);
+ data = 0;
+ }
+ }
+
+ /* leave room for some stuff later */
+ void skip(int n) {
+ grow(n);
+ }
+
+ /* note this may be deallocated (realloced) if you keep writing. */
+ char* buf() {
+ return data;
+ }
+
+ /* assume ownership of the buffer - you must then free it */
+ void decouple() {
data = 0;
}
- }
-
- /* leave room for some stuff later */
- void skip(int n) {
- grow(n);
- }
-
- /* note this may be deallocated (realloced) if you keep writing. */
- char* buf() {
- return data;
- }
-
- /* assume ownership of the buffer - you must then free it */
- void decouple() {
- data = 0;
- }
-
- template<class T> void append(T j) {
- *((T*)grow(sizeof(T))) = j;
- }
- void append(short j) {
- append<short>(j);
- }
- void append(int j) {
- append<int>(j);
- }
- void append(unsigned j) {
- append<unsigned>(j);
- }
- void append(bool j) {
- append<bool>(j);
- }
- void append(double j) {
- append<double>(j);
- }
-
- void append(void *src, int len) {
- memcpy(grow(len), src, len);
- }
-
- void append(const char *str) {
- append((void*) str, strlen(str)+1);
- }
-
- int len() {
- return l;
- }
-
-private:
- /* returns the pre-grow write position */
- char* grow(int by) {
- int oldlen = l;
- l += by;
- if ( l > size ) {
- int a = size * 2;
- if ( l > a )
- a = l + 16 * 1024;
- assert( a < 64 * 1024 * 1024 );
- data = (char *) realloc(data, a);
- size= a;
- }
- return data + oldlen;
- }
-
- char *data;
- int l;
- int size;
-};
+
+ template<class T> void append(T j) {
+ *((T*)grow(sizeof(T))) = j;
+ }
+ void append(short j) {
+ append<short>(j);
+ }
+ void append(int j) {
+ append<int>(j);
+ }
+ void append(unsigned j) {
+ append<unsigned>(j);
+ }
+ void append(bool j) {
+ append<bool>(j);
+ }
+ void append(double j) {
+ append<double>(j);
+ }
+
+ void append(void *src, int len) {
+ memcpy(grow(len), src, len);
+ }
+
+ void append(const char *str) {
+ append((void*) str, strlen(str)+1);
+ }
+
+ int len() {
+ return l;
+ }
+
+ private:
+ /* returns the pre-grow write position */
+ char* grow(int by) {
+ int oldlen = l;
+ l += by;
+ if ( l > size ) {
+ int a = size * 2;
+ if ( l > a )
+ a = l + 16 * 1024;
+ assert( a < 64 * 1024 * 1024 );
+ data = (char *) realloc(data, a);
+ size= a;
+ }
+ return data + oldlen;
+ }
+
+ char *data;
+ int l;
+ int size;
+ };
} // namespace mongo
diff --git a/util/goodies.h b/util/goodies.h
index 3a9479d4106..040c2e0ed97 100644
--- a/util/goodies.h
+++ b/util/goodies.h
@@ -31,9 +31,9 @@ namespace mongo {
namespace mongo {
-inline pthread_t GetCurrentThreadId() {
- return pthread_self();
-}
+ inline pthread_t GetCurrentThreadId() {
+ return pthread_self();
+ }
} // namespace mongo
@@ -41,72 +41,72 @@ inline pthread_t GetCurrentThreadId() {
namespace mongo {
-/* use "addr2line -CFe <exe>" to parse. */
-inline void printStackTrace() {
- void *b[12];
- size_t size;
- char **strings;
- size_t i;
+ /* use "addr2line -CFe <exe>" to parse. */
+ inline void printStackTrace() {
+ void *b[12];
+ size_t size;
+ char **strings;
+ size_t i;
- size = backtrace(b, 12);
- strings = backtrace_symbols(b, size);
+ size = backtrace(b, 12);
+ strings = backtrace_symbols(b, size);
- for (i = 0; i < size; i++)
- cout << hex << b[i] << ' ';
- cout << '\n';
- for (i = 0; i < size; i++)
- cout << ' ' << strings[i] << '\n';
+ for (i = 0; i < size; i++)
+ cout << hex << b[i] << ' ';
+ cout << '\n';
+ for (i = 0; i < size; i++)
+ cout << ' ' << strings[i] << '\n';
- free (strings);
-}
+ free (strings);
+ }
#else
-inline void printStackTrace() { }
+ inline void printStackTrace() { }
#endif
-/* set to TRUE if we are exiting */
-extern bool goingAway;
+ /* set to TRUE if we are exiting */
+ extern bool goingAway;
-/* find the multimap member which matches a particular key and value.
+ /* find the multimap member which matches a particular key and value.
- note this can be slow if there are a lot with the same key.
-*/
-template<class C,class K,class V> inline typename C::iterator kv_find(C& c, const K& k,const V& v) {
- pair<typename C::iterator,typename C::iterator> p = c.equal_range(k);
-
- for ( typename C::iterator it=p.first; it!=p.second; ++it)
- if ( it->second == v )
- return it;
-
- return c.end();
-}
-
-bool isPrime(int n);
-int nextPrime(int n);
-
-inline void dumpmemory(const char *data, int len) {
- if ( len > 1024 )
- len = 1024;
- try {
- const char *q = data;
- const char *p = q;
- while ( len > 0 ) {
- for ( int i = 0; i < 16; i++ ) {
- if ( *p >= 32 && *p <= 126 )
- cout << *p;
- else
- cout << '.';
- p++;
+ note this can be slow if there are a lot with the same key.
+ */
+ template<class C,class K,class V> inline typename C::iterator kv_find(C& c, const K& k,const V& v) {
+ pair<typename C::iterator,typename C::iterator> p = c.equal_range(k);
+
+ for ( typename C::iterator it=p.first; it!=p.second; ++it)
+ if ( it->second == v )
+ return it;
+
+ return c.end();
+ }
+
+ bool isPrime(int n);
+ int nextPrime(int n);
+
+ inline void dumpmemory(const char *data, int len) {
+ if ( len > 1024 )
+ len = 1024;
+ try {
+ const char *q = data;
+ const char *p = q;
+ while ( len > 0 ) {
+ for ( int i = 0; i < 16; i++ ) {
+ if ( *p >= 32 && *p <= 126 )
+ cout << *p;
+ else
+ cout << '.';
+ p++;
+ }
+ cout << " ";
+ p -= 16;
+ for ( int i = 0; i < 16; i++ )
+ cout << (unsigned) ((unsigned char)*p++) << ' ';
+ cout << endl;
+ len -= 16;
}
- cout << " ";
- p -= 16;
- for ( int i = 0; i < 16; i++ )
- cout << (unsigned) ((unsigned char)*p++) << ' ';
- cout << endl;
- len -= 16;
+ } catch (...) {
}
- } catch (...) {
}
-}
#undef yassert
@@ -121,31 +121,31 @@ namespace mongo {
#define assert xassert
#define yassert 1
-struct WrappingInt {
- WrappingInt() {
- x = 0;
- }
- WrappingInt(unsigned z) : x(z) { }
- unsigned x;
- operator unsigned() const {
- return x;
- }
- WrappingInt& operator++() {
- x++;
- return *this;
- }
- static int diff(unsigned a, unsigned b) {
- return a-b;
- }
- bool operator<=(WrappingInt r) {
- // platform dependent
- int df = (r.x - x);
- return df >= 0;
- }
- bool operator>(WrappingInt r) {
- return !(r<=*this);
- }
-};
+ struct WrappingInt {
+ WrappingInt() {
+ x = 0;
+ }
+ WrappingInt(unsigned z) : x(z) { }
+ unsigned x;
+ operator unsigned() const {
+ return x;
+ }
+ WrappingInt& operator++() {
+ x++;
+ return *this;
+ }
+ static int diff(unsigned a, unsigned b) {
+ return a-b;
+ }
+ bool operator<=(WrappingInt r) {
+ // platform dependent
+ int df = (r.x - x);
+ return df >= 0;
+ }
+ bool operator>(WrappingInt r) {
+ return !(r<=*this);
+ }
+ };
} // namespace mongo
@@ -153,119 +153,119 @@ struct WrappingInt {
namespace mongo {
-inline void time_t_to_String(time_t t, char *buf) {
+ inline void time_t_to_String(time_t t, char *buf) {
#if defined(_WIN32)
- ctime_s(buf, 64, &t);
+ ctime_s(buf, 64, &t);
#else
- ctime_r(&t, buf);
+ ctime_r(&t, buf);
#endif
- buf[24] = 0; // don't want the \n
-}
+ buf[24] = 0; // don't want the \n
+ }
#define asctime _asctime_not_threadsafe_
#define gmtime _gmtime_not_threadsafe_
#define localtime _localtime_not_threadsafe_
#define ctime _ctime_is_not_threadsafe_
-inline void sleepsecs(int s) {
- boost::xtime xt;
- boost::xtime_get(&xt, boost::TIME_UTC);
- xt.sec += s;
- boost::thread::sleep(xt);
-}
-inline void sleepmillis(int s) {
- boost::xtime xt;
- boost::xtime_get(&xt, boost::TIME_UTC);
- xt.nsec += s * 1000000;
- boost::thread::sleep(xt);
-}
+ inline void sleepsecs(int s) {
+ boost::xtime xt;
+ boost::xtime_get(&xt, boost::TIME_UTC);
+ xt.sec += s;
+ boost::thread::sleep(xt);
+ }
+ inline void sleepmillis(int s) {
+ boost::xtime xt;
+ boost::xtime_get(&xt, boost::TIME_UTC);
+ xt.nsec += s * 1000000;
+ boost::thread::sleep(xt);
+ }
// note this wraps
-inline int tdiff(unsigned told, unsigned tnew) {
- return WrappingInt::diff(tnew, told);
-}
-inline unsigned curTimeMillis() {
- boost::xtime xt;
- boost::xtime_get(&xt, boost::TIME_UTC);
- unsigned t = xt.nsec / 1000000;
- return (xt.sec & 0xfffff) * 1000 + t;
-}
-
-inline unsigned long long jsTime() {
- boost::xtime xt;
- boost::xtime_get(&xt, boost::TIME_UTC);
- unsigned long long t = xt.nsec / 1000000;
- return ((unsigned long long) xt.sec * 1000) + t;
-}
-
-inline unsigned long long curTimeMicros64() {
- boost::xtime xt;
- boost::xtime_get(&xt, boost::TIME_UTC);
- unsigned long long t = xt.nsec / 1000;
- return (((unsigned long long) xt.sec) * 1000000) + t;
-}
-
-// measures up to 1024 seconds. or, 512 seconds with tdiff that is...
-inline unsigned curTimeMicros() {
- boost::xtime xt;
- boost::xtime_get(&xt, boost::TIME_UTC);
- unsigned t = xt.nsec / 1000;
- unsigned secs = xt.sec % 1024;
- return secs*1000000 + t;
-}
-using namespace boost;
-typedef boost::mutex::scoped_lock boostlock;
-
-// simple scoped timer
-class Timer {
-public:
- Timer() {
- reset();
+ inline int tdiff(unsigned told, unsigned tnew) {
+ return WrappingInt::diff(tnew, told);
}
- int millis() {
- return micros() / 1000;
+ inline unsigned curTimeMillis() {
+ boost::xtime xt;
+ boost::xtime_get(&xt, boost::TIME_UTC);
+ unsigned t = xt.nsec / 1000000;
+ return (xt.sec & 0xfffff) * 1000 + t;
}
- int micros() {
- unsigned n = curTimeMicros();
- return tdiff(old, n);
+
+ inline unsigned long long jsTime() {
+ boost::xtime xt;
+ boost::xtime_get(&xt, boost::TIME_UTC);
+ unsigned long long t = xt.nsec / 1000000;
+ return ((unsigned long long) xt.sec * 1000) + t;
}
- int micros(unsigned& n) { // returns cur time in addition to timer result
- n = curTimeMicros();
- return tdiff(old, n);
+
+ inline unsigned long long curTimeMicros64() {
+ boost::xtime xt;
+ boost::xtime_get(&xt, boost::TIME_UTC);
+ unsigned long long t = xt.nsec / 1000;
+ return (((unsigned long long) xt.sec) * 1000000) + t;
}
- void reset() {
- old = curTimeMicros();
+
+// measures up to 1024 seconds. or, 512 seconds with tdiff that is...
+ inline unsigned curTimeMicros() {
+ boost::xtime xt;
+ boost::xtime_get(&xt, boost::TIME_UTC);
+ unsigned t = xt.nsec / 1000;
+ unsigned secs = xt.sec % 1024;
+ return secs*1000000 + t;
}
-private:
- unsigned old;
-};
+ using namespace boost;
+ typedef boost::mutex::scoped_lock boostlock;
+
+// simple scoped timer
+ class Timer {
+ public:
+ Timer() {
+ reset();
+ }
+ int millis() {
+ return micros() / 1000;
+ }
+ int micros() {
+ unsigned n = curTimeMicros();
+ return tdiff(old, n);
+ }
+ int micros(unsigned& n) { // returns cur time in addition to timer result
+ n = curTimeMicros();
+ return tdiff(old, n);
+ }
+ void reset() {
+ old = curTimeMicros();
+ }
+ private:
+ unsigned old;
+ };
-/*
+ /*
-class DebugMutex : boost::noncopyable {
- friend class lock;
- boost::mutex m;
- int locked;
-public:
- DebugMutex() : locked(0); { }
- bool isLocked() { return locked; }
-};
+ class DebugMutex : boost::noncopyable {
+ friend class lock;
+ boost::mutex m;
+ int locked;
+ public:
+ DebugMutex() : locked(0); { }
+ bool isLocked() { return locked; }
+ };
-*/
+ */
//typedef boostlock lock;
-inline bool startsWith(const char *str, const char *prefix) {
- unsigned l = strlen(prefix);
- if ( strlen(str) < l ) return false;
- return strncmp(str, prefix, l) == 0;
-}
+ inline bool startsWith(const char *str, const char *prefix) {
+ unsigned l = strlen(prefix);
+ if ( strlen(str) < l ) return false;
+ return strncmp(str, prefix, l) == 0;
+ }
-inline bool endsWith(const char *p, const char *suffix) {
- int a = strlen(p);
- int b = strlen(suffix);
- if ( b > a ) return false;
- return strcmp(p + a - b, suffix) == 0;
-}
+ inline bool endsWith(const char *p, const char *suffix) {
+ int a = strlen(p);
+ int b = strlen(suffix);
+ if ( b > a ) return false;
+ return strcmp(p + a - b, suffix) == 0;
+ }
} // namespace mongo
@@ -273,22 +273,22 @@ inline bool endsWith(const char *p, const char *suffix) {
namespace mongo {
-inline unsigned long swapEndian(unsigned long x) {
- return
- ((x & 0xff) << 24) |
- ((x & 0xff00) << 8) |
- ((x & 0xff0000) >> 8) |
- ((x & 0xff000000) >> 24);
-}
+ inline unsigned long swapEndian(unsigned long x) {
+ return
+ ((x & 0xff) << 24) |
+ ((x & 0xff00) << 8) |
+ ((x & 0xff0000) >> 8) |
+ ((x & 0xff000000) >> 24);
+ }
#if defined(BOOST_LITTLE_ENDIAN)
-inline unsigned long fixEndian(unsigned long x) {
- return x;
-}
+ inline unsigned long fixEndian(unsigned long x) {
+ return x;
+ }
#else
-inline unsigned long fixEndian(unsigned long x) {
- return swapEndian(x);
-}
+ inline unsigned long fixEndian(unsigned long x) {
+ return swapEndian(x);
+ }
#endif
diff --git a/util/hashtab.h b/util/hashtab.h
index 80a6ded1030..1f280e29aaa 100644
--- a/util/hashtab.h
+++ b/util/hashtab.h
@@ -30,105 +30,105 @@ namespace mongo {
#pragma pack(push,1)
-/* you should define:
-
- int Key::hash() return > 0 always.
-*/
-
-template <
-class Key,
-class Type
->
-class HashTable {
-public:
- const char *name;
- struct Node {
- int hash;
- Key k;
- Type value;
- bool inUse() {
- return hash != 0;
- }
- void setUnused() {
- hash = 0;
- }
- } *nodes;
- int n;
-
- int _find(const Key& k, bool& found) {
- found = false;
- int h = k.hash();
- int i = h % n;
- int start = i;
- int chain = 0;
- while ( 1 ) {
- if ( !nodes[i].inUse() ) {
- return i;
+ /* you should define:
+
+ int Key::hash() return > 0 always.
+ */
+
+ template <
+ class Key,
+ class Type
+ >
+ class HashTable {
+ public:
+ const char *name;
+ struct Node {
+ int hash;
+ Key k;
+ Type value;
+ bool inUse() {
+ return hash != 0;
}
- if ( nodes[i].hash == h && nodes[i].k == k ) {
- found = true;
- return i;
+ void setUnused() {
+ hash = 0;
}
- chain++;
- i = (i+1) % n;
- if ( i == start ) {
- cout << "warning: hashtable is full " << name << endl;
- return -1;
+ } *nodes;
+ int n;
+
+ int _find(const Key& k, bool& found) {
+ found = false;
+ int h = k.hash();
+ int i = h % n;
+ int start = i;
+ int chain = 0;
+ while ( 1 ) {
+ if ( !nodes[i].inUse() ) {
+ return i;
+ }
+ if ( nodes[i].hash == h && nodes[i].k == k ) {
+ found = true;
+ return i;
+ }
+ chain++;
+ i = (i+1) % n;
+ if ( i == start ) {
+ out() << "warning: hashtable is full " << name << endl;
+ return -1;
+ }
+ if ( chain == 200 )
+ out() << "warning: hashtable long chain " << name << endl;
}
- if ( chain == 200 )
- cout << "warning: hashtable long chain " << name << endl;
}
- }
-
-public:
- /* buf must be all zeroes on initialization. */
- HashTable(void *buf, int buflen, const char *_name) : name(_name) {
- int m = sizeof(Node);
- // cout << "hashtab init, buflen:" << buflen << " m:" << m << endl;
- n = buflen / m;
- if ( (n & 1) == 0 )
- n--;
- nodes = (Node *) buf;
- assert(nodes[n-1].hash == 0);
- assert(nodes[0].hash == 0);
-
- assert( sizeof(Node) == 628 );
- //cout << "HashTable() " << _name << " sizeof(node):" << sizeof(Node) << " n:" << n << endl;
- }
-
- Type* get(const Key& k) {
- bool found;
- int i = _find(k, found);
- if ( found )
- return &nodes[i].value;
- return 0;
- }
-
- void kill(const Key& k) {
- bool found;
- int i = _find(k, found);
- if ( i >= 0 && found ) {
- nodes[i].k.kill();
- nodes[i].setUnused();
+
+ public:
+ /* buf must be all zeroes on initialization. */
+ HashTable(void *buf, int buflen, const char *_name) : name(_name) {
+ int m = sizeof(Node);
+ // out() << "hashtab init, buflen:" << buflen << " m:" << m << endl;
+ n = buflen / m;
+ if ( (n & 1) == 0 )
+ n--;
+ nodes = (Node *) buf;
+ assert(nodes[n-1].hash == 0);
+ assert(nodes[0].hash == 0);
+
+ assert( sizeof(Node) == 628 );
+ //out() << "HashTable() " << _name << " sizeof(node):" << sizeof(Node) << " n:" << n << endl;
}
- }
-
- void put(const Key& k, const Type& value) {
- bool found;
- int i = _find(k, found);
- if ( i < 0 )
- return;
- if ( !found ) {
- nodes[i].k = k;
- nodes[i].hash = k.hash();
+
+ Type* get(const Key& k) {
+ bool found;
+ int i = _find(k, found);
+ if ( found )
+ return &nodes[i].value;
+ return 0;
}
- else {
- assert( nodes[i].hash == k.hash() );
+
+ void kill(const Key& k) {
+ bool found;
+ int i = _find(k, found);
+ if ( i >= 0 && found ) {
+ nodes[i].k.kill();
+ nodes[i].setUnused();
+ }
+ }
+
+ void put(const Key& k, const Type& value) {
+ bool found;
+ int i = _find(k, found);
+ if ( i < 0 )
+ return;
+ if ( !found ) {
+ nodes[i].k = k;
+ nodes[i].hash = k.hash();
+ }
+ else {
+ assert( nodes[i].hash == k.hash() );
+ }
+ nodes[i].value = value;
}
- nodes[i].value = value;
- }
-};
+ };
#pragma pack(pop)
diff --git a/util/log.h b/util/log.h
index 06ab76758c8..fbbc58beeed 100644
--- a/util/log.h
+++ b/util/log.h
@@ -20,98 +20,98 @@
namespace mongo {
-class Nullstream {
-public:
- Nullstream& operator<<(const char *) {
- return *this;
- }
- Nullstream& operator<<(int) {
- return *this;
- }
- Nullstream& operator<<(unsigned long) {
- return *this;
- }
- Nullstream& operator<<(unsigned) {
- return *this;
- }
- Nullstream& operator<<(double) {
- return *this;
- }
- Nullstream& operator<<(void *) {
- return *this;
- }
- Nullstream& operator<<(long long) {
- return *this;
- }
- Nullstream& operator<<(unsigned long long) {
- return *this;
- }
- Nullstream& operator<<(const string&) {
- return *this;
- }
- Nullstream& operator<< (ostream& ( *endl )(ostream&)) {
- return *this;
- }
- Nullstream& operator<< (ios_base& (*hex)(ios_base&)) {
- return *this;
- }
-};
-extern Nullstream nullstream;
+ class Nullstream {
+ public:
+ Nullstream& operator<<(const char *) {
+ return *this;
+ }
+ Nullstream& operator<<(int) {
+ return *this;
+ }
+ Nullstream& operator<<(unsigned long) {
+ return *this;
+ }
+ Nullstream& operator<<(unsigned) {
+ return *this;
+ }
+ Nullstream& operator<<(double) {
+ return *this;
+ }
+ Nullstream& operator<<(void *) {
+ return *this;
+ }
+ Nullstream& operator<<(long long) {
+ return *this;
+ }
+ Nullstream& operator<<(unsigned long long) {
+ return *this;
+ }
+ Nullstream& operator<<(const string&) {
+ return *this;
+ }
+ Nullstream& operator<< (ostream& ( *endl )(ostream&)) {
+ return *this;
+ }
+ Nullstream& operator<< (ios_base& (*hex)(ios_base&)) {
+ return *this;
+ }
+ };
+ extern Nullstream nullstream;
#define LOGIT { boostlock lk(mutex); cout << x; return *this; }
-class Logstream {
- static boost::mutex mutex;
-public:
- void flush() {
- boostlock lk(mutex);
- cout.flush();
+ class Logstream {
+ static boost::mutex mutex;
+ public:
+ void flush() {
+ boostlock lk(mutex);
+ cout.flush();
+ }
+ Logstream& operator<<(const char *x) LOGIT
+ Logstream& operator<<(char x) LOGIT
+ Logstream& operator<<(int x) LOGIT
+ Logstream& operator<<(unsigned long x) LOGIT
+ Logstream& operator<<(unsigned x) LOGIT
+ Logstream& operator<<(double x) LOGIT
+ Logstream& operator<<(void *x) LOGIT
+ Logstream& operator<<(long long x) LOGIT
+ Logstream& operator<<(unsigned long long x) LOGIT
+ Logstream& operator<<(const string& x) LOGIT
+ Logstream& operator<< (ostream& ( *_endl )(ostream&)) {
+ boostlock lk(mutex);
+ cout << _endl;
+ return *this;
+ }
+ Logstream& operator<< (ios_base& (*_hex)(ios_base&)) {
+ boostlock lk(mutex);
+ cout << _hex;
+ return *this;
+ }
+ Logstream& prolog(bool withNs = false) {
+ char now[64];
+ time_t_to_String(time(0), now);
+ now[20] = 0;
+
+ boostlock lk(mutex);
+ cout << now;
+ if ( withNs && /*database && */curNs )
+ cout << curNs << ' ';
+ return *this;
+ }
+ };
+ extern Logstream logstream;
+
+ inline Logstream& problem() {
+ return logstream.prolog(true);
}
- Logstream& operator<<(const char *x) LOGIT
- Logstream& operator<<(char x) LOGIT
- Logstream& operator<<(int x) LOGIT
- Logstream& operator<<(unsigned long x) LOGIT
- Logstream& operator<<(unsigned x) LOGIT
- Logstream& operator<<(double x) LOGIT
- Logstream& operator<<(void *x) LOGIT
- Logstream& operator<<(long long x) LOGIT
- Logstream& operator<<(unsigned long long x) LOGIT
- Logstream& operator<<(const string& x) LOGIT
- Logstream& operator<< (ostream& ( *_endl )(ostream&)) {
- boostlock lk(mutex);
- cout << _endl;
- return *this;
+ inline Logstream& log() {
+ return logstream.prolog();
}
- Logstream& operator<< (ios_base& (*_hex)(ios_base&)) {
- boostlock lk(mutex);
- cout << _hex;
- return *this;
+ inline Logstream& out() {
+ return logstream;
}
- Logstream& prolog(bool withNs = false) {
- char now[64];
- time_t_to_String(time(0), now);
- now[20] = 0;
-
- boostlock lk(mutex);
- cout << now;
- if ( withNs && /*database && */curNs )
- cout << curNs << ' ';
- return *this;
+
+ inline ostream& stdcout() {
+ return cout;
}
-};
-extern Logstream logstream;
-
-inline Logstream& problem() {
- return logstream.prolog(true);
-}
-inline Logstream& log() {
- return logstream.prolog();
-}
-
-inline ostream& stdcout() {
- return cout;
-}
-
-#define cout logstream
-
} // namespace mongo
diff --git a/util/lruishmap.h b/util/lruishmap.h
index bc55702edd6..444ce13605f 100644
--- a/util/lruishmap.h
+++ b/util/lruishmap.h
@@ -23,57 +23,57 @@
namespace mongo {
-/* Your K object must define:
- int hash() - must always return > 0.
- operator==
-*/
+ /* Your K object must define:
+ int hash() - must always return > 0.
+ operator==
+ */
-template <class K, class V, int MaxChain>
-class LRUishMap {
-public:
- LRUishMap(int _n) {
- n = nextPrime(_n);
- keys = new K[n];
- hashes = new int[n];
- for ( int i = 0; i < n; i++ ) hashes[i] = 0;
- }
- ~LRUishMap() {
- delete[] keys;
- delete[] hashes;
- }
+ template <class K, class V, int MaxChain>
+ class LRUishMap {
+ public:
+ LRUishMap(int _n) {
+ n = nextPrime(_n);
+ keys = new K[n];
+ hashes = new int[n];
+ for ( int i = 0; i < n; i++ ) hashes[i] = 0;
+ }
+ ~LRUishMap() {
+ delete[] keys;
+ delete[] hashes;
+ }
- int _find(const K& k, bool& found) {
- int h = k.hash();
- assert( h > 0 );
- int j = h % n;
- int first = j;
- for ( int i = 0; i < MaxChain; i++ ) {
- if ( hashes[j] == h ) {
- if ( keys[j] == k ) {
- found = true;
+ int _find(const K& k, bool& found) {
+ int h = k.hash();
+ assert( h > 0 );
+ int j = h % n;
+ int first = j;
+ for ( int i = 0; i < MaxChain; i++ ) {
+ if ( hashes[j] == h ) {
+ if ( keys[j] == k ) {
+ found = true;
+ return j;
+ }
+ }
+ else if ( hashes[j] == 0 ) {
+ found = false;
return j;
}
}
- else if ( hashes[j] == 0 ) {
- found = false;
- return j;
- }
+ found = false;
+ return first;
}
- found = false;
- return first;
- }
- V* find(const K& k) {
- bool found;
- int j = _find(k, found);
- return found ? &values[j] : 0;
- }
+ V* find(const K& k) {
+ bool found;
+ int j = _find(k, found);
+ return found ? &values[j] : 0;
+ }
-private:
- int n;
- K *keys;
- int *hashes;
- V *values;
-};
+ private:
+ int n;
+ K *keys;
+ int *hashes;
+ V *values;
+ };
} // namespace mongo
diff --git a/util/md5.hpp b/util/md5.hpp
index 428036c8d6d..8a1eafc634c 100644
--- a/util/md5.hpp
+++ b/util/md5.hpp
@@ -3,20 +3,20 @@
#pragma once
#include "md5.h"
-
-namespace mongo {
-typedef unsigned char md5digest[16];
+namespace mongo {
-inline void md5(const void *buf, int nbytes, md5digest digest) {
- md5_state_t st;
- md5_init(&st);
- md5_append(&st, (const md5_byte_t *) buf, nbytes);
- md5_finish(&st, digest);
-}
+ typedef unsigned char md5digest[16];
-inline void md5(const char *str, md5digest digest) {
- md5(str, strlen(str), digest);
-}
-
-} // namespace mongo
+ inline void md5(const void *buf, int nbytes, md5digest digest) {
+ md5_state_t st;
+ md5_init(&st);
+ md5_append(&st, (const md5_byte_t *) buf, nbytes);
+ md5_finish(&st, digest);
+ }
+
+ inline void md5(const char *str, md5digest digest) {
+ md5(str, strlen(str), digest);
+ }
+
+} // namespace mongo
diff --git a/util/miniwebserver.cpp b/util/miniwebserver.cpp
index b3b514bae3f..4b4a1ad70f7 100644
--- a/util/miniwebserver.cpp
+++ b/util/miniwebserver.cpp
@@ -22,166 +22,166 @@
namespace mongo {
-MiniWebServer::MiniWebServer() {
- sock = 0;
-}
-
-bool MiniWebServer::init(int port) {
- SockAddr me(port);
- sock = socket(AF_INET, SOCK_STREAM, 0);
- if ( sock == INVALID_SOCKET ) {
- log() << "ERROR: MiniWebServer listen(): invalid socket? " << errno << endl;
- return false;
- }
- prebindOptions( sock );
- if ( ::bind(sock, (sockaddr *) &me.sa, me.addressSize) != 0 ) {
- log() << "MiniWebServer: bind() failed port:" << port << " errno:" << errno << endl;
- if ( errno == 98 )
- log() << "98 == addr already in use" << endl;
- closesocket(sock);
- return false;
+ MiniWebServer::MiniWebServer() {
+ sock = 0;
}
- if ( ::listen(sock, 16) != 0 ) {
- log() << "MiniWebServer: listen() failed " << errno << endl;
- closesocket(sock);
- return false;
- }
+ bool MiniWebServer::init(int port) {
+ SockAddr me(port);
+ sock = socket(AF_INET, SOCK_STREAM, 0);
+ if ( sock == INVALID_SOCKET ) {
+ log() << "ERROR: MiniWebServer listen(): invalid socket? " << errno << endl;
+ return false;
+ }
+ prebindOptions( sock );
+ if ( ::bind(sock, (sockaddr *) &me.sa, me.addressSize) != 0 ) {
+ log() << "MiniWebServer: bind() failed port:" << port << " errno:" << errno << endl;
+ if ( errno == 98 )
+ log() << "98 == addr already in use" << endl;
+ closesocket(sock);
+ return false;
+ }
+
+ if ( ::listen(sock, 16) != 0 ) {
+ log() << "MiniWebServer: listen() failed " << errno << endl;
+ closesocket(sock);
+ return false;
+ }
- return true;
-}
+ return true;
+ }
-string MiniWebServer::parseURL( const char * buf ) {
- const char * urlStart = strstr( buf , " " );
- if ( ! urlStart )
- return "/";
+ string MiniWebServer::parseURL( const char * buf ) {
+ const char * urlStart = strstr( buf , " " );
+ if ( ! urlStart )
+ return "/";
- urlStart++;
+ urlStart++;
- const char * end = strstr( urlStart , " " );
- if ( ! end ) {
- end = strstr( urlStart , "\r" );
+ const char * end = strstr( urlStart , " " );
if ( ! end ) {
- end = strstr( urlStart , "\n" );
+ end = strstr( urlStart , "\r" );
+ if ( ! end ) {
+ end = strstr( urlStart , "\n" );
+ }
}
- }
- if ( ! end )
- return "/";
+ if ( ! end )
+ return "/";
- int diff = (int)(end-urlStart);
- if ( diff < 0 || diff > 255 )
- return "/";
+ int diff = (int)(end-urlStart);
+ if ( diff < 0 || diff > 255 )
+ return "/";
- return string( urlStart , (int)(end-urlStart) );
-}
+ return string( urlStart , (int)(end-urlStart) );
+ }
-void MiniWebServer::parseParams( map<string,string> & params , string query ) {
- if ( query.size() == 0 )
- return;
+ void MiniWebServer::parseParams( map<string,string> & params , string query ) {
+ if ( query.size() == 0 )
+ return;
- while ( query.size() ) {
+ while ( query.size() ) {
- string::size_type amp = query.find( "&" );
+ string::size_type amp = query.find( "&" );
- string cur;
- if ( amp == string::npos ) {
- cur = query;
- query = "";
- }
- else {
- cur = query.substr( 0 , amp );
- query = query.substr( amp + 1 );
+ string cur;
+ if ( amp == string::npos ) {
+ cur = query;
+ query = "";
+ }
+ else {
+ cur = query.substr( 0 , amp );
+ query = query.substr( amp + 1 );
+ }
+
+ string::size_type eq = cur.find( "=" );
+ if ( eq == string::npos )
+ continue;
+
+ params[cur.substr(0,eq)] = cur.substr(eq+1);
}
+ return;
+ }
- string::size_type eq = cur.find( "=" );
- if ( eq == string::npos )
- continue;
+ string MiniWebServer::parseMethod( const char * headers ) {
+ const char * end = strstr( headers , " " );
+ if ( ! end )
+ return "GET";
+ return string( headers , (int)(end-headers) );
+ }
- params[cur.substr(0,eq)] = cur.substr(eq+1);
+ const char *MiniWebServer::body( const char *buf ) {
+ const char *ret = strstr( buf, "\r\n\r\n" );
+ return ret ? ret + 4 : ret;
}
- return;
-}
-
-string MiniWebServer::parseMethod( const char * headers ) {
- const char * end = strstr( headers , " " );
- if ( ! end )
- return "GET";
- return string( headers , (int)(end-headers) );
-}
-
-const char *MiniWebServer::body( const char *buf ) {
- const char *ret = strstr( buf, "\r\n\r\n" );
- return ret ? ret + 4 : ret;
-}
-
-bool MiniWebServer::fullReceive( const char *buf ) {
- const char *bod = body( buf );
- if ( !bod )
+
+ bool MiniWebServer::fullReceive( const char *buf ) {
+ const char *bod = body( buf );
+ if ( !bod )
+ return false;
+ const char *lenString = "Content-Length:";
+ const char *lengthLoc = strstr( buf, lenString );
+ if ( !lengthLoc )
+ return true;
+ lengthLoc += strlen( lenString );
+ long len = strtol( lengthLoc, 0, 10 );
+ if ( long( strlen( bod ) ) == len )
+ return true;
return false;
- const char *lenString = "Content-Length:";
- const char *lengthLoc = strstr( buf, lenString );
- if ( !lengthLoc )
- return true;
- lengthLoc += strlen( lenString );
- long len = strtol( lengthLoc, 0, 10 );
- if ( long( strlen( bod ) ) == len )
- return true;
- return false;
-}
-
-void MiniWebServer::accepted(int s) {
- char buf[4096];
- int len = 0;
- while ( 1 ) {
- int x = ::recv(s, buf + len, sizeof(buf) - 1 - len, 0);
- if ( x <= 0 ) {
- return;
- }
- len += x;
- buf[ len ] = 0;
- if ( fullReceive( buf ) )
- break;
}
- buf[len] = 0;
-
- string responseMsg;
- int responseCode = 599;
- vector<string> headers;
- doRequest(buf, parseURL( buf ), responseMsg, responseCode, headers);
-
- stringstream ss;
- ss << "HTTP/1.0 " << responseCode;
- if ( responseCode == 200 ) ss << " OK";
- ss << "\r\n";
- if ( headers.empty() ) {
- ss << "Content-Type: text/html\r\n";
- }
- else {
- for ( vector<string>::iterator i = headers.begin(); i != headers.end(); i++ )
- ss << *i << "\r\n";
+
+ void MiniWebServer::accepted(int s) {
+ char buf[4096];
+ int len = 0;
+ while ( 1 ) {
+ int x = ::recv(s, buf + len, sizeof(buf) - 1 - len, 0);
+ if ( x <= 0 ) {
+ return;
+ }
+ len += x;
+ buf[ len ] = 0;
+ if ( fullReceive( buf ) )
+ break;
+ }
+ buf[len] = 0;
+
+ string responseMsg;
+ int responseCode = 599;
+ vector<string> headers;
+ doRequest(buf, parseURL( buf ), responseMsg, responseCode, headers);
+
+ stringstream ss;
+ ss << "HTTP/1.0 " << responseCode;
+ if ( responseCode == 200 ) ss << " OK";
+ ss << "\r\n";
+ if ( headers.empty() ) {
+ ss << "Content-Type: text/html\r\n";
+ }
+ else {
+ for ( vector<string>::iterator i = headers.begin(); i != headers.end(); i++ )
+ ss << *i << "\r\n";
+ }
+ ss << "\r\n";
+ ss << responseMsg;
+ string response = ss.str();
+
+ ::send(s, response.c_str(), response.size(), 0);
}
- ss << "\r\n";
- ss << responseMsg;
- string response = ss.str();
-
- ::send(s, response.c_str(), response.size(), 0);
-}
-
-void MiniWebServer::run() {
- SockAddr from;
- while ( 1 ) {
- int s = accept(sock, (sockaddr *) &from.sa, &from.addressSize);
- if ( s < 0 ) {
- log() << "MiniWebServer: accept() returns " << s << " errno:" << errno << endl;
- sleepmillis(200);
- continue;
+
+ void MiniWebServer::run() {
+ SockAddr from;
+ while ( 1 ) {
+ int s = accept(sock, (sockaddr *) &from.sa, &from.addressSize);
+ if ( s < 0 ) {
+ log() << "MiniWebServer: accept() returns " << s << " errno:" << errno << endl;
+ sleepmillis(200);
+ continue;
+ }
+ disableNagle(s);
+ RARELY log() << "MiniWebServer: connection accepted from " << from.toString() << endl;
+ accepted( s );
+ closesocket(s);
}
- disableNagle(s);
- RARELY log() << "MiniWebServer: connection accepted from " << from.toString() << endl;
- accepted( s );
- closesocket(s);
}
-}
} // namespace mongo
diff --git a/util/miniwebserver.h b/util/miniwebserver.h
index 503655340f9..d41935bd49f 100644
--- a/util/miniwebserver.h
+++ b/util/miniwebserver.h
@@ -22,34 +22,34 @@
namespace mongo {
-class MiniWebServer {
-public:
- MiniWebServer();
-
- bool init(int port);
- void run();
-
- virtual void doRequest(
- const char *rq, // the full request
- string url,
- // set these and return them:
- string& responseMsg,
- int& responseCode,
- vector<string>& headers // if completely empty, content-type: text/html will be added
- ) = 0;
-
-
-protected:
- string parseURL( const char * buf );
- string parseMethod( const char * headers );
- void parseParams( map<string,string> & params , string query );
- static const char *body( const char *buf );
-
-private:
- void accepted(int s);
- static bool fullReceive( const char *buf );
-
- int sock;
-};
+ class MiniWebServer {
+ public:
+ MiniWebServer();
+
+ bool init(int port);
+ void run();
+
+ virtual void doRequest(
+ const char *rq, // the full request
+ string url,
+ // set these and return them:
+ string& responseMsg,
+ int& responseCode,
+ vector<string>& headers // if completely empty, content-type: text/html will be added
+ ) = 0;
+
+
+ protected:
+ string parseURL( const char * buf );
+ string parseMethod( const char * headers );
+ void parseParams( map<string,string> & params , string query );
+ static const char *body( const char *buf );
+
+ private:
+ void accepted(int s);
+ static bool fullReceive( const char *buf );
+
+ int sock;
+ };
} // namespace mongo
diff --git a/util/mmap.cpp b/util/mmap.cpp
index 2d0df61f3d9..ff943504588 100644
--- a/util/mmap.cpp
+++ b/util/mmap.cpp
@@ -21,33 +21,33 @@
namespace mongo {
-set<MemoryMappedFile*> mmfiles;
-
-MemoryMappedFile::~MemoryMappedFile() {
- close();
- mmfiles.erase(this);
-}
-
-/*static*/
-int closingAllFiles = 0;
-void MemoryMappedFile::closeAllFiles() {
- if ( closingAllFiles ) {
- cout << "warning closingAllFiles=" << closingAllFiles << endl;
- return;
+ set<MemoryMappedFile*> mmfiles;
+
+ MemoryMappedFile::~MemoryMappedFile() {
+ close();
+ mmfiles.erase(this);
+ }
+
+ /*static*/
+ int closingAllFiles = 0;
+ void MemoryMappedFile::closeAllFiles() {
+ if ( closingAllFiles ) {
+ out() << "warning closingAllFiles=" << closingAllFiles << endl;
+ return;
+ }
+ ++closingAllFiles;
+ for ( set<MemoryMappedFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ )
+ (*i)->close();
+ log() << " closeAllFiles() finished" << endl;
+ --closingAllFiles;
+ }
+
+ void MemoryMappedFile::updateLength( const char *filename, int &length ) const {
+ if ( !boost::filesystem::exists( filename ) )
+ return;
+ // make sure we map full length if preexisting file.
+ length = boost::filesystem::file_size( filename );
}
- ++closingAllFiles;
- for ( set<MemoryMappedFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ )
- (*i)->close();
- log() << " closeAllFiles() finished" << endl;
- --closingAllFiles;
-}
-
-void MemoryMappedFile::updateLength( const char *filename, int &length ) const {
- if ( !boost::filesystem::exists( filename ) )
- return;
- // make sure we map full length if preexisting file.
- length = boost::filesystem::file_size( filename );
-}
#if defined(_WIN32)
@@ -57,72 +57,72 @@ void MemoryMappedFile::updateLength( const char *filename, int &length ) const {
namespace mongo {
-MemoryMappedFile::MemoryMappedFile() {
- fd = 0;
- maphandle = 0;
- view = 0;
- mmfiles.insert(this);
-}
-
-void MemoryMappedFile::close() {
- if ( view )
- UnmapViewOfFile(view);
- view = 0;
- if ( maphandle )
- CloseHandle(maphandle);
- maphandle = 0;
- if ( fd )
- CloseHandle(fd);
- fd = 0;
-}
-
-std::wstring toWideString(const char *s) {
- std::basic_ostringstream<TCHAR> buf;
- buf << s;
- return buf.str();
-}
-
-unsigned mapped = 0;
-
-void* MemoryMappedFile::map(const char *filename, int length) {
- updateLength( filename, length );
- std::wstring filenamew = toWideString(filename);
-
- fd = CreateFile(
- filenamew.c_str(), GENERIC_WRITE | GENERIC_READ, FILE_SHARE_READ,
- NULL, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
- if ( fd == INVALID_HANDLE_VALUE ) {
- cout << "CreateFile failed " << filename << endl;
- return 0;
+ MemoryMappedFile::MemoryMappedFile() {
+ fd = 0;
+ maphandle = 0;
+ view = 0;
+ mmfiles.insert(this);
+ }
+
+ void MemoryMappedFile::close() {
+ if ( view )
+ UnmapViewOfFile(view);
+ view = 0;
+ if ( maphandle )
+ CloseHandle(maphandle);
+ maphandle = 0;
+ if ( fd )
+ CloseHandle(fd);
+ fd = 0;
+ }
+
+ std::wstring toWideString(const char *s) {
+ std::basic_ostringstream<TCHAR> buf;
+ buf << s;
+ return buf.str();
}
+ unsigned mapped = 0;
+
+ void* MemoryMappedFile::map(const char *filename, int length) {
+ updateLength( filename, length );
+ std::wstring filenamew = toWideString(filename);
+
+ fd = CreateFile(
+ filenamew.c_str(), GENERIC_WRITE | GENERIC_READ, FILE_SHARE_READ,
+ NULL, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
+ if ( fd == INVALID_HANDLE_VALUE ) {
+ out() << "CreateFile failed " << filename << endl;
+ return 0;
+ }
+
#if defined(_WIN32)
- if ( mapped > 500000000 ) {
- cout << "WARNING: too much mem mapped for win32" << endl;
+ if ( mapped > 500000000 ) {
+ out() << "WARNING: too much mem mapped for win32" << endl;
// if( length > 50000000 )
// length = 50000000;
- }
- mapped += length;
+ }
+ mapped += length;
#endif
- maphandle = CreateFileMapping(fd, NULL, PAGE_READWRITE, 0, length, NULL);
- if ( maphandle == NULL ) {
- cout << "CreateFileMapping failed " << filename << endl;
- return 0;
- }
+ maphandle = CreateFileMapping(fd, NULL, PAGE_READWRITE, 0, length, NULL);
+ if ( maphandle == NULL ) {
+ out() << "CreateFileMapping failed " << filename << endl;
+ return 0;
+ }
- view = MapViewOfFile(maphandle, FILE_MAP_ALL_ACCESS, 0, 0, 0);
- if ( view == 0 ) {
- cout << "MapViewOfFile failed " << filename << " errno:";
- cout << GetLastError();
- cout << endl;
- }
+ view = MapViewOfFile(maphandle, FILE_MAP_ALL_ACCESS, 0, 0, 0);
+ if ( view == 0 ) {
+ out() << "MapViewOfFile failed " << filename << " errno:";
+ out() << GetLastError();
+ out() << endl;
+ }
- return view;
-}
+ return view;
+ }
-void MemoryMappedFile::flush(bool) {
-}
+ void MemoryMappedFile::flush(bool) {
+ }
#else
@@ -136,88 +136,88 @@ void MemoryMappedFile::flush(bool) {
namespace mongo {
-MemoryMappedFile::MemoryMappedFile() {
- fd = 0;
- maphandle = 0;
- view = 0;
- len = 0;
- mmfiles.insert(this);
-}
+ MemoryMappedFile::MemoryMappedFile() {
+ fd = 0;
+ maphandle = 0;
+ view = 0;
+ len = 0;
+ mmfiles.insert(this);
+ }
-void MemoryMappedFile::close() {
- if ( view )
- munmap(view, len);
- view = 0;
+ void MemoryMappedFile::close() {
+ if ( view )
+ munmap(view, len);
+ view = 0;
- if ( fd )
- ::close(fd);
- fd = 0;
-}
+ if ( fd )
+ ::close(fd);
+ fd = 0;
+ }
#ifndef O_NOATIME
#warning NO O_NOATIME
#define O_NOATIME 0
#endif
-void* MemoryMappedFile::map(const char *filename, int length) {
- updateLength( filename, length );
- len = length;
+ void* MemoryMappedFile::map(const char *filename, int length) {
+ updateLength( filename, length );
+ len = length;
- fd = open(filename, O_CREAT | O_RDWR | O_NOATIME, S_IRUSR | S_IWUSR);
- if ( !fd ) {
- cout << "couldn't open " << filename << ' ' << errno << endl;
- return 0;
- }
+ fd = open(filename, O_CREAT | O_RDWR | O_NOATIME, S_IRUSR | S_IWUSR);
+ if ( !fd ) {
+ out() << "couldn't open " << filename << ' ' << errno << endl;
+ return 0;
+ }
- /* make sure the file is the full desired length */
- off_t filelen = lseek(fd, 0, SEEK_END);
- if ( filelen < length ) {
+ /* make sure the file is the full desired length */
+ off_t filelen = lseek(fd, 0, SEEK_END);
+ if ( filelen < length ) {
// log() << "map: file length=" << (unsigned) filelen << " want:"
// << length
// << endl;
- if ( filelen != 0 ) {
- problem() << "failure mapping new file " << filename << " length:" << length << endl;
- return 0;
- }
- // Check for end of disk.
- lseek(fd, length - 1, SEEK_SET);
- write(fd, "", 1);
- Logstream &l = log();
- l << "new datafile " << filename << " filling with zeroes...";
- l.flush();
- Timer t;
- int z = 8192;
- char buf[z];
- memset(buf, 0, z);
- int left = length;
- while ( 1 ) {
- if ( left <= z ) {
- write(fd, buf, left);
- break;
+ if ( filelen != 0 ) {
+ problem() << "failure mapping new file " << filename << " length:" << length << endl;
+ return 0;
}
- write(fd, buf, z);
- left -= z;
+ // Check for end of disk.
+ lseek(fd, length - 1, SEEK_SET);
+ write(fd, "", 1);
+ Logstream &l = log();
+ l << "new datafile " << filename << " filling with zeroes...";
+ l.flush();
+ Timer t;
+ int z = 8192;
+ char buf[z];
+ memset(buf, 0, z);
+ int left = length;
+ while ( 1 ) {
+ if ( left <= z ) {
+ write(fd, buf, left);
+ break;
+ }
+ write(fd, buf, z);
+ left -= z;
+ }
+ l << "done " << ((double)t.millis())/1000.0 << " secs" << endl;
}
- l << "done " << ((double)t.millis())/1000.0 << " secs" << endl;
- }
- view = mmap(NULL, length, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
- if ( view == MAP_FAILED ) {
- cout << " mmap() failed for " << filename << " len:" << length << " errno:" << errno << endl;
- return 0;
+ view = mmap(NULL, length, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+ if ( view == MAP_FAILED ) {
+ out() << " mmap() failed for " << filename << " len:" << length << " errno:" << errno << endl;
+ return 0;
+ }
+ return view;
}
- return view;
-}
-void MemoryMappedFile::flush(bool sync) {
- if ( msync(view, len, sync ? MS_SYNC : MS_ASYNC) )
- problem() << "msync error " << errno << endl;
-}
+ void MemoryMappedFile::flush(bool sync) {
+ if ( msync(view, len, sync ? MS_SYNC : MS_ASYNC) )
+ problem() << "msync error " << errno << endl;
+ }
#endif
-void* MemoryMappedFile::map(const char *filename) {
- return map( filename , file_size( filename ) ); // file_size is from boost
-}
+ void* MemoryMappedFile::map(const char *filename) {
+ return map( filename , file_size( filename ) ); // file_size is from boost
+ }
} // namespace mongo
diff --git a/util/mmap.h b/util/mmap.h
index bc89d7ef8e9..0044cc5ea4f 100644
--- a/util/mmap.h
+++ b/util/mmap.h
@@ -20,38 +20,38 @@
namespace mongo {
-class MemoryMappedFile {
-public:
- static void closeAllFiles();
- MemoryMappedFile();
- ~MemoryMappedFile(); /* closes the file if open */
- void close();
-
- // Throws exception if file doesn't exist.
- void* map( const char *filename );
-
- /* Creates with length if DNE, otherwise uses existing file length.
- */
- void* map(const char *filename, int length);
-
- void flush(bool sync);
-
- void* viewOfs() {
- return view;
- }
-
- int length() {
- return len;
- }
-
- void updateLength( const char *filename, int &length ) const;
-
-private:
- HANDLE fd;
- HANDLE maphandle;
- void *view;
- int len;
-};
+ class MemoryMappedFile {
+ public:
+ static void closeAllFiles();
+ MemoryMappedFile();
+ ~MemoryMappedFile(); /* closes the file if open */
+ void close();
+
+ // Throws exception if file doesn't exist.
+ void* map( const char *filename );
+
+ /* Creates with length if DNE, otherwise uses existing file length.
+ */
+ void* map(const char *filename, int length);
+
+ void flush(bool sync);
+
+ void* viewOfs() {
+ return view;
+ }
+
+ int length() {
+ return len;
+ }
+
+ void updateLength( const char *filename, int &length ) const;
+
+ private:
+ HANDLE fd;
+ HANDLE maphandle;
+ void *view;
+ int len;
+ };
} // namespace mongo
diff --git a/util/sock.cpp b/util/sock.cpp
index ec3a2c16fdf..560b016bf24 100644
--- a/util/sock.cpp
+++ b/util/sock.cpp
@@ -21,170 +21,170 @@
namespace mongo {
-static boost::mutex sock_mutex;
+ static boost::mutex sock_mutex;
// .empty() if err
-string hostbyname(const char *hostname) {
- boostlock lk(sock_mutex);
- struct hostent *h;
- h = gethostbyname(hostname);
- if ( h == 0 ) return "";
- return inet_ntoa( *((struct in_addr *)(h->h_addr)) );
-}
-
-void sendtest() {
- cout << "sendtest\n";
- SockAddr me(27016);
- SockAddr dest("127.0.0.1", 27015);
- UDPConnection c;
- if ( c.init(me) ) {
- char buf[256];
- cout << "sendto: ";
- cout << c.sendto(buf, sizeof(buf), dest) << " errno:" << h_errno << endl;
+ string hostbyname(const char *hostname) {
+ boostlock lk(sock_mutex);
+ struct hostent *h;
+ h = gethostbyname(hostname);
+ if ( h == 0 ) return "";
+ return inet_ntoa( *((struct in_addr *)(h->h_addr)) );
}
- cout << "end\n";
-}
-
-void listentest() {
- cout << "listentest\n";
- SockAddr me(27015);
- SockAddr sender;
- UDPConnection c;
- if ( c.init(me) ) {
- char buf[256];
- cout << "recvfrom: ";
- cout << c.recvfrom(buf, sizeof(buf), sender) << " errno:" << h_errno << endl;
+
+ void sendtest() {
+ out() << "sendtest\n";
+ SockAddr me(27016);
+ SockAddr dest("127.0.0.1", 27015);
+ UDPConnection c;
+ if ( c.init(me) ) {
+ char buf[256];
+ out() << "sendto: ";
+ out() << c.sendto(buf, sizeof(buf), dest) << " errno:" << h_errno << endl;
+ }
+ out() << "end\n";
}
- cout << "end listentest\n";
-}
-void xmain();
-struct SockStartupTests {
- SockStartupTests() {
-#if defined(_WIN32)
- WSADATA d;
- if ( WSAStartup(MAKEWORD(2,2), &d) != 0 ) {
- cout << "ERROR: wsastartup failed " << errno << endl;
- problem() << "ERROR: wsastartup failed " << errno << endl;
- exit(1);
+ void listentest() {
+ out() << "listentest\n";
+ SockAddr me(27015);
+ SockAddr sender;
+ UDPConnection c;
+ if ( c.init(me) ) {
+ char buf[256];
+ out() << "recvfrom: ";
+ out() << c.recvfrom(buf, sizeof(buf), sender) << " errno:" << h_errno << endl;
}
-#endif
- //cout << "ntohl:" << ntohl(256) << endl;
- //sendtest();
- //listentest();
+ out() << "end listentest\n";
}
-} sstests;
+
+ void xmain();
+ struct SockStartupTests {
+ SockStartupTests() {
+#if defined(_WIN32)
+ WSADATA d;
+ if ( WSAStartup(MAKEWORD(2,2), &d) != 0 ) {
+ out() << "ERROR: wsastartup failed " << errno << endl;
+ problem() << "ERROR: wsastartup failed " << errno << endl;
+ exit(1);
+ }
+#endif
+ //out() << "ntohl:" << ntohl(256) << endl;
+ //sendtest();
+ //listentest();
+ }
+ } sstests;
#if 0
-void smain() {
-
- WSADATA wsaData;
- SOCKET RecvSocket;
- sockaddr_in RecvAddr;
- int Port = 27015;
- char RecvBuf[1024];
- int BufLen = 1024;
- sockaddr_in SenderAddr;
- int SenderAddrSize = sizeof(SenderAddr);
-
- //-----------------------------------------------
- // Initialize Winsock
- WSAStartup(MAKEWORD(2,2), &wsaData);
-
- //-----------------------------------------------
- // Create a receiver socket to receive datagrams
- RecvSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
- prebindOptions( RecvSocket );
-
- //-----------------------------------------------
- // Bind the socket to any address and the specified port.
- RecvAddr.sin_family = AF_INET;
- RecvAddr.sin_port = htons(Port);
- RecvAddr.sin_addr.s_addr = htonl(INADDR_ANY);
-
- ::bind(RecvSocket, (SOCKADDR *) &RecvAddr, sizeof(RecvAddr));
-
- //-----------------------------------------------
- // Call the recvfrom function to receive datagrams
- // on the bound socket.
- printf("Receiving datagrams...\n");
- recvfrom(RecvSocket,
- RecvBuf,
- BufLen,
- 0,
- (SOCKADDR *)&SenderAddr,
- &SenderAddrSize);
-
- //-----------------------------------------------
- // Close the socket when finished receiving datagrams
- printf("Finished receiving. Closing socket.\n");
- closesocket(RecvSocket);
-
- //-----------------------------------------------
- // Clean up and exit.
- printf("Exiting.\n");
- WSACleanup();
- return;
-}
-
-
-
-
-void xmain() {
-
- WSADATA wsaData;
- SOCKET RecvSocket;
- sockaddr_in RecvAddr;
- int Port = 27015;
- char RecvBuf[1024];
- int BufLen = 1024;
- sockaddr_in SenderAddr;
- int SenderAddrSize = sizeof(SenderAddr);
-
- //-----------------------------------------------
- // Initialize Winsock
- WSAStartup(MAKEWORD(2,2), &wsaData);
-
- //-----------------------------------------------
- // Create a receiver socket to receive datagrams
-
- RecvSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
- prebindOptions( RecvSocket );
-
- //-----------------------------------------------
- // Bind the socket to any address and the specified port.
- RecvAddr.sin_family = AF_INET;
- RecvAddr.sin_port = htons(Port);
- RecvAddr.sin_addr.s_addr = htonl(INADDR_ANY);
-
- SockAddr a(Port);
- ::bind(RecvSocket, (SOCKADDR *) &a.sa, a.addressSize);
+ void smain() {
+
+ WSADATA wsaData;
+ SOCKET RecvSocket;
+ sockaddr_in RecvAddr;
+ int Port = 27015;
+ char RecvBuf[1024];
+ int BufLen = 1024;
+ sockaddr_in SenderAddr;
+ int SenderAddrSize = sizeof(SenderAddr);
+
+ //-----------------------------------------------
+ // Initialize Winsock
+ WSAStartup(MAKEWORD(2,2), &wsaData);
+
+ //-----------------------------------------------
+ // Create a receiver socket to receive datagrams
+ RecvSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+ prebindOptions( RecvSocket );
+
+ //-----------------------------------------------
+ // Bind the socket to any address and the specified port.
+ RecvAddr.sin_family = AF_INET;
+ RecvAddr.sin_port = htons(Port);
+ RecvAddr.sin_addr.s_addr = htonl(INADDR_ANY);
+
+ ::bind(RecvSocket, (SOCKADDR *) &RecvAddr, sizeof(RecvAddr));
+
+ //-----------------------------------------------
+ // Call the recvfrom function to receive datagrams
+ // on the bound socket.
+ printf("Receiving datagrams...\n");
+ recvfrom(RecvSocket,
+ RecvBuf,
+ BufLen,
+ 0,
+ (SOCKADDR *)&SenderAddr,
+ &SenderAddrSize);
+
+ //-----------------------------------------------
+ // Close the socket when finished receiving datagrams
+ printf("Finished receiving. Closing socket.\n");
+ closesocket(RecvSocket);
+
+ //-----------------------------------------------
+ // Clean up and exit.
+ printf("Exiting.\n");
+ WSACleanup();
+ return;
+ }
+
+
+
+
+ void xmain() {
+
+ WSADATA wsaData;
+ SOCKET RecvSocket;
+ sockaddr_in RecvAddr;
+ int Port = 27015;
+ char RecvBuf[1024];
+ int BufLen = 1024;
+ sockaddr_in SenderAddr;
+ int SenderAddrSize = sizeof(SenderAddr);
+
+ //-----------------------------------------------
+ // Initialize Winsock
+ WSAStartup(MAKEWORD(2,2), &wsaData);
+
+ //-----------------------------------------------
+ // Create a receiver socket to receive datagrams
+
+ RecvSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+ prebindOptions( RecvSocket );
+
+ //-----------------------------------------------
+ // Bind the socket to any address and the specified port.
+ RecvAddr.sin_family = AF_INET;
+ RecvAddr.sin_port = htons(Port);
+ RecvAddr.sin_addr.s_addr = htonl(INADDR_ANY);
+
+ SockAddr a(Port);
+ ::bind(RecvSocket, (SOCKADDR *) &a.sa, a.addressSize);
// bind(RecvSocket, (SOCKADDR *) &RecvAddr, sizeof(RecvAddr));
- SockAddr b;
-
- //-----------------------------------------------
- // Call the recvfrom function to receive datagrams
- // on the bound socket.
- printf("Receiving datagrams...\n");
- recvfrom(RecvSocket,
- RecvBuf,
- BufLen,
- 0,
- (SOCKADDR *) &b.sa, &b.addressSize);
+ SockAddr b;
+
+ //-----------------------------------------------
+ // Call the recvfrom function to receive datagrams
+ // on the bound socket.
+ printf("Receiving datagrams...\n");
+ recvfrom(RecvSocket,
+ RecvBuf,
+ BufLen,
+ 0,
+ (SOCKADDR *) &b.sa, &b.addressSize);
// (SOCKADDR *)&SenderAddr,
// &SenderAddrSize);
- //-----------------------------------------------
- // Close the socket when finished receiving datagrams
- printf("Finished receiving. Closing socket.\n");
- closesocket(RecvSocket);
-
- //-----------------------------------------------
- // Clean up and exit.
- printf("Exiting.\n");
- WSACleanup();
- return;
-}
+ //-----------------------------------------------
+ // Close the socket when finished receiving datagrams
+ printf("Finished receiving. Closing socket.\n");
+ closesocket(RecvSocket);
+
+ //-----------------------------------------------
+ // Clean up and exit.
+ printf("Exiting.\n");
+ WSACleanup();
+ return;
+ }
#endif
diff --git a/util/sock.h b/util/sock.h
index d75095e28f0..9450070593d 100644
--- a/util/sock.h
+++ b/util/sock.h
@@ -27,17 +27,17 @@ namespace mongo {
#if defined(_WIN32)
//#include <winsock2.h>
//#include <ws2tcpip.h>
-typedef int socklen_t;
-inline int getLastError() {
- return WSAGetLastError();
-}
-inline void disableNagle(int sock) {
- int x = 1;
- if ( setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, (char *) &x, sizeof(x)) )
- cout << "ERROR: disableNagle failed" << endl;
-}
-inline void prebindOptions( int sock ) {
-}
+ typedef int socklen_t;
+ inline int getLastError() {
+ return WSAGetLastError();
+ }
+ inline void disableNagle(int sock) {
+ int x = 1;
+ if ( setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, (char *) &x, sizeof(x)) )
+ out() << "ERROR: disableNagle failed" << endl;
+ }
+ inline void prebindOptions( int sock ) {
+ }
#else
} // namespace mongo
@@ -53,181 +53,181 @@ inline void prebindOptions( int sock ) {
namespace mongo {
-inline void closesocket(int s) {
- close(s);
-}
-const int INVALID_SOCKET = -1;
-typedef int SOCKET;
+ inline void closesocket(int s) {
+ close(s);
+ }
+ const int INVALID_SOCKET = -1;
+ typedef int SOCKET;
//#define h_errno errno
-inline int getLastError() {
- return errno;
-}
-inline void disableNagle(int sock) {
- int x = 1;
+ inline int getLastError() {
+ return errno;
+ }
+ inline void disableNagle(int sock) {
+ int x = 1;
#ifdef SOL_TCP
- int level = SOL_TCP;
+ int level = SOL_TCP;
#else
- int level = SOL_SOCKET;
+ int level = SOL_SOCKET;
#endif
- if ( setsockopt(sock, level, TCP_NODELAY, (char *) &x, sizeof(x)) )
- log() << "ERROR: disableNagle failed" << endl;
+ if ( setsockopt(sock, level, TCP_NODELAY, (char *) &x, sizeof(x)) )
+ log() << "ERROR: disableNagle failed" << endl;
-}
-inline void prebindOptions( int sock ) {
- DEV log() << "doing prebind option" << endl;
- int x = 1;
- if ( setsockopt( sock , SOL_SOCKET, SO_REUSEADDR, &x, sizeof(x)) < 0 )
- cout << "Failed to set socket opt, SO_REUSEADDR" << endl;
-}
+ }
+ inline void prebindOptions( int sock ) {
+ DEV log() << "doing prebind option" << endl;
+ int x = 1;
+ if ( setsockopt( sock , SOL_SOCKET, SO_REUSEADDR, &x, sizeof(x)) < 0 )
+ out() << "Failed to set socket opt, SO_REUSEADDR" << endl;
+ }
#endif
-inline void setSockReceiveTimeout(int sock, int secs) {
+ inline void setSockReceiveTimeout(int sock, int secs) {
// todo - finish - works?
- struct timeval tv;
- tv.tv_sec = 0;//secs;
- tv.tv_usec = 1000;
- int rc = setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (char *) &tv, sizeof(tv));
- if ( rc ) {
- cout << "ERROR: setsockopt RCVTIMEO failed rc:" << rc << " errno:" << getLastError() << " secs:" << secs << " sock:" << sock << endl;
+ struct timeval tv;
+ tv.tv_sec = 0;//secs;
+ tv.tv_usec = 1000;
+ int rc = setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (char *) &tv, sizeof(tv));
+ if ( rc ) {
+ out() << "ERROR: setsockopt RCVTIMEO failed rc:" << rc << " errno:" << getLastError() << " secs:" << secs << " sock:" << sock << endl;
+ }
}
-}
// .empty() if err
-string hostbyname(const char *hostname);
+ string hostbyname(const char *hostname);
-struct SockAddr {
- SockAddr() {
- addressSize = sizeof(sockaddr_in);
- memset(&sa, 0, sizeof(sa));
- }
- SockAddr(int sourcePort); /* listener side */
- SockAddr(const char *ip, int port); /* EndPoint (remote) side, or if you want to specify which interface locally */
+ struct SockAddr {
+ SockAddr() {
+ addressSize = sizeof(sockaddr_in);
+ memset(&sa, 0, sizeof(sa));
+ }
+ SockAddr(int sourcePort); /* listener side */
+ SockAddr(const char *ip, int port); /* EndPoint (remote) side, or if you want to specify which interface locally */
- struct sockaddr_in sa;
- socklen_t addressSize;
+ struct sockaddr_in sa;
+ socklen_t addressSize;
- bool isLocalHost() const {
+ bool isLocalHost() const {
#if defined(_WIN32)
- return sa.sin_addr.S_un.S_addr == 0x100007f;
+ return sa.sin_addr.S_un.S_addr == 0x100007f;
#else
- return sa.sin_addr.s_addr == 0x100007f;
+ return sa.sin_addr.s_addr == 0x100007f;
#endif
- }
+ }
- string toString() {
- stringstream out;
- out << inet_ntoa(sa.sin_addr) << ':'
- << sa.sin_port;
- return out.str();
- }
+ string toString() {
+ stringstream out;
+ out << inet_ntoa(sa.sin_addr) << ':'
+ << sa.sin_port;
+ return out.str();
+ }
- unsigned getPort() {
- return sa.sin_port;
- }
+ unsigned getPort() {
+ return sa.sin_port;
+ }
- bool operator==(const SockAddr& r) const {
- return sa.sin_addr.s_addr == r.sa.sin_addr.s_addr &&
- sa.sin_port == r.sa.sin_port;
- }
- bool operator!=(const SockAddr& r) const {
- return !(*this == r);
- }
- bool operator<(const SockAddr& r) const {
- if ( sa.sin_port >= r.sa.sin_port )
- return false;
- return sa.sin_addr.s_addr < r.sa.sin_addr.s_addr;
- }
-};
+ bool operator==(const SockAddr& r) const {
+ return sa.sin_addr.s_addr == r.sa.sin_addr.s_addr &&
+ sa.sin_port == r.sa.sin_port;
+ }
+ bool operator!=(const SockAddr& r) const {
+ return !(*this == r);
+ }
+ bool operator<(const SockAddr& r) const {
+ if ( sa.sin_port >= r.sa.sin_port )
+ return false;
+ return sa.sin_addr.s_addr < r.sa.sin_addr.s_addr;
+ }
+ };
-const int MaxMTU = 16384;
+ const int MaxMTU = 16384;
-class UDPConnection {
-public:
- UDPConnection() {
- sock = 0;
- }
- ~UDPConnection() {
- if ( sock ) {
- closesocket(sock);
+ class UDPConnection {
+ public:
+ UDPConnection() {
sock = 0;
}
- }
- bool init(const SockAddr& myAddr);
- int recvfrom(char *buf, int len, SockAddr& sender);
- int sendto(char *buf, int len, const SockAddr& EndPoint);
- int mtu(const SockAddr& sa) {
- return sa.isLocalHost() ? 16384 : 1480;
- }
+ ~UDPConnection() {
+ if ( sock ) {
+ closesocket(sock);
+ sock = 0;
+ }
+ }
+ bool init(const SockAddr& myAddr);
+ int recvfrom(char *buf, int len, SockAddr& sender);
+ int sendto(char *buf, int len, const SockAddr& EndPoint);
+ int mtu(const SockAddr& sa) {
+ return sa.isLocalHost() ? 16384 : 1480;
+ }
- SOCKET sock;
-};
+ SOCKET sock;
+ };
-inline int UDPConnection::recvfrom(char *buf, int len, SockAddr& sender) {
- return ::recvfrom(sock, buf, len, 0, (sockaddr *) &sender.sa, &sender.addressSize);
-}
+ inline int UDPConnection::recvfrom(char *buf, int len, SockAddr& sender) {
+ return ::recvfrom(sock, buf, len, 0, (sockaddr *) &sender.sa, &sender.addressSize);
+ }
-inline int UDPConnection::sendto(char *buf, int len, const SockAddr& EndPoint) {
- if ( 0 && rand() < (RAND_MAX>>4) ) {
- cout << " NOTSENT ";
- // cout << curTimeMillis() << " .TEST: NOT SENDING PACKET" << endl;
- return 0;
+ inline int UDPConnection::sendto(char *buf, int len, const SockAddr& EndPoint) {
+ if ( 0 && rand() < (RAND_MAX>>4) ) {
+ out() << " NOTSENT ";
+ // out() << curTimeMillis() << " .TEST: NOT SENDING PACKET" << endl;
+ return 0;
+ }
+ return ::sendto(sock, buf, len, 0, (sockaddr *) &EndPoint.sa, EndPoint.addressSize);
}
- return ::sendto(sock, buf, len, 0, (sockaddr *) &EndPoint.sa, EndPoint.addressSize);
-}
-
-inline bool UDPConnection::init(const SockAddr& myAddr) {
- sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
- if ( sock == INVALID_SOCKET ) {
- cout << "invalid socket? " << errno << endl;
- return false;
+
+ inline bool UDPConnection::init(const SockAddr& myAddr) {
+ sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+ if ( sock == INVALID_SOCKET ) {
+ out() << "invalid socket? " << errno << endl;
+ return false;
+ }
+ //out() << sizeof(sockaddr_in) << ' ' << myAddr.addressSize << endl;
+ if ( ::bind(sock, (sockaddr *) &myAddr.sa, myAddr.addressSize) != 0 ) {
+ out() << "udp init failed" << endl;
+ closesocket(sock);
+ sock = 0;
+ return false;
+ }
+ socklen_t optLen;
+ int rcvbuf;
+ if (getsockopt(sock,
+ SOL_SOCKET,
+ SO_RCVBUF,
+ (char*)&rcvbuf,
+ &optLen) != -1)
+ out() << "SO_RCVBUF:" << rcvbuf << endl;
+ return true;
}
- //cout << sizeof(sockaddr_in) << ' ' << myAddr.addressSize << endl;
- if ( ::bind(sock, (sockaddr *) &myAddr.sa, myAddr.addressSize) != 0 ) {
- cout << "udp init failed" << endl;
- closesocket(sock);
- sock = 0;
- return false;
+
+ inline SockAddr::SockAddr(int sourcePort) {
+ memset(sa.sin_zero, 0, sizeof(sa.sin_zero));
+ sa.sin_family = AF_INET;
+ sa.sin_port = htons(sourcePort);
+ sa.sin_addr.s_addr = htonl(INADDR_ANY);
+ addressSize = sizeof(sa);
+ }
+
+ inline SockAddr::SockAddr(const char *ip, int port) {
+ memset(sa.sin_zero, 0, sizeof(sa.sin_zero));
+ sa.sin_family = AF_INET;
+ sa.sin_port = htons(port);
+ sa.sin_addr.s_addr = inet_addr(ip);
+ addressSize = sizeof(sa);
}
- socklen_t optLen;
- int rcvbuf;
- if (getsockopt(sock,
- SOL_SOCKET,
- SO_RCVBUF,
- (char*)&rcvbuf,
- &optLen) != -1)
- cout << "SO_RCVBUF:" << rcvbuf << endl;
- return true;
-}
-
-inline SockAddr::SockAddr(int sourcePort) {
- memset(sa.sin_zero, 0, sizeof(sa.sin_zero));
- sa.sin_family = AF_INET;
- sa.sin_port = htons(sourcePort);
- sa.sin_addr.s_addr = htonl(INADDR_ANY);
- addressSize = sizeof(sa);
-}
-
-inline SockAddr::SockAddr(const char *ip, int port) {
- memset(sa.sin_zero, 0, sizeof(sa.sin_zero));
- sa.sin_family = AF_INET;
- sa.sin_port = htons(port);
- sa.sin_addr.s_addr = inet_addr(ip);
- addressSize = sizeof(sa);
-}
-
-inline string getHostName() {
- char buf[256];
- int ec = gethostname(buf, 127);
- if ( ec || *buf == 0 ) {
- log() << "can't get this server's hostname errno:" << ec << endl;
- return "";
+
+ inline string getHostName() {
+ char buf[256];
+ int ec = gethostname(buf, 127);
+ if ( ec || *buf == 0 ) {
+ log() << "can't get this server's hostname errno:" << ec << endl;
+ return "";
+ }
+ return buf;
}
- return buf;
-}
diff --git a/util/unittest.h b/util/unittest.h
index 0bf2a46100a..080291bfe11 100644
--- a/util/unittest.h
+++ b/util/unittest.h
@@ -20,35 +20,35 @@
namespace mongo {
-/* The idea here is to let all initialization of global variables (classes inheriting from UnitTest)
- complete before we run the tests -- otherwise order of initilization being arbitrary may mess
- us up. The app's main() function should call runTests().
-
- To define a unit test, inherit from this and implement run. instantiate one object for the new class
- as a global.
-*/
-struct UnitTest {
- UnitTest() {
- registerTest(this);
- }
+ /* The idea here is to let all initialization of global variables (classes inheriting from UnitTest)
+ complete before we run the tests -- otherwise order of initilization being arbitrary may mess
+ us up. The app's main() function should call runTests().
+
+ To define a unit test, inherit from this and implement run. instantiate one object for the new class
+ as a global.
+ */
+ struct UnitTest {
+ UnitTest() {
+ registerTest(this);
+ }
- // assert if fails
- virtual void run() = 0;
+ // assert if fails
+ virtual void run() = 0;
- static vector<UnitTest*> *tests;
+ static vector<UnitTest*> *tests;
- static void registerTest(UnitTest *t) {
- if ( tests == 0 )
- tests = new vector<UnitTest*>();
- tests->push_back(t);
- }
+ static void registerTest(UnitTest *t) {
+ if ( tests == 0 )
+ tests = new vector<UnitTest*>();
+ tests->push_back(t);
+ }
- static void runTests() {
- for ( vector<UnitTest*>::iterator i = tests->begin(); i != tests->end(); i++ ) {
- (*i)->run();
+ static void runTests() {
+ for ( vector<UnitTest*>::iterator i = tests->begin(); i != tests->end(); i++ ) {
+ (*i)->run();
+ }
}
- }
-};
+ };
} // namespace mongo
diff --git a/util/util.cpp b/util/util.cpp
index 4736ff86865..c161782ed1b 100644
--- a/util/util.cpp
+++ b/util/util.cpp
@@ -22,63 +22,63 @@
namespace mongo {
-vector<UnitTest*> *UnitTest::tests = 0;
+ vector<UnitTest*> *UnitTest::tests = 0;
-Nullstream nullstream;
-Logstream logstream;
-boost::mutex Logstream::mutex;
+ Nullstream nullstream;
+ Logstream logstream;
+ boost::mutex Logstream::mutex;
-unsigned occasion = 0;
-unsigned once = 0;
-bool goingAway = false;
+ unsigned occasion = 0;
+ unsigned once = 0;
+ bool goingAway = false;
-bool isPrime(int n) {
- int z = 2;
- while ( 1 ) {
- if ( z*z > n )
- break;
- if ( n % z == 0 )
- return false;
- z++;
+ bool isPrime(int n) {
+ int z = 2;
+ while ( 1 ) {
+ if ( z*z > n )
+ break;
+ if ( n % z == 0 )
+ return false;
+ z++;
+ }
+ return true;
}
- return true;
-}
-int nextPrime(int n) {
- n |= 1; // 2 goes to 3...don't care...
- while ( !isPrime(n) )
- n += 2;
- return n;
-}
+ int nextPrime(int n) {
+ n |= 1; // 2 goes to 3...don't care...
+ while ( !isPrime(n) )
+ n += 2;
+ return n;
+ }
-struct UtilTest : public UnitTest {
- void run() {
- assert( WrappingInt(0) <= WrappingInt(0) );
- assert( WrappingInt(0) <= WrappingInt(1) );
- assert( !(WrappingInt(1) <= WrappingInt(0)) );
- assert( (WrappingInt(0xf0000000) <= WrappingInt(0)) );
- assert( (WrappingInt(0xf0000000) <= WrappingInt(9000)) );
- assert( !(WrappingInt(300) <= WrappingInt(0xe0000000)) );
+ struct UtilTest : public UnitTest {
+ void run() {
+ assert( WrappingInt(0) <= WrappingInt(0) );
+ assert( WrappingInt(0) <= WrappingInt(1) );
+ assert( !(WrappingInt(1) <= WrappingInt(0)) );
+ assert( (WrappingInt(0xf0000000) <= WrappingInt(0)) );
+ assert( (WrappingInt(0xf0000000) <= WrappingInt(9000)) );
+ assert( !(WrappingInt(300) <= WrappingInt(0xe0000000)) );
- assert( tdiff(3, 4) == 1 );
- assert( tdiff(4, 3) == -1 );
- assert( tdiff(0xffffffff, 0) == 1 );
+ assert( tdiff(3, 4) == 1 );
+ assert( tdiff(4, 3) == -1 );
+ assert( tdiff(0xffffffff, 0) == 1 );
- assert( isPrime(3) );
- assert( isPrime(2) );
- assert( isPrime(13) );
- assert( isPrime(17) );
- assert( !isPrime(9) );
- assert( !isPrime(6) );
- assert( nextPrime(4) == 5 );
- assert( nextPrime(8) == 11 );
+ assert( isPrime(3) );
+ assert( isPrime(2) );
+ assert( isPrime(13) );
+ assert( isPrime(17) );
+ assert( !isPrime(9) );
+ assert( !isPrime(6) );
+ assert( nextPrime(4) == 5 );
+ assert( nextPrime(8) == 11 );
- assert( endsWith("abcde", "de") );
- assert( !endsWith("abcde", "dasdfasdfashkfde") );
+ assert( endsWith("abcde", "de") );
+ assert( !endsWith("abcde", "dasdfasdfashkfde") );
- assert( swapEndian(0x01020304) == 0x04030201 );
+ assert( swapEndian(0x01020304) == 0x04030201 );
- }
-} utilTest;
+ }
+ } utilTest;
} // namespace mongo