Diffstat (limited to 'tools')
-rw-r--r--  tools/bridge.cpp    |  23
-rw-r--r--  tools/bsondump.cpp  |  50
-rw-r--r--  tools/dump.cpp      |  44
-rw-r--r--  tools/export.cpp    |  43
-rw-r--r--  tools/files.cpp     |  50
-rw-r--r--  tools/import.cpp    | 176
-rw-r--r--  tools/restore.cpp   |  60
-rw-r--r--  tools/sniffer.cpp   | 194
-rw-r--r--  tools/stat.cpp      | 336
-rw-r--r--  tools/tool.cpp      | 167
-rw-r--r--  tools/tool.h        |  48
11 files changed, 611 insertions(+), 580 deletions(-)
diff --git a/tools/bridge.cpp b/tools/bridge.cpp
index 26c0322bbcb..86dea0a83b8 100644
--- a/tools/bridge.cpp
+++ b/tools/bridge.cpp
@@ -45,7 +45,7 @@ public:
mp_.shutdown();
break;
}
-
+
int oldId = m.header()->id;
if ( m.operation() == dbQuery || m.operation() == dbMsg || m.operation() == dbGetMore ) {
bool exhaust = false;
@@ -63,15 +63,18 @@ public:
if ( qr->cursorId ) {
response.reset();
dest.port().recv( response );
- mp_.reply( m, response ); // m argument is ignored anyway
- } else {
+ mp_.reply( m, response ); // m argument is ignored anyway
+ }
+ else {
exhaust = false;
}
}
- } else {
+ }
+ else {
dest.port().say( m, oldId );
}
- } catch ( ... ) {
+ }
+ catch ( ... ) {
log() << "caught exception in Forwarder, continuing" << endl;
}
}
@@ -94,7 +97,7 @@ public:
auto_ptr< MyListener > listener;
-#if !defined(_WIN32)
+#if !defined(_WIN32)
void cleanup( int sig ) {
ListeningSockets::get()->closeAll();
for ( set<MessagingPort*>::iterator i = ports.begin(); i != ports.end(); i++ )
@@ -136,7 +139,7 @@ void check( bool b ) {
int main( int argc, char **argv ) {
static StaticObserver staticObserver;
-
+
setupSignals();
check( argc == 5 );
@@ -145,9 +148,11 @@ int main( int argc, char **argv ) {
check( i % 2 != 0 );
if ( strcmp( argv[ i ], "--port" ) == 0 ) {
port = strtol( argv[ ++i ], 0, 10 );
- } else if ( strcmp( argv[ i ], "--dest" ) == 0 ) {
+ }
+ else if ( strcmp( argv[ i ], "--dest" ) == 0 ) {
destUri = argv[ ++i ];
- } else {
+ }
+ else {
check( false );
}
}
diff --git a/tools/bsondump.cpp b/tools/bsondump.cpp
index 131335e3efd..506a4e7d3b9 100644
--- a/tools/bsondump.cpp
+++ b/tools/bsondump.cpp
@@ -35,23 +35,23 @@ class BSONDump : public BSONTool {
enum OutputType { JSON , DEBUG } _type;
public:
-
- BSONDump() : BSONTool( "bsondump", NONE ){
+
+ BSONDump() : BSONTool( "bsondump", NONE ) {
add_options()
- ("type" , po::value<string>()->default_value("json") , "type of output: json,debug" )
- ;
+ ("type" , po::value<string>()->default_value("json") , "type of output: json,debug" )
+ ;
add_hidden_options()
- ("file" , po::value<string>() , ".bson file" )
- ;
+ ("file" , po::value<string>() , ".bson file" )
+ ;
addPositionArg( "file" , 1 );
_noconnection = true;
}
-
+
virtual void printExtraHelp(ostream& out) {
out << "usage: " << _name << " [options] <bson filename>" << endl;
}
-
- virtual int doRun(){
+
+ virtual int doRun() {
{
string t = getParam( "type" );
if ( t == "json" )
@@ -63,64 +63,64 @@ public:
return 1;
}
}
-
+
path root = getParam( "file" );
if ( root == "" ) {
printExtraHelp(cout);
return 1;
}
-
+
processFile( root );
return 0;
}
-
- bool debug( const BSONObj& o , int depth=0){
+
+ bool debug( const BSONObj& o , int depth=0) {
string prefix = "";
- for ( int i=0; i<depth; i++ ){
+ for ( int i=0; i<depth; i++ ) {
prefix += "\t\t\t";
}
-
+
int read = 4;
try {
cout << prefix << "--- new object ---\n";
cout << prefix << "\t size : " << o.objsize() << "\n";
BSONObjIterator i(o);
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
cout << prefix << "\t\t " << e.fieldName() << "\n" << prefix << "\t\t\t type:" << setw(3) << e.type() << " size: " << e.size() << endl;
- if ( ( read + e.size() ) > o.objsize() ){
+ if ( ( read + e.size() ) > o.objsize() ) {
cout << prefix << " SIZE DOES NOT WORK" << endl;
return false;
}
read += e.size();
try {
e.validate();
- if ( e.isABSONObj() ){
+ if ( e.isABSONObj() ) {
if ( ! debug( e.Obj() , depth + 1 ) )
return false;
}
- else if ( e.type() == String && ! isValidUTF8( e.valuestr() ) ){
+ else if ( e.type() == String && ! isValidUTF8( e.valuestr() ) ) {
cout << prefix << "\t\t\t" << "bad utf8 String!" << endl;
}
- else if ( logLevel > 0 ){
+ else if ( logLevel > 0 ) {
cout << prefix << "\t\t\t" << e << endl;
}
-
+
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
cout << prefix << "\t\t\t bad value: " << e.what() << endl;
}
}
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
cout << prefix << "\t" << e.what() << endl;
}
return true;
}
- virtual void gotObject( const BSONObj& o ){
- switch ( _type ){
+ virtual void gotObject( const BSONObj& o ) {
+ switch ( _type ) {
case JSON:
cout << o.jsonString( TenGen ) << endl;
break;
diff --git a/tools/dump.cpp b/tools/dump.cpp
index 2c820e44379..4038f099be7 100644
--- a/tools/dump.cpp
+++ b/tools/dump.cpp
@@ -28,16 +28,16 @@ namespace po = boost::program_options;
class Dump : public Tool {
public:
- Dump() : Tool( "dump" , ALL , "*" , "*" , false ){
+ Dump() : Tool( "dump" , ALL , "*" , "*" , false ) {
add_options()
- ("out,o", po::value<string>()->default_value("dump"), "output directory or \"-\" for stdout")
- ("query,q", po::value<string>() , "json query" )
- ("oplog", "Use oplog for point-in-time snapshotting" )
- ;
+ ("out,o", po::value<string>()->default_value("dump"), "output directory or \"-\" for stdout")
+ ("query,q", po::value<string>() , "json query" )
+ ("oplog", "Use oplog for point-in-time snapshotting" )
+ ;
}
// This is a functor that writes a BSONObj to a file
- struct Writer{
+ struct Writer {
Writer(ostream& out, ProgressMeter* m) :_out(out), _m(m) {}
void operator () (const BSONObj& obj) {
@@ -68,11 +68,12 @@ public:
Writer writer(out, m);
// use low-latency "exhaust" mode if going over the network
- if (typeid(connBase) == typeid(DBClientConnection&)){
+ if (typeid(connBase) == typeid(DBClientConnection&)) {
DBClientConnection& conn = static_cast<DBClientConnection&>(connBase);
boost::function<void(const BSONObj&)> castedWriter(writer); // needed for overload resolution
conn.query( castedWriter, coll.c_str() , q , NULL, queryOptions | QueryOption_Exhaust);
- } else {
+ }
+ else {
//This branch should only be taken with DBDirectClient which doesn't support exhaust mode
scoped_ptr<DBClientCursor> cursor(connBase.query( coll.c_str() , q , 0 , 0 , 0 , queryOptions ));
while ( cursor->more() ) {
@@ -83,7 +84,7 @@ public:
void writeCollectionFile( const string coll , path outputFile ) {
cout << "\t" << coll << " to " << outputFile.string() << endl;
-
+
ofstream out;
out.open( outputFile.string().c_str() , ios_base::out | ios_base::binary );
assertStreamGood( 10262 , "couldn't open file" , out );
@@ -107,7 +108,7 @@ public:
create_directories( outdir );
string sns = db + ".system.namespaces";
-
+
auto_ptr<DBClientCursor> cursor = conn( true ).query( sns.c_str() , Query() , 0 , 0 , 0 , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
while ( cursor->more() ) {
BSONObj obj = cursor->next();
@@ -125,9 +126,9 @@ public:
}
}
-
- int run(){
-
+
+ int run() {
+
{
string q = getParam("query");
if ( q.size() )
@@ -137,20 +138,21 @@ public:
string opLogName = "";
unsigned long long opLogStart = 0;
if (hasParam("oplog")) {
- if (hasParam("query") || hasParam("db") || hasParam("collection")){
+ if (hasParam("query") || hasParam("db") || hasParam("collection")) {
cout << "oplog mode is only supported on full dumps" << endl;
return -1;
}
-
+
BSONObj isMaster;
conn("true").simpleCommand("admin", &isMaster, "isMaster");
if (isMaster.hasField("hosts")) { // if connected to replica set member
opLogName = "local.oplog.rs";
- } else {
+ }
+ else {
opLogName = "local.oplog.$main";
- if ( ! isMaster["ismaster"].trueValue() ){
+ if ( ! isMaster["ismaster"].trueValue() ) {
cout << "oplog mode is only supported on master or replica set member" << endl;
return -1;
}
@@ -161,12 +163,12 @@ public:
cout << "No operations in oplog. Please ensure you are connecting to a master." << endl;
return -1;
}
-
+
assert(op["ts"].type() == Timestamp);
opLogStart = op["ts"]._numberLong();
}
-
+
// check if we're outputting to stdout
string out = getParam("out");
@@ -184,7 +186,7 @@ public:
path root( out );
string db = _db;
- if ( db == "*" ){
+ if ( db == "*" ) {
cout << "all dbs" << endl;
auth( "admin" );
@@ -209,7 +211,7 @@ public:
go( db , root / db );
}
- if (!opLogName.empty()){
+ if (!opLogName.empty()) {
BSONObjBuilder b;
b.appendTimestamp("$gt", opLogStart);
diff --git a/tools/export.cpp b/tools/export.cpp
index f6fd3bed4fd..b1b0f5f1ac9 100644
--- a/tools/export.cpp
+++ b/tools/export.cpp
@@ -33,34 +33,34 @@ namespace po = boost::program_options;
class Export : public Tool {
public:
- Export() : Tool( "export" ){
+ Export() : Tool( "export" ) {
addFieldOptions();
add_options()
- ("query,q" , po::value<string>() , "query filter, as a JSON string" )
- ("csv","export to csv instead of json")
- ("out,o", po::value<string>(), "output file; if not specified, stdout is used")
- ("jsonArray", "output to a json array rather than one object per line")
- ;
+ ("query,q" , po::value<string>() , "query filter, as a JSON string" )
+ ("csv","export to csv instead of json")
+ ("out,o", po::value<string>(), "output file; if not specified, stdout is used")
+ ("jsonArray", "output to a json array rather than one object per line")
+ ;
_usesstdout = false;
}
-
- int run(){
+
+ int run() {
string ns;
const bool csv = hasParam( "csv" );
const bool jsonArray = hasParam( "jsonArray" );
ostream *outPtr = &cout;
string outfile = getParam( "out" );
auto_ptr<ofstream> fileStream;
- if ( hasParam( "out" ) ){
+ if ( hasParam( "out" ) ) {
size_t idx = outfile.rfind( "/" );
- if ( idx != string::npos ){
+ if ( idx != string::npos ) {
string dir = outfile.substr( 0 , idx + 1 );
create_directories( dir );
}
ofstream * s = new ofstream( outfile.c_str() , ios_base::out );
fileStream.reset( s );
outPtr = s;
- if ( ! s->good() ){
+ if ( ! s->good() ) {
cerr << "couldn't open [" << outfile << "]" << endl;
return -1;
}
@@ -72,20 +72,21 @@ public:
try {
ns = getNS();
- } catch (...) {
+ }
+ catch (...) {
printHelp(cerr);
return 1;
}
auth();
- if ( hasParam( "fields" ) || csv ){
+ if ( hasParam( "fields" ) || csv ) {
needFields();
fieldsToReturn = &_fieldsObj;
}
- if ( csv && _fields.size() == 0 ){
+ if ( csv && _fields.size() == 0 ) {
cerr << "csv mode requires a field list" << endl;
return -1;
}
@@ -96,15 +97,15 @@ public:
auto_ptr<DBClientCursor> cursor = conn().query( ns.c_str() , q , 0 , 0 , fieldsToReturn , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
- if ( csv ){
- for ( vector<string>::iterator i=_fields.begin(); i != _fields.end(); i++ ){
+ if ( csv ) {
+ for ( vector<string>::iterator i=_fields.begin(); i != _fields.end(); i++ ) {
if ( i != _fields.begin() )
out << ",";
out << *i;
}
out << endl;
}
-
+
if (jsonArray)
out << '[';
@@ -112,12 +113,12 @@ public:
while ( cursor->more() ) {
num++;
BSONObj obj = cursor->next();
- if ( csv ){
- for ( vector<string>::iterator i=_fields.begin(); i != _fields.end(); i++ ){
+ if ( csv ) {
+ for ( vector<string>::iterator i=_fields.begin(); i != _fields.end(); i++ ) {
if ( i != _fields.begin() )
out << ",";
const BSONElement & e = obj.getFieldDotted(i->c_str());
- if ( ! e.eoo() ){
+ if ( ! e.eoo() ) {
out << e.jsonString( Strict , false );
}
}
@@ -136,7 +137,7 @@ public:
if (jsonArray)
out << ']' << endl;
-
+
cerr << "exported " << num << " records" << endl;
return 0;
diff --git a/tools/files.cpp b/tools/files.cpp
index 0d43e08c924..b44e7fe3e62 100644
--- a/tools/files.cpp
+++ b/tools/files.cpp
@@ -33,21 +33,21 @@ namespace po = boost::program_options;
class Files : public Tool {
public:
- Files() : Tool( "files" ){
+ Files() : Tool( "files" ) {
add_options()
- ( "local,l", po::value<string>(), "local filename for put|get (default is to use the same name as 'gridfs filename')")
- ( "type,t", po::value<string>(), "MIME type for put (default is to omit)")
- ( "replace,r", "Remove other files with same name after PUT")
- ;
+ ( "local,l", po::value<string>(), "local filename for put|get (default is to use the same name as 'gridfs filename')")
+ ( "type,t", po::value<string>(), "MIME type for put (default is to omit)")
+ ( "replace,r", "Remove other files with same name after PUT")
+ ;
add_hidden_options()
- ( "command" , po::value<string>() , "command (list|search|put|get)" )
- ( "file" , po::value<string>() , "filename for get|put" )
- ;
+ ( "command" , po::value<string>() , "command (list|search|put|get)" )
+ ( "file" , po::value<string>() , "filename for get|put" )
+ ;
addPositionArg( "command" , 1 );
addPositionArg( "file" , 2 );
}
- virtual void printExtraHelp( ostream & out ){
+ virtual void printExtraHelp( ostream & out ) {
out << "usage: " << _name << " [options] command [gridfs filename]" << endl;
out << "command:" << endl;
out << " one of (list|search|put|get)" << endl;
@@ -60,20 +60,20 @@ public:
out << " delete - delete all files with filename 'gridfs filename'" << endl;
}
- void display( GridFS * grid , BSONObj obj ){
+ void display( GridFS * grid , BSONObj obj ) {
auto_ptr<DBClientCursor> c = grid->list( obj );
- while ( c->more() ){
+ while ( c->more() ) {
BSONObj obj = c->next();
cout
- << obj["filename"].str() << "\t"
- << (long)obj["length"].number()
- << endl;
+ << obj["filename"].str() << "\t"
+ << (long)obj["length"].number()
+ << endl;
}
}
- int run(){
+ int run() {
string cmd = getParam( "command" );
- if ( cmd.size() == 0 ){
+ if ( cmd.size() == 0 ) {
cerr << "ERROR: need command" << endl << endl;
printHelp(cout);
return -1;
@@ -84,7 +84,7 @@ public:
string filename = getParam( "file" );
- if ( cmd == "list" ){
+ if ( cmd == "list" ) {
BSONObjBuilder b;
if ( filename.size() )
b.appendRegex( "filename" , ( (string)"^" + filename ) );
@@ -92,22 +92,22 @@ public:
return 0;
}
- if ( filename.size() == 0 ){
+ if ( filename.size() == 0 ) {
cerr << "ERROR: need a filename" << endl << endl;
printHelp(cout);
return -1;
}
- if ( cmd == "search" ){
+ if ( cmd == "search" ) {
BSONObjBuilder b;
b.appendRegex( "filename" , filename );
display( &g , b.obj() );
return 0;
}
- if ( cmd == "get" ){
+ if ( cmd == "get" ) {
GridFile f = g.findFile( filename );
- if ( ! f.exists() ){
+ if ( ! f.exists() ) {
cerr << "ERROR: file not found" << endl;
return -2;
}
@@ -121,16 +121,16 @@ public:
return 0;
}
- if ( cmd == "put" ){
+ if ( cmd == "put" ) {
const string& infile = getParam("local", filename);
const string& type = getParam("type", "");
BSONObj file = g.storeFile(infile, filename, type);
cout << "added file: " << file << endl;
- if (hasParam("replace")){
+ if (hasParam("replace")) {
auto_ptr<DBClientCursor> cursor = conn().query(_db+".fs.files", BSON("filename" << filename << "_id" << NE << file["_id"] ));
- while (cursor->more()){
+ while (cursor->more()) {
BSONObj o = cursor->nextSafe();
conn().remove(_db+".fs.files", BSON("_id" << o["_id"]));
conn().remove(_db+".fs.chunks", BSON("_id" << o["_id"]));
@@ -144,7 +144,7 @@ public:
return 0;
}
- if ( cmd == "delete" ){
+ if ( cmd == "delete" ) {
g.removeFile(filename);
conn().getLastError();
cout << "done!" << endl;
diff --git a/tools/import.cpp b/tools/import.cpp
index 0d7da9b8101..a3d9166ccf7 100644
--- a/tools/import.cpp
+++ b/tools/import.cpp
@@ -33,7 +33,7 @@ using namespace mongo;
namespace po = boost::program_options;
class Import : public Tool {
-
+
enum Type { JSON , CSV , TSV };
Type _type;
@@ -44,36 +44,36 @@ class Import : public Tool {
bool _doimport;
bool _jsonArray;
vector<string> _upsertFields;
-
- void _append( BSONObjBuilder& b , const string& fieldName , const string& data ){
+
+ void _append( BSONObjBuilder& b , const string& fieldName , const string& data ) {
if ( b.appendAsNumber( fieldName , data ) )
return;
-
+
if ( _ignoreBlanks && data.size() == 0 )
return;
// TODO: other types?
b.append( fieldName , data );
}
-
- BSONObj parseLine( char * line ){
+
+ BSONObj parseLine( char * line ) {
uassert(13289, "Invalid UTF8 character detected", isValidUTF8(line));
- if ( _type == JSON ){
+ if ( _type == JSON ) {
char * end = ( line + strlen( line ) ) - 1;
- while ( isspace(*end) ){
+ while ( isspace(*end) ) {
*end = 0;
end--;
}
return fromjson( line );
}
-
+
BSONObjBuilder b;
unsigned int pos=0;
- while ( line[0] ){
+ while ( line[0] ) {
string name;
- if ( pos < _fields.size() ){
+ if ( pos < _fields.size() ) {
name = _fields[pos];
}
else {
@@ -82,76 +82,81 @@ class Import : public Tool {
name = ss.str();
}
pos++;
-
+
bool done = false;
string data;
char * end;
- if ( _type == CSV && line[0] == '"' ){
+ if ( _type == CSV && line[0] == '"' ) {
line++; //skip first '"'
while (true) {
end = strchr( line , '"' );
- if (!end){
+ if (!end) {
data += line;
done = true;
break;
- } else if (end[1] == '"') {
+ }
+ else if (end[1] == '"') {
// two '"'s get appended as one
data.append(line, end-line+1); //include '"'
line = end+2; //skip both '"'s
- } else if (end[-1] == '\\') {
+ }
+ else if (end[-1] == '\\') {
// "\\\"" gets appended as '"'
data.append(line, end-line-1); //exclude '\\'
data.append("\"");
line = end+1; //skip the '"'
- } else {
+ }
+ else {
data.append(line, end-line);
line = end+2; //skip '"' and ','
break;
}
}
- } else {
+ }
+ else {
end = strstr( line , _sep );
- if ( ! end ){
+ if ( ! end ) {
done = true;
data = string( line );
- } else {
+ }
+ else {
data = string( line , end - line );
line = end+1;
}
}
- if ( _headerLine ){
+ if ( _headerLine ) {
while ( isspace( data[0] ) )
data = data.substr( 1 );
_fields.push_back( data );
}
else
_append( b , name , data );
-
+
if ( done )
break;
}
return b.obj();
}
-
+
public:
- Import() : Tool( "import" ){
+ Import() : Tool( "import" ) {
addFieldOptions();
add_options()
- ("ignoreBlanks","if given, empty fields in csv and tsv will be ignored")
- ("type",po::value<string>() , "type of file to import. default: json (json,csv,tsv)")
- ("file",po::value<string>() , "file to import from; if not specified stdin is used" )
- ("drop", "drop collection first " )
- ("headerline","CSV,TSV only - use first line as headers")
- ("upsert", "insert or update objects that already exist" )
- ("upsertFields", po::value<string>(), "comma-separated fields for the query part of the upsert. You should make sure this is indexed" )
- ("stopOnError", "stop importing at first error rather than continuing" )
- ("jsonArray", "load a json array, not one item per line. Currently limited to 4MB." )
- ;
+ ("ignoreBlanks","if given, empty fields in csv and tsv will be ignored")
+ ("type",po::value<string>() , "type of file to import. default: json (json,csv,tsv)")
+ ("file",po::value<string>() , "file to import from; if not specified stdin is used" )
+ ("drop", "drop collection first " )
+ ("headerline","CSV,TSV only - use first line as headers")
+ ("upsert", "insert or update objects that already exist" )
+ ("upsertFields", po::value<string>(), "comma-separated fields for the query part of the upsert. You should make sure this is indexed" )
+ ("stopOnError", "stop importing at first error rather than continuing" )
+ ("jsonArray", "load a json array, not one item per line. Currently limited to 4MB." )
+ ;
add_hidden_options()
- ("noimport", "don't actually import. useful for benchmarking parser" )
- ;
+ ("noimport", "don't actually import. useful for benchmarking parser" )
+ ;
addPositionArg( "file" , 1 );
_type = JSON;
_ignoreBlanks = false;
@@ -160,8 +165,8 @@ public:
_doimport = true;
_jsonArray = false;
}
-
- int run(){
+
+ int run() {
string filename = getParam( "file" );
long long fileSize = -1;
@@ -169,8 +174,8 @@ public:
ifstream file( filename.c_str() , ios_base::in);
- if ( filename.size() > 0 && filename != "-" ){
- if ( ! exists( filename ) ){
+ if ( filename.size() > 0 && filename != "-" ) {
+ if ( ! exists( filename ) ) {
cerr << "file doesn't exist: " << filename << endl;
return -1;
}
@@ -182,53 +187,55 @@ public:
if (!isMaster()) {
return -1;
}
-
+
string ns;
try {
ns = getNS();
- } catch (...) {
+ }
+ catch (...) {
printHelp(cerr);
return -1;
}
-
+
log(1) << "ns: " << ns << endl;
-
+
auth();
- if ( hasParam( "drop" ) ){
+ if ( hasParam( "drop" ) ) {
cout << "dropping: " << ns << endl;
conn().dropCollection( ns.c_str() );
}
- if ( hasParam( "ignoreBlanks" ) ){
+ if ( hasParam( "ignoreBlanks" ) ) {
_ignoreBlanks = true;
}
- if ( hasParam( "upsert" ) || hasParam( "upsertFields" )){
+ if ( hasParam( "upsert" ) || hasParam( "upsertFields" )) {
_upsert = true;
string uf = getParam("upsertFields");
- if (uf.empty()){
+ if (uf.empty()) {
_upsertFields.push_back("_id");
- } else {
+ }
+ else {
StringSplitter(uf.c_str(), ",").split(_upsertFields);
}
}
- if ( hasParam( "noimport" ) ){
+ if ( hasParam( "noimport" ) ) {
_doimport = false;
}
- if ( hasParam( "type" ) ){
+ if ( hasParam( "type" ) ) {
string type = getParam( "type" );
if ( type == "json" )
_type = JSON;
- else if ( type == "csv" ){
+ else if ( type == "csv" ) {
_type = CSV;
_sep = ",";
}
- else if ( type == "tsv" ){
+ else if ( type == "tsv" ) {
_type = TSV;
_sep = "\t";
}
@@ -237,21 +244,21 @@ public:
return -1;
}
}
-
- if ( _type == CSV || _type == TSV ){
+
+ if ( _type == CSV || _type == TSV ) {
_headerLine = hasParam( "headerline" );
if ( ! _headerLine )
needFields();
}
- if (_type == JSON && hasParam("jsonArray")){
+ if (_type == JSON && hasParam("jsonArray")) {
_jsonArray = true;
}
int errors = 0;
-
+
int num = 0;
-
+
time_t start = time(0);
log(1) << "filesize: " << fileSize << endl;
@@ -259,37 +266,39 @@ public:
const int BUF_SIZE = 1024 * 1024 * 4;
boost::scoped_array<char> line(new char[BUF_SIZE+2]);
char * buf = line.get();
- while ( _jsonArray || in->rdstate() == 0 ){
- if (_jsonArray){
- if (buf == line.get()){ //first pass
+ while ( _jsonArray || in->rdstate() == 0 ) {
+ if (_jsonArray) {
+ if (buf == line.get()) { //first pass
in->read(buf, BUF_SIZE);
uassert(13295, "JSONArray file too large", (in->rdstate() & ios_base::eofbit));
buf[ in->gcount() ] = '\0';
}
- } else {
+ }
+ else {
buf = line.get();
in->getline( buf , BUF_SIZE );
log(1) << "got line:" << buf << endl;
}
uassert( 10263 , "unknown error reading file" ,
- (!(in->rdstate() & ios_base::badbit)) &&
- (!(in->rdstate() & ios_base::failbit) || (in->rdstate() & ios_base::eofbit)) );
+ (!(in->rdstate() & ios_base::badbit)) &&
+ (!(in->rdstate() & ios_base::failbit) || (in->rdstate() & ios_base::eofbit)) );
int len = 0;
- if (strncmp("\xEF\xBB\xBF", buf, 3) == 0){ // UTF-8 BOM (notepad is stupid)
+ if (strncmp("\xEF\xBB\xBF", buf, 3) == 0) { // UTF-8 BOM (notepad is stupid)
buf += 3;
len += 3;
}
- if (_jsonArray){
+ if (_jsonArray) {
while (buf[0] != '{' && buf[0] != '\0') {
len++;
buf++;
}
if (buf[0] == '\0')
break;
- } else {
- while (isspace( buf[0] )){
+ }
+ else {
+ while (isspace( buf[0] )) {
len++;
buf++;
}
@@ -300,24 +309,26 @@ public:
try {
BSONObj o;
- if (_jsonArray){
+ if (_jsonArray) {
int jslen;
o = fromjson(buf, &jslen);
len += jslen;
buf += jslen;
- } else {
+ }
+ else {
o = parseLine( buf );
}
- if ( _headerLine ){
+ if ( _headerLine ) {
_headerLine = false;
- } else if (_doimport) {
+ }
+ else if (_doimport) {
bool doUpsert = _upsert;
BSONObjBuilder b;
- if (_upsert){
- for (vector<string>::const_iterator it=_upsertFields.begin(), end=_upsertFields.end(); it!=end; ++it){
+ if (_upsert) {
+ for (vector<string>::const_iterator it=_upsertFields.begin(), end=_upsertFields.end(); it!=end; ++it) {
BSONElement e = o.getFieldDotted(it->c_str());
- if (e.eoo()){
+ if (e.eoo()) {
doUpsert = false;
break;
}
@@ -325,25 +336,26 @@ public:
}
}
- if (doUpsert){
+ if (doUpsert) {
conn().update(ns, Query(b.obj()), o, true);
- } else {
+ }
+ else {
conn().insert( ns.c_str() , o );
}
}
num++;
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
cout << "exception:" << e.what() << endl;
cout << buf << endl;
errors++;
-
+
if (hasParam("stopOnError") || _jsonArray)
break;
}
- if ( pm.hit( len + 1 ) ){
+ if ( pm.hit( len + 1 ) ) {
cout << "\t\t\t" << num << "\t" << ( num / ( time(0) - start ) ) << "/second" << endl;
}
}
@@ -351,10 +363,10 @@ public:
cout << "imported " << num << " objects" << endl;
conn().getLastError();
-
+
if ( errors == 0 )
return 0;
-
+
cerr << "encountered " << errors << " error" << ( errors == 1 ? "" : "s" ) << endl;
return -1;
}
diff --git a/tools/restore.cpp b/tools/restore.cpp
index 368efeb0890..9a18c00e93a 100644
--- a/tools/restore.cpp
+++ b/tools/restore.cpp
@@ -36,20 +36,20 @@ namespace {
class Restore : public BSONTool {
public:
-
+
bool _drop;
string _curns;
string _curdb;
- Restore() : BSONTool( "restore" ) , _drop(false){
+ Restore() : BSONTool( "restore" ) , _drop(false) {
add_options()
- ("drop" , "drop each collection before import" )
- ("oplogReplay" , "replay oplog for point-in-time restore")
- ;
+ ("drop" , "drop each collection before import" )
+ ("oplogReplay" , "replay oplog for point-in-time restore")
+ ;
add_hidden_options()
- ("dir", po::value<string>()->default_value("dump"), "directory to restore from")
- ("indexesLast" , "wait to add indexes (now default)") // left in for backwards compatibility
- ;
+ ("dir", po::value<string>()->default_value("dump"), "directory to restore from")
+ ("indexesLast" , "wait to add indexes (now default)") // left in for backwards compatibility
+ ;
addPositionArg("dir", 1);
}
@@ -57,7 +57,7 @@ public:
out << "usage: " << _name << " [options] [directory or filename to restore from]" << endl;
}
- virtual int doRun(){
+ virtual int doRun() {
auth();
path root = getParam("dir");
@@ -65,32 +65,32 @@ public:
if (!isMaster()) {
return -1;
}
-
+
_drop = hasParam( "drop" );
bool doOplog = hasParam( "oplogReplay" );
- if (doOplog){
+ if (doOplog) {
// fail early if errors
- if (_db != ""){
+ if (_db != "") {
cout << "Can only replay oplog on full restore" << endl;
return -1;
}
- if ( ! exists(root / "oplog.bson") ){
+ if ( ! exists(root / "oplog.bson") ) {
cout << "No oplog file to replay. Make sure you run mongodump with --oplog." << endl;
return -1;
}
BSONObj out;
- if (! conn().simpleCommand("admin", &out, "buildinfo")){
+ if (! conn().simpleCommand("admin", &out, "buildinfo")) {
cout << "buildinfo command failed: " << out["errmsg"].String() << endl;
return -1;
}
StringData version = out["version"].valuestr();
- if (versionCmp(version, "1.7.4-pre-") < 0){
+ if (versionCmp(version, "1.7.4-pre-") < 0) {
cout << "Can only replay oplog to server version >= 1.7.4" << endl;
return -1;
}
@@ -108,7 +108,7 @@ public:
drillDown(root, _db != "", _coll != "", true);
conn().getLastError();
- if (doOplog){
+ if (doOplog) {
out() << "\t Replaying oplog" << endl;
_curns = OPLOG_SENTINEL;
processFile( root / "oplog.bson" );
@@ -174,7 +174,7 @@ public:
log() << root.string() << endl;
- if ( root.leaf() == "system.profile.bson" ){
+ if ( root.leaf() == "system.profile.bson" ) {
log() << "\t skipping" << endl;
return;
}
@@ -182,23 +182,24 @@ public:
string ns;
if (use_db) {
ns += _db;
- }
+ }
else {
string dir = root.branch_path().string();
if ( dir.find( "/" ) == string::npos )
ns += dir;
else
ns += dir.substr( dir.find_last_of( "/" ) + 1 );
-
+
if ( ns.size() == 0 )
ns = "test";
}
-
+
assert( ns.size() );
if (use_coll) {
ns += "." + _coll;
- } else {
+ }
+ else {
string l = root.leaf();
l = l.substr( 0 , l.find_last_of( "." ) );
ns += "." + l;
@@ -206,17 +207,17 @@ public:
out() << "\t going into namespace [" << ns << "]" << endl;
- if ( _drop ){
+ if ( _drop ) {
out() << "\t dropping" << endl;
conn().dropCollection( ns );
}
-
+
_curns = ns.c_str();
_curdb = NamespaceString(_curns).db;
processFile( root );
}
- virtual void gotObject( const BSONObj& obj ){
+ virtual void gotObject( const BSONObj& obj ) {
if (_curns == OPLOG_SENTINEL) { // intentional ptr compare
if (obj["op"].valuestr()[0] == 'n') // skip no-ops
return;
@@ -227,7 +228,7 @@ public:
BSONObj cmd = BSON( "applyOps" << BSON_ARRAY( obj ) );
BSONObj out;
conn().runCommand(db, cmd, out);
- }
+ }
else if ( endsWith( _curns.c_str() , ".system.indexes" )) {
/* Index construction is slightly special: when restoring
indexes, we must ensure that the ns attribute is
@@ -237,13 +238,14 @@ public:
data. */
BSONObjBuilder bo;
BSONObjIterator i(obj);
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
if (strcmp(e.fieldName(), "ns") == 0) {
NamespaceString n(e.String());
string s = _curdb + "." + n.coll;
bo.append("ns", s);
- } else {
+ }
+ else {
bo.append(e);
}
}
@@ -257,13 +259,13 @@ public:
cerr << "To resume index restoration, run " << _name << " on file" << _fileName << " manually." << endl;
abort();
}
- }
+ }
else {
conn().insert( _curns , obj );
}
}
-
+
};
int main( int argc , char ** argv ) {
diff --git a/tools/sniffer.cpp b/tools/sniffer.cpp
index 52b2eba9bd3..0422f87399e 100644
--- a/tools/sniffer.cpp
+++ b/tools/sniffer.cpp
@@ -157,11 +157,11 @@ map< Connection, map< long long, long long > > mapCursor;
void processMessage( Connection& c , Message& d );
-void got_packet(u_char *args, const struct pcap_pkthdr *header, const u_char *packet){
+void got_packet(u_char *args, const struct pcap_pkthdr *header, const u_char *packet) {
const struct sniff_ip* ip = (struct sniff_ip*)(packet + captureHeaderSize);
int size_ip = IP_HL(ip)*4;
- if ( size_ip < 20 ){
+ if ( size_ip < 20 ) {
cerr << "*** Invalid IP header length: " << size_ip << " bytes" << endl;
return;
}
@@ -170,13 +170,13 @@ void got_packet(u_char *args, const struct pcap_pkthdr *header, const u_char *pa
const struct sniff_tcp* tcp = (struct sniff_tcp*)(packet + captureHeaderSize + size_ip);
int size_tcp = TH_OFF(tcp)*4;
- if (size_tcp < 20){
+ if (size_tcp < 20) {
cerr << "*** Invalid TCP header length: " << size_tcp << " bytes" << endl;
return;
}
if ( ! ( serverPorts.count( ntohs( tcp->th_sport ) ) ||
- serverPorts.count( ntohs( tcp->th_dport ) ) ) ){
+ serverPorts.count( ntohs( tcp->th_dport ) ) ) ) {
return;
}
@@ -199,7 +199,8 @@ void got_packet(u_char *args, const struct pcap_pkthdr *header, const u_char *pa
if ( expectedSeq[ c ] != ntohl( tcp->th_seq ) ) {
cerr << "Warning: sequence # mismatch, there may be dropped packets" << endl;
}
- } else {
+ }
+ else {
seen[ c ] = true;
}
@@ -223,7 +224,8 @@ void got_packet(u_char *args, const struct pcap_pkthdr *header, const u_char *pa
messageBuilder[ c ]->appendBuf( (void*)payload, size_payload );
return;
}
- } else {
+ }
+ else {
bytesRemainingInMessage[ c ] -= size_payload;
messageBuilder[ c ]->appendBuf( (void*)payload, size_payload );
if ( bytesRemainingInMessage[ c ] < 0 ) {
@@ -264,70 +266,71 @@ public:
}
};
-void processMessage( Connection& c , Message& m ){
+void processMessage( Connection& c , Message& m ) {
AuditingDbMessage d(m);
-
+
if ( m.operation() == mongo::opReply )
out() << " - " << (unsigned)m.header()->responseTo;
out() << endl;
try {
- switch( m.operation() ){
- case mongo::opReply:{
- mongo::QueryResult* r = (mongo::QueryResult*)m.singleData();
- out() << "\treply" << " n:" << r->nReturned << " cursorId: " << r->cursorId << endl;
- if ( r->nReturned ){
- mongo::BSONObj o( r->data() , 0 );
- out() << "\t" << o << endl;
- }
- break;
- }
- case mongo::dbQuery:{
- mongo::QueryMessage q(d);
- out() << "\tquery: " << q.query << " ntoreturn: " << q.ntoreturn << " ntoskip: " << q.ntoskip << endl;
- break;
+ switch( m.operation() ) {
+ case mongo::opReply: {
+ mongo::QueryResult* r = (mongo::QueryResult*)m.singleData();
+ out() << "\treply" << " n:" << r->nReturned << " cursorId: " << r->cursorId << endl;
+ if ( r->nReturned ) {
+ mongo::BSONObj o( r->data() , 0 );
+ out() << "\t" << o << endl;
}
- case mongo::dbUpdate:{
- int flags = d.pullInt();
- BSONObj q = d.nextJsObj( "update" );
- BSONObj o = d.nextJsObj( "update" );
- out() << "\tupdate flags:" << flags << " q:" << q << " o:" << o << endl;
- break;
- }
- case mongo::dbInsert:{
- out() << "\tinsert: " << d.nextJsObj( "insert" ) << endl;
- while ( d.moreJSObjs() ) {
- out() << "\t\t" << d.nextJsObj( "insert" ) << endl;
- }
- break;
- }
- case mongo::dbGetMore:{
- int nToReturn = d.pullInt();
- long long cursorId = d.pullInt64();
- out() << "\tgetMore nToReturn: " << nToReturn << " cursorId: " << cursorId << endl;
- break;
- }
- case mongo::dbDelete:{
- int flags = d.pullInt();
- BSONObj q = d.nextJsObj( "delete" );
- out() << "\tdelete flags: " << flags << " q: " << q << endl;
- break;
- }
- case mongo::dbKillCursors:{
- int *x = (int *) m.singleData()->_data;
- x++; // reserved
- int n = *x;
- out() << "\tkillCursors n: " << n << endl;
- break;
+ break;
+ }
+ case mongo::dbQuery: {
+ mongo::QueryMessage q(d);
+ out() << "\tquery: " << q.query << " ntoreturn: " << q.ntoreturn << " ntoskip: " << q.ntoskip << endl;
+ break;
+ }
+ case mongo::dbUpdate: {
+ int flags = d.pullInt();
+ BSONObj q = d.nextJsObj( "update" );
+ BSONObj o = d.nextJsObj( "update" );
+ out() << "\tupdate flags:" << flags << " q:" << q << " o:" << o << endl;
+ break;
+ }
+ case mongo::dbInsert: {
+ out() << "\tinsert: " << d.nextJsObj( "insert" ) << endl;
+ while ( d.moreJSObjs() ) {
+ out() << "\t\t" << d.nextJsObj( "insert" ) << endl;
}
- default:
- cerr << "*** CANNOT HANDLE TYPE: " << m.operation() << endl;
+ break;
+ }
+ case mongo::dbGetMore: {
+ int nToReturn = d.pullInt();
+ long long cursorId = d.pullInt64();
+ out() << "\tgetMore nToReturn: " << nToReturn << " cursorId: " << cursorId << endl;
+ break;
+ }
+ case mongo::dbDelete: {
+ int flags = d.pullInt();
+ BSONObj q = d.nextJsObj( "delete" );
+ out() << "\tdelete flags: " << flags << " q: " << q << endl;
+ break;
}
- } catch ( ... ) {
+ case mongo::dbKillCursors: {
+ int *x = (int *) m.singleData()->_data;
+ x++; // reserved
+ int n = *x;
+ out() << "\tkillCursors n: " << n << endl;
+ break;
+ }
+ default:
+ cerr << "*** CANNOT HANDLE TYPE: " << m.operation() << endl;
+ }
+ }
+ catch ( ... ) {
cerr << "Error parsing message for operation: " << m.operation() << endl;
}
-
-
+
+
if ( !forwardAddress.empty() ) {
if ( m.operation() != mongo::opReply ) {
boost::shared_ptr<DBClientConnection> conn = forwarder[ c ];
@@ -353,10 +356,12 @@ void processMessage( Connection& c , Message& m ){
}
}
lastCursor[ c ] = 0;
- } else {
+ }
+ else {
conn->port().say( m );
}
- } else {
+ }
+ else {
Connection r = c.reverse();
long long myCursor = lastCursor[ r ];
QueryResult *qr = (QueryResult *) m.singleData();
@@ -375,7 +380,7 @@ void processMessage( Connection& c , Message& m ){
}
}
-void processDiagLog( const char * file ){
+void processDiagLog( const char * file ) {
Connection c;
MemoryMappedFile f;
long length;
@@ -385,45 +390,45 @@ void processDiagLog( const char * file ){
length = (long) L;
assert( root );
assert( length > 0 );
-
+
char * pos = root;
long read = 0;
- while ( read < length ){
+ while ( read < length ) {
Message m(pos,false);
int len = m.header()->len;
DbMessage d(m);
cout << len << " " << d.getns() << endl;
-
+
processMessage( c , m );
read += len;
pos += len;
}
-
+
f.close();
}
void usage() {
cout <<
- "Usage: mongosniff [--help] [--forward host:port] [--source (NET <interface> | (FILE | DIAGLOG) <filename>)] [<port0> <port1> ... ]\n"
- "--forward Forward all parsed request messages to mongod instance at \n"
- " specified host:port\n"
- "--source Source of traffic to sniff, either a network interface or a\n"
- " file containing previously captured packets in pcap format,\n"
- " or a file containing output from mongod's --diaglog option.\n"
- " If no source is specified, mongosniff will attempt to sniff\n"
- " from one of the machine's network interfaces.\n"
- "--objcheck Log hex representation of invalid BSON objects and nothing\n"
- " else. Spurious messages about invalid objects may result\n"
- " when there are dropped tcp packets.\n"
- "<port0>... These parameters are used to filter sniffing. By default, \n"
- " only port 27017 is sniffed.\n"
- "--help Print this help message.\n"
- << endl;
+ "Usage: mongosniff [--help] [--forward host:port] [--source (NET <interface> | (FILE | DIAGLOG) <filename>)] [<port0> <port1> ... ]\n"
+ "--forward Forward all parsed request messages to mongod instance at \n"
+ " specified host:port\n"
+ "--source Source of traffic to sniff, either a network interface or a\n"
+ " file containing previously captured packets in pcap format,\n"
+ " or a file containing output from mongod's --diaglog option.\n"
+ " If no source is specified, mongosniff will attempt to sniff\n"
+ " from one of the machine's network interfaces.\n"
+ "--objcheck Log hex representation of invalid BSON objects and nothing\n"
+ " else. Spurious messages about invalid objects may result\n"
+ " when there are dropped tcp packets.\n"
+ "<port0>... These parameters are used to filter sniffing. By default, \n"
+ " only port 27017 is sniffed.\n"
+ "--help Print this help message.\n"
+ << endl;
}
-int main(int argc, char **argv){
+int main(int argc, char **argv) {
stringstream nullStream;
nullStream.clear(ios::failbit);
@@ -435,7 +440,7 @@ int main(int argc, char **argv){
struct bpf_program fp;
bpf_u_int32 mask;
bpf_u_int32 net;
-
+
bool source = false;
bool replay = false;
bool diaglog = false;
@@ -451,10 +456,10 @@ int main(int argc, char **argv){
if ( arg == string( "--help" ) ) {
usage();
return 0;
- }
+ }
else if ( arg == string( "--forward" ) ) {
forwardAddress = args[ ++i ];
- }
+ }
else if ( arg == string( "--source" ) ) {
uassert( 10266 , "can't use --source twice" , source == false );
uassert( 10267 , "source needs more args" , args.size() > i + 2);
@@ -474,21 +479,22 @@ int main(int argc, char **argv){
serverPorts.insert( atoi( args[ i ] ) );
}
}
- } catch ( ... ) {
+ }
+ catch ( ... ) {
usage();
return -1;
}
if ( !serverPorts.size() )
serverPorts.insert( 27017 );
-
- if ( diaglog ){
+
+ if ( diaglog ) {
processDiagLog( file );
return 0;
}
- else if ( replay ){
+ else if ( replay ) {
handle = pcap_open_offline(file, errbuf);
- if ( ! handle ){
+ if ( ! handle ) {
cerr << "error opening capture file!" << endl;
return -1;
}
@@ -502,18 +508,18 @@ int main(int argc, char **argv){
}
cout << "found device: " << dev << endl;
}
- if (pcap_lookupnet(dev, &net, &mask, errbuf) == -1){
+ if (pcap_lookupnet(dev, &net, &mask, errbuf) == -1) {
cerr << "can't get netmask: " << errbuf << endl;
return -1;
}
handle = pcap_open_live(dev, SNAP_LEN, 1, 1000, errbuf);
- if ( ! handle ){
+ if ( ! handle ) {
cerr << "error opening device: " << errbuf << endl;
return -1;
}
- }
+ }
- switch ( pcap_datalink( handle ) ){
+ switch ( pcap_datalink( handle ) ) {
case DLT_EN10MB:
captureHeaderSize = 14;
break;
diff --git a/tools/stat.cpp b/tools/stat.cpp
index f0618a74bce..5d00e508ec3 100644
--- a/tools/stat.cpp
+++ b/tools/stat.cpp
@@ -32,37 +32,37 @@
namespace po = boost::program_options;
namespace mongo {
-
+
class Stat : public Tool {
public:
- Stat() : Tool( "stat" , REMOTE_SERVER , "admin" ){
+ Stat() : Tool( "stat" , REMOTE_SERVER , "admin" ) {
_sleep = 1;
_http = false;
_many = false;
-
+
add_hidden_options()
- ( "sleep" , po::value<int>() , "time to sleep between calls" )
- ;
+ ( "sleep" , po::value<int>() , "time to sleep between calls" )
+ ;
add_options()
- ("noheaders", "don't output column names")
- ("rowcount,n", po::value<int>()->default_value(0), "number of stats lines to print (0 for indefinite)")
- ("http", "use http instead of raw db connection")
- ("discover" , "discover nodes and display stats for all" )
- ("all" , "all optional fields" )
- ;
+ ("noheaders", "don't output column names")
+ ("rowcount,n", po::value<int>()->default_value(0), "number of stats lines to print (0 for indefinite)")
+ ("http", "use http instead of raw db connection")
+ ("discover" , "discover nodes and display stats for all" )
+ ("all" , "all optional fields" )
+ ;
addPositionArg( "sleep" , 1 );
_autoreconnect = true;
}
- virtual void printExtraHelp( ostream & out ){
+ virtual void printExtraHelp( ostream & out ) {
out << "usage: " << _name << " [options] [sleep time]" << endl;
out << "sleep time: time to wait (in seconds) between calls" << endl;
}
- virtual void printExtraHelpAfter( ostream & out ){
+ virtual void printExtraHelpAfter( ostream & out ) {
out << "\n";
out << " Fields\n";
out << " inserts \t- # of inserts per second\n";
@@ -85,12 +85,12 @@ namespace mongo {
out << " conn \t- number of open connections\n";
}
-
- BSONObj stats(){
- if ( _http ){
+
+ BSONObj stats() {
+ if ( _http ) {
HttpClient c;
HttpClient::Result r;
-
+
string url;
{
stringstream ss;
@@ -101,36 +101,36 @@ namespace mongo {
url = ss.str();
}
- if ( c.get( url , &r ) != 200 ){
+ if ( c.get( url , &r ) != 200 ) {
cout << "error (http): " << r.getEntireResponse() << endl;
return BSONObj();
}
-
+
BSONObj x = fromjson( r.getBody() );
BSONElement e = x["serverStatus"];
- if ( e.type() != Object ){
+ if ( e.type() != Object ) {
cout << "BROKEN: " << x << endl;
return BSONObj();
}
return e.embeddedObjectUserCheck();
}
BSONObj out;
- if ( ! conn().simpleCommand( _db , &out , "serverStatus" ) ){
+ if ( ! conn().simpleCommand( _db , &out , "serverStatus" ) ) {
cout << "error: " << out << endl;
return BSONObj();
}
return out.getOwned();
}
- double diff( const string& name , const BSONObj& a , const BSONObj& b ){
+ double diff( const string& name , const BSONObj& a , const BSONObj& b ) {
BSONElement x = a.getFieldDotted( name.c_str() );
BSONElement y = b.getFieldDotted( name.c_str() );
if ( ! x.isNumber() || ! y.isNumber() )
return -1;
return ( y.number() - x.number() ) / _sleep;
}
-
- double percent( const char * outof , const char * val , const BSONObj& a , const BSONObj& b ){
+
+ double percent( const char * outof , const char * val , const BSONObj& a , const BSONObj& b ) {
double x = ( b.getFieldDotted( val ).number() - a.getFieldDotted( val ).number() );
double y = ( b.getFieldDotted( outof ).number() - a.getFieldDotted( outof ).number() );
if ( y == 0 )
@@ -141,20 +141,20 @@ namespace mongo {
}
template<typename T>
- void _append( BSONObjBuilder& result , const string& name , unsigned width , const T& t ){
+ void _append( BSONObjBuilder& result , const string& name , unsigned width , const T& t ) {
if ( name.size() > width )
width = name.size();
result.append( name , BSON( "width" << (int)width << "data" << t ) );
}
-
- void _appendMem( BSONObjBuilder& result , const string& name , unsigned width , double sz ){
+
+ void _appendMem( BSONObjBuilder& result , const string& name , unsigned width , double sz ) {
string unit = "m";
- if ( sz > 1024 ){
+ if ( sz > 1024 ) {
unit = "g";
sz /= 1024;
}
- if ( sz > 1024 ){
+ if ( sz > 1024 ) {
string s = str::stream() << (int)sz << unit;
_append( result , name , width , s );
return;
@@ -164,24 +164,24 @@ namespace mongo {
ss << setprecision(3) << sz << unit;
_append( result , name , width , ss.str() );
}
-
- void _appendNet( BSONObjBuilder& result , const string& name , double diff ){
+
+ void _appendNet( BSONObjBuilder& result , const string& name , double diff ) {
// I think 1000 is correct for megabit, but I've seen conflicting things (ERH 11/2010)
const double div = 1000;
-
+
string unit = "b";
- if ( diff >= div ){
+ if ( diff >= div ) {
unit = "k";
diff /= div;
}
-
- if ( diff >= div ){
+
+ if ( diff >= div ) {
unit = "m";
diff /= div;
}
- if ( diff >= div ){
+ if ( diff >= div ) {
unit = "g";
diff /= div;
}
@@ -193,62 +193,62 @@ namespace mongo {
/**
* BSON( <field> -> BSON( width : ### , data : XXX ) )
*/
- BSONObj doRow( const BSONObj& a , const BSONObj& b ){
+ BSONObj doRow( const BSONObj& a , const BSONObj& b ) {
BSONObjBuilder result;
- if ( b["opcounters"].type() == Object ){
+ if ( b["opcounters"].type() == Object ) {
BSONObj ax = a["opcounters"].embeddedObject();
BSONObj bx = b["opcounters"].embeddedObject();
-
+
BSONObj ar = a["opcountersRepl"].isABSONObj() ? a["opcountersRepl"].embeddedObject() : BSONObj();
BSONObj br = b["opcountersRepl"].isABSONObj() ? b["opcountersRepl"].embeddedObject() : BSONObj();
-
+
BSONObjIterator i( bx );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
- if ( ar.isEmpty() || br.isEmpty() ){
+ if ( ar.isEmpty() || br.isEmpty() ) {
_append( result , e.fieldName() , 6 , (int)diff( e.fieldName() , ax , bx ) );
}
else {
string f = e.fieldName();
-
+
int m = (int)diff( f , ax , bx );
int r = (int)diff( f , ar , br );
-
+
string myout;
- if ( f == "command" ){
+ if ( f == "command" ) {
myout = str::stream() << m << "|" << r;
}
- else if ( f == "getmore" ){
+ else if ( f == "getmore" ) {
myout = str::stream() << m;
}
- else if ( m && r ){
+ else if ( m && r ) {
// this is weird...
myout = str::stream() << m << "|" << r;
}
- else if ( m ){
+ else if ( m ) {
myout = str::stream() << m;
}
- else if ( r ){
+ else if ( r ) {
myout = str::stream() << "*" << r;
}
else {
myout = "*0";
}
-
+
_append( result , f , 6 , myout );
}
}
}
-
- if ( b["backgroundFlushing"].type() == Object ){
+
+ if ( b["backgroundFlushing"].type() == Object ) {
BSONObj ax = a["backgroundFlushing"].embeddedObject();
BSONObj bx = b["backgroundFlushing"].embeddedObject();
_append( result , "flushes" , 6 , (int)diff( "flushes" , ax , bx ) );
}
- if ( b.getFieldDotted("mem.supported").trueValue() ){
+ if ( b.getFieldDotted("mem.supported").trueValue() ) {
BSONObj bx = b["mem"].embeddedObject();
BSONObjIterator i( bx );
_appendMem( result , "mapped" , 6 , bx["mapped"].numberInt() );
@@ -259,17 +259,17 @@ namespace mongo {
_appendMem( result , "non-mapped" , 6 , bx["virtual"].numberInt() - bx["mapped"].numberInt() );
}
- if ( b["extra_info"].type() == Object ){
+ if ( b["extra_info"].type() == Object ) {
BSONObj ax = a["extra_info"].embeddedObject();
BSONObj bx = b["extra_info"].embeddedObject();
if ( ax["page_faults"].type() || ax["page_faults"].type() )
_append( result , "faults" , 6 , (int)diff( "page_faults" , ax , bx ) );
}
-
+
_append( result , "locked %" , 8 , percent( "globalLock.totalTime" , "globalLock.lockTime" , a , b ) );
_append( result , "idx miss %" , 8 , percent( "indexCounters.btree.accesses" , "indexCounters.btree.misses" , a , b ) );
- if ( b.getFieldDotted( "globalLock.currentQueue" ).type() == Object ){
+ if ( b.getFieldDotted( "globalLock.currentQueue" ).type() == Object ) {
int r = b.getFieldDotted( "globalLock.currentQueue.readers" ).numberInt();
int w = b.getFieldDotted( "globalLock.currentQueue.writers" ).numberInt();
stringstream temp;
@@ -277,15 +277,15 @@ namespace mongo {
_append( result , "qr|qw" , 9 , temp.str() );
}
- if ( b.getFieldDotted( "globalLock.activeClients" ).type() == Object ){
+ if ( b.getFieldDotted( "globalLock.activeClients" ).type() == Object ) {
int r = b.getFieldDotted( "globalLock.activeClients.readers" ).numberInt();
int w = b.getFieldDotted( "globalLock.activeClients.writers" ).numberInt();
stringstream temp;
temp << r << "|" << w;
_append( result , "ar|aw" , 7 , temp.str() );
}
-
- if ( b["network"].isABSONObj() ){
+
+ if ( b["network"].isABSONObj() ) {
BSONObj ax = a["network"].embeddedObject();
BSONObj bx = b["network"].embeddedObject();
_appendNet( result , "netIn" , diff( "bytesIn" , ax , bx ) );
@@ -294,14 +294,14 @@ namespace mongo {
_append( result , "conn" , 5 , b.getFieldDotted( "connections.current" ).numberInt() );
- if ( b["repl"].type() == Object ){
+ if ( b["repl"].type() == Object ) {
BSONObj x = b["repl"].embeddedObject();
bool isReplSet = x["setName"].type() == String;
stringstream ss;
- if ( isReplSet ){
+ if ( isReplSet ) {
string setName = x["setName"].String();
_append( result , "set" , setName.size() , setName );
}
@@ -314,13 +314,13 @@ namespace mongo {
ss << "REC";
else if ( isReplSet )
ss << "UNK";
- else
+ else
ss << "SLV";
-
+
_append( result , "repl" , 4 , ss.str() );
-
+
}
- else if ( b["shardCursorType"].type() == Object ){
+ else if ( b["shardCursorType"].type() == Object ) {
// is a mongos
// TODO: should have a better check
_append( result , "repl" , 4 , "RTR" );
@@ -330,35 +330,35 @@ namespace mongo {
struct tm t;
time_t_to_Struct( time(0), &t , true );
stringstream temp;
- temp << setfill('0') << setw(2) << t.tm_hour
- << ":"
+ temp << setfill('0') << setw(2) << t.tm_hour
+ << ":"
<< setfill('0') << setw(2) << t.tm_min
- << ":"
+ << ":"
<< setfill('0') << setw(2) << t.tm_sec;
_append( result , "time" , 10 , temp.str() );
}
return result.obj();
}
-
- virtual void preSetup(){
- if ( hasParam( "http" ) ){
+
+ virtual void preSetup() {
+ if ( hasParam( "http" ) ) {
_http = true;
_noconnection = true;
}
- if ( hasParam( "host" ) &&
- getParam( "host" ).find( ',' ) != string::npos ){
+ if ( hasParam( "host" ) &&
+ getParam( "host" ).find( ',' ) != string::npos ) {
_noconnection = true;
_many = true;
}
- if ( hasParam( "discover" ) ){
+ if ( hasParam( "discover" ) ) {
_noconnection = true;
_many = true;
}
}
- int run(){
+ int run() {
_sleep = getParam( "sleep" , _sleep );
_all = hasParam( "all" );
if ( _many )
@@ -366,31 +366,31 @@ namespace mongo {
return runNormal();
}
- static void printHeaders( const BSONObj& o ){
+ static void printHeaders( const BSONObj& o ) {
BSONObjIterator i(o);
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
BSONObj x = e.Obj();
cout << setw( x["width"].numberInt() ) << e.fieldName() << ' ';
}
- cout << endl;
+ cout << endl;
}
- static void printData( const BSONObj& o , const BSONObj& headers ){
-
+ static void printData( const BSONObj& o , const BSONObj& headers ) {
+
BSONObjIterator i(headers);
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
BSONObj h = e.Obj();
int w = h["width"].numberInt();
-
+
BSONElement data;
{
BSONElement temp = o[e.fieldName()];
if ( temp.isABSONObj() )
data = temp.Obj()["data"];
}
-
+
if ( data.type() == String )
cout << setw(w) << data.String();
else if ( data.type() == NumberDouble )
@@ -399,15 +399,15 @@ namespace mongo {
cout << setw(w) << data.numberInt();
else if ( data.eoo() )
cout << setw(w) << "";
- else
+ else
cout << setw(w) << "???";
-
+
cout << ' ';
}
- cout << endl;
+ cout << endl;
}
- int runNormal(){
+ int runNormal() {
bool showHeaders = ! hasParam( "noheaders" );
int rowCount = getParam( "rowcount" , 0 );
int rowNum = 0;
@@ -416,50 +416,50 @@ namespace mongo {
if ( prev.isEmpty() )
return -1;
- while ( rowCount == 0 || rowNum < rowCount ){
+ while ( rowCount == 0 || rowNum < rowCount ) {
sleepsecs(_sleep);
BSONObj now;
try {
now = stats();
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
cout << "can't get data: " << e.what() << endl;
continue;
}
if ( now.isEmpty() )
return -2;
-
+
try {
BSONObj out = doRow( prev , now );
- if ( showHeaders && rowNum % 10 == 0 ){
+ if ( showHeaders && rowNum % 10 == 0 ) {
printHeaders( out );
}
-
+
printData( out , out );
}
- catch ( AssertionException& e ){
+ catch ( AssertionException& e ) {
cout << "\nerror: " << e.what() << "\n"
<< now
<< endl;
}
-
+
prev = now;
rowNum++;
}
return 0;
}
-
+
struct ServerState {
- ServerState() : lock( "Stat::ServerState" ){}
+ ServerState() : lock( "Stat::ServerState" ) {}
string host;
scoped_ptr<boost::thread> thr;
-
+
mongo::mutex lock;
-
+
BSONObj prev;
BSONObj now;
time_t lastUpdate;
@@ -468,21 +468,21 @@ namespace mongo {
string error;
bool mongos;
};
-
- static void serverThread( shared_ptr<ServerState> state ){
+
+ static void serverThread( shared_ptr<ServerState> state ) {
try {
DBClientConnection conn( true );
conn._logLevel = 1;
string errmsg;
if ( ! conn.connect( state->host , errmsg ) )
state->error = errmsg;
-
+
long long cycleNumber = 0;
- while ( ++cycleNumber ){
+ while ( ++cycleNumber ) {
try {
BSONObj out;
- if ( conn.simpleCommand( "admin" , &out , "serverStatus" ) ){
+ if ( conn.simpleCommand( "admin" , &out , "serverStatus" ) ) {
scoped_lock lk( state->lock );
state->error = "";
state->lastUpdate = time(0);
@@ -494,13 +494,13 @@ namespace mongo {
state->error = "serverStatus failed";
state->lastUpdate = time(0);
}
-
- if ( out["shardCursorType"].type() == Object ){
+
+ if ( out["shardCursorType"].type() == Object ) {
state->mongos = true;
- if ( cycleNumber % 10 == 1 ){
+ if ( cycleNumber % 10 == 1 ) {
auto_ptr<DBClientCursor> c = conn.query( "config.shards" , BSONObj() );
vector<BSONObj> shards;
- while ( c->more() ){
+ while ( c->more() ) {
shards.push_back( c->next().getOwned() );
}
scoped_lock lk( state->lock );
@@ -508,57 +508,57 @@ namespace mongo {
}
}
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
scoped_lock lk( state->lock );
state->error = e.what();
}
-
+
sleepsecs( 1 );
}
-
-
+
+
}
- catch ( std::exception& e ){
+ catch ( std::exception& e ) {
cout << "serverThread (" << state->host << ") fatal error : " << e.what() << endl;
}
- catch ( ... ){
+ catch ( ... ) {
cout << "serverThread (" << state->host << ") fatal error" << endl;
}
}
typedef map<string,shared_ptr<ServerState> > StateMap;
- bool _add( StateMap& threads , string host ){
+ bool _add( StateMap& threads , string host ) {
shared_ptr<ServerState>& state = threads[host];
if ( state )
return false;
-
+
state.reset( new ServerState() );
state->host = host;
state->thr.reset( new boost::thread( boost::bind( serverThread , state ) ) );
return true;
}
-
+
/**
* @param hosts [ "a.foo.com" , "b.foo.com" ]
*/
- bool _addAll( StateMap& threads , const BSONObj& hosts ){
+ bool _addAll( StateMap& threads , const BSONObj& hosts ) {
BSONObjIterator i( hosts );
bool added = false;
- while ( i.more() ){
+ while ( i.more() ) {
bool me = _add( threads , i.next().String() );
added = added || me;
}
return added;
}
- bool _discover( StateMap& threads , const string& host , const shared_ptr<ServerState>& ss ){
-
+ bool _discover( StateMap& threads , const string& host , const shared_ptr<ServerState>& ss ) {
+
BSONObj info = ss->now;
bool found = false;
-
- if ( info["repl"].isABSONObj() ){
+
+ if ( info["repl"].isABSONObj() ) {
BSONObj x = info["repl"].Obj();
if ( x["hosts"].isABSONObj() )
if ( _addAll( threads , x["hosts"].Obj() ) )
@@ -567,106 +567,108 @@ namespace mongo {
if ( _addAll( threads , x["passives"].Obj() ) )
found = true;
}
-
- if ( ss->mongos ){
- for ( unsigned i=0; i<ss->shards.size(); i++ ){
+
+ if ( ss->mongos ) {
+ for ( unsigned i=0; i<ss->shards.size(); i++ ) {
BSONObj x = ss->shards[i];
string errmsg;
ConnectionString cs = ConnectionString::parse( x["host"].String() , errmsg );
- if ( errmsg.size() ){
+ if ( errmsg.size() ) {
cerr << errmsg << endl;
continue;
}
-
+
vector<HostAndPort> v = cs.getServers();
- for ( unsigned i=0; i<v.size(); i++ ){
+ for ( unsigned i=0; i<v.size(); i++ ) {
if ( _add( threads , v[i].toString() ) )
found = true;
}
}
}
-
+
return found;
}
-
- int runMany(){
+
+ int runMany() {
StateMap threads;
-
+
{
string orig = getParam( "host" );
if ( orig == "" )
orig = "localhost:27017";
StringSplitter ss( orig.c_str() , "," );
- while ( ss.more() ){
+ while ( ss.more() ) {
string host = ss.next();
_add( threads , host );
}
}
-
+
sleepsecs(1);
-
+
int row = 0;
bool discover = hasParam( "discover" );
- while ( 1 ){
+ while ( 1 ) {
sleepsecs( _sleep );
-
+
// collect data
vector<Row> rows;
- for ( map<string,shared_ptr<ServerState> >::iterator i=threads.begin(); i!=threads.end(); ++i ){
+ for ( map<string,shared_ptr<ServerState> >::iterator i=threads.begin(); i!=threads.end(); ++i ) {
scoped_lock lk( i->second->lock );
-
- if ( i->second->error.size() ){
+
+ if ( i->second->error.size() ) {
rows.push_back( Row( i->first , i->second->error ) );
}
- else if ( i->second->prev.isEmpty() || i->second->now.isEmpty() ){
+ else if ( i->second->prev.isEmpty() || i->second->now.isEmpty() ) {
rows.push_back( Row( i->first ) );
}
else {
BSONObj out = doRow( i->second->prev , i->second->now );
rows.push_back( Row( i->first , out ) );
}
-
- if ( discover && ! i->second->now.isEmpty() ){
+
+ if ( discover && ! i->second->now.isEmpty() ) {
if ( _discover( threads , i->first , i->second ) )
break;
}
}
-
+
// compute some stats
unsigned longestHost = 0;
BSONObj biggest;
- for ( unsigned i=0; i<rows.size(); i++ ){
+ for ( unsigned i=0; i<rows.size(); i++ ) {
if ( rows[i].host.size() > longestHost )
longestHost = rows[i].host.size();
if ( rows[i].data.nFields() > biggest.nFields() )
biggest = rows[i].data;
}
-
- { // check for any headers not in biggest
- // TODO: we put any new headers at end,
+ {
+ // check for any headers not in biggest
+
+ // TODO: we put any new headers at end,
// ideally we would interleave
set<string> seen;
-
+
BSONObjBuilder b;
-
- { // iterate biggest
+
+ {
+ // iterate biggest
BSONObjIterator i( biggest );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
seen.insert( e.fieldName() );
b.append( e );
}
}
-
+
// now do the rest
- for ( unsigned j=0; j<rows.size(); j++ ){
+ for ( unsigned j=0; j<rows.size(); j++ ) {
BSONObjIterator i( rows[j].data );
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
if ( seen.count( e.fieldName() ) )
continue;
@@ -677,30 +679,30 @@ namespace mongo {
}
biggest = b.obj();
-
+
}
-
+
// display data
-
+
cout << endl;
// header
- if ( row++ % 5 == 0 && ! biggest.isEmpty() ){
+ if ( row++ % 5 == 0 && ! biggest.isEmpty() ) {
cout << setw( longestHost ) << "" << "\t";
printHeaders( biggest );
}
-
+
// rows
- for ( unsigned i=0; i<rows.size(); i++ ){
+ for ( unsigned i=0; i<rows.size(); i++ ) {
cout << setw( longestHost ) << rows[i].host << "\t";
if ( rows[i].err.size() )
cout << rows[i].err << endl;
else if ( rows[i].data.isEmpty() )
cout << "no data" << endl;
- else
+ else
printData( rows[i].data , biggest );
}
-
+
}
return 0;
@@ -712,16 +714,16 @@ namespace mongo {
bool _all;
struct Row {
- Row( string h , string e ){
+ Row( string h , string e ) {
host = h;
err = e;
}
-
- Row( string h ){
+
+ Row( string h ) {
host = h;
}
- Row( string h , BSONObj d ){
+ Row( string h , BSONObj d ) {
host = h;
data = d;
}
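
Editor's note: the header-merging block in runMany() above handles servers that report different stat fields: the widest row becomes `biggest`, and any field name seen only in other rows is appended at the end (the TODO notes it would ideally interleave). Below is a simplified, BSON-free sketch of that merge step, with rows reduced to plain lists of column names; all names in it are illustrative, not part of the patch.

    // Simplified sketch of the header-merge step in runMany(), with BSONObj
    // replaced by a plain vector of column names. Illustrative only.
    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    std::vector<std::string> mergeHeaders( const std::vector< std::vector<std::string> >& rows ) {
        // start from the widest row, mirroring how `biggest` is chosen
        std::vector<std::string> merged;
        for ( unsigned i = 0; i < rows.size(); i++ )
            if ( rows[i].size() > merged.size() )
                merged = rows[i];

        std::set<std::string> seen( merged.begin() , merged.end() );

        // any header not already present is appended at the end
        // (the real code's TODO says it would ideally interleave instead)
        for ( unsigned j = 0; j < rows.size(); j++ )
            for ( unsigned k = 0; k < rows[j].size(); k++ )
                if ( seen.insert( rows[j][k] ).second )
                    merged.push_back( rows[j][k] );

        return merged;
    }

    int main() {
        std::vector< std::vector<std::string> > rows;
        rows.push_back( std::vector<std::string>() );
        rows.back().push_back( "insert" );
        rows.back().push_back( "query" );
        rows.push_back( std::vector<std::string>() );
        rows.back().push_back( "insert" );
        rows.back().push_back( "repl lag" );

        std::vector<std::string> h = mergeHeaders( rows );
        for ( unsigned i = 0; i < h.size(); i++ )
            std::cout << h[i] << "\t";
        std::cout << std::endl;   // insert  query  repl lag
        return 0;
    }
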
diff --git a/tools/tool.cpp b/tools/tool.cpp
index e460f600cef..0491012b880 100644
--- a/tools/tool.cpp
+++ b/tools/tool.cpp
@@ -35,41 +35,41 @@ namespace mongo {
CmdLine cmdLine;
- Tool::Tool( string name , DBAccess access , string defaultDB ,
+ Tool::Tool( string name , DBAccess access , string defaultDB ,
string defaultCollection , bool usesstdout ) :
- _name( name ) , _db( defaultDB ) , _coll( defaultCollection ) ,
+ _name( name ) , _db( defaultDB ) , _coll( defaultCollection ) ,
_usesstdout(usesstdout), _noconnection(false), _autoreconnect(false), _conn(0), _slaveConn(0), _paired(false) {
-
+
_options = new po::options_description( "options" );
_options->add_options()
- ("help","produce help message")
- ("verbose,v", "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
- ;
+ ("help","produce help message")
+ ("verbose,v", "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
+ ;
if ( access & REMOTE_SERVER )
_options->add_options()
- ("host,h",po::value<string>(), "mongo host to connect to (\"left,right\" for pairs)" )
- ("port",po::value<string>(), "server port. Can also use --host hostname:port" )
- ("ipv6", "enable IPv6 support (disabled by default)")
-
- ("username,u",po::value<string>(), "username" )
- ("password,p", new PasswordValue( &_password ), "password" )
- ;
-
+ ("host,h",po::value<string>(), "mongo host to connect to (\"left,right\" for pairs)" )
+ ("port",po::value<string>(), "server port. Can also use --host hostname:port" )
+ ("ipv6", "enable IPv6 support (disabled by default)")
+
+ ("username,u",po::value<string>(), "username" )
+ ("password,p", new PasswordValue( &_password ), "password" )
+ ;
+
if ( access & LOCAL_SERVER )
_options->add_options()
- ("dbpath",po::value<string>(), "directly access mongod database "
- "files in the given path, instead of connecting to a mongod "
- "server - needs to lock the data directory, so cannot be "
- "used if a mongod is currently accessing the same path" )
- ("directoryperdb", "if dbpath specified, each db is in a separate directory" )
- ;
-
+ ("dbpath",po::value<string>(), "directly access mongod database "
+ "files in the given path, instead of connecting to a mongod "
+ "server - needs to lock the data directory, so cannot be "
+ "used if a mongod is currently accessing the same path" )
+ ("directoryperdb", "if dbpath specified, each db is in a separate directory" )
+ ;
+
if ( access & SPECIFY_DBCOL )
_options->add_options()
- ("db,d",po::value<string>(), "database to use" )
- ("collection,c",po::value<string>(), "collection to use (some commands)" )
- ;
+ ("db,d",po::value<string>(), "database to use" )
+ ("collection,c",po::value<string>(), "collection to use (some commands)" )
+ ;
_hidden_options = new po::options_description( name + " hidden options" );
@@ -79,7 +79,7 @@ namespace mongo {
}
}
- Tool::~Tool(){
+ Tool::~Tool() {
delete( _options );
delete( _hidden_options );
if ( _conn )
@@ -92,9 +92,9 @@ namespace mongo {
printExtraHelpAfter(out);
}
- int Tool::main( int argc , char ** argv ){
+ int Tool::main( int argc , char ** argv ) {
static StaticObserver staticObserver;
-
+
cmdLine.prealloc = false;
boost::filesystem::path::default_name_check( boost::filesystem::no_check );
@@ -116,23 +116,24 @@ namespace mongo {
style(command_line_style).run() , _params );
po::notify( _params );
- } catch (po::error &e) {
+ }
+ catch (po::error &e) {
cerr << "ERROR: " << e.what() << endl << endl;
printHelp(cerr);
return EXIT_BADOPTIONS;
}
// hide password from ps output
- for (int i=0; i < (argc-1); ++i){
- if (!strcmp(argv[i], "-p") || !strcmp(argv[i], "--password")){
+ for (int i=0; i < (argc-1); ++i) {
+ if (!strcmp(argv[i], "-p") || !strcmp(argv[i], "--password")) {
char* arg = argv[i+1];
- while (*arg){
+ while (*arg) {
*arg++ = 'x';
}
}
}
- if ( _params.count( "help" ) ){
+ if ( _params.count( "help" ) ) {
printHelp(cout);
return 0;
}
@@ -146,11 +147,11 @@ namespace mongo {
logLevel = s.length();
}
}
-
+
preSetup();
bool useDirectClient = hasParam( "dbpath" );
-
+
if ( ! useDirectClient ) {
_host = "127.0.0.1";
if ( _params.count( "host" ) )
@@ -158,21 +159,21 @@ namespace mongo {
if ( _params.count( "port" ) )
_host += ':' + _params["port"].as<string>();
-
- if ( _noconnection ){
+
+ if ( _noconnection ) {
// do nothing
}
else {
string errmsg;
ConnectionString cs = ConnectionString::parse( _host , errmsg );
- if ( ! cs.isValid() ){
+ if ( ! cs.isValid() ) {
cerr << "invalid hostname [" << _host << "] " << errmsg << endl;
return -1;
}
-
+
_conn = cs.connect( errmsg );
- if ( ! _conn ){
+ if ( ! _conn ) {
cerr << "couldn't connect to [" << _host << "] " << errmsg << endl;
return -1;
}
@@ -194,10 +195,10 @@ namespace mongo {
try {
acquirePathLock();
}
- catch ( DBException& ){
+ catch ( DBException& ) {
cerr << endl << "If you are running a mongod on the same "
- "path you should connect to that instead of direct data "
- "file access" << endl << endl;
+ "path you should connect to that instead of direct data "
+ "file access" << endl << endl;
dbexit( EXIT_CLEAN );
return -1;
}
@@ -215,7 +216,7 @@ namespace mongo {
_username = _params["username"].as<string>();
if ( _params.count( "password" )
- && ( _password.empty() ) ) {
+ && ( _password.empty() ) ) {
_password = askPassword();
}
@@ -226,11 +227,11 @@ namespace mongo {
try {
ret = run();
}
- catch ( DBException& e ){
+ catch ( DBException& e ) {
cerr << "assertion: " << e.toString() << endl;
ret = -1;
}
-
+
if ( currentClient.get() )
currentClient->shutdown();
@@ -239,8 +240,8 @@ namespace mongo {
return ret;
}
- DBClientBase& Tool::conn( bool slaveIfPaired ){
- if ( slaveIfPaired && _conn->type() == ConnectionString::SET ){
+ DBClientBase& Tool::conn( bool slaveIfPaired ) {
+ if ( slaveIfPaired && _conn->type() == ConnectionString::SET ) {
if (!_slaveConn)
_slaveConn = &((DBClientReplicaSet*)_conn)->slaveConn();
return *_slaveConn;
@@ -252,47 +253,47 @@ namespace mongo {
if ( hasParam("dbpath") ) {
return true;
}
-
+
BSONObj info;
bool isMaster;
bool ok = conn().isMaster(isMaster, &info);
-
+
if (ok && !isMaster) {
cerr << "ERROR: trying to write to non-master " << conn().toString() << endl;
cerr << "isMaster info: " << info << endl;
return false;
}
-
+
return true;
}
- void Tool::addFieldOptions(){
+ void Tool::addFieldOptions() {
add_options()
- ("fields,f" , po::value<string>() , "comma separated list of field names e.g. -f name,age" )
- ("fieldFile" , po::value<string>() , "file with fields names - 1 per line" )
- ;
+ ("fields,f" , po::value<string>() , "comma separated list of field names e.g. -f name,age" )
+            ("fieldFile" , po::value<string>() , "file with field names - 1 per line" )
+ ;
}
- void Tool::needFields(){
+ void Tool::needFields() {
- if ( hasParam( "fields" ) ){
+ if ( hasParam( "fields" ) ) {
BSONObjBuilder b;
-
+
string fields_arg = getParam("fields");
pcrecpp::StringPiece input(fields_arg);
-
+
string f;
pcrecpp::RE re("([#\\w\\.\\s\\-]+),?" );
- while ( re.Consume( &input, &f ) ){
+ while ( re.Consume( &input, &f ) ) {
_fields.push_back( f );
b.append( f , 1 );
}
-
+
_fieldsObj = b.obj();
return;
}
- if ( hasParam( "fieldFile" ) ){
+ if ( hasParam( "fieldFile" ) ) {
string fn = getParam( "fieldFile" );
if ( ! exists( fn ) )
throw UserException( 9999 , ((string)"file: " + fn ) + " doesn't exist" );
@@ -302,7 +303,7 @@ namespace mongo {
ifstream file( fn.c_str() );
BSONObjBuilder b;
- while ( file.rdstate() == ios_base::goodbit ){
+ while ( file.rdstate() == ios_base::goodbit ) {
file.getline( line , BUF_SIZE );
const char * cur = line;
while ( isspace( cur[0] ) ) cur++;
@@ -319,7 +320,7 @@ namespace mongo {
throw UserException( 9998 , "you need to specify fields" );
}
- void Tool::auth( string dbname ){
+ void Tool::auth( string dbname ) {
if ( ! dbname.size() )
dbname = _db;
@@ -338,28 +339,28 @@ namespace mongo {
throw UserException( 9997 , (string)"auth failed: " + errmsg );
}
- BSONTool::BSONTool( const char * name, DBAccess access , bool objcheck )
- : Tool( name , access , "" , "" ) , _objcheck( objcheck ){
-
+ BSONTool::BSONTool( const char * name, DBAccess access , bool objcheck )
+ : Tool( name , access , "" , "" ) , _objcheck( objcheck ) {
+
add_options()
- ("objcheck" , "validate object before inserting" )
- ("filter" , po::value<string>() , "filter to apply before inserting" )
- ;
+ ("objcheck" , "validate object before inserting" )
+ ("filter" , po::value<string>() , "filter to apply before inserting" )
+ ;
}
- int BSONTool::run(){
+ int BSONTool::run() {
_objcheck = hasParam( "objcheck" );
-
+
if ( hasParam( "filter" ) )
_matcher.reset( new Matcher( fromjson( getParam( "filter" ) ) ) );
-
+
return doRun();
}
- long long BSONTool::processFile( const path& root ){
+ long long BSONTool::processFile( const path& root ) {
_fileName = root.string();
-
+
unsigned long long fileLength = file_size( root );
if ( fileLength == 0 ) {
@@ -369,7 +370,7 @@ namespace mongo {
FILE* file = fopen( _fileName.c_str() , "rb" );
- if ( ! file ){
+ if ( ! file ) {
log() << "error opening file: " << _fileName << endl;
return 0;
}
@@ -393,7 +394,7 @@ namespace mongo {
while ( read < fileLength ) {
int readlen = fread(buf, 4, 1, file);
int size = ((int*)buf)[0];
- if ( size >= BUF_SIZE ){
+ if ( size >= BUF_SIZE ) {
cerr << "got an object of size: " << size << " terminating..." << endl;
}
uassert( 10264 , "invalid object size" , size < BUF_SIZE );
@@ -401,24 +402,24 @@ namespace mongo {
readlen = fread(buf+4, size-4, 1, file);
BSONObj o( buf );
- if ( _objcheck && ! o.valid() ){
+ if ( _objcheck && ! o.valid() ) {
cerr << "INVALID OBJECT - going try and pring out " << endl;
cerr << "size: " << size << endl;
BSONObjIterator i(o);
- while ( i.more() ){
+ while ( i.more() ) {
BSONElement e = i.next();
try {
e.validate();
}
- catch ( ... ){
+ catch ( ... ) {
cerr << "\t\t NEXT ONE IS INVALID" << endl;
}
cerr << "\t name : " << e.fieldName() << " " << e.type() << endl;
cerr << "\t " << e << endl;
}
}
-
- if ( _matcher.get() == 0 || _matcher->matches( o ) ){
+
+ if ( _matcher.get() == 0 || _matcher->matches( o ) ) {
gotObject( o );
processed++;
}
@@ -435,8 +436,8 @@ namespace mongo {
out() << "\t " << processed << " objects processed" << endl;
return processed;
}
-
- void setupSignals( bool inFork ){}
+
+ void setupSignals( bool inFork ) {}
}
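
Editor's note: Tool::main() above keeps the existing trick of scrubbing the value that follows -p/--password out of argv so the password does not linger in `ps` output. A standalone illustration of that idiom, with made-up arguments:

    // Standalone illustration of the argv-scrubbing idiom used in Tool::main().
    // The argument values are hypothetical.
    #include <cstring>
    #include <iostream>

    int main( int argc , char ** argv ) {
        for ( int i = 0; i < ( argc - 1 ); ++i ) {
            if ( !strcmp( argv[i] , "-p" ) || !strcmp( argv[i] , "--password" ) ) {
                char* arg = argv[i + 1];
                while ( *arg ) {
                    *arg++ = 'x';      // overwrite each password character in place
                }
            }
        }
        for ( int i = 0; i < argc; ++i )
            std::cout << argv[i] << " ";
        std::cout << std::endl;        // e.g. ./a.out -u admin -p xxxxxx
        return 0;
    }
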
diff --git a/tools/tool.h b/tools/tool.h
index 746f09f37d6..f6124b87b6c 100644
--- a/tools/tool.h
+++ b/tools/tool.h
@@ -36,45 +36,45 @@ namespace mongo {
class Tool {
public:
enum DBAccess {
- NONE = 0 ,
- REMOTE_SERVER = 1 << 1 ,
- LOCAL_SERVER = 1 << 2 ,
+ NONE = 0 ,
+ REMOTE_SERVER = 1 << 1 ,
+ LOCAL_SERVER = 1 << 2 ,
SPECIFY_DBCOL = 1 << 3 ,
ALL = REMOTE_SERVER | LOCAL_SERVER | SPECIFY_DBCOL
};
- Tool( string name , DBAccess access=ALL, string defaultDB="test" ,
+ Tool( string name , DBAccess access=ALL, string defaultDB="test" ,
string defaultCollection="", bool usesstdout=true);
virtual ~Tool();
int main( int argc , char ** argv );
- boost::program_options::options_description_easy_init add_options(){
+ boost::program_options::options_description_easy_init add_options() {
return _options->add_options();
}
- boost::program_options::options_description_easy_init add_hidden_options(){
+ boost::program_options::options_description_easy_init add_hidden_options() {
return _hidden_options->add_options();
}
- void addPositionArg( const char * name , int pos ){
+ void addPositionArg( const char * name , int pos ) {
_positonalOptions.add( name , pos );
}
- string getParam( string name , string def="" ){
+ string getParam( string name , string def="" ) {
if ( _params.count( name ) )
return _params[name.c_str()].as<string>();
return def;
}
- int getParam( string name , int def ){
+ int getParam( string name , int def ) {
if ( _params.count( name ) )
return _params[name.c_str()].as<int>();
return def;
}
- bool hasParam( string name ){
+ bool hasParam( string name ) {
return _params.count( name );
}
- string getNS(){
- if ( _coll.size() == 0 ){
+ string getNS() {
+ if ( _coll.size() == 0 ) {
cerr << "no collection specified!" << endl;
throw -1;
}
@@ -82,21 +82,21 @@ namespace mongo {
}
bool isMaster();
-
- virtual void preSetup(){}
+
+ virtual void preSetup() {}
virtual int run() = 0;
virtual void printHelp(ostream &out);
- virtual void printExtraHelp( ostream & out ){}
- virtual void printExtraHelpAfter( ostream & out ){}
+ virtual void printExtraHelp( ostream & out ) {}
+ virtual void printExtraHelpAfter( ostream & out ) {}
protected:
mongo::DBClientBase &conn( bool slaveIfPaired = false );
void auth( string db = "" );
-
+
string _name;
string _db;
@@ -105,18 +105,18 @@ namespace mongo {
string _username;
string _password;
-
+
bool _usesstdout;
bool _noconnection;
bool _autoreconnect;
void addFieldOptions();
void needFields();
-
+
vector<string> _fields;
BSONObj _fieldsObj;
-
+
string _host;
protected:
@@ -136,17 +136,17 @@ namespace mongo {
class BSONTool : public Tool {
bool _objcheck;
auto_ptr<Matcher> _matcher;
-
+
public:
BSONTool( const char * name , DBAccess access=ALL, bool objcheck = false );
-
+
virtual int doRun() = 0;
virtual void gotObject( const BSONObj& obj ) = 0;
-
+
virtual int run();
long long processFile( const path& file );
-
+
};
}
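
Editor's note: the pieces declared in tool.h above (add_hidden_options(), addPositionArg(), getParam(), doRun(), gotObject(), processFile()) are what a concrete tool wires together. A minimal sketch of a BSONTool subclass under those assumptions follows; the class name, option, and behaviour are hypothetical, and it presumes the usual mongo headers and build setup rather than being a drop-in tool.

    // Minimal sketch of a BSONTool subclass built against the interface
    // declared in tool.h above. "ExampleCount" and its option are hypothetical.
    #include "tool.h"

    using namespace mongo;

    class ExampleCount : public BSONTool {
        long long _count;
    public:
        ExampleCount() : BSONTool( "examplecount" , NONE ) , _count( 0 ) {
            add_hidden_options()
                ("file" , po::value<string>() , ".bson file to scan" )
                ;
            addPositionArg( "file" , 1 );
            _noconnection = true;       // reads a file, never dials a server
        }

        // BSONTool::run() handles --objcheck / --filter, then calls doRun()
        virtual int doRun() {
            string f = getParam( "file" );
            if ( f == "" ) {
                cerr << "no file given" << endl;
                return 1;
            }
            processFile( f );           // invokes gotObject() for every matching object
            cout << _count << " objects" << endl;
            return 0;
        }

        virtual void gotObject( const BSONObj& obj ) {
            _count++;
        }
    };

    int main( int argc , char ** argv ) {
        ExampleCount c;
        return c.main( argc , argv );
    }
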