-rw-r--r--   src/mongo/client/dbclient.cpp          |  89
-rw-r--r--   src/mongo/client/dbclientinterface.h   |   8
-rw-r--r--   src/mongo/db/cloner.cpp                |  74
-rw-r--r--   src/mongo/db/repl/master_slave.cpp     |   8
-rw-r--r--   src/mongo/db/repl/rs_rollback.cpp      |   7
-rw-r--r--   src/mongo/s/commands_admin.cpp         |  11
-rw-r--r--   src/mongo/s/d_migrate.cpp              |  22
-rw-r--r--   src/mongo/shell/db.js                  |  60
8 files changed, 203 insertions, 76 deletions
diff --git a/src/mongo/client/dbclient.cpp b/src/mongo/client/dbclient.cpp
index 4c114078d7c..d45ce2c8389 100644
--- a/src/mongo/client/dbclient.cpp
+++ b/src/mongo/client/dbclient.cpp
@@ -850,28 +850,95 @@ namespace mongo {
         return names;
     }
-
     list<string> DBClientWithCommands::getCollectionNames( const string& db ) {
+        list<BSONObj> infos = getCollectionInfos( db );
         list<string> names;
+        for ( list<BSONObj>::iterator it = infos.begin(); it != infos.end(); ++it ) {
+            names.push_back( db + "." + (*it)["name"].valuestr() );
+        }
+        return names;
+    }
+
+    list<BSONObj> DBClientWithCommands::getCollectionInfos( const string& db,
+                                                            const BSONObj& filter ) {
+        list<BSONObj> infos;
+
+        // first we're going to try the command
+        // it was only added in 3.0, so if we're talking to an older server
+        // we'll fail back to querying system.namespaces
+        // TODO(spencer): remove fallback behavior after 3.0
+
+        {
+            BSONObj res;
+            if (runCommand(db,
+                           BSON("listCollections" << 1 << "filter" << filter
+                                << "cursor" << BSONObj()),
+                           res,
+                           QueryOption_SlaveOk)) {
+                BSONObj cursorObj = res["cursor"].Obj();
+                BSONObj collections = cursorObj["firstBatch"].Obj();
+                BSONObjIterator it( collections );
+                while ( it.more() ) {
+                    BSONElement e = it.next();
+                    infos.push_back( e.Obj().getOwned() );
+                }
+
+                const long long id = cursorObj["id"].Long();
+
+                if ( id != 0 ) {
+                    const std::string ns = cursorObj["ns"].String();
+                    auto_ptr<DBClientCursor> cursor = getMore(ns, id, 0, 0);
+                    while ( cursor->more() ) {
+                        infos.push_back(cursor->nextSafe().getOwned());
+                    }
+                }
+
+                return infos;
+            }
+
+            // command failed
+
+            int code = res["code"].numberInt();
+            string errmsg = res["errmsg"].valuestrsafe();
+            if ( code == ErrorCodes::CommandNotFound ||
+                 errmsg.find( "no such cmd" ) != string::npos ) {
+                // old version of server, ok, fall through to old code
+            }
+            else {
+                uasserted( 18630, str::stream() << "listCollections failed: " << res );
+            }
+
+        }
+
+        // SERVER-14951 filter for old version fallback needs to db qualify the 'name' element
+        BSONObjBuilder fallbackFilter;
+        if ( filter.hasField( "name" ) && filter["name"].type() == String ) {
+            fallbackFilter.append( "name", db + "." + filter["name"].str() );
+        }
+        fallbackFilter.appendElementsUnique( filter );

         string ns = db + ".system.namespaces";
-        auto_ptr<DBClientCursor> c = query( ns.c_str() , BSONObj() );
+        auto_ptr<DBClientCursor> c = query(
+            ns.c_str(), fallbackFilter.obj(), 0, 0, 0, QueryOption_SlaveOk);
         while ( c->more() ) {
-            string name = c->nextSafe()["name"].valuestr();
-            if ( name.find( "$" ) != string::npos )
+            BSONObj obj = c->nextSafe();
+            string ns = obj["name"].valuestr();
+            if ( ns.find( "$" ) != string::npos )
                 continue;
-            names.push_back( name );
+            BSONObjBuilder b;
+            b.append( "name", ns.substr( db.size() + 1 ) );
+            b.appendElementsUnique( obj );
+            infos.push_back( b.obj() );
         }
-        return names;
+
+        return infos;
     }

     bool DBClientWithCommands::exists( const string& ns ) {
-
-        string db = nsGetDB( ns ) + ".system.namespaces";
-        BSONObj q = BSON( "name" << ns );
-        return count( db.c_str() , q, QueryOption_SlaveOk ) != 0;
+        BSONObj filter = BSON( "name" << nsToCollectionSubstring( ns ) );
+        list<BSONObj> results = getCollectionInfos( nsToDatabase( ns ), filter );
+        return !results.empty();
     }
-

     /* --- dbclientconnection --- */

     void DBClientConnection::_auth(const BSONObj& params) {
diff --git a/src/mongo/client/dbclientinterface.h b/src/mongo/client/dbclientinterface.h
index 6f05fce5637..d0555e341f5 100644
--- a/src/mongo/client/dbclientinterface.h
+++ b/src/mongo/client/dbclientinterface.h
@@ -937,6 +937,14 @@ namespace mongo {
          */
         list<string> getCollectionNames( const string& db );

+        /**
+         * { name : "<short collection name>",
+         *   options : { }
+         * }
+         */
+        std::list<BSONObj> getCollectionInfos( const std::string& db,
+                                               const BSONObj& filter = BSONObj() );
+
         bool exists( const string& ns );

         /** Create an index if it does not already exist.
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index a4c0ab9f873..fa2e9dedafb 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -316,14 +316,22 @@ namespace mongo {
                                 bool mayYield, bool mayBeInterrupted, bool copyIndexes,
                                 bool logForRepl) {

+        const NamespaceString nss(ns);
+        const string dbname = nss.db().toString();
+
         Client::WriteContext ctx(ns);

         // config
-        string temp = ctx.ctx().db()->name() + ".system.namespaces";
-        BSONObj config = _conn->findOne(temp , BSON("name" << ns));
-        if (config["options"].isABSONObj())
-            if (!userCreateNS(ns.c_str(), config["options"].Obj(), errmsg, logForRepl, 0))
-                return false;
+        BSONObj filter = BSON("name" << nss.coll().toString());
+        list<BSONObj> collList = _conn->getCollectionInfos( dbname, filter);
+        if (!collList.empty()) {
+            invariant(collList.size() <= 1);
+            BSONObj col = collList.front();
+            if (col["options"].isABSONObj()) {
+                if (!userCreateNS(ns.c_str(), col["options"].Obj(), errmsg, logForRepl, 0))
+                    return false;
+            }
+        }

         // main data
         copy(ctx.ctx(),
@@ -336,7 +344,7 @@ namespace mongo {
         }

         // indexes
-        temp = ctx.ctx().db()->name() + ".system.indexes";
+        std::string temp = ctx.ctx().db()->name() + ".system.indexes";
         copy(ctx.ctx(),
              temp.c_str(), temp.c_str(), true, logForRepl, false, true, mayYield,
              mayBeInterrupted, BSON( "ns" << ns ));
@@ -388,8 +396,6 @@ namespace mongo {
             }
         }

-        string systemNamespacesNS = opts.fromDB + ".system.namespaces";
-
         list<BSONObj> toClone;
         if ( clonedColls ) clonedColls->clear();
         if ( opts.syncData ) {
@@ -399,24 +405,10 @@ namespace mongo {
             mayInterrupt( opts.mayBeInterrupted );
             dbtempreleaseif r( opts.mayYield );

-            // just using exhaust for collection copying right now
-
-            // todo: if snapshot (bool param to this func) is true, we need to snapshot this query?
-            // only would be relevant if a thousands of collections -- maybe even then it is hard
-            // to exceed a single cursor batch.
-            // for repl it is probably ok as we apply oplog section after the clone (i.e. repl
-            // doesnt not use snapshot=true).
-            auto_ptr<DBClientCursor> cursor = _conn->query(systemNamespacesNS, BSONObj(), 0, 0, 0,
-                                                           opts.slaveOk ? QueryOption_SlaveOk : 0);
+            list<BSONObj> raw = _conn->getCollectionInfos( opts.fromDB );
+            for ( list<BSONObj>::iterator it = raw.begin(); it != raw.end(); ++it ) {
+                BSONObj collection = *it;

-            if (!validateQueryResults(cursor, errCode, errmsg)) {
-                errmsg = str::stream() << "index query on ns " << systemNamespacesNS
-                                       << " failed: " << errmsg;
-                return false;
-            }
-
-            while ( cursor->more() ) {
-                BSONObj collection = cursor->next();

                 LOG(2) << "\t cloner got " << collection << endl;

@@ -437,30 +429,34 @@ namespace mongo {
                 }
                 verify( !e.eoo() );
                 verify( e.type() == String );
-                const char *from_name = e.valuestr();
-                if( strstr(from_name, ".system.") ) {
+                const NamespaceString ns(opts.fromDB, e.valuestr());
+
+                if( ns.isSystem() ) {
                     /* system.users and s.js is cloned -- but nothing else from system.
                      * system.indexes is handled specially at the end*/
-                    if( legalClientSystemNS( from_name , true ) == 0 ) {
+                    if( legalClientSystemNS( ns.ns() , true ) == 0 ) {
                         LOG(2) << "\t\t not cloning because system collection" << endl;
                         continue;
                     }
                 }

-                if( ! NamespaceString::normal( from_name ) ) {
+                if( !ns.isNormal() ) {
                     LOG(2) << "\t\t not cloning because has $ " << endl;
                     continue;
                 }

-                if( opts.collsToIgnore.find( string( from_name ) ) != opts.collsToIgnore.end() ){
-                    LOG(2) << "\t\t ignoring collection " << from_name << endl;
+                if( opts.collsToIgnore.find( ns.ns() ) != opts.collsToIgnore.end() ){
+                    LOG(2) << "\t\t ignoring collection " << ns << endl;
                     continue;
                 }
                 else {
-                    LOG(2) << "\t\t not ignoring collection " << from_name << endl;
+                    LOG(2) << "\t\t not ignoring collection " << ns << endl;
+                }
+
+                if (clonedColls) {
+                    clonedColls->insert(ns.ns());
                 }
-                if ( clonedColls ) clonedColls->insert( from_name );

                 toClone.push_back( collection.getOwned() );
             }
         }
@@ -472,14 +468,10 @@ namespace mongo {
             }
             BSONObj collection = *i;
             LOG(2) << " really will clone: " << collection << endl;
-            const char * from_name = collection["name"].valuestr();
             BSONObj options = collection.getObjectField("options");
-            /* change name "<fromdb>.collection" -> <todb>.collection */
-            const char *p = strchr(from_name, '.');
-            verify(p);
-            string to_name = todb + p;
-
+            string to_name = todb + "." + collection["name"].valuestr();
+            string from_name = opts.fromDB + "." + collection["name"].valuestr();

             {
                 string err;
                 const char *toname = to_name.c_str();
@@ -496,8 +488,8 @@ namespace mongo {

             Query q;
             if( opts.snapshot ) q.snapshot();
-            copy(context,from_name, to_name.c_str(), false, opts.logForRepl, masterSameProcess,
-                 opts.slaveOk, opts.mayYield, opts.mayBeInterrupted, q);
+            copy(context, from_name.c_str(), to_name.c_str(), false, opts.logForRepl,
+                 masterSameProcess, opts.slaveOk, opts.mayYield, opts.mayBeInterrupted, q);
             {
                 /* we need dropDups to be true as we didn't do a true snapshot and this is before
                    applying oplog operations
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 5330b1d8963..6b715c9f644 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -286,11 +286,9 @@ namespace mongo {
             ReplSource tmp(obj);
             if ( tmp.syncedTo.isNull() ) {
                 DBDirectClient c;
-                if ( c.exists( "local.oplog.$main" ) ) {
-                    BSONObj op = c.findOne( "local.oplog.$main", QUERY( "op" << NE << "n" ).sort( BSON( "$natural" << -1 ) ) );
-                    if ( !op.isEmpty() ) {
-                        tmp.syncedTo = op[ "ts" ].date();
-                    }
+                BSONObj op = c.findOne( "local.oplog.$main", QUERY( "op" << NE << "n" ).sort( BSON( "$natural" << -1 ) ) );
+                if ( !op.isEmpty() ) {
+                    tmp.syncedTo = op[ "ts" ].date();
                 }
             }
             addSourceToList(v, tmp, old);
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index b0a7ba88d75..00a178dd8d0 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -552,9 +552,10 @@ namespace mongo {
             // did we just empty the collection?  if so let's check if it even exists on the source.
             if( collection->numRecords() == 0 ) {
                 try {
-                    string sys = cc().database()->name() + ".system.namespaces";
-                    bo o = them->findOne(sys, QUERY("name"<<d.ns));
-                    if( o.isEmpty() ) {
+                    std::list<BSONObj> lst =
+                        them->getCollectionInfos( cc().database()->name(),
+                                                  BSON( "name" << nsToCollectionSubstring( d.ns ) ) );
+                    if (lst.empty()) {
                         // we should drop
                         try {
                             cc().database()->dropCollection(d.ns);
diff --git a/src/mongo/s/commands_admin.cpp b/src/mongo/s/commands_admin.cpp
index 761398c2bea..08ae02bf813 100644
--- a/src/mongo/s/commands_admin.cpp
+++ b/src/mongo/s/commands_admin.cpp
@@ -532,8 +532,15 @@ namespace mongo {
             ScopedDbConnection conn(config->getPrimary().getConnString());

             //check that collection is not capped
-            BSONObj res = conn->findOne( config->getName() + ".system.namespaces",
-                                         BSON( "name" << ns ) );
+            BSONObj res;
+            {
+                std::list<BSONObj> all = conn->getCollectionInfos( config->getName(),
+                                                                   BSON( "name" << nsToCollectionSubstring( ns ) ) );
+                if ( !all.empty() ) {
+                    res = all.front().getOwned();
+                }
+            }
+
             if ( res["options"].type() == Object &&
                  res["options"].embeddedObject()["capped"].trueValue() ) {
                 errmsg = "can't shard capped collection";
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 2c8cea21e79..2c79a6243c4 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -1637,16 +1637,20 @@ namespace mongo {

                 Collection* collection = db->getCollection( ns );
                 if ( !collection ) {
-                    string system_namespaces = nsToDatabase(ns) + ".system.namespaces";
-                    BSONObj entry = conn->findOne( system_namespaces, BSON( "name" << ns ) );
-                    if ( entry["options"].isABSONObj() ) {
-                        string errmsg;
-                        if ( ! userCreateNS( ns.c_str(), entry["options"].Obj(), errmsg, true, 0 ) )
-                            warning() << "failed to create collection with options: " << errmsg
-                                      << endl;
+                    list<BSONObj> infos =
+                        conn->getCollectionInfos(nsToDatabase(ns),
+                                                 BSON("name" << nsToCollectionSubstring(ns)));
+
+                    BSONObj options;
+                    if (infos.size() > 0) {
+                        BSONObj entry = infos.front();
+                        if (entry["options"].isABSONObj()) {
+                            options = entry["options"].Obj();
+                        }
                     }
-                    else {
-                        db->createCollection( ns );
+                    if ( ! userCreateNS( ns.c_str(), options, errmsg, true, 0 ) ) {
+                        warning() << "failed to create collection [" << ns << "] "
+                                  << " with options " << options << ": " << errmsg;
                     }
                 }
             }
diff --git a/src/mongo/shell/db.js b/src/mongo/shell/db.js
index e9572e7d8cb..b4512683a30 100644
--- a/src/mongo/shell/db.js
+++ b/src/mongo/shell/db.js
@@ -277,6 +277,7 @@ DB.prototype.help = function() {
     print("\tdb.fsyncLock() flush data to disk and lock server for backups");
     print("\tdb.fsyncUnlock() unlocks server following a db.fsyncLock()");
     print("\tdb.getCollection(cname) same as db['cname'] or db.cname");
+    print("\tdb.getCollectionInfos()");
    print("\tdb.getCollectionNames()");
     print("\tdb.getLastError() - just returns the err msg string");
     print("\tdb.getLastErrorObj() - return full status object");
@@ -547,22 +548,71 @@ DB.prototype.getPrevError = function(){
     return this.runCommand( { getpreverror : 1 } );
 }

-DB.prototype.getCollectionNames = function(){
+DB.prototype._getCollectionInfosSystemNamespaces = function(){
     var all = [];

     var nsLength = this._name.length + 1;

     var c = this.getCollection( "system.namespaces" ).find();
     while ( c.hasNext() ){
-        var name = c.next().name;
+        var infoObj = c.next();

-        if ( name.indexOf( "$" ) >= 0 && name.indexOf( ".oplog.$" ) < 0 )
+        if ( infoObj.name.indexOf( "$" ) >= 0 && infoObj.name.indexOf( ".oplog.$" ) < 0 )
             continue;

-        all.push( name.substring( nsLength ) );
+        infoObj.name = infoObj.name.substring( nsLength );
+
+        all.push( infoObj );
     }

-    return all.sort();
+    // Return list of objects sorted by collection name.
+    return all.sort(function(coll1, coll2) { return coll1.name.localeCompare(coll2.name); });
+}
+
+
+DB.prototype._getCollectionInfosCommand = function() {
+    try {
+        var res = this.runCommand( "listCollections" );
+    }
+    catch (e) {
+        // command doesn't exist, very old mongod
+        return null;
+    }
+
+    if ( res.code == 59 ) {
+        // command doesn't exist, old mongod
+        return null;
+    }
+
+    if ( !res.ok ) {
+        if ( res.errmsg && res.errmsg.startsWith( "no such cmd" ) ) {
+            return null;
+        }
+
+        throw Error( "listCollections failed: " + tojson( res ) );
+    }
+
+    // The listCollections command returns its results sorted by collection name. There's no need
+    // to re-sort.
+    return new DBCommandCursor(this._mongo, res).toArray();
+}
+
+/**
+ * Returns this database's list of collection metadata objects, sorted by collection name.
+ */
+DB.prototype.getCollectionInfos = function() {
+    var res = this._getCollectionInfosCommand();
+    if ( res ) {
+        return res;
+    }
+    return this._getCollectionInfosSystemNamespaces();
+}
+
+/**
+ * Returns this database's list of collection names in sorted order.
+ */
+DB.prototype.getCollectionNames = function() {
+    return this.getCollectionInfos().map(function(infoObj) { return infoObj.name; });
 }

 DB.prototype.tojson = function(){
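For reference, a minimal sketch of how the client API added by this patch might be exercised from C++. It assumes the in-tree client headers and a mongod reachable at localhost:27017; the host, database name "test", and collection name "foo" are illustrative only, and driver/link initialization details are omitted. This is not part of the commit.

    #include <iostream>
    #include <list>
    #include <string>

    #include "mongo/client/dbclientinterface.h"

    int main() {
        mongo::DBClientConnection conn;
        try {
            // Assumed host/port; adjust for your deployment.
            conn.connect("localhost:27017");
        }
        catch (const mongo::DBException& e) {
            std::cout << "connect failed: " << e.what() << std::endl;
            return 1;
        }

        // getCollectionInfos() returns one BSONObj per collection with a short
        // "name" field (and "options" when present). Per this patch it uses the
        // listCollections command on newer servers and falls back to scanning
        // system.namespaces on older ones. The optional filter matches on the
        // short collection name.
        std::list<mongo::BSONObj> infos =
            conn.getCollectionInfos("test", BSON("name" << "foo"));
        for (std::list<mongo::BSONObj>::const_iterator it = infos.begin();
             it != infos.end(); ++it) {
            std::cout << it->jsonString() << std::endl;
        }
        return 0;
    }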