35 files changed, 1012 insertions, 769 deletions
diff --git a/jstests/sharding/drop_configdb.js b/jstests/sharding/drop_configdb.js
index f445fafb34e..9fbccff0ee6 100644
--- a/jstests/sharding/drop_configdb.js
+++ b/jstests/sharding/drop_configdb.js
@@ -7,20 +7,17 @@ var config = st._configServers[0].getDB('config');
 
 // Try to drop config db via configsvr
 print ( "1: Try to drop config database via configsvr" )
-config.dropDatabase()
-
-print ( "2: Ensure it wasn't dropped" )
-assert.eq( 1, config.databases.find({ _id : "admin", partitioned : false, primary : "config"}).toArray().length )
-
+assert.eq(0, config.dropDatabase().ok);
+assert.eq("Cannot drop 'config' database if mongod started with --configsvr",
+          config.dropDatabase().errmsg);
 
 // Try to drop config db via mongos
-
 var config = mongos.getDB( "config" )
 
 print ( "1: Try to drop config database via mongos" )
-config.dropDatabase()
+assert.eq(0, config.dropDatabase().ok);
 
-print ( "2: Ensure it wasn't dropped" )
-assert.eq( 1, config.databases.find({ _id : "admin", partitioned : false, primary : "config"}).toArray().length )
+// 20 = ErrorCodes::IllegalOperation
+assert.eq(20, config.dropDatabase().code);
 
-st.stop();
\ No newline at end of file +st.stop(); diff --git a/jstests/sharding/moveprimary_ignore_sharded.js b/jstests/sharding/moveprimary_ignore_sharded.js index c73f73427c6..c5fda358840 100644 --- a/jstests/sharding/moveprimary_ignore_sharded.js +++ b/jstests/sharding/moveprimary_ignore_sharded.js @@ -49,7 +49,7 @@ st.printShardingStatus(); jsTest.log( "Running movePrimary for foo through mongosA ..." ) // MongosA should already know about all the collection states -printjson( adminA.runCommand({ movePrimary : "foo", to : fooOtherShard._id }) ) +printjson( adminA.runCommand({ movePrimary : "foo", to : fooOtherShard._id }) ); // All collections still correctly sharded / unsharded assert.neq( null, mongosA.getCollection("foo.coll0").findOne() ); @@ -76,6 +76,11 @@ assert.eq( 1, realCollectionCount( new Mongo( fooOtherShard.host ).getDB( "foo" jsTest.log( "Running movePrimary for bar through mongosB ..." ); printjson( adminB.runCommand({ movePrimary : "bar", to : barOtherShard._id }) ); +// We need to flush the cluster config on mongosA, so it can discover that database 'bar' got +// moved. Otherwise since the collections are not sharded, we have no way of discovering this. +// See SERVER-8059. +adminA.runCommand({ flushRouterConfig : 1 }); + // All collections still correctly sharded / unsharded assert.neq( null, mongosA.getCollection("bar.coll0").findOne() ); assert.neq( null, mongosA.getCollection("bar.coll1").findOne() ); diff --git a/jstests/sharding/read_does_not_create_namespaces.js b/jstests/sharding/read_does_not_create_namespaces.js new file mode 100644 index 00000000000..d07280fc0d3 --- /dev/null +++ b/jstests/sharding/read_does_not_create_namespaces.js @@ -0,0 +1,12 @@ +// This test ensures that just attempting to read from a non-existent database or collection won't
+// cause entries to be created in the catalog.
+var shardingTest = new ShardingTest('read_does_not_create_namespaces', 1);
+var db = shardingTest.getDB('NonExistentDB');
+
+assert.isnull(db.nonExistentColl.findOne({}));
+
+// Neither the database nor the collection should have been created
+assert.isnull(shardingTest.getDB('config').databases.findOne({ _id: 'NonExistentDB' }));
+assert.eq(-1, shardingTest.shard0.getDBNames().indexOf('NonExistentDB'));
+
+shardingTest.stop();
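
On the mongos side, this guarantee comes from the read paths switching to CatalogCache::getDatabase(), which only ever reads the catalog, instead of the old grid.getDBConfig() call that created a database entry as a side effect. A minimal sketch of that read-side pattern, assuming the usual mongo/s headers; the wrapper function is hypothetical, while appendEmptyResultSet is the helper added to commands_public.cpp further down in this diff.

    // Sketch only: reporting "no such database" on mongos without creating it.
    bool runReadOnPossiblyMissingDb(const NamespaceString& nss, BSONObjBuilder& result) {
        auto status = grid.catalogCache()->getDatabase(nss.db().toString());
        if (!status.isOK()) {
            // DatabaseNotFound turns into an empty result set; nothing is ever
            // written to config.databases, which is what this test asserts.
            return appendEmptyResultSet(result, status.getStatus(), nss.ns());
        }

        boost::shared_ptr<DBConfig> conf = status.getValue();
        // ... target conf->getPrimary() or the chunk manager as usual ...
        return true;
    }
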
\ No newline at end of file diff --git a/jstests/slow2/sharding_jscore_passthrough.js b/jstests/slow2/sharding_jscore_passthrough.js index 3d257e5f0ef..209cd3dd504 100644 --- a/jstests/slow2/sharding_jscore_passthrough.js +++ b/jstests/slow2/sharding_jscore_passthrough.js @@ -72,6 +72,7 @@ files.forEach(function(x) { 'profile\\d*|' + 'dbhash|' + 'dbhash2|' + + 'explain_missing_database|' + 'median|' + 'evalb|' + 'evald|' + diff --git a/src/mongo/base/error_codes.err b/src/mongo/base/error_codes.err index 9ff2ab7171e..65d3581c7ce 100644 --- a/src/mongo/base/error_codes.err +++ b/src/mongo/base/error_codes.err @@ -117,6 +117,7 @@ error_code("InitialSyncOplogSourceMissing", 114) error_code("CommandNotSupported", 115) error_code("DocTooLargeForCapped", 116) error_code("ConflictingOperationInProgress", 117) +error_code("NamespaceNotSharded", 118) # Non-sequential error codes (for compatibility only) error_code("NotMaster", 10107) #this comes from assert_util.h diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp index 6a9659bb511..caaada1cf82 100644 --- a/src/mongo/client/parallel.cpp +++ b/src/mongo/client/parallel.cpp @@ -42,6 +42,7 @@ #include "mongo/client/dbclient_rs.h" #include "mongo/client/replica_set_monitor.h" #include "mongo/db/query/lite_parsed_query.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/chunk_manager.h" #include "mongo/s/config.h" #include "mongo/s/grid.h" @@ -520,25 +521,31 @@ namespace mongo { forceReload = tries > 2; } - void ParallelSortClusteredCursor::_handleStaleNS( const NamespaceString& staleNS, bool forceReload, bool fullReload ){ + void ParallelSortClusteredCursor::_handleStaleNS(const NamespaceString& staleNS, + bool forceReload, + bool fullReload) { - DBConfigPtr config = grid.getDBConfig( staleNS.db() ); + auto status = grid.catalogCache()->getDatabase(staleNS.db().toString()); + if (!status.isOK()) { + warning() << "cannot reload database info for stale namespace " << staleNS; + return; + } + + shared_ptr<DBConfig> config = status.getValue(); // Reload db if needed, make sure it works - if( config && fullReload && ! config->reload() ){ - // We didn't find the db after the reload, the db may have been dropped, - // reset this ptr + if (fullReload && !config->reload()) { + // We didn't find the db after reload, the db may have been dropped, reset this ptr config.reset(); } - if( ! config ){ - warning() << "cannot reload database info for stale namespace " << staleNS << endl; + if (!config) { + warning() << "cannot reload database info for stale namespace " << staleNS; } else { // Reload chunk manager, potentially forcing the namespace - config->getChunkManagerIfExists( staleNS, true, forceReload ); + config->getChunkManagerIfExists(staleNS, true, forceReload); } - } void ParallelSortClusteredCursor::setupVersionAndHandleSlaveOk( @@ -631,12 +638,11 @@ namespace mongo { } void ParallelSortClusteredCursor::startInit() { + const bool returnPartial = (_qSpec.options() & QueryOption_PartialResults); + const NamespaceString ns(!_cInfo.isEmpty() ? _cInfo.versionedNS : _qSpec.ns()); - const bool returnPartial = ( _qSpec.options() & QueryOption_PartialResults ); - NamespaceString ns( !_cInfo.isEmpty() ? 
_cInfo.versionedNS : _qSpec.ns() ); - - ChunkManagerPtr manager; - ShardPtr primary; + shared_ptr<ChunkManager> manager; + shared_ptr<Shard> primary; string prefix; if (MONGO_unlikely(shouldLog(pc))) { @@ -649,35 +655,40 @@ namespace mongo { } LOG( pc ) << prefix << " pcursor over " << _qSpec << " and " << _cInfo << endl; - set<Shard> todoStorage; - set<Shard>& todo = todoStorage; + set<Shard> shardsSet; string vinfo; - DBConfigPtr config = grid.getDBConfig( ns.db() ); // Gets or loads the config - uassert( 15989, "database not found for parallel cursor request", config ); + { + shared_ptr<DBConfig> config; - // Try to get either the chunk manager or the primary shard - config->getChunkManagerOrPrimary( ns, manager, primary ); + auto status = grid.catalogCache()->getDatabase(ns.db().toString()); + if (status.isOK()) { + config = status.getValue(); + config->getChunkManagerOrPrimary(ns, manager, primary); + } + } - if (MONGO_unlikely(shouldLog(pc))) { - if (manager) { + if (manager) { + if (MONGO_unlikely(shouldLog(pc))) { vinfo = str::stream() << "[" << manager->getns() << " @ " - << manager->getVersion().toString() << "]"; - } - else { - vinfo = str::stream() << "[unsharded @ " - << primary->toString() << "]"; + << manager->getVersion().toString() << "]"; } + + manager->getShardsForQuery(shardsSet, + !_cInfo.isEmpty() ? _cInfo.cmdFilter : _qSpec.filter()); } + else if (primary) { + if (MONGO_unlikely(shouldLog(pc))) { + vinfo = str::stream() << "[unsharded @ " << primary->toString() << "]"; + } - if( manager ) manager->getShardsForQuery( todo, !_cInfo.isEmpty() ? _cInfo.cmdFilter : _qSpec.filter() ); - else if( primary ) todo.insert( *primary ); + shardsSet.insert(*primary); + } // Close all cursors on extra shards first, as these will be invalid for (map<Shard, PCMData>::iterator i = _cursorMap.begin(), end = _cursorMap.end(); i != end; ++i) { - if (todo.find(i->first) == todo.end()) { - + if (shardsSet.find(i->first) == shardsSet.end()) { LOG( pc ) << "closing cursor on shard " << i->first << " as the connection is no longer required by " << vinfo << endl; @@ -685,29 +696,24 @@ namespace mongo { } } - verify( todo.size() ); - - LOG( pc ) << "initializing over " << todo.size() - << " shards required by " << vinfo << endl; + LOG(pc) << "initializing over " << shardsSet.size() + << " shards required by " << vinfo; // Don't retry indefinitely for whatever reason _totalTries++; uassert( 15986, "too many retries in total", _totalTries < 10 ); - for( set<Shard>::iterator i = todo.begin(), end = todo.end(); i != end; ++i ){ - + for (set<Shard>::iterator i = shardsSet.begin(), end = shardsSet.end(); i != end; ++i) { const Shard& shard = *i; PCMData& mdata = _cursorMap[ shard ]; LOG( pc ) << "initializing on shard " << shard - << ", current connection state is " << mdata.toBSON() << endl; + << ", current connection state is " << mdata.toBSON() << endl; // This may be the first time connecting to this shard, if so we can get an error here try { - - if( mdata.initialized ){ - - verify( mdata.pcState ); + if (mdata.initialized) { + invariant(mdata.pcState); PCStatePtr state = mdata.pcState; @@ -759,7 +765,7 @@ namespace mongo { // shard version must have changed on the single shard between queries. 
// - if (todo.size() > 1) { + if (shardsSet.size() > 1) { // Query limits split for multiple shards @@ -895,24 +901,39 @@ namespace mongo { } // Sanity check final init'ed connections - for( map< Shard, PCMData >::iterator i = _cursorMap.begin(), end = _cursorMap.end(); i != end; ++i ){ - + for (map<Shard, PCMData>::iterator i = _cursorMap.begin(), end = _cursorMap.end(); + i != end; + ++i) { const Shard& shard = i->first; PCMData& mdata = i->second; - if( ! mdata.pcState ) continue; + if (!mdata.pcState) { + continue; + } // Make sure all state is in shards - verify( todo.find( shard ) != todo.end() ); - verify( mdata.initialized == true ); - if( ! mdata.completed ) verify( mdata.pcState->conn->ok() ); - verify( mdata.pcState->cursor ); - verify( mdata.pcState->primary || mdata.pcState->manager ); - verify( ! mdata.retryNext ); + invariant(shardsSet.find(shard) != shardsSet.end()); + invariant(mdata.initialized == true); + + if (!mdata.completed) { + invariant(mdata.pcState->conn->ok()); + } - if( mdata.completed ) verify( mdata.finished ); - if( mdata.finished ) verify( mdata.initialized ); - if( ! returnPartial ) verify( mdata.initialized ); + invariant(mdata.pcState->cursor); + invariant(mdata.pcState->primary || mdata.pcState->manager); + invariant(!mdata.retryNext); + + if (mdata.completed) { + invariant(mdata.finished); + } + + if (mdata.finished) { + invariant(mdata.initialized); + } + + if (!returnPartial) { + invariant(mdata.initialized); + } } } diff --git a/src/mongo/db/auth/authz_manager_external_state_s.cpp b/src/mongo/db/auth/authz_manager_external_state_s.cpp index 4e9a204cbf8..489b4ce51c8 100644 --- a/src/mongo/db/auth/authz_manager_external_state_s.cpp +++ b/src/mongo/db/auth/authz_manager_external_state_s.cpp @@ -32,8 +32,9 @@ #include "mongo/db/auth/authz_manager_external_state_s.h" -#include <boost/thread/mutex.hpp> #include <boost/scoped_ptr.hpp> +#include <boost/shared_ptr.hpp> +#include <boost/thread/mutex.hpp> #include <string> #include "mongo/client/dbclientinterface.h" @@ -42,6 +43,7 @@ #include "mongo/db/auth/authz_session_external_state_s.h" #include "mongo/db/auth/user_name.h" #include "mongo/db/jsobj.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/catalog/catalog_manager.h" #include "mongo/s/config.h" #include "mongo/s/distlock.h" @@ -55,20 +57,18 @@ namespace mongo { using boost::scoped_ptr; + using boost::shared_ptr; using std::endl; using std::vector; namespace { - ScopedDbConnection* getConnectionForAuthzCollection(const NamespaceString& ns) { - // + ScopedDbConnection* getConnectionForAuthzCollection(const NamespaceString& nss) { // Note: The connection mechanism here is *not* ideal, and should not be used elsewhere. // If the primary for the collection moves, this approach may throw rather than handle // version exceptions. 
- // - - DBConfigPtr config = grid.getDBConfig(ns.ns()); - Shard s = config->getShard(ns.ns()); + auto config = uassertStatusOK(grid.catalogCache()->getDatabase(nss.db().toString())); + Shard s = config->getShard(nss.ns()); return new ScopedDbConnection(s.getConnString(), 30.0); } diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp index b107808ab86..796ea3fb066 100644 --- a/src/mongo/db/commands/mr.cpp +++ b/src/mongo/db/commands/mr.cpp @@ -57,6 +57,7 @@ #include "mongo/db/range_preserver.h" #include "mongo/db/repl/replication_coordinator_global.h" #include "mongo/db/storage_options.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/chunk_manager.h" #include "mongo/s/collection_metadata.h" #include "mongo/s/d_state.h" @@ -70,6 +71,7 @@ namespace mongo { using boost::scoped_ptr; + using boost::shared_ptr; using std::auto_ptr; using std::endl; using std::set; @@ -1602,25 +1604,28 @@ namespace mongo { result.append( "result" , config.outputOptions.collectionName ); } - // fetch result from other shards 1 chunk at a time - // it would be better to do just one big $or query, but then the sorting would not be efficient - string shardName = shardingState.getShardName(); - DBConfigPtr confOut = grid.getDBConfig( dbname , false ); - - if (!confOut) { - log() << "Sharding metadata for output database: " << dbname - << " does not exist"; - return false; + auto status = grid.catalogCache()->getDatabase(dbname); + if (!status.isOK()) { + return appendCommandStatus(result, status.getStatus()); } + shared_ptr<DBConfig> confOut = status.getValue(); + vector<ChunkPtr> chunks; if ( confOut->isSharded(config.outputOptions.finalNamespace) ) { ChunkManagerPtr cm = confOut->getChunkManager( config.outputOptions.finalNamespace); + + // Fetch result from other shards 1 chunk at a time. It would be better to do + // just one big $or query, but then the sorting would not be efficient. + const string shardName = shardingState.getShardName(); const ChunkMap& chunkMap = cm->getChunkMap(); + for ( ChunkMap::const_iterator it = chunkMap.begin(); it != chunkMap.end(); ++it ) { ChunkPtr chunk = it->second; - if (chunk->getShard().getName() == shardName) chunks.push_back(chunk); + if (chunk->getShard().getName() == shardName) { + chunks.push_back(chunk); + } } } diff --git a/src/mongo/s/balance.cpp b/src/mongo/s/balance.cpp index 9a4fbcdbd78..1b556bed8f4 100644 --- a/src/mongo/s/balance.cpp +++ b/src/mongo/s/balance.cpp @@ -40,6 +40,7 @@ #include "mongo/db/server_options.h" #include "mongo/db/write_concern.h" #include "mongo/db/write_concern_options.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/catalog/catalog_manager.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/chunk_manager.h" @@ -64,6 +65,7 @@ namespace mongo { using boost::scoped_ptr; + using boost::shared_ptr; using std::auto_ptr; using std::endl; using std::map; @@ -119,10 +121,13 @@ namespace mongo { // at the moment. // TODO: Handle all these things more cleanly, since they're expected problems const CandidateChunk& chunkInfo = *it->get(); + const NamespaceString nss(chunkInfo.ns); + try { + auto status = grid.catalogCache()->getDatabase(nss.db().toString()); + fassert(28628, status.getStatus()); - DBConfigPtr cfg = grid.getDBConfig( chunkInfo.ns ); - verify( cfg ); + shared_ptr<DBConfig> cfg = status.getValue(); // NOTE: We purposely do not reload metadata here, since _doBalanceRound already // tried to do so once. 
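
The balancer hunks above, the parallel cursor changes, and the command rewrites below all consume the same StatusWith<shared_ptr<DBConfig>> result, just with different failure policies. A side-by-side sketch of the three idioms this commit uses, assuming the usual mongo/s and logging headers; the wrapper function names are invented for illustration.

    // 1) Must-exist path (chunk move): failure is treated as fatal.
    boost::shared_ptr<DBConfig> getConfigOrFassert(const std::string& dbName) {
        auto status = grid.catalogCache()->getDatabase(dbName);
        fassert(28628, status.getStatus());   // same assertion id as balance.cpp
        return status.getValue();
    }

    // 2) Best-effort path (balancer round): log a warning and let the caller skip.
    boost::shared_ptr<DBConfig> getConfigOrNull(const std::string& dbName) {
        auto status = grid.catalogCache()->getDatabase(dbName);
        if (!status.isOK()) {
            warning() << "could not load db config for " << dbName
                      << ": " << status.getStatus();
            return boost::shared_ptr<DBConfig>();
        }
        return status.getValue();
    }

    // 3) Command/auth path: convert the failure into a uasserted exception.
    boost::shared_ptr<DBConfig> getConfigOrThrow(const std::string& dbName) {
        return uassertStatusOK(grid.catalogCache()->getDatabase(dbName));
    }
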
@@ -470,12 +475,16 @@ namespace mongo { } cursor.reset(); - DBConfigPtr cfg = grid.getDBConfig( ns ); - if ( !cfg ) { - warning() << "could not load db config to balance " << ns << " collection" << endl; + const NamespaceString nss(ns); + auto statusGetDb = grid.catalogCache()->getDatabase(nss.db().toString()); + if (!statusGetDb.isOK()) { + warning() << "could not load db config to balance " << ns + << ", collection: " << statusGetDb.getStatus(); continue; } + shared_ptr<DBConfig> cfg = statusGetDb.getValue(); + // This line reloads the chunk manager once if this process doesn't know the collection // is sharded yet. ChunkManagerPtr cm = cfg->getChunkManagerIfExists( ns, true ); diff --git a/src/mongo/s/catalog/SConscript b/src/mongo/s/catalog/SConscript index 50b8e38458a..a2739426de4 100644 --- a/src/mongo/s/catalog/SConscript +++ b/src/mongo/s/catalog/SConscript @@ -19,7 +19,8 @@ env.Library( env.Library( target='catalog_manager', source=[ - 'catalog_manager.cpp' + 'catalog_cache.cpp', + 'catalog_manager.cpp', ], LIBDEPS=[ '$BUILD_DIR/mongo/base/base', diff --git a/src/mongo/s/catalog/catalog_cache.cpp b/src/mongo/s/catalog/catalog_cache.cpp new file mode 100644 index 00000000000..03238143af5 --- /dev/null +++ b/src/mongo/s/catalog/catalog_cache.cpp @@ -0,0 +1,89 @@ +/** + * Copyright (C) 2015 MongoDB Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License, version 3, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the GNU Affero General Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "mongo/platform/basic.h" + +#include "mongo/s/catalog/catalog_cache.h" + +#include <boost/make_shared.hpp> + +#include "mongo/base/status_with.h" +#include "mongo/s/catalog/catalog_manager.h" +#include "mongo/s/config.h" +#include "mongo/s/type_database.h" + +namespace mongo { + + using boost::shared_ptr; + using std::string; + + + CatalogCache::CatalogCache(CatalogManager* catalogManager) + : _catalogManager(catalogManager) { + + invariant(_catalogManager); + } + + StatusWith<shared_ptr<DBConfig>> CatalogCache::getDatabase(const string& dbName) { + boost::lock_guard<boost::mutex> guard(_mutex); + + ShardedDatabasesMap::iterator it = _databases.find(dbName); + if (it != _databases.end()) { + return it->second; + } + + // Need to load from the store + StatusWith<DatabaseType> status = _catalogManager->getDatabase(dbName); + if (!status.isOK()) { + return status.getStatus(); + } + + shared_ptr<DBConfig> db = boost::make_shared<DBConfig>(dbName, status.getValue()); + db->load(); + + invariant(_databases.insert(std::make_pair(dbName, db)).second); + + return db; + } + + void CatalogCache::invalidate(const string& dbName) { + boost::lock_guard<boost::mutex> guard(_mutex); + + ShardedDatabasesMap::iterator it = _databases.find(dbName); + if (it != _databases.end()) { + _databases.erase(it); + } + } + + void CatalogCache::invalidateAll() { + boost::lock_guard<boost::mutex> guard(_mutex); + + _databases.clear(); + } + +} // namespace mongo diff --git a/src/mongo/s/catalog/catalog_cache.h b/src/mongo/s/catalog/catalog_cache.h new file mode 100644 index 00000000000..8b4936a02d9 --- /dev/null +++ b/src/mongo/s/catalog/catalog_cache.h @@ -0,0 +1,89 @@ +/** + * Copyright (C) 2015 MongoDB Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License, version 3, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the GNU Affero General Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include <boost/shared_ptr.hpp> +#include <boost/thread/mutex.hpp> +#include <map> +#include <string> + +#include "mongo/base/disallow_copying.h" + +namespace mongo { + + class CatalogManager; + class DBConfig; + template<typename T> class StatusWith; + + + /** + * This is the root of the "read-only" hierarchy of cached catalog metadata. 
It is read only + * in the sense that it only reads from the persistent store, but never writes to it. Instead + * writes happen thorugh the CatalogManager and the cache hierarchy needs to be invalidated. + */ + class CatalogCache { + MONGO_DISALLOW_COPYING(CatalogCache); + public: + explicit CatalogCache(CatalogManager* catalogManager); + + /** + * Retrieves the cached metadata for the specified database. The returned value is still + * owned by the cache and it should not be cached elsewhere, but instead only used as a + * local variable. The reason for this is so that if the cache gets invalidated, the caller + * does not miss getting the most up-to-date value. + * + * @param dbname The name of the database (must not contain dots, etc). + * @return The database if it exists, NULL otherwise. + */ + StatusWith<boost::shared_ptr<DBConfig>> getDatabase(const std::string& dbName); + + /** + * Removes the database information for the specified name from the cache, so that the + * next time getDatabase is called, it will be reloaded. + */ + void invalidate(const std::string& dbName); + + /** + * Purges all cached database information, which will cause the data to be reloaded again. + */ + void invalidateAll(); + + private: + typedef std::map<std::string, boost::shared_ptr<DBConfig>> ShardedDatabasesMap; + + + // Reference to the catalog manager. Not owned. + CatalogManager* const _catalogManager; + + // Databases catalog map and mutex to protect it + boost::mutex _mutex; + ShardedDatabasesMap _databases; + }; + +} // namespace mongo diff --git a/src/mongo/s/catalog/catalog_manager.h b/src/mongo/s/catalog/catalog_manager.h index feb053f9260..61f11a4d8a7 100644 --- a/src/mongo/s/catalog/catalog_manager.h +++ b/src/mongo/s/catalog/catalog_manager.h @@ -43,6 +43,7 @@ namespace mongo { class ConnectionString; class DatabaseType; class OperationContext; + class Shard; class ShardType; class Status; template<typename T> class StatusWith; @@ -109,13 +110,35 @@ namespace mongo { const std::string& name) = 0; /** + * Creates a new database entry for the specified database name in the configuration + * metadata and sets the specified shard as primary. + * + * @param dbName name of the database (case sensitive) + * @param shard Optional shard to use as primary. If NULL is specified, one will be picked + * by the system. + * + * Returns Status::OK on success or any error code indicating the failure. These are some + * of the known failures: + * - NamespaceExists - database already exists + * - DatabaseDifferCaseCode - database already exists, but with a different case + * - ShardNotFound - could not find a shard to place the DB on + */ + virtual Status createDatabase(const std::string& dbName, const Shard* shard) = 0; + + /** * Updates the metadata for a given database. Currently, if the specified DB entry does * not exist, it will be created. */ virtual Status updateDatabase(const std::string& dbName, const DatabaseType& db) = 0; /** - * Retrieves the metadata for a given database. + * Retrieves the metadata for a given database, if it exists. + * + * @param dbName name of the database (case sensitive) + * + * Returns Status::OK along with the database information or any error code indicating the + * failure. 
These are some of the known failures: + * - DatabaseNotFound - database does not exist */ virtual StatusWith<DatabaseType> getDatabase(const std::string& dbName) = 0; diff --git a/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp b/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp index 59490cb0e9e..68cc1b5e3b8 100644 --- a/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp +++ b/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp @@ -362,50 +362,83 @@ namespace { Status CatalogManagerLegacy::enableSharding(const std::string& dbName) { invariant(nsIsDbOnly(dbName)); - try { - DatabaseType db; - - // Check for case sensitivity violations - Status status = _checkDbDoesNotExist(dbName); - if (status.isOK()) { - // Database does not exist, create a new entry - const Shard primary = Shard::pick(); - if (primary.ok()) { - log() << "Placing [" << dbName << "] on: " << primary; - - db.setName(dbName); - db.setPrimary(primary.getName()); - db.setSharded(true); - } - else { - return Status(ErrorCodes::ShardNotFound, "can't find a shard to put new db on"); - } - } - else if (status.code() == ErrorCodes::NamespaceExists) { - // Database exists, so just update it - StatusWith<DatabaseType> dbStatus = getDatabase(dbName); - if (!dbStatus.isOK()) { - return dbStatus.getStatus(); - } - - db = dbStatus.getValue(); + DatabaseType db; + + // Check for case sensitivity violations + Status status = _checkDbDoesNotExist(dbName); + if (status.isOK()) { + // Database does not exist, create a new entry + const Shard primary = Shard::pick(); + if (primary.ok()) { + log() << "Placing [" << dbName << "] on: " << primary; + + db.setName(dbName); + db.setPrimary(primary.getName()); db.setSharded(true); } else { - // Some fatal error - return status; + return Status(ErrorCodes::ShardNotFound, "can't find a shard to put new db on"); + } + } + else if (status.code() == ErrorCodes::NamespaceExists) { + // Database exists, so just update it + StatusWith<DatabaseType> dbStatus = getDatabase(dbName); + if (!dbStatus.isOK()) { + return dbStatus.getStatus(); } - log() << "Enabling sharding for database [" << dbName << "] in config db"; + db = dbStatus.getValue(); + db.setSharded(true); + } + else { + // Some fatal error + return status; + } + + log() << "Enabling sharding for database [" << dbName << "] in config db"; + + return updateDatabase(dbName, db); + } + + Status CatalogManagerLegacy::createDatabase(const std::string& dbName, const Shard* shard) { + invariant(nsIsDbOnly(dbName)); + + // The admin and config databases should never be explicitly created. They "just exist", + // i.e. getDatabase will always return an entry for them. + invariant(dbName != "admin"); + invariant(dbName != "config"); - return updateDatabase(dbName, db); + // Check for case sensitivity violations + Status status = _checkDbDoesNotExist(dbName); + if (!status.isOK()) { + return status; + } + + // Database does not exist, pick a shard and create a new entry + const Shard primaryShard = (shard ? 
*shard : Shard::pick()); + if (!primaryShard.ok()) { + return Status(ErrorCodes::ShardNotFound, "can't find a shard to put new db on"); + } + + log() << "Placing [" << dbName << "] on: " << primaryShard; + + DatabaseType db; + db.setName(dbName); + db.setPrimary(primaryShard.getName()); + db.setSharded(false); + + BatchedCommandResponse response; + status = insert(DatabaseType::ConfigNS, db.toBSON(), &response); + if (status.isOK()) { + return status; } - catch (DBException& e) { - e.addContext("error creating initial database config information"); - warning() << e.what(); - return e.toStatus(); + if (status.code() == ErrorCodes::DuplicateKey) { + return Status(ErrorCodes::NamespaceExists, "database " + dbName + " already exists"); } + + return Status(status.code(), str::stream() << "database metadata write failed for " + << dbName << ". Error: " << response.toBSON()); } StatusWith<string> CatalogManagerLegacy::addShard(const string& name, @@ -645,13 +678,25 @@ namespace { } StatusWith<DatabaseType> CatalogManagerLegacy::getDatabase(const std::string& dbName) { + invariant(nsIsDbOnly(dbName)); + + // The two databases that are hosted on the config server are config and admin + if (dbName == "config" || dbName == "admin") { + DatabaseType dbt; + dbt.setName(dbName); + dbt.setSharded(false); + dbt.setPrimary("config"); + + return dbt; + } + ScopedDbConnection conn(_configServerConnectionString, 30.0); BSONObj dbObj = conn->findOne(DatabaseType::ConfigNS, BSON(DatabaseType::name(dbName))); if (dbObj.isEmpty()) { conn.done(); return Status(ErrorCodes::DatabaseNotFound, - stream() << "database " << dbName << " not found."); + stream() << "database " << dbName << " not found"); } return DatabaseType::fromBSON(dbObj); diff --git a/src/mongo/s/catalog/legacy/catalog_manager_legacy.h b/src/mongo/s/catalog/legacy/catalog_manager_legacy.h index 9f394176dba..68c35f3fb16 100644 --- a/src/mongo/s/catalog/legacy/catalog_manager_legacy.h +++ b/src/mongo/s/catalog/legacy/catalog_manager_legacy.h @@ -59,6 +59,8 @@ namespace mongo { virtual StatusWith<ShardDrainingStatus> removeShard(OperationContext* txn, const std::string& name); + virtual Status createDatabase(const std::string& dbName, const Shard* shard); + virtual Status updateDatabase(const std::string& dbName, const DatabaseType& db); virtual StatusWith<DatabaseType> getDatabase(const std::string& dbName); diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp index 1bdff9c1a8e..67fdf59de47 100644 --- a/src/mongo/s/chunk_manager.cpp +++ b/src/mongo/s/chunk_manager.cpp @@ -38,6 +38,7 @@ #include "mongo/db/query/index_bounds_builder.h" #include "mongo/db/query/query_planner.h" #include "mongo/db/query/query_planner_common.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/catalog/catalog_manager.h" #include "mongo/s/chunk_diff.h" #include "mongo/s/client/shard_connection.h" @@ -309,7 +310,11 @@ namespace { } ChunkManagerPtr ChunkManager::reload(bool force) const { - return grid.getDBConfig(getns())->getChunkManager(getns(), force); + const NamespaceString nss(_ns); + auto status = grid.catalogCache()->getDatabase(nss.db().toString()); + shared_ptr<DBConfig> config = uassertStatusOK(status); + + return config->getChunkManager(getns(), force); } bool ChunkManager::_isValid(const ChunkMap& chunkMap) { diff --git a/src/mongo/s/chunk_manager_targeter.cpp b/src/mongo/s/chunk_manager_targeter.cpp index 553b509a17c..171738f99e3 100644 --- a/src/mongo/s/chunk_manager_targeter.cpp +++ b/src/mongo/s/chunk_manager_targeter.cpp @@ 
-40,13 +40,13 @@ namespace mongo { + using boost::shared_ptr; + using mongoutils::str::stream; using std::map; using std::set; using std::string; using std::vector; - using mongoutils::str::stream; - namespace { enum UpdateType { @@ -65,25 +65,6 @@ namespace { const int maxWaitMillis = 500; /** - * Helper to get the DBConfigPtr object in an exception-safe way. - */ - bool getDBConfigSafe(StringData db, DBConfigPtr& config, string* errMsg) { - try { - config = grid.getDBConfig(db, true); - if (config) { - return true; - } - - *errMsg = stream() << "could not load or create database " << db; - } - catch (const DBException& ex) { - *errMsg = ex.toString(); - } - - return false; - } - - /** * There are two styles of update expressions: * * Replacement style: coll.update({ x : 1 }, { y : 2 }) @@ -297,14 +278,12 @@ namespace { } Status ChunkManagerTargeter::init() { - DBConfigPtr config; - - string errMsg; - if (!getDBConfigSafe(_nss.db(), config, &errMsg)) { - return Status(ErrorCodes::DatabaseNotFound, errMsg); + auto status = grid.implicitCreateDb(_nss.db().toString()); + if (!status.isOK()) { + return status.getStatus(); } - // Get either the chunk manager or primary shard + shared_ptr<DBConfig> config = status.getValue(); config->getChunkManagerOrPrimary(_nss.ns(), _manager, _primary); return Status::OK(); @@ -668,8 +647,9 @@ namespace { Status ChunkManagerTargeter::refreshIfNeeded( bool *wasChanged ) { bool dummy; - if ( !wasChanged ) + if (!wasChanged) { wasChanged = &dummy; + } *wasChanged = false; @@ -677,7 +657,9 @@ namespace { // Did we have any stale config or targeting errors at all? // - if ( !_needsTargetingRefresh && _remoteShardVersions.empty() ) return Status::OK(); + if (!_needsTargetingRefresh && _remoteShardVersions.empty()) { + return Status::OK(); + } // // Get the latest metadata information from the cache if there were issues @@ -686,14 +668,12 @@ namespace { ChunkManagerPtr lastManager = _manager; ShardPtr lastPrimary = _primary; - DBConfigPtr config; - - string errMsg; - if ( !getDBConfigSafe( _nss.db(), config, &errMsg ) ) { - return Status( ErrorCodes::DatabaseNotFound, errMsg ); + auto status = grid.implicitCreateDb(_nss.db().toString()); + if (!status.isOK()) { + return status.getStatus(); } - // Get either the chunk manager or primary shard + shared_ptr<DBConfig> config = status.getValue(); config->getChunkManagerOrPrimary( _nss.ns(), _manager, _primary ); // We now have the latest metadata from the cache. 
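
ChunkManagerTargeter now goes through grid.implicitCreateDb(), whose implementation is not shown in this diff. A rough sketch of how such a helper could be composed from the two APIs the commit adds (CatalogCache::getDatabase and CatalogManager::createDatabase), assuming Grid also exposes the catalog manager as grid.catalogManager(); every name and detail here is illustrative, not the actual implementation.

    StatusWith<boost::shared_ptr<DBConfig>> implicitCreateDbSketch(const std::string& dbName) {
        // Common case: the database already exists (and may already be cached).
        auto dbStatus = grid.catalogCache()->getDatabase(dbName);
        if (dbStatus.isOK()) {
            return dbStatus;
        }
        if (dbStatus.getStatus().code() != ErrorCodes::DatabaseNotFound) {
            return dbStatus.getStatus();
        }

        // Create it with a system-picked primary shard (NULL). NamespaceExists
        // just means a concurrent caller created it first, which is fine.
        // grid.catalogManager() is an assumed accessor, not shown in this diff.
        Status createStatus = grid.catalogManager()->createDatabase(dbName, NULL);
        if (!createStatus.isOK() && createStatus.code() != ErrorCodes::NamespaceExists) {
            return createStatus;
        }

        return grid.catalogCache()->getDatabase(dbName);
    }
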
@@ -757,13 +737,13 @@ namespace { } Status ChunkManagerTargeter::refreshNow( RefreshType refreshType ) { - DBConfigPtr config; - - string errMsg; - if ( !getDBConfigSafe( _nss.db(), config, &errMsg ) ) { - return Status( ErrorCodes::DatabaseNotFound, errMsg ); + auto status = grid.implicitCreateDb(_nss.db().toString()); + if (!status.isOK()) { + return status.getStatus(); } + shared_ptr<DBConfig> config = status.getValue(); + // Try not to spam the configs refreshBackoff(); diff --git a/src/mongo/s/cluster_write.cpp b/src/mongo/s/cluster_write.cpp index e16d977e94b..8145ee80530 100644 --- a/src/mongo/s/cluster_write.cpp +++ b/src/mongo/s/cluster_write.cpp @@ -37,6 +37,7 @@ #include "mongo/base/status.h" #include "mongo/db/write_concern_options.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/catalog/catalog_manager.h" #include "mongo/s/chunk_manager.h" #include "mongo/s/chunk_manager_targeter.h" @@ -50,6 +51,7 @@ namespace mongo { + using boost::shared_ptr; using std::auto_ptr; using std::vector; using std::map; @@ -111,25 +113,23 @@ namespace mongo { /** * Splits the chunks touched based from the targeter stats if needed. */ - void splitIfNeeded(const string& ns, const TargeterStats& stats) { + void splitIfNeeded(const NamespaceString& nss, const TargeterStats& stats) { if (!Chunk::ShouldAutoSplit) { return; } - DBConfigPtr config; - - try { - config = grid.getDBConfig(ns); - } - catch (const DBException& ex) { - warning() << "failed to get database config for " << ns - << " while checking for auto-split: " << causedBy(ex); + auto status = grid.catalogCache()->getDatabase(nss.db().toString()); + if (!status.isOK()) { + warning() << "failed to get database config for " << nss + << " while checking for auto-split: " << status.getStatus(); return; } + shared_ptr<DBConfig> config = status.getValue(); + ChunkManagerPtr chunkManager; ShardPtr dummyShard; - config->getChunkManagerOrPrimary(ns, chunkManager, dummyShard); + config->getChunkManagerOrPrimary(nss, chunkManager, dummyShard); if (!chunkManager) { return; @@ -270,7 +270,7 @@ namespace mongo { exec.executeBatch(request, response); if (_autoSplit) { - splitIfNeeded(request.getNS(), *targeter.getStats()); + splitIfNeeded(request.getNSS(), *targeter.getStats()); } _stats->setShardStats(exec.releaseStats()); diff --git a/src/mongo/s/commands/cluster_drop_database_cmd.cpp b/src/mongo/s/commands/cluster_drop_database_cmd.cpp index 92644cb1887..e701762c69f 100644 --- a/src/mongo/s/commands/cluster_drop_database_cmd.cpp +++ b/src/mongo/s/commands/cluster_drop_database_cmd.cpp @@ -30,13 +30,19 @@ #include "mongo/platform/basic.h" +#include <boost/shared_ptr.hpp> + #include "mongo/base/status.h" #include "mongo/db/commands.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/config.h" #include "mongo/s/grid.h" #include "mongo/util/log.h" namespace mongo { + + using boost::shared_ptr; + namespace { class DropDatabaseCmd : public Command { @@ -85,43 +91,28 @@ namespace { return 0; } - DBConfigPtr conf = grid.getDBConfig(dbname, false); + // Refresh the database metadata + grid.catalogCache()->invalidate(dbname); - log() << "DROP DATABASE: " << dbname; + auto status = grid.catalogCache()->getDatabase(dbname); + if (!status.isOK()) { + if (status == ErrorCodes::DatabaseNotFound) { + result.append("info", "database does not exist"); + return true; + } - if (!conf) { - result.append("info", "database didn't exist"); - return true; + return appendCommandStatus(result, status.getStatus()); } - // - // Reload the database 
configuration so that we're sure a database entry exists - // TODO: This won't work with parallel dropping - // + log() << "DROP DATABASE: " << dbname; - grid.removeDBIfExists(*conf); - grid.getDBConfig(dbname); + shared_ptr<DBConfig> conf = status.getValue(); // TODO: Make dropping logic saner and more tolerant of partial drops. This is // particularly important since a database drop can be aborted by *any* collection // with a distributed namespace lock taken (migrates/splits) - // - // Create a copy of the DB config object to drop, so that no one sees a weird - // intermediate version of the info - // - - DBConfig confCopy(conf->name()); - if (!confCopy.load()) { - errmsg = "could not load database info to drop"; - return false; - } - - // Enable sharding so we can correctly retry failed drops - // This will re-drop old sharded entries if they exist - confCopy.enableSharding(false); - - if (!confCopy.dropDatabase(errmsg)) { + if (!conf->dropDatabase(errmsg)) { return false; } diff --git a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp index 0f9f0406245..b100d306594 100644 --- a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp +++ b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp @@ -39,6 +39,7 @@ #include "mongo/db/auth/authorization_session.h" #include "mongo/db/client_basic.h" #include "mongo/db/commands.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/catalog/catalog_manager.h" #include "mongo/s/config.h" #include "mongo/s/grid.h" @@ -116,12 +117,8 @@ namespace { audit::logEnableSharding(ClientBasic::getCurrent(), dbname); } - // Make sure to update any stale metadata - DBConfigPtr db = grid.getDBConfig(dbname); - if (!db->load()) { - status = Status(ErrorCodes::OperationFailed, - str::stream() << "error loading database info for db: " << dbname); - } + // Make sure to force update of any stale metadata + grid.catalogCache()->invalidate(dbname); return appendCommandStatus(result, status); } diff --git a/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp b/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp index 439221a1894..0b80c544b26 100644 --- a/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp +++ b/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp @@ -29,6 +29,7 @@ #include "mongo/platform/basic.h" #include "mongo/db/commands.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/grid.h" namespace mongo { @@ -70,7 +71,8 @@ namespace { BSONObjBuilder& result, bool fromRepl) { - grid.flushConfig(); + grid.catalogCache()->invalidateAll(); + result.appendBool("flushed", true); return true; } diff --git a/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp b/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp index 6137310dd3e..04cff5e0b80 100644 --- a/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp +++ b/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp @@ -28,16 +28,22 @@ #include "mongo/platform/basic.h" +#include <boost/shared_ptr.hpp> + #include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/chunk_manager.h" #include "mongo/s/grid.h" namespace mongo { + + using boost::shared_ptr; + namespace { class GetShardVersion : public Command { @@ -87,19 +93,24 @@ namespace { BSONObjBuilder& result, bool fromRepl) { 
- const std::string ns = parseNs(dbname, cmdObj); - if (ns.size() == 0) { - errmsg = "need to specify full namespace"; - return false; + const NamespaceString nss(parseNs(dbname, cmdObj)); + if (nss.size() == 0) { + return appendCommandStatus(result, Status(ErrorCodes::InvalidNamespace, + "no namespace specified")); } - DBConfigPtr config = grid.getDBConfig(ns); - if (!config->isSharded(ns)) { - errmsg = "ns not sharded."; - return false; + auto status = grid.catalogCache()->getDatabase(nss.db().toString()); + if (!status.isOK()) { + return appendCommandStatus(result, status.getStatus()); + } + + boost::shared_ptr<DBConfig> config = status.getValue(); + if (!config->isSharded(nss.ns())) { + return appendCommandStatus(result, Status(ErrorCodes::NamespaceNotSharded, + "ns [" + nss.ns() + " is not sharded.")); } - ChunkManagerPtr cm = config->getChunkManagerIfExists(ns); + ChunkManagerPtr cm = config->getChunkManagerIfExists(nss.ns()); if (!cm) { errmsg = "no chunk manager?"; return false; diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp index 91dd0fecf2c..126f68a770f 100644 --- a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp +++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp @@ -28,7 +28,8 @@ #include "mongo/platform/basic.h" -#include "mongo/base/init.h" +#include <boost/shared_ptr.hpp> + #include "mongo/client/connpool.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" @@ -36,6 +37,7 @@ #include "mongo/db/commands.h" #include "mongo/db/field_parser.h" #include "mongo/db/namespace_string.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/chunk_manager.h" #include "mongo/s/config.h" #include "mongo/s/grid.h" @@ -43,10 +45,13 @@ namespace mongo { + using boost::shared_ptr; using std::string; using std::stringstream; using std::vector; +namespace { + /** * Mongos-side command for merging chunks, passes command to appropriate shard. */ @@ -86,17 +91,6 @@ namespace mongo { static BSONField<string> shardNameField; static BSONField<string> configField; - // TODO: This refresh logic should be consolidated - ChunkManagerPtr refreshChunkCache(const NamespaceString& nss) { - - DBConfigPtr config = grid.getDBConfig(nss.ns()); - if (!config->isSharded(nss)) - return ChunkManagerPtr(); - - // Refreshes chunks as a side-effect - return config->getChunkManagerIfExists(nss, true); - } - bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, @@ -105,13 +99,6 @@ namespace mongo { BSONObjBuilder& result, bool ) { - string ns = parseNs(dbname, cmdObj); - - if ( ns.size() == 0 ) { - errmsg = "no namespace specified"; - return false; - } - vector<BSONObj> bounds; if ( !FieldParser::extract( cmdObj, boundsField, &bounds, &errmsg ) ) { return false; @@ -140,16 +127,32 @@ namespace mongo { return false; } - // This refreshes the chunk metadata if stale. 
- ChunkManagerPtr manager = refreshChunkCache(NamespaceString(ns)); + const NamespaceString nss(parseNs(dbname, cmdObj)); + if (nss.size() == 0) { + return appendCommandStatus(result, Status(ErrorCodes::InvalidNamespace, + "no namespace specified")); + } + auto status = grid.catalogCache()->getDatabase(nss.db().toString()); + if (!status.isOK()) { + return appendCommandStatus(result, status.getStatus()); + } + + boost::shared_ptr<DBConfig> config = status.getValue(); + if (!config->isSharded(nss.ns())) { + return appendCommandStatus(result, Status(ErrorCodes::NamespaceNotSharded, + "ns [" + nss.ns() + " is not sharded.")); + } + + // This refreshes the chunk metadata if stale. + ChunkManagerPtr manager = config->getChunkManagerIfExists(nss, true); if (!manager) { - errmsg = (string) "collection " + ns + " is not sharded, cannot merge chunks"; - return false; + return appendCommandStatus(result, Status(ErrorCodes::NamespaceNotSharded, + "ns [" + nss.ns() + " is not sharded.")); } if (!manager->getShardKeyPattern().isShardKey(minKey) - || !manager->getShardKeyPattern().isShardKey(maxKey)) { + || !manager->getShardKeyPattern().isShardKey(maxKey)) { errmsg = stream() << "shard key bounds " << "[" << minKey << "," << maxKey << ")" << " are not valid for shard key pattern " << manager->getShardKeyPattern().toBSON(); @@ -181,7 +184,8 @@ namespace mongo { result.appendElements( remoteResult ); return ok; } - }; + + } clusterMergeChunksCommand; BSONField<string> ClusterMergeChunksCommand::nsField( "mergeChunks" ); BSONField<vector<BSONObj> > ClusterMergeChunksCommand::boundsField( "bounds" ); @@ -189,9 +193,5 @@ namespace mongo { BSONField<string> ClusterMergeChunksCommand::configField( "config" ); BSONField<string> ClusterMergeChunksCommand::shardNameField( "shardName" ); - MONGO_INITIALIZER(InitMergeChunksPassCommand)(InitializerContext* context) { - // Leaked intentionally: a Command registers itself when constructed. 
- new ClusterMergeChunksCommand(); - return Status::OK(); - } -} +} // namespace +} // namespace mongo diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp index e2a160cee7d..d2f4a6291d4 100644 --- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp +++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp @@ -30,6 +30,7 @@ #include "mongo/platform/basic.h" +#include <boost/shared_ptr.hpp> #include <boost/scoped_ptr.hpp> #include "mongo/db/audit.h" @@ -40,6 +41,7 @@ #include "mongo/db/client_basic.h" #include "mongo/db/commands.h" #include "mongo/db/write_concern_options.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/chunk_manager.h" #include "mongo/s/client/shard_connection.h" #include "mongo/s/grid.h" @@ -48,6 +50,7 @@ namespace mongo { + using boost::shared_ptr; using boost::scoped_ptr; using std::string; @@ -111,18 +114,32 @@ namespace { ShardConnection::sync(); Timer t; - string ns = parseNs(dbname, cmdObj); - if (ns.size() == 0) { - errmsg = "no ns"; - return false; + + const NamespaceString nss(parseNs(dbname, cmdObj)); + + boost::shared_ptr<DBConfig> config; + + { + if (nss.size() == 0) { + return appendCommandStatus(result, Status(ErrorCodes::InvalidNamespace, + "no namespace specified")); + } + + auto status = grid.catalogCache()->getDatabase(nss.db().toString()); + if (!status.isOK()) { + return appendCommandStatus(result, status.getStatus()); + } + + config = status.getValue(); } - DBConfigPtr config = grid.getDBConfig(ns); - if (!config->isSharded(ns)) { + if (!config->isSharded(nss.ns())) { config->reload(); - if (!config->isSharded(ns)) { - errmsg = "ns not sharded. have to shard before we can move a chunk"; - return false; + + if (!config->isSharded(nss.ns())) { + return appendCommandStatus(result, + Status(ErrorCodes::NamespaceNotSharded, + "ns [" + nss.ns() + " is not sharded.")); } } @@ -135,7 +152,7 @@ namespace { Shard to = Shard::findIfExists(toString); if (!to.ok()) { string msg(str::stream() << - "Could not move chunk in '" << ns << + "Could not move chunk in '" << nss.ns() << "' to shard '" << toString << "' because that shard does not exist"); log() << msg; @@ -159,7 +176,7 @@ namespace { } // This refreshes the chunk metadata if stale. - ChunkManagerPtr info = config->getChunkManager(ns, true); + ChunkManagerPtr info = config->getChunkManager(nss.ns(), true); ChunkPtr chunk; if (!find.isEmpty()) { diff --git a/src/mongo/s/commands/cluster_move_primary_cmd.cpp b/src/mongo/s/commands/cluster_move_primary_cmd.cpp index db7ab43780a..581fde5fef8 100644 --- a/src/mongo/s/commands/cluster_move_primary_cmd.cpp +++ b/src/mongo/s/commands/cluster_move_primary_cmd.cpp @@ -30,6 +30,7 @@ #include "mongo/platform/basic.h" +#include <boost/shared_ptr.hpp> #include <set> #include "mongo/db/auth/action_set.h" @@ -39,6 +40,7 @@ #include "mongo/db/client_basic.h" #include "mongo/db/commands.h" #include "mongo/db/operation_context.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/catalog/catalog_manager.h" #include "mongo/s/config.h" #include "mongo/s/distlock.h" @@ -47,6 +49,7 @@ namespace mongo { + using boost::shared_ptr; using std::set; using std::string; @@ -110,15 +113,16 @@ namespace { return false; } - // Flush the configuration. This can't be perfect, but it's better than nothing. - grid.flushConfig(); + // Flush all cached information. This can't be perfect, but it's better than nothing. 
+ grid.catalogCache()->invalidate(dbname); - DBConfigPtr config = grid.getDBConfig(dbname, false); - if (!config) { - errmsg = "can't find db!"; - return false; + auto status = grid.catalogCache()->getDatabase(dbname); + if (!status.isOK()) { + return appendCommandStatus(result, status.getStatus()); } + shared_ptr<DBConfig> config = status.getValue(); + const string to = cmdObj["to"].valuestrsafe(); if (!to.size()) { errmsg = "you have to specify where you want to move it"; diff --git a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp index 169c1d67b1c..bf9ab27627c 100644 --- a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp +++ b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp @@ -30,6 +30,7 @@ #include "mongo/platform/basic.h" +#include <boost/shared_ptr.hpp> #include <list> #include <set> #include <vector> @@ -44,6 +45,7 @@ #include "mongo/db/commands.h" #include "mongo/db/hasher.h" #include "mongo/db/write_concern_options.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/chunk_manager.h" #include "mongo/s/cluster_write.h" #include "mongo/s/config.h" @@ -52,6 +54,7 @@ namespace mongo { + using boost::shared_ptr; using std::list; using std::set; using std::string; @@ -115,20 +118,26 @@ namespace { } const NamespaceString nsStr(ns); - if (!nsStr.isValid()){ - errmsg = str::stream() << "bad ns[" << ns << "]"; - return false; + if (!nsStr.isValid()) { + return appendCommandStatus( + result, + Status(ErrorCodes::InvalidNamespace, + "invalid collection namespace [" + ns + "]")); } - DBConfigPtr config = grid.getDBConfig(ns); + auto config = uassertStatusOK(grid.catalogCache()->getDatabase(nsStr.db().toString())); if (!config->isShardingEnabled()) { - errmsg = "sharding not enabled for db"; - return false; + return appendCommandStatus( + result, + Status(ErrorCodes::IllegalOperation, + str::stream() << "sharding not enabled for db " << nsStr.db())); } if (config->isSharded(ns)) { - errmsg = "already sharded"; - return false; + return appendCommandStatus( + result, + Status(ErrorCodes::IllegalOperation, + str::stream() << "sharding already enabled for collection " << ns)); } // NOTE: We *must* take ownership of the key here - otherwise the shared BSONObj diff --git a/src/mongo/s/commands/cluster_split_collection_cmd.cpp b/src/mongo/s/commands/cluster_split_collection_cmd.cpp index ce8795a67ef..8673cb00ef0 100644 --- a/src/mongo/s/commands/cluster_split_collection_cmd.cpp +++ b/src/mongo/s/commands/cluster_split_collection_cmd.cpp @@ -30,6 +30,7 @@ #include "mongo/platform/basic.h" +#include <boost/shared_ptr.hpp> #include <string> #include <vector> @@ -40,6 +41,7 @@ #include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/field_parser.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/client/shard_connection.h" #include "mongo/s/chunk_manager.h" #include "mongo/s/grid.h" @@ -47,6 +49,7 @@ namespace mongo { + using boost::shared_ptr; using std::string; using std::vector; @@ -108,19 +111,25 @@ namespace { ShardConnection::sync(); - const std::string ns = parseNs(dbname, cmdObj); - if (ns.size() == 0) { - errmsg = "no ns"; - return false; + const NamespaceString nss(parseNs(dbname, cmdObj)); + if (nss.size() == 0) { + return appendCommandStatus(result, Status(ErrorCodes::InvalidNamespace, + "no namespace specified")); + } + + auto status = grid.catalogCache()->getDatabase(nss.db().toString()); + if (!status.isOK()) { + return appendCommandStatus(result, status.getStatus()); } 
- DBConfigPtr config = grid.getDBConfig(ns); - if (!config->isSharded(ns)) { + boost::shared_ptr<DBConfig> config = status.getValue(); + if (!config->isSharded(nss.ns())) { config->reload(); - if (!config->isSharded(ns)) { - errmsg = "ns not sharded. have to shard before can split"; - return false; + if (!config->isSharded(nss.ns())) { + return appendCommandStatus(result, + Status(ErrorCodes::NamespaceNotSharded, + "ns [" + nss.ns() + " is not sharded.")); } } @@ -179,7 +188,7 @@ namespace { } // This refreshes the chunk metadata if stale. - ChunkManagerPtr info = config->getChunkManager(ns, true); + ChunkManagerPtr info = config->getChunkManager(nss.ns(), true); ChunkPtr chunk; if (!find.isEmpty()) { @@ -255,7 +264,7 @@ namespace { invariant(chunk.get()); log() << "splitting chunk [" << chunk->getMin() << "," << chunk->getMax() << ")" - << " in collection " << ns + << " in collection " << nss.ns() << " on shard " << chunk->getShard().getName(); BSONObj res; diff --git a/src/mongo/s/commands_public.cpp b/src/mongo/s/commands_public.cpp index 6e4f89f4f7a..95fd9da14ec 100644 --- a/src/mongo/s/commands_public.cpp +++ b/src/mongo/s/commands_public.cpp @@ -52,7 +52,8 @@ #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/query/lite_parsed_query.h" -#include "mongo/platform/atomic_word.h" +#include "mongo/s/catalog/catalog_cache.h" +#include "mongo/s/catalog/catalog_manager.h" #include "mongo/s/chunk_manager.h" #include "mongo/s/client_info.h" #include "mongo/s/cluster_explain.h" @@ -86,6 +87,24 @@ namespace mongo { using std::stringstream; using std::vector; +namespace { + + bool appendEmptyResultSet(BSONObjBuilder& result, Status status, const std::string& ns) { + invariant(!status.isOK()); + + if (status == ErrorCodes::DatabaseNotFound) { + result << "result" << BSONArray() + << "cursor" << BSON("id" << 0LL << + "ns" << ns << + "firstBatch" << BSONArray()); + return true; + } + + return Command::appendCommandStatus(result, status); + } + +} + namespace dbgrid_pub_cmds { namespace { @@ -181,12 +200,12 @@ namespace mongo { } virtual void getShards(const string& dbName , BSONObj& cmdObj, set<Shard>& shards) { - string fullns = dbName + '.' + cmdObj.firstElement().valuestrsafe(); + const string fullns = dbName + '.' 
+ cmdObj.firstElement().valuestrsafe(); - DBConfigPtr conf = grid.getDBConfig( dbName , false ); - uassert(28588, - str::stream() << "Failed to load db sharding metadata for " << fullns, - conf); + auto status = grid.catalogCache()->getDatabase(dbName); + uassertStatusOK(status.getStatus()); + + shared_ptr<DBConfig> conf = status.getValue(); if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) { shards.insert(conf->getShard(fullns)); @@ -204,25 +223,27 @@ namespace mongo { public: NotAllowedOnShardedCollectionCmd( const char * n ) : PublicGridCommand( n ) {} - // TODO(spencer): remove this in favor of using parseNs - virtual string getFullNS( const string& dbName , const BSONObj& cmdObj ) = 0; - - virtual bool run(OperationContext* txn, const string& dbName , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool) { - string fullns = getFullNS( dbName , cmdObj ); - - DBConfigPtr conf = grid.getDBConfig( dbName , false ); - if (!conf) { - errmsg = str::stream() << "Failed to load db sharding metadata for " << dbName; - return false; - } + virtual bool run(OperationContext* txn, + const string& dbName, + BSONObj& cmdObj, + int options, + string& errmsg, + BSONObjBuilder& result, + bool fromRepl) { + const string fullns = parseNs(dbName, cmdObj); - if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) { + auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName)); + if (!conf->isSharded(fullns)) { return passthrough( conf , cmdObj , options, result ); } - errmsg = "can't do command: " + name + " on sharded collection"; - return false; + + return appendCommandStatus(result, + Status(ErrorCodes::IllegalOperation, + str::stream() << "can't do command: " << name + << " on sharded collection")); } + }; // ---- @@ -524,20 +545,24 @@ namespace mongo { return Status(ErrorCodes::Unauthorized, "unauthorized"); } - bool run(OperationContext* txn, const string& dbName, + bool run(OperationContext* txn, + const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { - DBConfigPtr conf = grid.getDBConfig( dbName , false ); - if (!conf) { - errmsg = str::stream() << "Failed to load db sharding metadata for " << dbName; - return false; + + auto status = grid.implicitCreateDb(dbName); + if (!status.isOK()) { + return appendCommandStatus(result, status.getStatus()); } - return passthrough( conf , cmdObj , result ); + shared_ptr<DBConfig> conf = status.getValue(); + + return passthrough(conf, cmdObj, result); } + } createCmd; class DropCmd : public PublicGridCommand { @@ -550,19 +575,29 @@ namespace mongo { actions.addAction(ActionType::dropCollection); out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions)); } - bool run(OperationContext* txn, const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { - string collection = cmdObj.firstElement().valuestrsafe(); - string fullns = dbName + "." 
+ collection; - DBConfigPtr conf = grid.getDBConfig( dbName , false ); + bool run(OperationContext* txn, + const string& dbName, + BSONObj& cmdObj, + int options, + string& errmsg, + BSONObjBuilder& result, + bool fromRepl) { - log() << "DROP: " << fullns << endl; + auto status = grid.catalogCache()->getDatabase(dbName); + if (!status.isOK()) { + if (status == ErrorCodes::DatabaseNotFound) { + return true; + } - if (!conf) { - errmsg = str::stream() << "Failed to load db sharding metadata for " << dbName; - return false; + return appendCommandStatus(result, status.getStatus()); } + shared_ptr<DBConfig> conf = status.getValue(); + + const string fullns = dbName + "." + cmdObj.firstElement().valuestrsafe(); + log() << "DROP: " << fullns; + if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) { log() << "\tdrop going to do passthrough" << endl; return passthrough( conf , cmdObj , result ); @@ -607,15 +642,14 @@ namespace mongo { return true; } bool run(OperationContext* txn, const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { - string fullnsFrom = cmdObj.firstElement().valuestrsafe(); - string dbNameFrom = nsToDatabase( fullnsFrom ); - DBConfigPtr confFrom = grid.getDBConfig( dbNameFrom , false ); + const string fullnsFrom = cmdObj.firstElement().valuestrsafe(); + const string dbNameFrom = nsToDatabase(fullnsFrom); + auto confFrom = uassertStatusOK(grid.catalogCache()->getDatabase(dbNameFrom)); - string fullnsTo = cmdObj["to"].valuestrsafe(); - string dbNameTo = nsToDatabase( fullnsTo ); - DBConfigPtr confTo = grid.getDBConfig( dbNameTo , false ); + const string fullnsTo = cmdObj["to"].valuestrsafe(); + const string dbNameTo = nsToDatabase(fullnsTo); + auto confTo = uassertStatusOK(grid.catalogCache()->getDatabase(dbNameTo)); - uassert(13140, "Don't recognize source or target DB", confFrom && confTo); uassert(13138, "You can't rename a sharded collection", !confFrom->isSharded(fullnsFrom)); uassert(13139, "You can't rename to a sharded collection", !confTo->isSharded(fullnsTo)); @@ -639,30 +673,45 @@ namespace mongo { const BSONObj& cmdObj) { return copydb::checkAuthForCopydbCommand(client, dbname, cmdObj); } - bool run(OperationContext* txn, const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { - string todb = cmdObj.getStringField("todb"); - uassert(13402, "need a todb argument", !todb.empty()); - DBConfigPtr confTo = grid.getDBConfig( todb ); - uassert(13398, "cant copy to sharded DB", !confTo->isShardingEnabled()); + bool run(OperationContext* txn, + const string& dbName, + BSONObj& cmdObj, + int options, + string& errmsg, + BSONObjBuilder& result, + bool fromRepl) { - string fromhost = cmdObj.getStringField("fromhost"); + const string todb = cmdObj.getStringField("todb"); + uassert(ErrorCodes::EmptyFieldName, "missing todb argument", !todb.empty()); + uassert(ErrorCodes::InvalidNamespace, "invalid todb argument", nsIsDbOnly(todb)); + + auto confTo = uassertStatusOK(grid.implicitCreateDb(todb)); + uassert(ErrorCodes::IllegalOperation, + "cannot copy to a sharded database", + !confTo->isShardingEnabled()); + + const string fromhost = cmdObj.getStringField("fromhost"); if (!fromhost.empty()) { return adminPassthrough( confTo , cmdObj , result ); } else { - string fromdb = cmdObj.getStringField("fromdb"); + const string fromdb = cmdObj.getStringField("fromdb"); uassert(13399, "need a fromdb argument", !fromdb.empty()); - DBConfigPtr confFrom = grid.getDBConfig( fromdb , false ); + shared_ptr<DBConfig> 
confFrom = + uassertStatusOK(grid.catalogCache()->getDatabase(fromdb)); + uassert(13400, "don't know where source DB is", confFrom); uassert(13401, "cant copy from sharded DB", !confFrom->isShardingEnabled()); BSONObjBuilder b; BSONForEach(e, cmdObj) { - if (strcmp(e.fieldName(), "fromhost") != 0) + if (strcmp(e.fieldName(), "fromhost") != 0) { b.append(e); + } } + b.append("fromhost", confFrom->getPrimary().getConnString()); BSONObj fixed = b.obj(); @@ -684,20 +733,16 @@ namespace mongo { out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions)); } bool run(OperationContext* txn, const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { - string collection = cmdObj.firstElement().valuestrsafe(); - string fullns = dbName + "." + collection; - - DBConfigPtr conf = grid.getDBConfig( dbName , false ); - if (!conf) { - errmsg = str::stream() << "Failed to load db sharding metadata for " << dbName; - return false; - } + const string fullns = parseNs(dbName, cmdObj); + auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName)); if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) { result.appendBool("sharded", false); result.append( "primary" , conf->getPrimary().getName() ); + return passthrough( conf , cmdObj , result); } + result.appendBool("sharded", true); ChunkManagerPtr cm = conf->getChunkManager( fullns ); @@ -846,16 +891,20 @@ namespace mongo { std::vector<Privilege>* out) { find_and_modify::addPrivilegesRequiredForFindAndModify(this, dbname, cmdObj, out); } - bool run(OperationContext* txn, const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { - string collection = cmdObj.firstElement().valuestrsafe(); - string fullns = dbName + "." + collection; - DBConfigPtr conf = grid.getDBConfig( dbName , false ); - if (!conf) { - errmsg = str::stream() << "Failed to load db sharding metadata for " << dbName; - return false; - } + bool run(OperationContext* txn, + const string& dbName, + BSONObj& cmdObj, + int options, + string& errmsg, + BSONObjBuilder& result, + bool fromRepl) { + const string fullns = parseNs(dbName, cmdObj); + + // findAndModify should only be creating database if upsert is true, but this + // would require that the parsing be pulled into this function. 
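// [Editor's sketch -- not part of this patch.] One hypothetical way to act on the TODO above
// would be to gate implicit database creation on the command's "upsert" field and fall back to
// a plain catalog cache lookup otherwise; the "upsert" parsing shown here is an assumption for
// illustration only, using only calls that appear elsewhere in this change:
//
//     shared_ptr<DBConfig> conf;
//     if (cmdObj.getBoolField("upsert")) {
//         conf = uassertStatusOK(grid.implicitCreateDb(dbName));
//     }
//     else {
//         conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName));
//     }
//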
+ auto conf = uassertStatusOK(grid.implicitCreateDb(dbName)); if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) { return passthrough( conf , cmdObj , result); } @@ -869,8 +918,9 @@ namespace mongo { cm->getShardKeyPattern().extractShardKeyFromQuery(filter); // Bad query - if (!status.isOK()) + if (!status.isOK()) { return appendCommandStatus(result, status.getStatus()); + } BSONObj shardKey = status.getValue(); uassert(13343, "query for sharded findAndModify must have shardkey", @@ -912,14 +962,9 @@ namespace mongo { out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions)); } bool run(OperationContext* txn, const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { - string fullns = cmdObj.firstElement().String(); - - DBConfigPtr conf = grid.getDBConfig( dbName , false ); - if (!conf) { - errmsg = str::stream() << "Failed to load db sharding metadata for " << dbName; - return false; - } + const string fullns = parseNs(dbName, cmdObj); + auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName)); if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) { return passthrough( conf , cmdObj , result); } @@ -984,10 +1029,6 @@ namespace mongo { out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions)); } - virtual string getFullNS( const string& dbName , const BSONObj& cmdObj ) { - return dbName + "." + cmdObj.firstElement().valuestrsafe(); - } - } convertToCappedCmd; @@ -1001,10 +1042,9 @@ namespace mongo { actions.addAction(ActionType::find); out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions)); } + virtual bool passOptions() const { return true; } - virtual string getFullNS( const string& dbName , const BSONObj& cmdObj ) { - return dbName + "." + cmdObj.firstElement().embeddedObjectUserCheck()["ns"].valuestrsafe(); - } + virtual std::string parseNs(const std::string& dbName, const BSONObj& cmdObj) const { return dbName + "." + cmdObj.firstElement() .embeddedObjectUserCheck()["ns"] @@ -1073,9 +1113,6 @@ namespace mongo { virtual std::string parseNs(const string& dbname, const BSONObj& cmdObj) const { return parseNsFullyQualified(dbname, cmdObj); } - virtual string getFullNS( const string& dbName , const BSONObj& cmdObj ) { - return parseNs(dbName, cmdObj); - } } splitVectorCmd; @@ -1095,17 +1132,16 @@ namespace mongo { out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions)); } bool run(OperationContext* txn, const string& dbName , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool) { - string collection = cmdObj.firstElement().valuestrsafe(); - string fullns = dbName + "." 
+ collection; + const string fullns = parseNs(dbName, cmdObj); - DBConfigPtr conf = grid.getDBConfig( dbName , false ); - if (!conf) { - errmsg = str::stream() << "Failed to load db sharding metadata for " << dbName; - return false; + auto status = grid.catalogCache()->getDatabase(dbName); + if (!status.isOK()) { + return appendEmptyResultSet(result, status.getStatus(), fullns); } + shared_ptr<DBConfig> conf = status.getValue(); if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) { - return passthrough( conf , cmdObj , options, result ); + return passthrough(conf, cmdObj, options, result); } ChunkManagerPtr cm = conf->getChunkManager( fullns ); @@ -1172,13 +1208,9 @@ namespace mongo { } bool run(OperationContext* txn, const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) { - const std::string fullns = parseNs(dbName, cmdObj); - DBConfigPtr conf = grid.getDBConfig( dbName , false ); - if (!conf) { - errmsg = str::stream() << "Failed to load db sharding metadata for " << dbName; - return false; - } + const string fullns = parseNs(dbName, cmdObj); + auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName)); if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) { return passthrough( conf , cmdObj , result ); } @@ -1289,15 +1321,9 @@ namespace mongo { out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions)); } bool run(OperationContext* txn, const string& dbName , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool) { - string collection = cmdObj.firstElement().valuestrsafe(); - string fullns = dbName + "." + collection; - - DBConfigPtr conf = grid.getDBConfig( dbName , false ); - if (!conf) { - errmsg = str::stream() << "Failed to load db sharding metadata for " << dbName; - return false; - } + const string fullns = parseNs(dbName, cmdObj); + auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName)); if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) { return passthrough( conf , cmdObj , options, result ); } @@ -1498,16 +1524,17 @@ namespace mongo { bool run(OperationContext* txn, const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, int retry ) { Timer t; - string collection = cmdObj.firstElement().valuestrsafe(); - string fullns = dbName + "." + collection; + const string collection = cmdObj.firstElement().valuestrsafe(); + const string fullns = dbName + "." + collection; // Abort after two retries, m/r is an expensive operation - if( retry > 2 ){ + if( retry > 2 ) { errmsg = "shard version errors preventing parallel mapreduce, check logs for further info"; return false; } + // Re-check shard version after 1st retry - if( retry > 0 ){ + if( retry > 0 ) { versionManager.forceRemoteCheckShardVersionCB( fullns ); } @@ -1532,24 +1559,35 @@ namespace mongo { finalColLong = outDB + "." 
+ finalColShort; } - DBConfigPtr confIn = grid.getDBConfig( dbName , false ); - if (!confIn) { - errmsg = str::stream() << "Sharding metadata for input database: " << dbName - << " does not exist"; - return false; + // Ensure the input database exists + auto status = grid.catalogCache()->getDatabase(dbName); + if (!status.isOK()) { + return appendCommandStatus(result, status.getStatus()); } - DBConfigPtr confOut = confIn; + shared_ptr<DBConfig> confIn = status.getValue(); + shared_ptr<DBConfig> confOut; + if (customOutDB) { - confOut = grid.getDBConfig( outDB , true ); + // Create the output database implicitly + confOut = uassertStatusOK(grid.implicitCreateDb(outDB)); + } + else { + confOut = confIn; } - bool shardedInput = confIn && confIn->isShardingEnabled() && confIn->isSharded( fullns ); + bool shardedInput = confIn && confIn->isShardingEnabled() && confIn->isSharded(fullns); bool shardedOutput = customOut.getBoolField("sharded"); - if (!shardedOutput) - uassert( 15920 , "Cannot output to a non-sharded collection, a sharded collection exists" , !confOut->isSharded(finalColLong) ); - // should we also prevent going from non-sharded to sharded? during the transition client may see partial data + if (!shardedOutput) { + uassert(15920, + "Cannot output to a non-sharded collection because " + "sharded collection exists already", + !confOut->isSharded(finalColLong)); + + // TODO: Should we also prevent going from non-sharded to sharded? During the + // transition client may see partial data. + } long long maxChunkSizeBytes = 0; if (shardedOutput) { @@ -1568,7 +1606,10 @@ namespace mongo { // modify command to run on shards with output to tmp collection string badShardedField; verify( maxChunkSizeBytes < 0x7fffffff ); - BSONObj shardedCommand = fixForShards( cmdObj , shardResultCollection , badShardedField, static_cast<int>(maxChunkSizeBytes) ); + BSONObj shardedCommand = fixForShards(cmdObj, + shardResultCollection, + badShardedField, + static_cast<int>(maxChunkSizeBytes)); if ( ! shardedInput && ! shardedOutput && ! customOutDB ) { LOG(1) << "simple MR, just passthrough" << endl; @@ -1887,12 +1928,12 @@ namespace mongo { // $eval isn't allowed to access sharded collections, but we need to leave the // shard to detect that. - DBConfigPtr conf = grid.getDBConfig( dbName , false ); - if (!conf) { - errmsg = str::stream() << "Failed to load db sharding metadata for " << dbName; - return false; + auto status = grid.catalogCache()->getDatabase(dbName); + if (!status.isOK()) { + return appendCommandStatus(result, status.getStatus()); } + shared_ptr<DBConfig> conf = status.getValue(); return passthrough( conf , cmdObj , result ); } } evalCmd; @@ -1964,9 +2005,14 @@ namespace mongo { Pipeline::addRequiredPrivileges(this, dbname, cmdObj, out); } - bool PipelineCommand::run(OperationContext* txn, const string &dbName , BSONObj &cmdObj, - int options, string &errmsg, - BSONObjBuilder &result, bool fromRepl) { + bool PipelineCommand::run(OperationContext* txn, + const string &dbName, + BSONObj &cmdObj, + int options, + string &errmsg, + BSONObjBuilder &result, + bool fromRepl) { + const string fullns = parseNs(dbName, cmdObj); intrusive_ptr<ExpressionContext> pExpCtx = @@ -1980,15 +2026,18 @@ namespace mongo { if (!pPipeline.get()) return false; // there was some parsing error - /* - If the system isn't running sharded, or the target collection - isn't sharded, pass this on to a mongod. 
- */ - DBConfigPtr conf = grid.getDBConfig(dbName , true); - massert(17015, "getDBConfig shouldn't return NULL", - conf); - if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) + // If the system isn't running sharded, or the target collection isn't sharded, pass + // this on to a mongod. + auto status = grid.catalogCache()->getDatabase(dbName); + if (!status.isOK()) { + return appendEmptyResultSet(result, status.getStatus(), fullns); + } + + shared_ptr<DBConfig> conf = status.getValue(); + + if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) { return aggPassthrough(conf, cmdObj, result, options); + } /* split the pipeline into pieces for mongods and this mongos */ intrusive_ptr<Pipeline> pShardPipeline(pPipeline->splitForSharded()); @@ -2325,12 +2374,15 @@ namespace mongo { string& errmsg, BSONObjBuilder& result, bool) { - DBConfigPtr conf = grid.getDBConfig( dbName , false ); - if (!conf) { - errmsg = str::stream() << "Failed to load db sharding metadata for " << dbName; - return false; + + auto status = grid.catalogCache()->getDatabase(dbName); + if (!status.isOK()) { + return appendEmptyResultSet(result, + status.getStatus(), + dbName + ".$cmd.listCollections"); } + shared_ptr<DBConfig> conf = status.getValue(); bool retval = passthrough( conf, cmdObj, result ); Status storeCursorStatus = storePossibleCursor(conf->getPrimary().getConnString(), @@ -2355,18 +2407,15 @@ namespace mongo { out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions)); } - bool run(OperationContext* txn, const string& dbName, + bool run(OperationContext* txn, + const string& dbName, BSONObj& cmdObj, - int, + int options, string& errmsg, BSONObjBuilder& result, - bool) { - DBConfigPtr conf = grid.getDBConfig( dbName , false ); - if (!conf) { - errmsg = str::stream() << "Failed to load db sharding metadata for " << dbName; - return false; - } + bool fromRepl) { + auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName)); bool retval = passthrough( conf, cmdObj, result ); Status storeCursorStatus = storePossibleCursor(conf->getPrimary().getConnString(), diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp index 3da36a2e69a..1184e04625d 100644 --- a/src/mongo/s/config.cpp +++ b/src/mongo/s/config.cpp @@ -1,32 +1,30 @@ -// config.cpp - /** -* Copyright (C) 2008 10gen Inc. -* -* This program is free software: you can redistribute it and/or modify -* it under the terms of the GNU Affero General Public License, version 3, -* as published by the Free Software Foundation. -* -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -* GNU Affero General Public License for more details. -* -* You should have received a copy of the GNU Affero General Public License -* along with this program. If not, see <http://www.gnu.org/licenses/>. -* -* As a special exception, the copyright holders give permission to link the -* code of portions of this program with the OpenSSL library under certain -* conditions as described in each individual source file and distribute -* linked combinations including the program with the OpenSSL library. You -* must comply with the GNU Affero General Public License in all respects -* for all of the code used other than as permitted herein. If you modify -* file(s) with this exception, you may extend this exception to your -* version of the file(s), but you are not obligated to do so. 
If you do not -* wish to do so, delete this exception statement from your version. If you -* delete this exception statement from all source files in the program, -* then also delete it in the license file. -*/ + * Copyright (C) 2008-2015 MongoDB Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License, version 3, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the GNU Affero General Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ #define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kSharding @@ -43,6 +41,7 @@ #include "mongo/db/lasterror.h" #include "mongo/db/server_options.h" #include "mongo/db/write_concern.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/catalog/catalog_manager.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_shard.h" @@ -160,16 +159,12 @@ namespace mongo { _dirty = false; } + DBConfig::DBConfig(std::string name, const DatabaseType& dbt) + : _name(name) { - DBConfig::DBConfig(std::string name) - : _name(name), - _primary("config", "", 0 /* maxSize */, false /* draining */), - _shardingEnabled(false) { - invariant(!_name.empty()); - } - - DBConfig::~DBConfig() { - + invariant(_name == dbt.getName()); + _primary.reset(dbt.getPrimary()); + _shardingEnabled = dbt.getSharded(); } bool DBConfig::isSharded( const string& ns ) { @@ -401,7 +396,9 @@ namespace mongo { } } - ChunkManagerPtr DBConfig::getChunkManager( const string& ns , bool shouldReload, bool forceReload ) { + boost::shared_ptr<ChunkManager> DBConfig::getChunkManager(const string& ns, + bool shouldReload, + bool forceReload) { BSONObj key; ChunkVersion oldVersion; ChunkManagerPtr oldManager; @@ -637,12 +634,11 @@ namespace mongo { successful = _reload(); } - // // If we aren't successful loading the database entry, we don't want to keep the stale - // object around which has invalid data. We should remove it instead. - // - - if( ! successful ) grid.removeDBIfExists( *this ); + // object around which has invalid data. 
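// [Editor's note -- illustrative sketch, not part of this patch.] With the local databases
// registry gone, callers that previously relied on Grid::removeDBIfExists()/flushConfig()
// refresh through the catalog cache instead. A typical (hypothetical) caller-side retry,
// mirroring the pattern used in version_manager.cpp later in this change:
//
//     grid.catalogCache()->invalidate(dbName);                    // drop the stale entry
//     auto refreshed = grid.catalogCache()->getDatabase(dbName);  // reload from the catalog
//     if (!refreshed.isOK()) {
//         // e.g. ErrorCodes::DatabaseNotFound -- the database was dropped concurrently
//     }
//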
+ if (!successful) { + grid.catalogCache()->invalidate(_name); + } return successful; } @@ -671,7 +667,7 @@ namespace mongo { } // 2 - grid.removeDB(_name); + grid.catalogCache()->invalidate(_name); Status result = grid.catalogManager()->remove(DatabaseType::ConfigNS, BSON(DatabaseType::name(_name)), @@ -777,8 +773,8 @@ namespace mongo { return true; } - void DBConfig::getAllShards(set<Shard>& shards) const { - boost::lock_guard<boost::mutex> lk( _lock ); + void DBConfig::getAllShards(set<Shard>& shards) { + boost::lock_guard<boost::mutex> lk(_lock); shards.insert(getPrimary()); for (CollectionInfoMap::const_iterator it(_collections.begin()), end(_collections.end()); it != end; ++it) { if (it->second.isSharded()) { @@ -787,9 +783,8 @@ namespace mongo { } } - void DBConfig::getAllShardedCollections( set<string>& namespaces ) const { - - boost::lock_guard<boost::mutex> lk( _lock ); + void DBConfig::getAllShardedCollections( set<string>& namespaces ) { + boost::lock_guard<boost::mutex> lk(_lock); for( CollectionInfoMap::const_iterator i = _collections.begin(); i != _collections.end(); i++ ) { log() << "Coll : " << i->first << " sharded? " << i->second.isSharded() << endl; @@ -805,6 +800,10 @@ namespace mongo { return _primary.getConnString(); } + ConnectionString ConfigServer::getConnectionString() const { + return ConnectionString(_primary.getConnString(), ConnectionString::SYNC); + } + bool ConfigServer::init( const std::string& s ) { vector<string> configdbs; splitStringDelim( s, &configdbs, ',' ); @@ -1264,5 +1263,5 @@ namespace mongo { } - ConfigServer& configServer = dynamic_cast<ConfigServer&>(*(new ConfigServer())); + ConfigServer& configServer = *(new ConfigServer()); } diff --git a/src/mongo/s/config.h b/src/mongo/s/config.h index 9a3128fc114..2b54ef74d1b 100644 --- a/src/mongo/s/config.h +++ b/src/mongo/s/config.h @@ -1,37 +1,30 @@ -// config.h - /** -* Copyright (C) 2008 10gen Inc. -* -* This program is free software: you can redistribute it and/or modify -* it under the terms of the GNU Affero General Public License, version 3, -* as published by the Free Software Foundation. -* -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -* GNU Affero General Public License for more details. -* -* You should have received a copy of the GNU Affero General Public License -* along with this program. If not, see <http://www.gnu.org/licenses/>. -* -* As a special exception, the copyright holders give permission to link the -* code of portions of this program with the OpenSSL library under certain -* conditions as described in each individual source file and distribute -* linked combinations including the program with the OpenSSL library. You -* must comply with the GNU Affero General Public License in all respects -* for all of the code used other than as permitted herein. If you modify -* file(s) with this exception, you may extend this exception to your -* version of the file(s), but you are not obligated to do so. If you do not -* wish to do so, delete this exception statement from your version. If you -* delete this exception statement from all source files in the program, -* then also delete it in the license file. -*/ - -/* This file is things related to the "grid configuration": - - what machines make up the db component of our cloud - - where various ranges of things live -*/ + * Copyright (C) 2008-2015 MongoDB Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License, version 3, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the GNU Affero General Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ #pragma once @@ -46,6 +39,7 @@ namespace mongo { class ChunkManager; class ConfigServer; + class DatabaseType; class DBConfig; typedef boost::shared_ptr<DBConfig> DBConfigPtr; @@ -57,17 +51,22 @@ namespace mongo { */ class DBConfig { public: - DBConfig(std::string name); - virtual ~DBConfig(); + DBConfig(std::string name, const DatabaseType& dbt); + + /** + * The name of the database which this entry caches. + */ + const std::string& name() const { return _name; } - const std::string& name() const { return _name; }; + /** + * Whether sharding is enabled for this database. + */ + bool isShardingEnabled() const { return _shardingEnabled; } /** - * @return if anything in this db is partitioned or not + * Reference to the primary shard for this database. */ - bool isShardingEnabled() { - return _shardingEnabled; - } + const Shard& getPrimary() const { return _primary; } void enableSharding( bool save = true ); @@ -109,11 +108,6 @@ namespace mongo { */ ShardPtr getShardIfExists( const std::string& ns ); - const Shard& getPrimary() const { - uassert( 8041 , (std::string)"no primary shard configured for db: " + _name , _primary.ok() ); - return _primary; - } - void setPrimary( const std::string& s ); bool load(); @@ -121,12 +115,8 @@ namespace mongo { bool dropDatabase( std::string& errmsg ); - // model stuff - - // lockless loading - void getAllShards(std::set<Shard>& shards) const; - - void getAllShardedCollections(std::set<std::string>& namespaces) const; + void getAllShards(std::set<Shard>& shards); + void getAllShardedCollections(std::set<std::string>& namespaces); protected: struct CollectionInfo { @@ -183,15 +173,22 @@ namespace mongo { void _save( bool db = true, bool coll = true ); - const std::string _name; // e.g. "alleyinsider" + // Name of the database which this entry caches + const std::string _name; + + // Primary shard name + Shard _primary; - Shard _primary; // e.g. 
localhost , mongo.foo.com:9999 + // Whether sharding has been enabled for this database bool _shardingEnabled; + // Set of collections and lock to protect access + mongo::mutex _lock; CollectionInfoMap _collections; - mutable mongo::mutex _lock; // TODO: change to r/w lock ?? - mutable mongo::mutex _hitConfigServerLock; + // Ensures that only one thread at a time loads collection configuration data from + // the config server + mongo::mutex _hitConfigServerLock; }; @@ -235,9 +232,7 @@ namespace mongo { void reloadSettings(); - ConnectionString getConnectionString() const { - return ConnectionString( _primary.getConnString() , ConnectionString::SYNC ); - } + ConnectionString getConnectionString() const; void replicaSetChange(const std::string& setName, const std::string& newConnectionString); diff --git a/src/mongo/s/grid.cpp b/src/mongo/s/grid.cpp index ff7ca4f25ff..27d4b51b3d6 100644 --- a/src/mongo/s/grid.cpp +++ b/src/mongo/s/grid.cpp @@ -34,30 +34,24 @@ #include "mongo/s/grid.h" -#include <pcrecpp.h> - +#include "mongo/base/status_with.h" #include "mongo/client/connpool.h" -#include "mongo/db/client.h" -#include "mongo/db/json.h" -#include "mongo/db/namespace_string.h" -#include "mongo/db/write_concern.h" +#include "mongo/s/catalog/catalog_cache.h" +#include "mongo/s/catalog/catalog_manager.h" #include "mongo/s/catalog/legacy/catalog_manager_legacy.h" #include "mongo/s/catalog/type_shard.h" -#include "mongo/s/cluster_write.h" -#include "mongo/s/mongos_options.h" -#include "mongo/s/shard.h" #include "mongo/s/type_collection.h" -#include "mongo/s/type_database.h" #include "mongo/s/type_settings.h" #include "mongo/util/fail_point_service.h" #include "mongo/util/log.h" -#include "mongo/util/stringutils.h" namespace mongo { + using boost::shared_ptr; using std::endl; using std::map; using std::set; + using std::string; using std::vector; MONGO_FP_DECLARE(neverBalance); @@ -75,139 +69,26 @@ namespace mongo { } _catalogManager.reset(cm.release()); + _catalogCache.reset(new CatalogCache(_catalogManager.get())); return true; } - DBConfigPtr Grid::getDBConfig( StringData ns , bool create , const string& shardNameHint ) { - string database = nsToDatabase( ns ); - uassert( 15918, - str::stream() << "invalid database name: " << database, - NamespaceString::validDBName( database ) ); - - boost::lock_guard<boost::mutex> l( _lock ); - - DBConfigPtr& dbConfig = _databases[database]; - if( ! dbConfig ){ - - dbConfig.reset(new DBConfig( database )); - - // Protect initial load from connectivity errors, otherwise we won't be able - // to perform any task. - bool loaded = false; - try { - try { - loaded = dbConfig->load(); - } - catch ( const DBException& ) { - // Retry again, if the config server are now up, the previous call should have - // cleared all the bad connections in the pool and this should succeed. - loaded = dbConfig->load(); - } - } - catch( DBException& e ){ - e.addContext( "error loading initial database config information" ); - warning() << e.what() << endl; - dbConfig.reset(); - throw; - } - - if( ! loaded ){ - - if( create ){ - - // Protect creation of initial db doc from connectivity errors - try{ - - // note here that cc->primary == 0. 
- log() << "couldn't find database [" << database << "] in config db" << endl; - - { - // lets check case - ScopedDbConnection conn(configServer.modelServer(), 30); - - BSONObjBuilder b; - b.appendRegex( "_id" , (string)"^" + - pcrecpp::RE::QuoteMeta( database ) + "$" , "i" ); - BSONObj dbObj = conn->findOne( DatabaseType::ConfigNS , b.obj() ); - conn.done(); - - // If our name is exactly the same as the name we want, try loading - // the database again. - if (!dbObj.isEmpty() && - dbObj[DatabaseType::name()].String() == database) - { - if (dbConfig->load()) return dbConfig; - } - - // TODO: This really shouldn't fall through, but without metadata - // management there's no good way to make sure this works all the time - // when the database is getting rapidly created and dropped. - // For now, just do exactly what we used to do. - - if ( ! dbObj.isEmpty() ) { - uasserted( DatabaseDifferCaseCode, str::stream() - << "can't have 2 databases that just differ on case " - << " have: " << dbObj[DatabaseType::name()].String() - << " want to add: " << database ); - } - } - - Shard primary; - if (database == "admin" || database == "config") { - primary = configServer.getPrimary(); - } - else if ( shardNameHint.empty() ) { - primary = Shard::pick(); - } - else { - // use the shard name if provided - Shard shard; - shard.reset( shardNameHint ); - primary = shard; - } - - if ( primary.ok() ) { - dbConfig->setPrimary( primary.getName() ); // saves 'cc' to configDB - log() << "\t put [" << database << "] on: " << primary << endl; - } - else { - uasserted( 10185 , "can't find a shard to put new db on" ); - } - } - catch( DBException& e ){ - e.addContext( "error creating initial database config information" ); - warning() << e.what() << endl; - dbConfig.reset(); - throw; - } - } - else { - dbConfig.reset(); - } - } + StatusWith<shared_ptr<DBConfig>> Grid::implicitCreateDb(const std::string& dbName) { + auto status = catalogCache()->getDatabase(dbName); + if (status.isOK()) { + return status; } - return dbConfig; - } - - void Grid::removeDB( const std::string& database ) { - uassert( 10186 , "removeDB expects db name" , database.find( '.' 
) == string::npos ); - boost::lock_guard<boost::mutex> l( _lock ); - _databases.erase( database ); - - } - - void Grid::removeDBIfExists(const DBConfig& database) { - boost::lock_guard<boost::mutex> l(_lock); + if (status == ErrorCodes::DatabaseNotFound) { + auto statusCreateDb = catalogManager()->createDatabase(dbName, NULL); + if (statusCreateDb.isOK() || statusCreateDb == ErrorCodes::NamespaceExists) { + return catalogCache()->getDatabase(dbName); + } - map<string, DBConfigPtr>::iterator it = _databases.find(database.name()); - if (it != _databases.end() && it->second.get() == &database) { - _databases.erase(it); - log() << "erased database " << database.name() << " from local registry"; - } - else { - log() << database.name() << "already erased from local registry"; + return statusCreateDb; } + + return status; } bool Grid::allowLocalHost() const { @@ -350,11 +231,6 @@ namespace mongo { return false; } - void Grid::flushConfig() { - boost::lock_guard<boost::mutex> lk( _lock ); - _databases.clear(); - } - BSONObj Grid::getConfigSetting( const std::string& name ) const { ScopedDbConnection conn(configServer.getPrimary().getConnString(), 30); BSONObj result = conn->findOne( SettingsType::ConfigNS, diff --git a/src/mongo/s/grid.h b/src/mongo/s/grid.h index 36988209572..059925e2857 100644 --- a/src/mongo/s/grid.h +++ b/src/mongo/s/grid.h @@ -31,20 +31,25 @@ #pragma once #include <boost/date_time/posix_time/posix_time.hpp> - -#include "mongo/util/time_support.h" -#include "mongo/util/concurrency/mutex.h" +#include <boost/scoped_ptr.hpp> +#include <boost/shared_ptr.hpp> +#include <string> +#include <vector> #include "mongo/s/config.h" namespace mongo { + class CatalogCache; class CatalogManager; + class DBConfig; class SettingsType; + template<typename T> class StatusWith; + /** - * stores meta-information about the grid - * TODO: used shard_ptr for DBConfig pointers + * Holds the global sharding context. Single instance exists for a running server. Exists on + * both MongoD and MongoS. */ class Grid { public: @@ -59,33 +64,9 @@ namespace mongo { bool initCatalogManager(const std::vector<std::string>& configHosts); /** - * gets the config the db. - * will return an empty DBConfig if not in db already - */ - DBConfigPtr getDBConfig( StringData ns , bool create=true , const std::string& shardNameHint="" ); - - /** - * removes db entry. - * on next getDBConfig call will fetch from db - */ - void removeDB( const std::string& db ); - - /** - * removes db entry - only this DBConfig object will be removed, - * other DBConfigs which may have been created in the meantime will not be harmed - * on next getDBConfig call will fetch from db - * - * Using this method avoids race conditions where multiple threads detect a database - * reload has failed. - * - * Example : N threads receive version exceptions and dbConfig.reload(), while - * simultaneously a dropDatabase is occurring. In the meantime, the dropDatabase call - * attempts to create a DBConfig object if one does not exist, to load the db info, - * but the config is repeatedly deleted as soon as it is created. Using this method - * prevents the deletion of configs we don't know about. - * + * Implicitly creates the specified database as non-sharded. 
*/ - void removeDBIfExists( const DBConfig& database ); + StatusWith<boost::shared_ptr<DBConfig>> implicitCreateDb(const std::string& dbName); /** * @return true if shards and config servers are allowed to use 'localhost' in address @@ -128,6 +109,7 @@ namespace mongo { bool getCollShouldBalance(const std::string& ns) const; CatalogManager* catalogManager() const { return _catalogManager.get(); } + CatalogCache* catalogCache() const { return _catalogCache.get(); } /** * @@ -138,10 +120,6 @@ namespace mongo { */ BSONObj getConfigSetting( const std::string& name ) const; - unsigned long long getNextOpTime() const; - - void flushConfig(); - // exposed methods below are for testing only /** @@ -152,24 +130,8 @@ namespace mongo { static bool _inBalancingWindow( const BSONObj& balancerDoc , const boost::posix_time::ptime& now ); private: - /** - * @param name is the chose name for the shard. Parameter is mandatory. - * @return true if it managed to generate a shard name. May return false if (currently) - * 10000 shard - */ - bool _getNewShardName( std::string* name ) const; - - /** - * @return whether a give dbname is used for shard "local" databases (e.g., admin or local) - */ - static bool _isSpecialLocalDB( const std::string& dbName ); - - - // Databases catalog map and mutex to protect it - mongo::mutex _lock; - std::map<std::string, DBConfigPtr> _databases; - boost::scoped_ptr<CatalogManager> _catalogManager; + boost::scoped_ptr<CatalogCache> _catalogCache; // can 'localhost' be used in shard addresses? bool _allowLocalShard; diff --git a/src/mongo/s/request.cpp b/src/mongo/s/request.cpp index c463e19a49f..c189055c747 100644 --- a/src/mongo/s/request.cpp +++ b/src/mongo/s/request.cpp @@ -82,9 +82,6 @@ namespace mongo { } _clientInfo->getAuthorizationSession()->startRequest(NULL); - - grid.getDBConfig(getns()); - _didInit = true; } diff --git a/src/mongo/s/strategy.cpp b/src/mongo/s/strategy.cpp index a19bd252aab..12191034629 100644 --- a/src/mongo/s/strategy.cpp +++ b/src/mongo/s/strategy.cpp @@ -49,6 +49,7 @@ #include "mongo/db/query/lite_parsed_query.h" #include "mongo/db/stats/counters.h" #include "mongo/s/bson_serializable.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/chunk_manager_targeter.h" #include "mongo/s/client/dbclient_multi_command.h" #include "mongo/s/client_info.h" @@ -72,6 +73,7 @@ namespace mongo { using boost::scoped_ptr; + using boost::shared_ptr; using std::endl; using std::set; using std::string; @@ -85,14 +87,18 @@ namespace mongo { /** * Returns true if request is a query for sharded indexes. */ - static bool doShardedIndexQuery( Request& r, const QuerySpec& qSpec ) { - + static bool doShardedIndexQuery(Request& r, const QuerySpec& qSpec) { // Extract the ns field from the query, which may be embedded within the "query" or // "$query" field. 
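// [Editor's aside -- not part of this patch.] For context, a sharded index query such as
// db.system.indexes.find({ns: "test.foo"}) carries the target collection in the filter's "ns"
// field, which is why the database for the catalog cache lookup is derived from it. A
// hypothetical, standalone illustration of the extraction performed below:
//
//     BSONObj filter = BSON("ns" << "test.foo");
//     NamespaceString target(filter["ns"].str());
//     // target.db() == "test"; target.ns() == "test.foo"
//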
- string indexNSQuery(qSpec.filter()["ns"].str()); - DBConfigPtr config = grid.getDBConfig( r.getns() ); + const NamespaceString indexNSSQuery(qSpec.filter()["ns"].str()); + + auto status = grid.catalogCache()->getDatabase(indexNSSQuery.db().toString()); + if (!status.isOK()) { + return false; + } - if ( !config->isSharded( indexNSQuery )) { + shared_ptr<DBConfig> config = status.getValue(); + if (!config->isSharded(indexNSSQuery.ns())) { return false; } @@ -102,7 +108,7 @@ namespace mongo { ShardPtr shard; ChunkManagerPtr cm; - config->getChunkManagerOrPrimary( indexNSQuery, cm, shard ); + config->getChunkManagerOrPrimary(indexNSSQuery.ns(), cm, shard); if ( cm ) { set<Shard> shards; cm->getAllShards( shards ); @@ -533,15 +539,15 @@ namespace mongo { // Note that this implementation will not handle targeting retries and fails when the // sharding metadata is too stale - - DBConfigPtr conf = grid.getDBConfig(db , false); - if (!conf) { + auto status = grid.catalogCache()->getDatabase(db); + if (!status.isOK()) { mongoutils::str::stream ss; ss << "Passthrough command failed: " << command.toString() - << " on ns " << versionedNS << ". Cannot find db config info."; + << " on ns " << versionedNS << ". Caused by " << causedBy(status.getStatus()); return Status(ErrorCodes::IllegalOperation, ss); } + shared_ptr<DBConfig> conf = status.getValue(); if (conf->isSharded(versionedNS)) { mongoutils::str::stream ss; ss << "Passthrough command failed: " << command.toString() @@ -577,14 +583,26 @@ namespace mongo { } void Strategy::getMore( Request& r ) { - Timer getMoreTimer; - const char *ns = r.getns(); + const char* ns = r.getns(); + const int ntoreturn = r.d().pullInt(); + const long long id = r.d().pullInt64(); // TODO: Handle stale config exceptions here from coll being dropped or sharded during op // for now has same semantics as legacy request - DBConfigPtr config = grid.getDBConfig( ns ); + const NamespaceString nss(ns); + auto statusGetDb = grid.catalogCache()->getDatabase(nss.db().toString()); + if (statusGetDb == ErrorCodes::DatabaseNotFound) { + cursorCache.remove(id); + replyToQuery(ResultFlag_CursorNotFound, r.p(), r.m(), 0, 0, 0); + return; + } + + uassertStatusOK(statusGetDb); + + shared_ptr<DBConfig> config = statusGetDb.getValue(); + ShardPtr primary; ChunkManagerPtr info; config->getChunkManagerOrPrimary( ns, info, primary ); @@ -592,10 +610,7 @@ namespace mongo { // // TODO: Cleanup cursor cache, consolidate into single codepath // - - int ntoreturn = r.d().pullInt(); - long long id = r.d().pullInt64(); - string host = cursorCache.getRef( id ); + const string host = cursorCache.getRef(id); ShardedClientCursorPtr cursor = cursorCache.get( id ); int cursorMaxTimeMS = cursorCache.getMaxTimeMS( id ); diff --git a/src/mongo/s/version_manager.cpp b/src/mongo/s/version_manager.cpp index 959efa92aeb..96f56dcad93 100644 --- a/src/mongo/s/version_manager.cpp +++ b/src/mongo/s/version_manager.cpp @@ -34,16 +34,21 @@ #include "mongo/s/version_manager.h" +#include <boost/shared_ptr.hpp> + +#include "mongo/db/namespace_string.h" +#include "mongo/s/catalog/catalog_cache.h" #include "mongo/s/chunk_manager.h" #include "mongo/s/chunk_version.h" #include "mongo/s/client/shard_connection.h" #include "mongo/s/config.h" #include "mongo/s/grid.h" -#include "mongo/s/stale_exception.h" // for SendStaleConfigException +#include "mongo/s/stale_exception.h" #include "mongo/util/log.h" namespace mongo { + using boost::shared_ptr; using std::endl; using std::map; using std::string; @@ -137,17 +142,28 @@ namespace 
mongo { return NULL; } - bool VersionManager::forceRemoteCheckShardVersionCB( const string& ns ){ + bool VersionManager::forceRemoteCheckShardVersionCB(const string& ns) { + const NamespaceString nss(ns); + + // This will force the database catalog entry to be reloaded + grid.catalogCache()->invalidate(nss.db().toString()); + + auto status = grid.catalogCache()->getDatabase(nss.db().toString()); + if (!status.isOK()) { + return false; + } - DBConfigPtr conf = grid.getDBConfig( ns ); - if ( ! conf ) return false; - conf->reload(); + shared_ptr<DBConfig> conf = status.getValue(); // If we don't have a collection, don't refresh the chunk manager - if( nsGetCollection( ns ).size() == 0 ) return false; + if (nsGetCollection(ns).size() == 0) { + return false; + } - ChunkManagerPtr manager = conf->getChunkManagerIfExists( ns, true, true ); - if( ! manager ) return false; + ChunkManagerPtr manager = conf->getChunkManagerIfExists(ns, true, true); + if (!manager) { + return false; + } return true; @@ -247,7 +263,12 @@ namespace mongo { * * @return true if we contacted the remote host */ - bool checkShardVersion( DBClientBase * conn_in , const string& ns , ChunkManagerPtr refManager, bool authoritative , int tryNumber ) { + bool checkShardVersion(DBClientBase* conn_in, + const string& ns, + ChunkManagerPtr refManager, + bool authoritative, + int tryNumber) { + // TODO: cache, optimize, etc... // Empty namespaces are special - we require initialization but not versioning @@ -255,9 +276,13 @@ namespace mongo { return initShardVersionEmptyNS(conn_in); } - DBConfigPtr conf = grid.getDBConfig( ns ); - if ( ! conf ) + const NamespaceString nss(ns); + auto status = grid.catalogCache()->getDatabase(nss.db().toString()); + if (!status.isOK()) { return false; + } + + shared_ptr<DBConfig> conf = status.getValue(); DBClientBase* conn = getVersionable( conn_in ); verify(conn); // errors thrown above |