summaryrefslogtreecommitdiff
path: root/src/mongo/s
diff options
context:
space:
mode:
authorGreg Studer <greg@10gen.com>2013-06-05 15:04:12 -0400
committerGreg Studer <greg@10gen.com>2013-06-17 12:23:32 -0400
commit1459353f995228932bf5077aa4da52d8d68a6871 (patch)
tree9bac07141d42608bc4adffa04714993a2a0a4ea4 /src/mongo/s
parenta2056afecf016c4798cd16f594c719ce6e893196 (diff)
downloadmongo-1459353f995228932bf5077aa4da52d8d68a6871.tar.gz
SERVER-8598 hook up CollectionManager in place of ShardChunkManager
Diffstat (limited to 'src/mongo/s')
-rw-r--r--src/mongo/s/SConscript8
-rw-r--r--src/mongo/s/collection_manager.h4
-rw-r--r--src/mongo/s/d_chunk_manager.cpp442
-rw-r--r--src/mongo/s/d_chunk_manager.h160
-rw-r--r--src/mongo/s/d_logic.h6
-rw-r--r--src/mongo/s/d_migrate.cpp12
-rw-r--r--src/mongo/s/d_state.cpp94
7 files changed, 89 insertions, 637 deletions
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 91c813659f6..5616cc6dfd4 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -103,13 +103,15 @@ env.Library('upgrade', ['cluster_client_internal.cpp',
env.StaticLibrary('metadata', ['collection_manager.cpp',
'metadata_loader.cpp'],
LIBDEPS=['base',
- '$BUILD_DIR/mongo/clientandshell',
+ '$BUILD_DIR/mongo/bson',
'$BUILD_DIR/mongo/base/base',
- '$BUILD_DIR/mongo/bson'])
+ '$BUILD_DIR/mongo/mongocommon', # DBClient library
+ ])
env.CppUnitTest('chunk_diff_test',
'chunk_diff_test.cpp',
LIBDEPS=['metadata',
+ '$BUILD_DIR/mongo/clientandshell', # CmdLine crutch
'$BUILD_DIR/mongo/db/common'],
NO_CRUTCH=True)
@@ -117,6 +119,7 @@ env.CppUnitTest('collection_manager_test',
'collection_manager_test.cpp',
LIBDEPS=['metadata',
'$BUILD_DIR/mongo/mocklib',
+ '$BUILD_DIR/mongo/clientandshell', # CmdLine crutch
'$BUILD_DIR/mongo/db/common'],
NO_CRUTCH=True)
@@ -124,6 +127,7 @@ env.CppUnitTest('metadata_loader_test',
'metadata_loader_test.cpp',
LIBDEPS=['metadata',
'$BUILD_DIR/mongo/mocklib',
+ '$BUILD_DIR/mongo/clientandshell', # CmdLine crutch
'$BUILD_DIR/mongo/db/common'],
NO_CRUTCH=True);
diff --git a/src/mongo/s/collection_manager.h b/src/mongo/s/collection_manager.h
index c421c6e77c6..5a07ba457d6 100644
--- a/src/mongo/s/collection_manager.h
+++ b/src/mongo/s/collection_manager.h
@@ -25,6 +25,10 @@ namespace mongo {
class MetadataLoader;
+ // For now, we handle lifecycle of CollectionManager via shared_ptrs
+ class CollectionManager;
+ typedef shared_ptr<const CollectionManager> CollectionManagerPtr;
+
/**
* The collection manager has metadata information about a collection, in particular the
     * sharding information. Its main goal in life is to be capable of answering if a certain
diff --git a/src/mongo/s/d_chunk_manager.cpp b/src/mongo/s/d_chunk_manager.cpp
deleted file mode 100644
index dde954c6ded..00000000000
--- a/src/mongo/s/d_chunk_manager.cpp
+++ /dev/null
@@ -1,442 +0,0 @@
-// @file d_chunk_manager.cpp
-
-/**
-* Copyright (C) 2010 10gen Inc.
-*
-* This program is free software: you can redistribute it and/or modify
-* it under the terms of the GNU Affero General Public License, version 3,
-* as published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU Affero General Public License for more details.
-*
-* You should have received a copy of the GNU Affero General Public License
-* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-#include "pch.h"
-
-#include "mongo/s/d_chunk_manager.h"
-
-#include "mongo/client/connpool.h"
-#include "mongo/client/dbclientmockcursor.h"
-#include "mongo/db/clientcursor.h"
-#include "mongo/db/instance.h"
-#include "mongo/s/chunk_diff.h"
-#include "mongo/s/type_chunk.h"
-#include "mongo/s/type_collection.h"
-
-namespace mongo {
-
- /**
- * This is an adapter so we can use config diffs - mongos and mongod do them slightly
- * differently
- *
- * The mongod adapter here tracks only a single shard, and stores ranges by (min, max)
- */
- class SCMConfigDiffTracker : public ConfigDiffTracker<BSONObj,string> {
- public:
- SCMConfigDiffTracker( const string& currShard ) : _currShard( currShard ) {}
-
- virtual bool isTracked( const BSONObj& chunkDoc ) const {
- return chunkDoc[ChunkType::shard()].type() == String &&
- chunkDoc[ChunkType::shard()].String() == _currShard;
- }
-
- virtual BSONObj maxFrom( const BSONObj& val ) const {
- return val;
- }
-
- virtual pair<BSONObj,BSONObj> rangeFor( const BSONObj& chunkDoc, const BSONObj& min, const BSONObj& max ) const {
- return make_pair( min, max );
- }
-
- virtual string shardFor( const string& name ) const {
- return name;
- }
-
- virtual string nameFrom( const string& shard ) const {
- return shard;
- }
-
- string _currShard;
-
- };
-
- ShardChunkManager* ShardChunkManager::make( const string& configServer , const string& ns , const string& shardName, ShardChunkManagerPtr oldManager ) {
- auto_ptr<ShardChunkManager> m( new ShardChunkManager() );
- m->_init( configServer , ns , shardName , oldManager );
- return m.release();
- }
-
- void ShardChunkManager::_init( const string& configServer , const string& ns , const string& shardName, ShardChunkManagerPtr oldManager ) {
-
- // have to get a connection to the config db
- // special case if I'm the configdb since I'm locked and if I connect to myself
- // its a deadlock
- scoped_ptr<ScopedDbConnection> scoped;
- scoped_ptr<DBDirectClient> direct;
- DBClientBase * conn;
- if ( configServer.empty() ) {
- direct.reset( new DBDirectClient() );
- conn = direct.get();
- }
- else {
- scoped.reset(new ScopedDbConnection(configServer, 30.0));
- conn = scoped->get();
- }
-
- // get this collection's sharding key
- BSONObj collectionDoc = conn->findOne(CollectionType::ConfigNS, BSON(CollectionType::ns(ns)));
-
- if( collectionDoc.isEmpty() ){
- warning() << ns << " does not exist as a sharded collection" << endl;
- return;
- }
-
- if( collectionDoc[CollectionType::dropped()].Bool() ){
- warning() << ns << " was dropped. Re-shard collection first." << endl;
- return;
- }
-
- _fillCollectionKey( collectionDoc );
-
- map<string,ChunkVersion> versionMap;
- versionMap[ shardName ] = _version;
- _collVersion = ChunkVersion( 0, OID() );
-
- // Check to see if we have an old ShardChunkManager to use
- if( oldManager && oldManager->_collVersion.isSet() ){
-
- versionMap[ shardName ] = oldManager->_version;
- _collVersion = oldManager->_collVersion;
- // TODO: This could be made more efficient if copying not required, but not as
- // frequently reloaded as in mongos.
- _chunksMap = oldManager->_chunksMap;
-
- LOG(2) << "loading new chunks for collection " << ns << " using old chunk manager w/ version " << _collVersion
- << " and " << _chunksMap.size() << " chunks" << endl;
- }
-
- // Attach our config diff tracker to our range map and versions
- SCMConfigDiffTracker differ( shardName );
- differ.attach( ns, _chunksMap, _collVersion, versionMap );
-
- // Need to do the query ourselves, since we may use direct conns to the db
- Query query = differ.configDiffQuery();
- auto_ptr<DBClientCursor> cursor = conn->query(ChunkType::ConfigNS, query);
-
- uassert( 16181, str::stream() << "could not initialize cursor to config server chunks collection for ns " << ns, cursor.get() );
-
- // Diff tracker should *always* find at least one chunk if collection exists
- int diffsApplied = differ.calculateConfigDiff( *cursor );
- if( diffsApplied > 0 ){
-
- LOG(2) << "loaded " << diffsApplied << " chunks into new chunk manager for " << ns
- << " with version " << _collVersion << endl;
-
- // Save the new version of this shard
- _version = versionMap[ shardName ];
- _fillRanges();
-
- }
- else if( diffsApplied == 0 ){
-
- // No chunks were found for the ns
- warning() << "no chunks found when reloading " << ns << ", previous version was " << _collVersion << endl;
-
- _version = ChunkVersion( 0, OID() );
- _collVersion = ChunkVersion( 0, OID() );
- _chunksMap.clear();
- }
- else{
-
- // TODO: make this impossible by making sure we don't migrate / split on this shard during the
- // reload
- // No chunks were found for the ns
- warning() << "invalid chunks found when reloading " << ns << ", previous version was " << _collVersion
- << ", this should be rare" << endl;
-
- // Handle the same way as a connectivity error, for now
- // TODO: handle inline
- uassert( 16229,
- str::stream() << "could not initialize cursor to config server chunks collection for ns "
- << ns, cursor.get() );
- }
-
- if ( scoped.get() )
- scoped->done();
-
- if ( _chunksMap.empty() )
- log() << "no chunk for collection " << ns << " on shard " << shardName << endl;
- }
-
- ShardChunkManager::ShardChunkManager( const BSONObj& collectionDoc , const BSONArray& chunksArr ) {
- _fillCollectionKey( collectionDoc );
-
- scoped_ptr<DBClientMockCursor> c ( new DBClientMockCursor( chunksArr ) );
- _fillChunks( c.get() );
- _fillRanges();
- }
-
- void ShardChunkManager::_fillCollectionKey( const BSONObj& collectionDoc ) {
- BSONElement e = collectionDoc["key"];
- uassert( 13542 , str::stream() << "collection doesn't have a key: " << collectionDoc , ! e.eoo() && e.isABSONObj() );
-
- _keyPattern = e.Obj().getOwned();
- }
-
- void ShardChunkManager::_fillChunks( DBClientCursorInterface* cursor ) {
- verify( cursor );
-
- ChunkVersion version;
- while ( cursor->more() ) {
- BSONObj d = cursor->next();
- _chunksMap.insert(make_pair(d[ChunkType::min()].Obj().getOwned(),
- d[ChunkType::max()].Obj().getOwned()));
-
- ChunkVersion currVersion = ChunkVersion::fromBSON(d[ChunkType::DEPRECATED_lastmod()]);
- if ( currVersion > version ) {
- version = currVersion;
- }
- }
- _version = version;
- }
-
- void ShardChunkManager::_fillRanges() {
- if ( _chunksMap.empty() )
- return;
-
-        // load the chunk information, coalescing their ranges
- // the version for this shard would be the highest version for any of the chunks
- RangeMap::const_iterator it = _chunksMap.begin();
- BSONObj min,max;
- while ( it != _chunksMap.end() ) {
- BSONObj currMin = it->first;
- BSONObj currMax = it->second;
- ++it;
-
- // coalesce the chunk's bounds in ranges if they are adjacent chunks
- if ( min.isEmpty() ) {
- min = currMin;
- max = currMax;
- continue;
- }
- if ( max == currMin ) {
- max = currMax;
- continue;
- }
-
- _rangesMap.insert( make_pair( min , max ) );
-
- min = currMin;
- max = currMax;
- }
- verify( ! min.isEmpty() );
-
- _rangesMap.insert( make_pair( min , max ) );
- }
-
- static bool contains( const BSONObj& min , const BSONObj& max , const BSONObj& key ) {
- return key.woCompare( min ) >= 0 && key.woCompare( max ) < 0;
- }
-
- bool ShardChunkManager::keyBelongsToMe( const BSONObj& key ) const {
-
- if ( _rangesMap.size() == 0 ) return false;
-
- RangeMap::const_iterator it = _rangesMap.upper_bound( key );
- if ( it != _rangesMap.begin() )
- it--;
-
- bool good = contains( it->first , it->second , key );
-
-#if 0
- if ( ! good ) {
- log() << "bad: " << x << " " << it->first << " " << x.woCompare( it->first ) << " " << x.woCompare( it->second ) << endl;
- for ( RangeMap::const_iterator i=_rangesMap.begin(); i!=_rangesMap.end(); ++i ) {
- log() << "\t" << i->first << "\t" << i->second << "\t" << endl;
- }
- }
-#endif
-
- return good;
- }
-
- bool ShardChunkManager::getNextChunk( const BSONObj& lookupKey, BSONObj* foundMin , BSONObj* foundMax ) const {
- verify( foundMin );
- verify( foundMax );
- *foundMin = BSONObj();
- *foundMax = BSONObj();
-
- if ( _chunksMap.empty() ) {
- return true;
- }
-
- RangeMap::const_iterator it;
- if ( lookupKey.isEmpty() ) {
- it = _chunksMap.begin();
- *foundMin = it->first;
- *foundMax = it->second;
- return _chunksMap.size() == 1;
- }
-
- it = _chunksMap.upper_bound( lookupKey );
- if ( it != _chunksMap.end() ) {
- *foundMin = it->first;
- *foundMax = it->second;
- return false;
- }
-
- return true;
- }
-
- void ShardChunkManager::_assertChunkExists( const BSONObj& min , const BSONObj& max ) const {
- RangeMap::const_iterator it = _chunksMap.find( min );
- if ( it == _chunksMap.end() ) {
- uasserted( 13586 , str::stream() << "couldn't find chunk " << min << "->" << max );
- }
-
- if ( it->second.woCompare( max ) != 0 ) {
- ostringstream os;
- os << "ranges differ, "
- << "requested: " << min << " -> " << max << " "
- << "existing: " << ((it == _chunksMap.end()) ? "<empty>" : it->first.toString() + " -> " + it->second.toString());
- uasserted( 13587 , os.str() );
- }
- }
-
- ShardChunkManager* ShardChunkManager::cloneMinus( const BSONObj& min, const BSONObj& max, const ChunkVersion& version ) {
-
- // check that we have the exact chunk that will be subtracted
- _assertChunkExists( min , max );
-
- auto_ptr<ShardChunkManager> p( new ShardChunkManager );
- p->_keyPattern = this->_keyPattern;
-
- if ( _chunksMap.size() == 1 ) {
- // if left with no chunks, just reset version
- uassert( 13590 , str::stream() << "setting version to " << version.toString() << " on removing last chunk", ! version.isSet() );
-
- p->_version = ChunkVersion( 0, OID() );
- p->_collVersion = _collVersion;
-
- }
- else {
- // can't move version backwards when subtracting chunks
- // this is what guarantees that no read or write would be taken once we subtract data from the current shard
- if ( version <= _version ) {
- uasserted( 13585 , str::stream() << "version " << version.toString() << " not greater than " << _version.toString() );
- }
-
- p->_chunksMap = this->_chunksMap;
- p->_chunksMap.erase( min );
- p->_version = version;
- if( version > _collVersion ) p->_collVersion = version;
- else p->_collVersion = this->_collVersion;
- p->_fillRanges();
- }
-
- return p.release();
- }
-
- static bool overlap( const BSONObj& l1 , const BSONObj& h1 , const BSONObj& l2 , const BSONObj& h2 ) {
- return ! ( ( h1.woCompare( l2 ) <= 0 ) || ( h2.woCompare( l1 ) <= 0 ) );
- }
-
- ShardChunkManager* ShardChunkManager::clonePlus( const BSONObj& min , const BSONObj& max , const ChunkVersion& version ) {
-
- // it is acceptable to move version backwards (e.g., undoing a migration that went bad during commit)
- // but only cloning away the last chunk may reset the version to 0
- uassert( 13591 , "version can't be set to zero" , version.isSet() );
-
- if ( ! _chunksMap.empty() ) {
-
- // check that there isn't any chunk on the interval to be added
- RangeMap::const_iterator it = _chunksMap.lower_bound( max );
- if ( it != _chunksMap.begin() ) {
- --it;
- }
- if ( overlap( min , max , it->first , it->second ) ) {
- ostringstream os;
- os << "ranges overlap, "
- << "requested: " << min << " -> " << max << " "
- << "existing: " << it->first.toString() + " -> " + it->second.toString();
- uasserted( 13588 , os.str() );
- }
- }
-
- auto_ptr<ShardChunkManager> p( new ShardChunkManager );
-
- p->_keyPattern = this->_keyPattern;
- p->_chunksMap = this->_chunksMap;
- p->_chunksMap.insert( make_pair( min.getOwned() , max.getOwned() ) );
- p->_version = version;
- if( version > _collVersion ) p->_collVersion = version;
- else p->_collVersion = this->_collVersion;
- p->_fillRanges();
-
- return p.release();
- }
-
- ShardChunkManager* ShardChunkManager::cloneSplit( const BSONObj& min , const BSONObj& max , const vector<BSONObj>& splitKeys ,
- const ChunkVersion& version ) {
-
- // the version required in both resulting chunks could be simply an increment in the minor portion of the current version
- // however, we are enforcing uniqueness over the attributes <ns, lastmod> of the configdb collection 'chunks'
- // so in practice, a migrate somewhere may force this split to pick up a version that has the major portion higher
- // than the one that this shard has been using
- //
- // TODO drop the uniqueness constraint and tighten the check below so that only the minor portion of version changes
- if ( version <= _version ) {
- uasserted( 14039 , str::stream() << "version " << version.toString() << " not greater than " << _version.toString() );
- }
-
- // check that we have the exact chunk that will be split and that the split point is valid
- _assertChunkExists( min , max );
- for ( vector<BSONObj>::const_iterator it = splitKeys.begin() ; it != splitKeys.end() ; ++it ) {
- if ( ! contains( min , max , *it ) ) {
- uasserted(14040, str::stream() << "cannot split " << min << " -> " << max
- << " on " << *it);
- }
- }
-
- auto_ptr<ShardChunkManager> p( new ShardChunkManager );
-
- p->_keyPattern = this->_keyPattern;
- p->_chunksMap = this->_chunksMap;
- p->_version = version; // will increment second, third, ... chunks below
-
- BSONObj startKey = min;
- for ( vector<BSONObj>::const_iterator it = splitKeys.begin() ; it != splitKeys.end() ; ++it ) {
- BSONObj split = *it;
- p->_chunksMap[min] = split.getOwned();
- p->_chunksMap.insert( make_pair( split.getOwned() , max.getOwned() ) );
- p->_version.incMinor();
- startKey = split;
- }
-
- if( version > _collVersion ) p->_collVersion = version;
- else p->_collVersion = this->_collVersion;
-
- p->_fillRanges();
-
- return p.release();
- }
-
- string ShardChunkManager::toString() const {
- StringBuilder ss;
- ss << " ShardChunkManager version: " << _version.toString() << " keyPattern: " << _keyPattern;
- bool first = true;
- for ( RangeMap::const_iterator i=_rangesMap.begin(); i!=_rangesMap.end(); ++i ) {
- if ( first ) first = false;
- else ss << " , ";
-
- ss << i->first << " -> " << i->second;
- }
- return ss.str();
- }
-
-} // namespace mongo
diff --git a/src/mongo/s/d_chunk_manager.h b/src/mongo/s/d_chunk_manager.h
deleted file mode 100644
index ff3c174e93a..00000000000
--- a/src/mongo/s/d_chunk_manager.h
+++ /dev/null
@@ -1,160 +0,0 @@
-// @file d_chunk_manager.h
-
-/**
-* Copyright (C) 2008 10gen Inc.
-*
-* This program is free software: you can redistribute it and/or modify
-* it under the terms of the GNU Affero General Public License, version 3,
-* as published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU Affero General Public License for more details.
-*
-* You should have received a copy of the GNU Affero General Public License
-* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-#pragma once
-
-#include "mongo/pch.h"
-
-#include "mongo/db/jsobj.h"
-#include "mongo/s/chunk_version.h"
-
-namespace mongo {
-
- class ClientCursor;
- class DBClientCursorInterface;
-
- class ShardChunkManager;
- typedef shared_ptr<ShardChunkManager> ShardChunkManagerPtr;
-
- /**
- * Controls the boundaries of all the chunks for a given collection that live in this shard.
- *
- * ShardChunkManager instances never change after construction. There are methods provided that would generate a
- * new manager if new chunks are added, subtracted, or split.
- *
- * TODO
- * The responsibility of maintaining the version for a shard is still shared between this class and its caller. The
- * manager does check corner cases (e.g. cloning out the last chunk generates a manager with version 0) but ultimately
- * still cannot be responsible to set all versions. Currently, they are a function of the global state as opposed to
- * the per-shard one.
- */
- class ShardChunkManager : public boost::noncopyable {
- public:
-
- /**
- * Loads the ShardChunkManager with all boundaries for chunks of a given collection that live in an given
- * shard.
- *
- * @param configServer name of the server where the configDB currently is. Can be empty to indicate
- * that the configDB is running locally
- * @param ns namespace for the collections whose chunks we're interested
- * @param shardName name of the shard that this chunk matcher should track
- *
- * This constructor throws if collection is dropped/malformed and on connectivity errors
- */
- static ShardChunkManager* make( const string& configServer , const string& ns , const string& shardName, ShardChunkManagerPtr oldManager = ShardChunkManagerPtr() );
-
- /**
- * Same as the regular constructor but used in unittest (no access to configDB required).
- *
- * @param collectionDoc simulates config.collection's entry for one collection
- * @param chunksDocs simulates config.chunks' entries for one collection's shard
- */
- ShardChunkManager( const BSONObj& collectionDoc , const BSONArray& chunksDoc );
-
- ~ShardChunkManager() {}
-
- /**
- * Generates a new manager based on 'this's state minus a given chunk.
- *
- * @param min max chunk boundaries for the chunk to subtract
- * @param version that the resulting manager should be at. The version has to be higher than the current one.
-         *          When cloning away the last chunk, version must be 0.
- * @return a new ShardChunkManager, to be owned by the caller
- */
- ShardChunkManager* cloneMinus( const BSONObj& min , const BSONObj& max , const ChunkVersion& version );
-
- /**
- * Generates a new manager based on 'this's state plus a given chunk.
- *
- * @param min max chunk boundaries for the chunk to add
- * @param version that the resulting manager should be at. It can never be 0, though (see CloneMinus).
- * @return a new ShardChunkManager, to be owned by the caller
- */
- ShardChunkManager* clonePlus( const BSONObj& min , const BSONObj& max , const ChunkVersion& version );
-
- /**
- * Generates a new manager by splitting an existing chunk at one or more points.
- *
- * @param min max boundaries of chunk to be split
- * @param splitKeys points to split original chunk at
- * @param version to be used in first chunk. The subsequent chunks would increment the minor version.
- * @return a new ShardChunkManager with the chunk split, to be owned by the caller
- */
- ShardChunkManager* cloneSplit( const BSONObj& min , const BSONObj& max , const vector<BSONObj>& splitKeys ,
- const ChunkVersion& version );
-
- /**
- * Checks whether a document key belongs to the collection on this shard.
- *
- * Note that !keyBelongsToMe() does not necessarily imply the document is orphaned - it
- * might be part of a migration.
- *
- * @param key the full shard key
- */
- bool keyBelongsToMe( const BSONObj& key ) const;
-
- /**
- * Given a chunk's min key (or empty doc), gets the boundary of the chunk following that one (the first).
- *
- * @param lookupKey is the min key for a previously obtained chunk or the empty document
- * @param foundMin IN/OUT min for chunk following the one starting at lookupKey
- * @param foundMax IN/OUT max for the above chunk
- * @return true if the chunk returned is the last one
- */
- bool getNextChunk( const BSONObj& lookupKey, BSONObj* foundMin , BSONObj* foundMax ) const;
-
- // accessors
-
- ChunkVersion getVersion() const { return _version; }
- ChunkVersion getCollVersion() const { return _collVersion; }
- BSONObj getKeyPattern() const { return _keyPattern.getOwned(); }
- unsigned getNumChunks() const { return _chunksMap.size(); }
-
- string toString() const;
- private:
- void _init( const string& configServer , const string& ns , const string& shardName, ShardChunkManagerPtr oldManager = ShardChunkManagerPtr() );
-
- ChunkVersion _collVersion;
- // highest ChunkVersion for which this ShardChunkManager's information is accurate
- ChunkVersion _version;
-
- // key pattern for chunks under this range
- BSONObj _keyPattern;
-
- // a map from a min key into the chunk's (or range's) max boundary
- typedef map< BSONObj, BSONObj , BSONObjCmp > RangeMap;
- RangeMap _chunksMap;
-
-        // a map from a min key into a range or contiguous chunks
-        // redundant but we expect high chunk contiguity, especially in small installations
- RangeMap _rangesMap;
-
- /** constructors helpers */
- void _fillCollectionKey( const BSONObj& collectionDoc );
- void _fillChunks( DBClientCursorInterface* cursor );
- void _fillRanges();
-
- /** throws if the exact chunk is not in the chunks' map */
- void _assertChunkExists( const BSONObj& min , const BSONObj& max ) const;
-
- /** can only be used in the cloning calls */
- ShardChunkManager() {}
- };
-
-} // namespace mongo
diff --git a/src/mongo/s/d_logic.h b/src/mongo/s/d_logic.h
index 0c437c423f5..9b51314c50f 100644
--- a/src/mongo/s/d_logic.h
+++ b/src/mongo/s/d_logic.h
@@ -21,7 +21,7 @@
#include "mongo/pch.h"
#include "mongo/db/jsobj.h"
-#include "mongo/s/d_chunk_manager.h"
+#include "mongo/s/collection_manager.h"
#include "mongo/s/chunk_version.h"
#include "mongo/util/concurrency/ticketholder.h"
@@ -94,7 +94,7 @@ namespace mongo {
// querying support
bool needShardChunkManager( const string& ns ) const;
- ShardChunkManagerPtr getShardChunkManager( const string& ns );
+ CollectionManagerPtr getShardChunkManager( const string& ns );
// chunk migrate and split support
@@ -163,7 +163,7 @@ namespace mongo {
// map from a namespace into the ensemble of chunk ranges that are stored in this mongod
// a ShardChunkManager carries all state we need for a collection at this shard, including its version information
- typedef map<string,ShardChunkManagerPtr> ChunkManagersMap;
+ typedef map<string,CollectionManagerPtr> ChunkManagersMap;
ChunkManagersMap _chunks;
};
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 47f02bde586..9f4f9fd3d08 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -947,7 +947,7 @@ namespace mongo {
// 3.
- ShardChunkManagerPtr chunkManager = shardingState.getShardChunkManager( ns );
+ CollectionManagerPtr chunkManager = shardingState.getShardChunkManager( ns );
verify( chunkManager != NULL );
BSONObj shardKeyPattern = chunkManager->getKeyPattern();
if ( shardKeyPattern.isEmpty() ){
@@ -1176,14 +1176,18 @@ namespace mongo {
// we can figure that out by grabbing the chunkManager installed on 5.a
// TODO expose that manager when installing it
- ShardChunkManagerPtr chunkManager = shardingState.getShardChunkManager( ns );
+ CollectionManagerPtr chunkManager = shardingState.getShardChunkManager( ns );
if( chunkManager->getNumChunks() > 0 ) {
// get another chunk on that shard
BSONObj lookupKey;
- BSONObj bumpMin, bumpMax;
+ BSONObj bumpMin;
+ BSONObj bumpMax;
do {
- chunkManager->getNextChunk( lookupKey , &bumpMin , &bumpMax );
+ ChunkType bumpChunk;
+ chunkManager->getNextChunk( lookupKey , &bumpChunk );
+ bumpMin = bumpChunk.getMin();
+ bumpMax = bumpChunk.getMax();
lookupKey = bumpMin;
}
while( bumpMin == min );
diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp
index 4a671f14372..6d827bd4d11 100644
--- a/src/mongo/s/d_state.cpp
+++ b/src/mongo/s/d_state.cpp
@@ -39,6 +39,7 @@
#include "mongo/s/chunk_version.h"
#include "mongo/s/config.h"
#include "mongo/s/d_logic.h"
+#include "mongo/s/metadata_loader.h"
#include "mongo/s/shard.h"
#include "mongo/util/queue.h"
#include "mongo/util/concurrency/mutex.h"
@@ -137,8 +138,8 @@ namespace mongo {
if ( it == _chunks.end() )
return false;
- ShardChunkManagerPtr p = it->second;
- version = p->getVersion();
+ CollectionManagerPtr p = it->second;
+ version = p->getShardVersion();
return true;
}
@@ -147,8 +148,8 @@ namespace mongo {
ChunkManagersMap::const_iterator it = _chunks.find( ns );
if ( it != _chunks.end() ) {
- ShardChunkManagerPtr p = it->second;
- return p->getVersion();
+ CollectionManagerPtr p = it->second;
+ return p->getShardVersion();
}
else {
return ChunkVersion( 0, OID() );
@@ -160,12 +161,20 @@ namespace mongo {
ChunkManagersMap::const_iterator it = _chunks.find( ns );
verify( it != _chunks.end() ) ;
- ShardChunkManagerPtr p = it->second;
+ CollectionManagerPtr p = it->second;
// empty shards should have version 0
version = ( p->getNumChunks() > 1 ) ? version : ChunkVersion( 0 , OID() );
- ShardChunkManagerPtr cloned( p->cloneMinus( min , max , version ) );
+ ChunkType chunk;
+ chunk.setMin( min );
+ chunk.setMax( max );
+ string errMsg;
+
+ CollectionManagerPtr cloned( p->cloneMinus( chunk, version, &errMsg ) );
+ // Errors reported via assertions here
+ uassert( 16844, errMsg, NULL != cloned.get() );
+
// TODO: a bit dangerous to have two different zero-version states - no-manager and
// no-version
_chunks[ns] = cloned;
@@ -177,18 +186,40 @@ namespace mongo {
ChunkManagersMap::const_iterator it = _chunks.find( ns );
verify( it != _chunks.end() ) ;
- ShardChunkManagerPtr p( it->second->clonePlus( min , max , version ) );
- _chunks[ns] = p;
+
+ ChunkType chunk;
+ chunk.setMin( min );
+ chunk.setMax( max );
+ string errMsg;
+
+ CollectionManagerPtr cloned( it->second->clonePlus( chunk, version, &errMsg ) );
+ // Errors reported via assertions here
+ uassert( 16845, errMsg, NULL != cloned.get() );
+
+ _chunks[ns] = cloned;
}
- void ShardingState::splitChunk( const string& ns , const BSONObj& min , const BSONObj& max , const vector<BSONObj>& splitKeys ,
- ChunkVersion version ) {
+ void ShardingState::splitChunk( const string& ns,
+ const BSONObj& min,
+ const BSONObj& max,
+ const vector<BSONObj>& splitKeys,
+ ChunkVersion version )
+ {
scoped_lock lk( _mutex );
ChunkManagersMap::const_iterator it = _chunks.find( ns );
verify( it != _chunks.end() ) ;
- ShardChunkManagerPtr p( it->second->cloneSplit( min , max , splitKeys , version ) );
- _chunks[ns] = p;
+
+ ChunkType chunk;
+ chunk.setMin( min );
+ chunk.setMax( max );
+ string errMsg;
+
+ CollectionManagerPtr cloned( it->second->cloneSplit( chunk, splitKeys, version, &errMsg ) );
+ // Errors reported via assertions here
+ uassert( 16846, errMsg, NULL != cloned.get() );
+
+ _chunks[ns] = cloned;
}
void ShardingState::resetVersion( const string& ns ) {
@@ -224,7 +255,7 @@ namespace mongo {
// one triggered the 'slow path' (below)
// when the second's request gets here, the version is already current
ChunkVersion storedVersion;
- ShardChunkManagerPtr currManager;
+ CollectionManagerPtr currManager;
{
scoped_lock lk( _mutex );
ChunkManagersMap::const_iterator it = _chunks.find( ns );
@@ -236,7 +267,7 @@ namespace mongo {
}
else{
currManager = it->second;
- if( ( storedVersion = it->second->getVersion() ).isEquivalentTo( version ) )
+ if( ( storedVersion = it->second->getShardVersion() ).isEquivalentTo( version ) )
return true;
}
}
@@ -255,7 +286,12 @@ namespace mongo {
// + a stale client request a version that's not current anymore
// Can't lock default mutex while creating ShardChunkManager, b/c may have to create a new connection to myself
- const string c = (_configServer == _shardHost) ? "" /* local */ : _configServer;
+
+ string errMsg;
+ ConnectionString configLoc = ConnectionString::parse( _configServer, errMsg );
+ uassert( 16847, str::stream() << "bad config server connection string" << _configServer
+ << causedBy( errMsg ),
+ configLoc.type() != ConnectionString::INVALID );
// If our epochs aren't compatible, it's not useful to use the old manager for chunk diffs
if( currManager && ! currManager->getCollVersion().hasCompatibleEpoch( version ) ){
@@ -266,12 +302,16 @@ namespace mongo {
currManager.reset();
}
- ShardChunkManagerPtr p( ShardChunkManager::make( c , ns , _shardName, currManager ) );
+ MetadataLoader mdLoader( configLoc );
+ CollectionManagerPtr newManager( mdLoader.makeCollectionManager( ns,
+ _shardName,
+ currManager.get(),
+ &errMsg ) );
- // Handle the case where the collection isn't sharded more gracefully
- if( p->getKeyPattern().isEmpty() ){
+ if ( !newManager ) {
version = ChunkVersion( 0, OID() );
- // There was an error getting any data for this collection, return false
+ warning() << errMsg << endl;
+ // There was an error getting sharded data for this collection, return false
return false;
}
@@ -285,12 +325,14 @@ namespace mongo {
// since we loaded the chunk manager unlocked, other thread may have done the same
// make sure we keep the freshest config info only
ChunkManagersMap::const_iterator it = _chunks.find( ns );
- if ( it == _chunks.end() || p->getVersion() >= it->second->getVersion() ) {
- _chunks[ns] = p;
+ if ( it == _chunks.end()
+ || newManager->getShardVersion() >= it->second->getShardVersion() )
+ {
+ _chunks[ns] = newManager;
}
ChunkVersion oldVersion = version;
- version = p->getVersion();
+ version = newManager->getShardVersion();
return oldVersion.isEquivalentTo( version );
}
}
@@ -310,8 +352,8 @@ namespace mongo {
scoped_lock lk(_mutex);
for ( ChunkManagersMap::iterator it = _chunks.begin(); it != _chunks.end(); ++it ) {
- ShardChunkManagerPtr p = it->second;
- bb.appendTimestamp( it->first , p->getVersion().toLong() );
+ CollectionManagerPtr p = it->second;
+ bb.appendTimestamp( it->first , p->getShardVersion().toLong() );
}
bb.done();
}
@@ -328,12 +370,12 @@ namespace mongo {
return true;
}
- ShardChunkManagerPtr ShardingState::getShardChunkManager( const string& ns ) {
+ CollectionManagerPtr ShardingState::getShardChunkManager( const string& ns ) {
scoped_lock lk( _mutex );
ChunkManagersMap::const_iterator it = _chunks.find( ns );
if ( it == _chunks.end() ) {
- return ShardChunkManagerPtr();
+ return CollectionManagerPtr();
}
else {
return it->second;