author    Eliot Horowitz <eliot@10gen.com>  2011-05-11 03:04:25 -0400
committer Eliot Horowitz <eliot@10gen.com>  2011-05-11 03:04:25 -0400
commit    87e30d6af67abe735230fa1b7876f51f3d34a8ac (patch)
tree      c14577a5b303cf5f0e4a2f85f8f56fbb2810b341
parent    2ca9d0e5f6d23760d1e7a9694bae3878370bffe0 (diff)
download  mongo-87e30d6af67abe735230fa1b7876f51f3d34a8ac.tar.gz
manual backport of some of the fixes to mongos
-rw-r--r--  s/chunk.cpp           69
-rw-r--r--  s/chunk.h             10
-rw-r--r--  s/commands_admin.cpp   8
-rw-r--r--  s/config.cpp           4
4 files changed, 50 insertions, 41 deletions
diff --git a/s/chunk.cpp b/s/chunk.cpp
index 0892af08a77..34a53821f95 100644
--- a/s/chunk.cpp
+++ b/s/chunk.cpp
@@ -175,7 +175,7 @@ namespace mongo {
conn.done();
}
- ChunkPtr Chunk::singleSplit( bool force , BSONObj& res ) {
+ bool Chunk::singleSplit( bool force , BSONObj& res , ChunkPtr* low, ChunkPtr* high) {
vector<BSONObj> splitPoint;
// if splitting is not obligatory we may return early if there is not enough data
@@ -190,7 +190,7 @@ namespace mongo {
// 1 split point means we have between half the chunk size and the full chunk size
// so we shouldn't split
log(1) << "chunk not full enough to trigger auto-split" << endl;
- return ChunkPtr();
+ return false;
}
splitPoint.push_back( candidates.front() );
@@ -228,13 +228,24 @@ namespace mongo {
if ( splitPoint.empty() || _min == splitPoint.front() || _max == splitPoint.front() ) {
log() << "want to split chunk, but can't find split point chunk " << toString()
<< " got: " << ( splitPoint.empty() ? "<empty>" : splitPoint.front().toString() ) << endl;
- return ChunkPtr();
+ return false;
+ }
+
+ if (!multiSplit( splitPoint , res , true ))
+ return false;
+
+ if (low && high) {
+ low->reset( new Chunk(_manager, _min, splitPoint[0], _shard));
+ high->reset(new Chunk(_manager, splitPoint[0], _max, _shard));
+ }
+ else {
+ assert(!low && !high); // can't have one without the other
}
- return multiSplit( splitPoint , res );
+ return true;
}
- ChunkPtr Chunk::multiSplit( const vector<BSONObj>& m , BSONObj& res ) {
+ bool Chunk::multiSplit( const vector<BSONObj>& m , BSONObj& res , bool resetIfSplit) {
const size_t maxSplitPoints = 8192;
uassert( 10165 , "can't split as shard doesn't have a manager" , _manager );
@@ -263,25 +274,17 @@ namespace mongo {
// but we issue here so that mongos may refresh without needing to be written/read against
_manager->_reload();
- return ChunkPtr();
+ return false;
}
conn.done();
- _manager->_reload();
- // The previous multisplit logic adjusted the boundaries of 'this' chunk. Any call to 'this' object hereafter
- // will see a different _max for the chunk.
- // TODO Untie this dependency since, for metadata purposes, the reload() above already fixed boundaries
- {
- rwlock lk( _manager->_lock , true );
+ if ( resetIfSplit ) {
+ // force reload of chunks
+ grid.getDBConfig(_manager->getns())->getChunkManager(_manager->getns(), true);
+ }
- setMax(m[0].getOwned());
- DEV assert( shared_from_this() );
- _manager->_chunkMap[_max] = shared_from_this();
- }
-
- // return the second half, if a single split, or the first new chunk, if a multisplit.
- return _manager->findChunk( m[0] );
+ return true;
}
bool Chunk::moveAndCommit( const Shard& to , long long chunkSize /* bytes */, BSONObj& res ) {
@@ -334,21 +337,23 @@ namespace mongo {
_dataWritten = 0; // reset so we check often enough
BSONObj res;
- ChunkPtr newShard = singleSplit( false /* does not force a split if not enough data */ , res );
- if ( newShard.get() == NULL ) {
+ ChunkPtr low;
+ ChunkPtr high;
+ bool worked = singleSplit( false /* does not force a split if not enough data */ , res , &low, &high);
+ if ( !worked ) {
// singleSplit would have issued a message if we got here
_dataWritten = 0; // this means there wasn't enough data to split, so don't try again until considerably more data has been written
return false;
}
log() << "autosplitted " << _manager->getns() << " shard: " << toString()
- << " on: " << newShard->getMax() << "(splitThreshold " << splitThreshold << ")"
+ << " on: " << low->getMax() << "(splitThreshold " << splitThreshold << ")"
#ifdef _DEBUG
<< " size: " << getPhysicalSize() // slow - but can be usefule when debugging
#endif
<< endl;
- moveIfShould( newShard );
+ low->moveIfShould( high );
return true;
@@ -874,24 +879,26 @@ namespace mongo {
configServer.logChange( "dropCollection" , _ns , BSONObj() );
}
- void ChunkManager::maybeChunkCollection() {
+ bool ChunkManager::maybeChunkCollection() {
+ ensureIndex_inlock();
+
uassert( 13346 , "can't pre-split an already split collection" , (_chunkMap.size() == 1) );
-
+
ChunkPtr soleChunk = _chunkMap.begin()->second;
vector<BSONObj> splitPoints;
soleChunk->pickSplitVector( splitPoints , Chunk::MaxChunkSize );
if ( splitPoints.empty() ) {
log(1) << "not enough data to warrant chunking " << getns() << endl;
- return;
+ return false;
}
-
+
BSONObj res;
- ChunkPtr p;
- p = soleChunk->multiSplit( splitPoints , res );
- if ( p.get() == NULL ) {
+ bool worked = soleChunk->multiSplit( splitPoints , res , false );
+ if (!worked) {
log( LL_WARNING ) << "could not split '" << getns() << "': " << res << endl;
- return;
+ return false;
}
+ return true;
}
ShardChunkVersion ChunkManager::getVersion( const Shard& shard ) const {
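
The chunk.cpp changes above replace the smart-pointer return convention with a status bool plus optional out-parameters: singleSplit() now reports success directly and, when the caller passes both pointers, hands the resulting halves back through low/high instead of requiring a findChunk() lookup after the reload. A minimal standalone sketch of that shape (Chunk, ChunkPtr, and the split logic here are simplified stand-ins for illustration, with std::shared_ptr standing in for the mongos ChunkPtr typedef, not the real classes):

    #include <cassert>
    #include <cstddef>
    #include <iostream>
    #include <memory>
    #include <string>

    struct Chunk;
    typedef std::shared_ptr<Chunk> ChunkPtr;   // stand-in for the mongos typedef

    struct Chunk {
        std::string min, max;
        long long dataWritten;
        Chunk( const std::string& mn , const std::string& mx )
            : min( mn ) , max( mx ) , dataWritten( 0 ) {}

        // new-style API: success is the return value; the two halves come
        // back through optional out-parameters (pass both or neither)
        bool singleSplit( bool force , std::string& res ,
                          ChunkPtr* low = NULL , ChunkPtr* high = NULL ) {
            if ( ! force && dataWritten < 1000 ) {
                res = "chunk not full enough to trigger auto-split";
                return false;                  // old code returned a null ChunkPtr here
            }
            std::string splitPoint = "m";      // pretend the shard picked this key
            if ( low && high ) {
                low->reset( new Chunk( min , splitPoint ) );
                high->reset( new Chunk( splitPoint , max ) );
            }
            else {
                assert( ! low && ! high );     // can't have one without the other
            }
            return true;
        }
    };

    int main() {
        Chunk c( "a" , "z" );
        std::string res;
        ChunkPtr low , high;
        if ( c.singleSplit( true /* force */ , res , &low , &high ) )
            std::cout << "split at " << low->max << std::endl;
        else
            std::cout << "no split: " << res << std::endl;
        return 0;
    }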
diff --git a/s/chunk.h b/s/chunk.h
index b4bcc357adc..03996823a2f 100644
--- a/s/chunk.h
+++ b/s/chunk.h
@@ -108,18 +108,18 @@ namespace mongo {
* @param force if set to true, will split the chunk regardless of whether the split is really necessary size-wise
* if set to false, will only split if the chunk has reached the currently desired maximum size
* @param res the object containing details about the split execution
- * @return if found a key, return a pointer to the first chunk, otherwise return a null pointer
+ * @return true if a split key was found and the chunk was split successfully
*/
- ChunkPtr singleSplit( bool force , BSONObj& res );
+ bool singleSplit( bool force , BSONObj& res , ChunkPtr* low=NULL, ChunkPtr* high=NULL);
/**
* Splits this chunk at the given key (or keys)
*
* @param splitPoints the vector of keys that should be used to divide this chunk
* @param res the object containing details about the split execution
- * @return shared pointer to the first new Chunk or null pointer if failed
+ * @return true if the split was successful
*/
- ChunkPtr multiSplit( const vector<BSONObj>& splitPoints , BSONObj& res );
+ bool multiSplit( const vector<BSONObj>& splitPoints , BSONObj& res , bool resetIfSplit );
/**
* Asks the mongod holding this chunk to find a key that approximately divides this chunk in two
@@ -314,7 +314,7 @@ namespace mongo {
const ShardKeyPattern& getShardKey() const { return _key; }
bool isUnique() const { return _unique; }
- void maybeChunkCollection();
+ bool maybeChunkCollection();
void getShardsForQuery( set<Shard>& shards , const BSONObj& query );
void getAllShards( set<Shard>& all );
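
The new multiSplit() parameter in the header shifts the reload responsibility: with resetIfSplit=true the split itself forces a chunk-manager refresh (as the admin split command now requests), while with false the caller refreshes afterwards (as DBConfig does after maybeChunkCollection()). A hedged sketch of just that contract, with a print stub standing in for the real grid/ChunkManager machinery:

    #include <iostream>
    #include <string>
    #include <vector>

    // stand-in for grid.getDBConfig(ns)->getChunkManager(ns, true)
    static void forceChunkManagerReload() {
        std::cout << "chunk manager reloaded" << std::endl;
    }

    // illustration of the resetIfSplit contract from the new declaration:
    // on success, either multiSplit refreshes the metadata itself (true)
    // or the caller must do so afterwards (false)
    bool multiSplit( const std::vector<std::string>& splitPoints ,
                     std::string& res , bool resetIfSplit ) {
        if ( splitPoints.empty() ) {
            res = "no split points given";
            return false;                      // old code returned a null ChunkPtr
        }
        // ... ask the shard to split at each point ...
        if ( resetIfSplit )
            forceChunkManagerReload();         // force reload of chunks
        return true;
    }

    int main() {
        std::string res;
        std::vector<std::string> points( 1 , "m" );

        multiSplit( points , res , true );     // split command path: reload now

        // pre-split path: defer, and let the caller reload on success
        if ( multiSplit( points , res , false ) )
            forceChunkManagerReload();         // e.g. DBConfig reloading its state
        return 0;
    }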
diff --git a/s/commands_admin.cpp b/s/commands_admin.cpp
index 1ca983fc13c..7677265c341 100644
--- a/s/commands_admin.cpp
+++ b/s/commands_admin.cpp
@@ -526,9 +526,9 @@ namespace mongo {
log() << "splitting: " << ns << " shard: " << chunk << endl;
BSONObj res;
- ChunkPtr p;
+ bool worked;
if ( middle.isEmpty() ) {
- p = chunk->singleSplit( true /* force a split even if not enough data */ , res );
+ worked = chunk->singleSplit( true /* force a split even if not enough data */ , res );
}
else {
@@ -540,10 +540,10 @@ namespace mongo {
vector<BSONObj> splitPoints;
splitPoints.push_back( middle );
- p = chunk->multiSplit( splitPoints , res );
+ worked = chunk->multiSplit( splitPoints , res , true );
}
- if ( p.get() == NULL ) {
+ if ( !worked ) {
errmsg = "split failed";
result.append( "cause" , res );
return false;
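
After this change the admin split command tracks both branches with a single bool instead of comparing a ChunkPtr against NULL. A condensed sketch of that control flow (FakeChunk and runSplitCommand are illustrative stand-ins, not the real command class):

    #include <iostream>
    #include <string>
    #include <vector>

    // illustrative stand-in for the real Chunk split entry points
    struct FakeChunk {
        bool singleSplit( bool force , std::string& res ) {
            res = ""; return true;             // pretend the split worked
        }
        bool multiSplit( const std::vector<std::string>& pts ,
                         std::string& res , bool resetIfSplit ) {
            res = ""; return ! pts.empty();
        }
    };

    // shape of the updated command: one bool covers both branches
    bool runSplitCommand( FakeChunk& chunk , const std::string& middle ,
                          std::string& errmsg ) {
        std::string res;
        bool worked;
        if ( middle.empty() ) {
            worked = chunk.singleSplit( true /* force */ , res );
        }
        else {
            std::vector<std::string> splitPoints;
            splitPoints.push_back( middle );
            worked = chunk.multiSplit( splitPoints , res , true );
        }
        if ( ! worked ) {
            errmsg = "split failed";           // 'res' would carry the cause
            return false;
        }
        return true;
    }

    int main() {
        FakeChunk c;
        std::string errmsg;
        std::cout << ( runSplitCommand( c , "m" , errmsg ) ? "ok" : errmsg )
                  << std::endl;
        return 0;
    }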
diff --git a/s/config.cpp b/s/config.cpp
index 35a3be2d334..9ed3207edd3 100644
--- a/s/config.cpp
+++ b/s/config.cpp
@@ -143,7 +143,9 @@ namespace mongo {
_save();
try {
- cm->maybeChunkCollection();
+ if ( cm->maybeChunkCollection() ) {
+ _load();
+ }
}
catch ( UserException& e ) {
// failure to chunk is not critical enough to abort the command (and undo the _save()'d configDB state)
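
The config.cpp hunk makes the reload conditional: because maybeChunkCollection() now calls multiSplit() with resetIfSplit=false, the refresh responsibility moves to DBConfig, which reloads its cached state only when a pre-split actually happened. A minimal sketch of that handshake (the Fake* types and the shardCollection wrapper are stand-ins for illustration, not the real DBConfig code):

    #include <iostream>

    // illustrative stand-ins for ChunkManager and DBConfig
    struct FakeChunkManager {
        bool hasEnoughData;
        // now reports whether a pre-split actually happened
        bool maybeChunkCollection() {
            if ( ! hasEnoughData )
                return false;                  // not enough data to warrant chunking
            // ... multiSplit( splitPoints , res , false /* resetIfSplit */ ) ...
            return true;
        }
    };

    struct FakeDBConfig {
        void _load() { std::cout << "reloading config" << std::endl; }

        void shardCollection( FakeChunkManager& cm ) {
            // _save(); ...
            try {
                // multiSplit deferred the refresh (resetIfSplit=false),
                // so reload here, and only if a split happened
                if ( cm.maybeChunkCollection() )
                    _load();
            }
            catch ( ... ) {
                // failure to chunk is not critical enough to abort
            }
        }
    };

    int main() {
        FakeChunkManager cm;
        cm.hasEnoughData = true;
        FakeDBConfig cfg;
        cfg.shardCollection( cm );
        return 0;
    }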