author     Greg Studer <greg@10gen.com>    2013-08-12 17:19:33 -0400
committer  Greg Studer <greg@10gen.com>    2013-08-15 15:46:01 -0400
commit     d53c3240e3f83080f6a9e36edb9b118ff9f97f9b (patch)
tree       5357d0732e803f953f9ffc3fbcd682740ec65cb8
parent     c841975ca8752c27a4bb1484876f23d5360afef9 (diff)
download   mongo-d53c3240e3f83080f6a9e36edb9b118ff9f97f9b.tar.gz
SERVER-8869 mongos command passthrough for mergeChunks plus tests
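
The mongos side is a thin passthrough: it parses the namespace and bounds, picks the shard owning the chunk that contains the min key, forwards the command to that shard, and refreshes its own chunk cache afterwards. A minimal shell sketch of the intended usage against a mongos (the collection name and bound values are illustrative; the syntax mirrors the command's help text and the tests below):

    // Issued against the admin database of a mongos. The bounds must match
    // existing chunk boundaries exactly, and the chunks in the range must all
    // live on the same shard (see the restrictions exercised in the tests).
    var admin = db.getSiblingDB( "admin" );
    var res = admin.runCommand({ mergeChunks : "foo.bar",
                                 bounds : [{ _id : 0 }, { _id : 20 }] });
    assert( res.ok );
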
-rw-r--r--  jstests/sharding/merge_chunks_test.js               113
-rw-r--r--  jstests/sharding/merge_chunks_test_with_data.js      56
-rw-r--r--  jstests/sharding/merge_chunks_test_with_md_ops.js    57
-rw-r--r--  src/mongo/SConscript                                   1
-rw-r--r--  src/mongo/dbtests/merge_chunk_tests.cpp              104
-rw-r--r--  src/mongo/s/merge_chunks_cmd.cpp                     179
6 files changed, 476 insertions, 34 deletions
diff --git a/jstests/sharding/merge_chunks_test.js b/jstests/sharding/merge_chunks_test.js
new file mode 100644
index 00000000000..22d0e8fc0fa
--- /dev/null
+++ b/jstests/sharding/merge_chunks_test.js
@@ -0,0 +1,113 @@
+//
+// Tests that merging chunks via mongos works/doesn't work with different chunk configurations
+//
+
+var options = { separateConfig : true, shardOptions : { verbose : 0 } };
+
+var st = new ShardingTest({ shards : 2, mongos : 2, other : options });
+st.stopBalancer();
+
+var mongos = st.s0;
+var staleMongos = st.s1;
+var admin = mongos.getDB( "admin" );
+var shards = mongos.getCollection( "config.shards" ).find().toArray();
+var coll = mongos.getCollection( "foo.bar" );
+
+assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
+printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
+assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
+
+// Create ranges MIN->0,0->10,(hole),20->40,40->50,50->90,(hole),100->110,110->MAX on first shard
+jsTest.log( "Creating ranges..." );
+
+assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : 10 } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : 20 } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : 40 } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : 50 } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : 90 } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : 100 } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : 110 } }).ok );
+
+assert( admin.runCommand({ moveChunk : coll + "", find : { _id : 10 }, to : shards[1]._id }).ok );
+assert( admin.runCommand({ moveChunk : coll + "", find : { _id : 90 }, to : shards[1]._id }).ok );
+
+st.printShardingStatus();
+
+// Insert some data into each of the consolidated ranges
+coll.insert({ _id : 0 });
+coll.insert({ _id : 40 });
+coll.insert({ _id : 110 });
+assert.eq( null, coll.getDB().getLastError() );
+
+var staleCollection = staleMongos.getCollection( coll + "" );
+
+jsTest.log( "Trying merges that should fail..." );
+
+// Make sure merging three chunks is invalid (for now)
+
+assert( !admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : 20 }, { _id : 90 }] }).ok );
+
+// Make sure merging non-exact chunks is invalid
+
+assert( !admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : MinKey }, { _id : 5 }] }).ok );
+assert( !admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : 5 }, { _id : 10 }] }).ok );
+assert( !admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : 15 }, { _id : 50 }] }).ok );
+assert( !admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : 20 }, { _id : 55 }] }).ok );
+assert( !admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : 105 }, { _id : MaxKey }] }).ok );
+
+// Make sure merging single chunks is invalid
+
+assert( !admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : MinKey }, { _id : 0 }] }).ok );
+assert( !admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : 20 }, { _id : 40 }] }).ok );
+assert( !admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : 110 }, { _id : MaxKey }] }).ok );
+
+// Make sure merging over holes is invalid
+
+assert( !admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : 0 }, { _id : 40 }] }).ok );
+assert( !admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : 50 }, { _id : 110 }] }).ok );
+
+// Make sure merging between shards is invalid
+
+assert( !admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : 0 }, { _id : 20 }] }).ok );
+assert( !admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : 10 }, { _id : 40 }] }).ok );
+
+assert.eq( 3, staleCollection.find().itcount() );
+
+jsTest.log( "Trying merges that should succeed..." );
+
+assert( admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : MinKey }, { _id : 10 }] }).ok );
+
+assert.eq( 3, staleCollection.find().itcount() );
+
+assert( admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : 20 }, { _id : 50 }] }).ok );
+
+assert( admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : 20 }, { _id : 90 }] }).ok );
+
+assert.eq( 3, staleCollection.find().itcount() );
+
+assert( admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : 100 }, { _id : MaxKey }] }).ok );
+
+assert.eq( 3, staleCollection.find().itcount() );
+
+st.printShardingStatus();
+
+st.stop();
+
diff --git a/jstests/sharding/merge_chunks_test_with_data.js b/jstests/sharding/merge_chunks_test_with_data.js
new file mode 100644
index 00000000000..0f057787454
--- /dev/null
+++ b/jstests/sharding/merge_chunks_test_with_data.js
@@ -0,0 +1,56 @@
+//
+// Tests that chunks can only be merged when at least one chunk in the merged range is empty
+//
+
+var options = { separateConfig : true, shardOptions : { verbose : 0 } };
+
+var st = new ShardingTest({ shards : 2, mongos : 2, other : options });
+st.stopBalancer();
+
+var mongos = st.s0;
+var staleMongos = st.s1;
+var admin = mongos.getDB( "admin" );
+var shards = mongos.getCollection( "config.shards" ).find().toArray();
+var coll = mongos.getCollection( "foo.bar" );
+
+assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
+printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
+assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
+
+// Create ranges MIN->0, 0->10,10->20, 20->30,30->40, 40->50,50->60, 60->MAX
+jsTest.log( "Creating ranges..." );
+
+assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : 10 } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : 20 } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : 30 } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : 40 } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : 50 } }).ok );
+assert( admin.runCommand({ split : coll + "", middle : { _id : 60 } }).ok );
+
+st.printShardingStatus();
+
+// Insert data to allow 0->20 and 40->60 to be merged, but too much for 20->40
+coll.insert({ _id : 0 });
+coll.insert({ _id : 20 });
+coll.insert({ _id : 30 });
+coll.insert({ _id : 40 });
+assert.eq( null, coll.getDB().getLastError() );
+
+jsTest.log( "Merging chunks with another empty chunk..." );
+
+assert( admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : 0 }, { _id : 20 }] }).ok );
+
+assert( admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : 40 }, { _id : 60 }] }).ok );
+
+jsTest.log( "Merging two full chunks should fail..." );
+
+assert( !admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : 20 }, { _id : 40 }] }).ok );
+
+st.printShardingStatus();
+
+st.stop();
+
diff --git a/jstests/sharding/merge_chunks_test_with_md_ops.js b/jstests/sharding/merge_chunks_test_with_md_ops.js
new file mode 100644
index 00000000000..f2bebb0bfce
--- /dev/null
+++ b/jstests/sharding/merge_chunks_test_with_md_ops.js
@@ -0,0 +1,57 @@
+//
+// Tests that merging chunks does not prevent the cluster from doing other metadata ops
+//
+
+var options = { separateConfig : true, shardOptions : { verbose : 0 } };
+
+var st = new ShardingTest({ shards : 2, mongos : 2, other : options });
+st.stopBalancer();
+
+var mongos = st.s0;
+var staleMongos = st.s1;
+var admin = mongos.getDB( "admin" );
+var shards = mongos.getCollection( "config.shards" ).find().toArray();
+var coll = mongos.getCollection( "foo.bar" );
+
+assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
+printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
+assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
+
+st.printShardingStatus();
+
+// Split and merge the first chunk repeatedly
+jsTest.log( "Splitting and merging repeatedly..." );
+
+for ( var i = 0; i < 5; i++ ) {
+ assert( admin.runCommand({ split : coll + "", middle : { _id : i } }).ok );
+ assert( admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : MinKey }, { _id : MaxKey }] }).ok );
+ printjson( mongos.getDB("config").chunks.find().toArray() );
+}
+
+// Move the first chunk to the other shard
+jsTest.log( "Moving to another shard..." );
+
+assert( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : shards[1]._id }).ok );
+
+// Split and merge the chunk repeatedly
+jsTest.log( "Splitting and merging repeatedly (again)..." );
+
+for ( var i = 0; i < 5; i++ ) {
+ assert( admin.runCommand({ split : coll + "", middle : { _id : i } }).ok );
+ assert( admin.runCommand({ mergeChunks : coll + "",
+ bounds : [{ _id : MinKey }, { _id : MaxKey }] }).ok );
+ printjson( mongos.getDB("config").chunks.find().toArray() );
+}
+
+// Move the chunk back to the original shard
+jsTest.log( "Moving to original shard..." );
+
+assert( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : shards[0]._id }).ok );
+
+st.printShardingStatus();
+
+jsTest.log( "DONE!" );
+
+st.stop();
+
diff --git a/src/mongo/SConscript b/src/mongo/SConscript
index dabc1aeb0f6..a91e76a46b4 100644
--- a/src/mongo/SConscript
+++ b/src/mongo/SConscript
@@ -580,6 +580,7 @@ mongosLibraryFiles = [
"s/strategy_single.cpp",
"s/commands_admin.cpp",
"s/commands_public.cpp",
+ "s/merge_chunks_cmd.cpp",
"s/request.cpp",
"s/client_info.cpp",
"s/config_server_checker_service.cpp",
diff --git a/src/mongo/dbtests/merge_chunk_tests.cpp b/src/mongo/dbtests/merge_chunk_tests.cpp
index 14b9155dd7f..5463b7fbdcc 100644
--- a/src/mongo/dbtests/merge_chunk_tests.cpp
+++ b/src/mongo/dbtests/merge_chunk_tests.cpp
@@ -15,6 +15,7 @@
*/
#include "mongo/dbtests/config_server_fixture.h"
+#include "mongo/s/chunk.h" // for genID
#include "mongo/s/chunk_version.h"
#include "mongo/s/collection_metadata.h"
#include "mongo/s/d_logic.h"
@@ -32,6 +33,9 @@ namespace mongo {
class MergeChunkFixture: public ConfigServerFixture {
public:
+ /**
+ * Stores ranges for a particular collection and shard starting from some version
+ */
void storeCollectionRanges( const NamespaceString& nss,
const string& shardName,
const vector<KeyRange>& ranges,
@@ -56,7 +60,9 @@ namespace mongo {
for ( vector<KeyRange>::const_iterator it = ranges.begin(); it != ranges.end(); ++it ) {
ChunkType chunk;
- chunk.setName( OID::gen().toString() );
+            // TODO: We should not rely on the serialized ns + minkey being unique in the future;
+            // this causes problems since it links string serialization to correctness.
+ chunk.setName( Chunk::genID( nss, it->minKey ) );
chunk.setShard( shardName );
chunk.setNS( nss.ns() );
chunk.setVersion( nextVersion );
@@ -68,6 +74,39 @@ namespace mongo {
}
}
+ /**
+ * Makes sure that all the ranges here no longer exist on disk but the merged range does
+ */
+ void assertWrittenAsMerged( const vector<KeyRange>& ranges ) {
+
+ dumpServer();
+
+ BSONObj rangeMin;
+ BSONObj rangeMax;
+
+ // Ensure written
+ for( vector<KeyRange>::const_iterator it = ranges.begin(); it != ranges.end(); ++it ) {
+
+ Query query( BSON( ChunkType::min( it->minKey ) <<
+ ChunkType::max( it->maxKey ) <<
+ ChunkType::shard( shardName() ) ) );
+ ASSERT( client().findOne( ChunkType::ConfigNS, query ).isEmpty() );
+
+ if ( rangeMin.isEmpty() || rangeMin.woCompare( it->minKey ) > 0 ) {
+ rangeMin = it->minKey;
+ }
+
+ if ( rangeMax.isEmpty() || rangeMax.woCompare( it->maxKey ) < 0 ) {
+ rangeMax = it->maxKey;
+ }
+ }
+
+ Query query( BSON( ChunkType::min( rangeMin ) <<
+ ChunkType::max( rangeMax ) <<
+ ChunkType::shard( shardName() ) ) );
+ ASSERT( !client().findOne( ChunkType::ConfigNS, query ).isEmpty() );
+ }
+
string shardName() { return "shard0000"; }
protected:
@@ -281,30 +320,8 @@ namespace mongo {
ASSERT_EQUALS( metadata->getShardVersion().majorVersion(), latestVersion.majorVersion() );
ASSERT_GREATER_THAN( metadata->getShardVersion().minorVersion(),
latestVersion.minorVersion() );
- }
-
- TEST_F(MergeChunkTests, BasicMergeWritten) {
-
- const NamespaceString nss( "foo.bar" );
- const BSONObj kp = BSON( "x" << 1 );
- const OID epoch = OID::gen();
- vector<KeyRange> ranges;
-
- // Setup chunk metadata
- ranges.push_back( KeyRange( nss, BSON( "x" << 0 ), BSON( "x" << 1 ), kp ) );
- ranges.push_back( KeyRange( nss, BSON( "x" << 1 ), BSON( "x" << 2 ), kp ) );
- storeCollectionRanges( nss, shardName(), ranges, ChunkVersion( 1, 0, epoch ) );
-
- // Do merge
- string errMsg;
- bool result = mergeChunks( nss, BSON( "x" << 0 ), BSON( "x" << 2 ), epoch, false, &errMsg );
- ASSERT_EQUALS( errMsg, "" );
- ASSERT( result );
- for( vector<KeyRange>::const_iterator it = ranges.begin(); it != ranges.end(); ++it ) {
- Query query( BSON( ChunkType::min( it->minKey ) << ChunkType::max( it->maxKey ) ) );
- ASSERT( !client().findOne( ChunkType::ConfigNS, query ).isEmpty() );
- }
+ assertWrittenAsMerged( ranges );
}
TEST_F(MergeChunkTests, BasicMergeMinMax ) {
@@ -343,31 +360,50 @@ namespace mongo {
ASSERT_EQUALS( metadata->getShardVersion().majorVersion(), latestVersion.majorVersion() );
ASSERT_GREATER_THAN( metadata->getShardVersion().minorVersion(),
latestVersion.minorVersion() );
+
+ assertWrittenAsMerged( ranges );
}
- TEST_F(MergeChunkTests, BasicMergeMinMaxWritten ) {
+ TEST_F(MergeChunkTests, CompoundMerge ) {
const NamespaceString nss( "foo.bar" );
- const BSONObj kp = BSON( "x" << 1 );
+ const BSONObj kp = BSON( "x" << 1 << "y" << 1 );
const OID epoch = OID::gen();
vector<KeyRange> ranges;
// Setup chunk metadata
- ranges.push_back( KeyRange( nss, BSON( "x" << MINKEY ), BSON( "x" << 0 ), kp ) );
- ranges.push_back( KeyRange( nss, BSON( "x" << 0 ), BSON( "x" << MAXKEY ), kp ) );
+ ranges.push_back( KeyRange( nss, BSON( "x" << 0 << "y" << 1 ),
+ BSON( "x" << 1 << "y" << 0 ), kp ) );
+ ranges.push_back( KeyRange( nss, BSON( "x" << 1 << "y" << 0 ),
+ BSON( "x" << 2 << "y" << 1 ), kp ) );
storeCollectionRanges( nss, shardName(), ranges, ChunkVersion( 1, 0, epoch ) );
+ // Get latest version
+ ChunkVersion latestVersion;
+ shardingState.refreshMetadataNow( nss, &latestVersion );
+ shardingState.resetMetadata( nss );
+
// Do merge
string errMsg;
- bool result = mergeChunks( nss, BSON( "x" << MINKEY ),
- BSON( "x" << MAXKEY ), epoch, false, &errMsg );
+ bool result = mergeChunks( nss, BSON( "x" << 0 << "y" << 1 ),
+ BSON( "x" << 2 << "y" << 1 ), epoch, false, &errMsg );
ASSERT_EQUALS( errMsg, "" );
ASSERT( result );
- for( vector<KeyRange>::const_iterator it = ranges.begin(); it != ranges.end(); ++it ) {
- Query query( BSON( ChunkType::min( it->minKey ) << ChunkType::max( it->maxKey ) ) );
- ASSERT( !client().findOne( ChunkType::ConfigNS, query ).isEmpty() );
- }
+ // Verify result
+ CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( nss );
+
+ ChunkType chunk;
+ ASSERT( metadata->getNextChunk( BSON( "x" << 0 << "y" << 1 ), &chunk ) );
+ ASSERT( chunk.getMin().woCompare( BSON( "x" << 0 << "y" << 1 ) ) == 0 );
+ ASSERT( chunk.getMax().woCompare( BSON( "x" << 2 << "y" << 1 ) ) == 0 );
+ ASSERT_EQUALS( metadata->getNumChunks(), 1u );
+
+ ASSERT_EQUALS( metadata->getShardVersion().majorVersion(), latestVersion.majorVersion() );
+ ASSERT_GREATER_THAN( metadata->getShardVersion().minorVersion(),
+ latestVersion.minorVersion() );
+
+ assertWrittenAsMerged( ranges );
}
} // end namespace
diff --git a/src/mongo/s/merge_chunks_cmd.cpp b/src/mongo/s/merge_chunks_cmd.cpp
new file mode 100644
index 00000000000..c019a3e3e02
--- /dev/null
+++ b/src/mongo/s/merge_chunks_cmd.cpp
@@ -0,0 +1,179 @@
+/**
+ * Copyright (C) 2013 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mongo/base/init.h"
+#include "mongo/client/connpool.h"
+#include "mongo/db/auth/action_type.h"
+#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/privilege.h"
+#include "mongo/db/commands.h"
+#include "mongo/db/field_parser.h"
+#include "mongo/db/namespace_string.h"
+#include "mongo/s/config.h" // For config server and DBConfig and version refresh
+#include "mongo/s/grid.h"
+#include "mongo/s/shard.h"
+
+namespace mongo {
+
+ /**
+     * Mongos-side command for merging chunks; passes the command to the appropriate shard.
+ */
+ class MergeChunksPassCommand : public Command {
+ public:
+ MergeChunksPassCommand() : Command("mergeChunks") {}
+
+ virtual void help(stringstream& h) const {
+ h << "Merge Chunks command\n"
+ << "usage: { mergeChunks : <ns>, bounds : [ <min key>, <max key> ] }";
+ }
+
+ virtual Status checkAuthForCommand( ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj ) {
+ return client->getAuthorizationSession()->checkAuthForPrivilege(
+ Privilege( AuthorizationManager::CLUSTER_RESOURCE_NAME,
+ ActionType::mergeChunks ) );
+ }
+
+ virtual bool slaveOk() const { return false; }
+ virtual LockType locktype() const { return NONE; }
+
+ // Required
+ static BSONField<string> nsField;
+ static BSONField<vector<BSONObj> > boundsField;
+
+ // Used to send sharding state
+ static BSONField<string> shardNameField;
+ static BSONField<string> configField;
+
+        // TODO: Same limitations as other mongos metadata commands: sometimes we'll be stale here
+        // and fail.  We need to better integrate targeting with commands.
+ ShardPtr guessMergeShard( const NamespaceString& nss, const BSONObj& minKey ) {
+
+ DBConfigPtr config = grid.getDBConfig( nss.ns() );
+ if ( !config->isSharded( nss ) ) {
+ config->reload();
+ if ( !config->isSharded( nss ) ) {
+ return ShardPtr();
+ }
+ }
+
+ ChunkManagerPtr manager = config->getChunkManager( nss );
+ if ( !manager ) return ShardPtr();
+ ChunkPtr chunk = manager->findChunkForDoc( minKey );
+ if ( !chunk ) return ShardPtr();
+ return ShardPtr( new Shard( chunk->getShard() ) );
+ }
+
+ // TODO: This refresh logic should be consolidated
+ void refreshChunkCache( const NamespaceString& nss ) {
+
+ DBConfigPtr config = grid.getDBConfig( nss.ns() );
+ if ( !config->isSharded( nss ) ) return;
+
+ // Refreshes chunks as a side-effect
+ config->getChunkManagerIfExists( nss, true );
+ }
+
+
+ bool run( const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result,
+ bool ) {
+
+ string ns;
+ if ( !FieldParser::extract( cmdObj, nsField, &ns, &errmsg ) ) {
+ return false;
+ }
+
+ if ( ns.size() == 0 ) {
+ errmsg = "no namespace specified";
+ return false;
+ }
+
+ vector<BSONObj> bounds;
+ if ( !FieldParser::extract( cmdObj, boundsField, &bounds, &errmsg ) ) {
+ return false;
+ }
+
+ if ( bounds.size() == 0 ) {
+ errmsg = "no bounds were specified";
+ return false;
+ }
+
+ if ( bounds.size() != 2 ) {
+ errmsg = "only a min and max bound may be specified";
+ return false;
+ }
+
+ BSONObj minKey = bounds[0];
+ BSONObj maxKey = bounds[1];
+
+ if ( minKey.isEmpty() ) {
+ errmsg = "no min key specified";
+ return false;
+ }
+
+ if ( maxKey.isEmpty() ) {
+ errmsg = "no max key specified";
+ return false;
+ }
+
+ ShardPtr mergeShard = guessMergeShard( NamespaceString( ns ), minKey );
+
+ if ( !mergeShard ) {
+ errmsg = (string)"could not find shard for merge range starting at "
+ + minKey.toString();
+ return false;
+ }
+
+ BSONObjBuilder remoteCmdObjB;
+ remoteCmdObjB.append( cmdObj[ MergeChunksPassCommand::nsField() ] );
+ remoteCmdObjB.append( cmdObj[ MergeChunksPassCommand::boundsField() ] );
+ remoteCmdObjB.append( MergeChunksPassCommand::configField(),
+ configServer.getPrimary().getAddress().toString() );
+ remoteCmdObjB.append( MergeChunksPassCommand::shardNameField(),
+ mergeShard->getName() );
+
+ BSONObj remoteResult;
+            // Throws, but handled at the level above.  We don't rewrap, to preserve the original
+            // exception formatting.
+ ScopedDbConnection conn( mergeShard->getAddress() );
+ bool ok = conn->runCommand( "admin", remoteCmdObjB.obj(), remoteResult );
+ conn.done();
+
+ // Always refresh our chunks afterwards
+ refreshChunkCache( NamespaceString( ns ) );
+
+ result.appendElements( remoteResult );
+ return ok;
+ }
+ };
+
+ BSONField<string> MergeChunksPassCommand::nsField( "mergeChunks" );
+ BSONField<vector<BSONObj> > MergeChunksPassCommand::boundsField( "bounds" );
+
+ BSONField<string> MergeChunksPassCommand::configField( "config" );
+ BSONField<string> MergeChunksPassCommand::shardNameField( "shardName" );
+
+ MONGO_INITIALIZER(InitMergeChunksPassCommand)(InitializerContext* context) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new MergeChunksPassCommand();
+ return Status::OK();
+ }
+}