author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2015-08-21 10:06:34 -0400
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2015-08-21 12:10:38 -0400
commit     f8cdaf32b6c44528123d8ffef7ea801a2f611745 (patch)
tree       439a5b537649c5225b06b3d38b7859f2e386abc5
parent     f24e4ddcc542959a5bf3f8cbbcb08796c3f8856f (diff)
SERVER-19855 Use ShardConnection for migrations and splits
This ensures that, before the operation is sent on the wire, the target host is primed to know that it is part of a sharded cluster.
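
For context, a minimal sketch of the before/after connection pattern this patch applies in chunk.cpp, cluster_merge_chunks_cmd.cpp, and d_migrate.cpp. Identifiers are taken from the hunks below; the assumption is that the priming happens inside ShardConnection's own handshake rather than in the calling code.

    // Not a complete translation unit; `shard`, `cmdObj`, and `res` stand in for
    // the locals used at each call site in the hunks below.

    // Before: a bare ScopedDbConnection sends the admin command directly, so the
    // receiving mongod gets splitChunk/moveChunk without any sharding handshake.
    {
        ScopedDbConnection conn(shard->getConnString());
        conn->runCommand("admin", cmdObj, res);
        conn.done();
    }

    // After: ShardConnection (with an empty ns, as in the hunks below) is used
    // instead; its versioning handshake is what primes the host before the
    // command goes out.
    {
        ShardConnection conn(shard->getConnString(), "");
        conn->runCommand("admin", cmdObj, res);
        conn.done();
    }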
-rw-r--r--  jstests/aggregation/bugs/server6118.js             | 42
-rw-r--r--  jstests/aggregation/bugs/server6179.js             | 40
-rw-r--r--  jstests/sharding/hash_shard_num_chunks.js          | 30
-rw-r--r--  jstests/sharding/sharded_profile.js                | 30
-rw-r--r--  src/mongo/s/chunk.cpp                              | 15
-rw-r--r--  src/mongo/s/chunk.h                                |  2
-rw-r--r--  src/mongo/s/commands/cluster_merge_chunks_cmd.cpp  |  4
-rw-r--r--  src/mongo/s/d_migrate.cpp                          |  5
8 files changed, 92 insertions(+), 76 deletions(-)
diff --git a/jstests/aggregation/bugs/server6118.js b/jstests/aggregation/bugs/server6118.js
index 2f2e7599ef8..7f622c568b2 100644
--- a/jstests/aggregation/bugs/server6118.js
+++ b/jstests/aggregation/bugs/server6118.js
@@ -1,35 +1,41 @@
// SERVER-6118: support for sharded sorts
+(function() {
-// Set up a sharding test.
-s = new ShardingTest( "aggregation_sort1", 2, 0, 2 );
-s.adminCommand( { enablesharding:"test" } );
+var s = new ShardingTest({ name: "aggregation_sort1", shards: 2, mongos: 1, verbose: 0 });
+s.stopBalancer();
+
+s.adminCommand({ enablesharding:"test" });
s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection:"test.data", key:{ _id:1 } } );
+s.adminCommand({ shardcollection: "test.data", key:{ _id: 1 } });
-// Test does it's own balancing.
-s.stopBalancer();
+var d = s.getDB( "test" );
-d = s.getDB( "test" );
+// Insert _id values 0 - 99
+var N = 100;
-// Insert _id values 0 - 99.
-N = 100;
-for( i = 0; i < N; ++i ) {
- d.data.insert( { _id:i } )
+var bulkOp = d.data.initializeOrderedBulkOp();
+for(var i = 0; i < N; ++i) {
+ bulkOp.insert({ _id: i });
}
+bulkOp.execute();
-// Split the data into 3 chunks.
+// Split the data into 3 chunks
s.adminCommand( { split:"test.data", middle:{ _id:33 } } );
s.adminCommand( { split:"test.data", middle:{ _id:66 } } );
-// Migrate the middle chunk to another shard.
-s.adminCommand( { movechunk:"test.data", find:{ _id:50 },
- to:s.getOther( s.getServer( "test" ) ).name } );
+// Migrate the middle chunk to another shard
+s.adminCommand({ movechunk: "test.data",
+ find: { _id: 50 },
+ to: s.getOther(s.getServer("test")).name });
// Check that the results are in order.
-result = d.data.aggregate( { $sort: { _id:1 } } ).toArray();
+var result = d.data.aggregate({ $sort: { _id: 1 } }).toArray();
printjson(result);
-for( i = 0; i < N; ++i ) {
- assert.eq( i, result[ i ]._id );
+
+for(var i = 0; i < N; ++i) {
+ assert.eq(i, result[i]._id);
}
s.stop()
+
+})();
diff --git a/jstests/aggregation/bugs/server6179.js b/jstests/aggregation/bugs/server6179.js
index 260fa38c983..8af02645a5c 100644
--- a/jstests/aggregation/bugs/server6179.js
+++ b/jstests/aggregation/bugs/server6179.js
@@ -1,31 +1,37 @@
// SERVER-6179: support for two $groups in sharded agg
+(function() {
-// Set up a sharding test.
-s = new ShardingTest( "aggregation_multiple_group", 2, 0, 2 );
-s.adminCommand( { enablesharding:"test" } );
+var s = new ShardingTest({ name: "aggregation_multiple_group", shards: 2, mongos: 1, verbose: 0 });
+s.stopBalancer();
+
+s.adminCommand({ enablesharding:"test" });
s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection:"test.data", key:{ _id:1 } } );
-s.stopBalancer()
+s.adminCommand({ shardcollection: "test.data", key:{ _id: 1 } });
+
+var d = s.getDB( "test" );
-d = s.getDB( "test" );
+// Insert _id values 0 - 99
+var N = 100;
-// Populate
-for( i = 0; i < 100; ++i ) {
- d.data.insert( { _id:i, i:i%10 } )
+var bulkOp = d.data.initializeOrderedBulkOp();
+for(var i = 0; i < N; ++i) {
+ bulkOp.insert({ _id: i, i: i%10 });
}
+bulkOp.execute();
-// Split the data into 3 chunks.
+// Split the data into 3 chunks
s.adminCommand( { split:"test.data", middle:{ _id:33 } } );
s.adminCommand( { split:"test.data", middle:{ _id:66 } } );
-// Migrate the middle chunk to another shard.
-s.adminCommand( { movechunk:"test.data", find:{ _id:50 },
- to:s.getOther( s.getServer( "test" ) ).name } );
+// Migrate the middle chunk to another shard
+s.adminCommand({ movechunk: "test.data",
+ find: { _id: 50 },
+ to: s.getOther(s.getServer("test")).name });
// Check that we get results rather than an error
-result = d.data.aggregate({$group: {_id: '$_id', i: {$first: '$i'}}},
- {$group: {_id: '$i', avg_id: {$avg: '$_id'}}},
- {$sort: {_id: 1}}).toArray();
+var result = d.data.aggregate({$group: {_id: '$_id', i: {$first: '$i'}}},
+ {$group: {_id: '$i', avg_id: {$avg: '$_id'}}},
+ {$sort: {_id: 1}}).toArray();
expected = [
{
"_id" : 0,
@@ -72,3 +78,5 @@ expected = [
assert.eq(result, expected);
s.stop();
+
+})();
diff --git a/jstests/sharding/hash_shard_num_chunks.js b/jstests/sharding/hash_shard_num_chunks.js
index 608e7c7674c..01b140f1583 100644
--- a/jstests/sharding/hash_shard_num_chunks.js
+++ b/jstests/sharding/hash_shard_num_chunks.js
@@ -1,21 +1,24 @@
// Hash sharding with initial chunk count set.
-// @tags : [ hashed ]
+// @tags: [ hashed ]
+
+(function() {
+
+var s = new ShardingTest({ shards: 3, mongos: 1, verbose: 1 });
+s.stopBalancer();
-var s = new ShardingTest({ shards : 3, mongos : 1, verbose : 1 });
var dbname = "test";
var coll = "foo";
var db = s.getDB(dbname);
-db.adminCommand({ enablesharding : dbname });
+
+assert.commandWorked(db.adminCommand({ enablesharding: dbname }));
s.ensurePrimaryShard(dbname, 'shard0001');
-//for simplicity turn off balancer
-s.stopBalancer();
+assert.commandWorked(db.adminCommand({ shardcollection: dbname + "." + coll,
+ key: { a: "hashed" },
+ numInitialChunks: 500 }));
-var res = db.adminCommand({ shardcollection : dbname + "." + coll,
- key : { a : "hashed" },
- numInitialChunks : 500 });
-assert.eq(res.ok, 1, "shardcollection didn't work");
db.printShardingStatus();
+
var numChunks = s.config.chunks.count();
assert.eq(numChunks, 500 , "should be exactly 500 chunks");
@@ -23,15 +26,16 @@ var shards = s.config.shards.find();
shards.forEach(
// check that each shard has one third the numInitialChunks
function (shard){
- var numChunksOnShard = s.config.chunks.find({"shard" : shard._id}).count();
+ var numChunksOnShard = s.config.chunks.find({"shard": shard._id}).count();
assert.gte(numChunksOnShard, Math.floor(500/3));
}
);
-// Check that the collection gets dropped correctly (which doesn't happen if pre-splitting
-// fails to create the collection on all shards).
-res = db.runCommand({ "drop" : coll });
+// Check that the collection gets dropped correctly (which doesn't happen if pre-splitting fails to
+// create the collection on all shards).
+res = db.runCommand({ "drop": coll });
assert.eq(res.ok, 1, "couldn't drop empty, pre-split collection");
s.stop();
+})();
diff --git a/jstests/sharding/sharded_profile.js b/jstests/sharding/sharded_profile.js
index 77f927bdcf5..f3ab4fdaa6d 100644
--- a/jstests/sharding/sharded_profile.js
+++ b/jstests/sharding/sharded_profile.js
@@ -1,36 +1,36 @@
-//
// Tests whether profiling can trigger stale config errors and interfere with write batches
// SERVER-13413
-//
-var st = new ShardingTest({ shards : 1, mongos : 2 });
+(function() {
+
+var st = new ShardingTest({ shards: 1, mongos: 2 });
st.stopBalancer();
-var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getCollection( "config.shards" ).find().toArray();
-var coll = mongos.getCollection( "foo.bar" );
+var admin = st.s0.getDB('admin');
+var shards = st.s0.getCollection('config.shards').find().toArray();
+var coll = st.s0.getCollection('foo.bar');
-assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
-assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
+assert(admin.runCommand({ enableSharding: coll.getDB() + '' }).ok);
+assert(admin.runCommand({ shardCollection: coll + '', key: { _id: 1 } }).ok);
st.printShardingStatus();
-jsTest.log( "Turning on profiling..." );
+jsTest.log('Turning on profiling on ' + st.shard0);
st.shard0.getDB(coll.getDB().toString()).setProfilingLevel(2);
+
var profileColl = st.shard0.getDB(coll.getDB().toString()).system.profile;
-var inserts = [{ _id : 0 }, { _id : 1 }, { _id : 2 }];
-var staleColl = st.s1.getCollection(coll.toString());
+var inserts = [{ _id: 0 }, { _id: 1 }, { _id: 2 }];
-assert.writeOK(staleColl.insert(inserts));
+assert.writeOK(st.s1.getCollection(coll.toString()).insert(inserts));
printjson(profileColl.find().toArray());
for (var i = 0; i < inserts.length; i++) {
- assert.neq(null, profileColl.findOne({ 'query._id' : i }));
+ assert.neq(null, profileColl.findOne({ 'query._id': i }));
}
-jsTest.log( "DONE!" );
st.stop();
+
+})();
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index da6cbc196df..23cbf463778 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -435,8 +435,6 @@ Status Chunk::multiSplit(OperationContext* txn, const vector<BSONObj>& m, BSONOb
uassert(13333, "can't split a chunk in that many parts", m.size() < maxSplitPoints);
uassert(13003, "can't split a chunk with only one distinct value", _min.woCompare(_max));
- ScopedDbConnection conn(_getShardConnectionString());
-
BSONObjBuilder cmd;
cmd.append("splitChunk", _manager->getns());
cmd.append("keyPattern", _manager->getShardKeyPattern().toBSON());
@@ -453,6 +451,7 @@ Status Chunk::multiSplit(OperationContext* txn, const vector<BSONObj>& m, BSONOb
res = &dummy;
}
+ ShardConnection conn(_getShardConnectionString(), "");
if (!conn->runCommand("admin", cmdObj, *res)) {
string msg(str::stream() << "splitChunk failed - cmd: " << cmdObj << " result: " << *res);
warning() << msg;
@@ -481,17 +480,15 @@ bool Chunk::moveAndCommit(OperationContext* txn,
log() << "moving chunk ns: " << _manager->getns() << " moving ( " << toString() << ") "
<< getShardId() << " -> " << toShardId;
- const auto from = grid.shardRegistry()->getShard(getShardId());
-
BSONObjBuilder builder;
builder.append("moveChunk", _manager->getns());
- builder.append("from", from->getConnString().toString());
+ builder.append("from", _getShardConnectionString().toString());
{
const auto toShard = grid.shardRegistry()->getShard(toShardId);
builder.append("to", toShard->getConnString().toString());
}
// NEEDED FOR 2.0 COMPATIBILITY
- builder.append("fromShard", from->getId());
+ builder.append("fromShard", getShardId());
builder.append("toShard", toShardId);
///////////////////////////////
builder.append("min", _min);
@@ -515,7 +512,7 @@ bool Chunk::moveAndCommit(OperationContext* txn,
builder.append(LiteParsedQuery::cmdOptionMaxTimeMS, maxTimeMS);
builder.append("epoch", _manager->getVersion().epoch());
- ScopedDbConnection fromconn(from->getConnString());
+ ShardConnection fromconn(_getShardConnectionString(), "");
bool worked = fromconn->runCommand("admin", builder.done(), res);
fromconn.done();
@@ -626,9 +623,9 @@ bool Chunk::splitIfShould(OperationContext* txn, long dataWritten) const {
}
}
-std::string Chunk::_getShardConnectionString() const {
+ConnectionString Chunk::_getShardConnectionString() const {
const auto shard = grid.shardRegistry()->getShard(getShardId());
- return shard->getConnString().toString();
+ return shard->getConnString();
}
long Chunk::getPhysicalSize() const {
diff --git a/src/mongo/s/chunk.h b/src/mongo/s/chunk.h
index 1ec9a9447cb..822ac534380 100644
--- a/src/mongo/s/chunk.h
+++ b/src/mongo/s/chunk.h
@@ -261,7 +261,7 @@ private:
/**
* Returns the connection string for the shard on which this chunk resides.
*/
- std::string _getShardConnectionString() const;
+ ConnectionString _getShardConnectionString() const;
// if min/max key is pos/neg infinity
bool _minIsInf() const;
diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
index 2c3802a8f3b..084b63661f3 100644
--- a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
+++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
@@ -38,6 +38,7 @@
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/s/chunk_manager.h"
+#include "mongo/s/client/shard_connection.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
@@ -179,6 +180,7 @@ public:
grid.shardRegistry()->getConfigServerConnectionString().toString());
remoteCmdObjB.append(ClusterMergeChunksCommand::shardNameField(), firstChunk->getShardId());
+
BSONObj remoteResult;
// Throws, but handled at level above. Don't want to rewrap to preserve exception
@@ -191,7 +193,7 @@ public:
str::stream() << "Can't find shard for chunk: " << firstChunk->toString()));
}
- ScopedDbConnection conn(shard->getConnString());
+ ShardConnection conn(shard->getConnString(), "");
bool ok = conn->runCommand("admin", remoteCmdObjB.obj(), remoteResult);
conn.done();
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 269a6d5a5e7..4138c2242a9 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -573,7 +573,6 @@ public:
return false;
}
- ScopedDbConnection connTo(toShardCS);
BSONObj res;
bool ok;
@@ -596,7 +595,9 @@ public:
}
try {
+ ScopedDbConnection connTo(toShardCS);
ok = connTo->runCommand("admin", recvChunkStartBuilder.done(), res);
+ connTo.done();
} catch (DBException& e) {
errmsg = str::stream() << "moveChunk could not contact to: shard " << toShardName
<< " to start transfer" << causedBy(e);
@@ -604,8 +605,6 @@ public:
return false;
}
- connTo.done();
-
if (!ok) {
errmsg = "moveChunk failed to engage TO-shard in the data transfer: ";
verify(res["errmsg"].type());