-rw-r--r--  jstests/aggregation/bugs/server6118.js               42
-rw-r--r--  jstests/aggregation/bugs/server6179.js               40
-rw-r--r--  jstests/gle/gle_sharded_wc.js                          6
-rw-r--r--  jstests/sharding/hash_shard_num_chunks.js             30
-rw-r--r--  jstests/sharding/sharded_profile.js                   30
-rw-r--r--  src/mongo/s/chunk.cpp                                 15
-rw-r--r--  src/mongo/s/chunk.h                                    2
-rw-r--r--  src/mongo/s/commands/cluster_merge_chunks_cmd.cpp      4
-rw-r--r--  src/mongo/s/d_migrate.cpp                              5
9 files changed, 78 insertions, 96 deletions
diff --git a/jstests/aggregation/bugs/server6118.js b/jstests/aggregation/bugs/server6118.js
index 7f622c568b2..2f2e7599ef8 100644
--- a/jstests/aggregation/bugs/server6118.js
+++ b/jstests/aggregation/bugs/server6118.js
@@ -1,41 +1,35 @@
// SERVER-6118: support for sharded sorts
-(function() {
-var s = new ShardingTest({ name: "aggregation_sort1", shards: 2, mongos: 1, verbose: 0 });
-s.stopBalancer();
-
-s.adminCommand({ enablesharding:"test" });
+// Set up a sharding test.
+s = new ShardingTest( "aggregation_sort1", 2, 0, 2 );
+s.adminCommand( { enablesharding:"test" } );
s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand({ shardcollection: "test.data", key:{ _id: 1 } });
+s.adminCommand( { shardcollection:"test.data", key:{ _id:1 } } );
-var d = s.getDB( "test" );
+// Test does it's own balancing.
+s.stopBalancer();
-// Insert _id values 0 - 99
-var N = 100;
+d = s.getDB( "test" );
-var bulkOp = d.data.initializeOrderedBulkOp();
-for(var i = 0; i < N; ++i) {
- bulkOp.insert({ _id: i });
+// Insert _id values 0 - 99.
+N = 100;
+for( i = 0; i < N; ++i ) {
+ d.data.insert( { _id:i } )
}
-bulkOp.execute();
-// Split the data into 3 chunks
+// Split the data into 3 chunks.
s.adminCommand( { split:"test.data", middle:{ _id:33 } } );
s.adminCommand( { split:"test.data", middle:{ _id:66 } } );
-// Migrate the middle chunk to another shard
-s.adminCommand({ movechunk: "test.data",
- find: { _id: 50 },
- to: s.getOther(s.getServer("test")).name });
+// Migrate the middle chunk to another shard.
+s.adminCommand( { movechunk:"test.data", find:{ _id:50 },
+ to:s.getOther( s.getServer( "test" ) ).name } );
// Check that the results are in order.
-var result = d.data.aggregate({ $sort: { _id: 1 } }).toArray();
+result = d.data.aggregate( { $sort: { _id:1 } } ).toArray();
printjson(result);
-
-for(var i = 0; i < N; ++i) {
- assert.eq(i, result[i]._id);
+for( i = 0; i < N; ++i ) {
+ assert.eq( i, result[ i ]._id );
}
s.stop()
-
-})();
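
Note: the hunk above replaces the ordered bulk insert with plain per-document inserts. For context, a minimal mongo shell sketch of the two idioms, both taken from lines visible in this diff (illustrative only, not part of the commit; `coll` stands for any shell collection handle such as `db.data`):

// Illustrative sketch -- not part of the commit. Both forms insert _id values 0-99.
var coll = db.getSiblingDB("test").data;

// Per-document inserts (the style this diff restores):
for (var i = 0; i < 100; ++i) {
    coll.insert({ _id: i });
}

// Ordered bulk API (the style this diff removes; available in the 2.6+ shell):
var bulkOp = coll.initializeOrderedBulkOp();
for (var i = 0; i < 100; ++i) {
    bulkOp.insert({ _id: i });
}
bulkOp.execute();
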
diff --git a/jstests/aggregation/bugs/server6179.js b/jstests/aggregation/bugs/server6179.js
index 8af02645a5c..260fa38c983 100644
--- a/jstests/aggregation/bugs/server6179.js
+++ b/jstests/aggregation/bugs/server6179.js
@@ -1,37 +1,31 @@
// SERVER-6179: support for two $groups in sharded agg
-(function() {
-var s = new ShardingTest({ name: "aggregation_multiple_group", shards: 2, mongos: 1, verbose: 0 });
-s.stopBalancer();
-
-s.adminCommand({ enablesharding:"test" });
+// Set up a sharding test.
+s = new ShardingTest( "aggregation_multiple_group", 2, 0, 2 );
+s.adminCommand( { enablesharding:"test" } );
s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand({ shardcollection: "test.data", key:{ _id: 1 } });
-
-var d = s.getDB( "test" );
+s.adminCommand( { shardcollection:"test.data", key:{ _id:1 } } );
+s.stopBalancer()
-// Insert _id values 0 - 99
-var N = 100;
+d = s.getDB( "test" );
-var bulkOp = d.data.initializeOrderedBulkOp();
-for(var i = 0; i < N; ++i) {
- bulkOp.insert({ _id: i, i: i%10 });
+// Populate
+for( i = 0; i < 100; ++i ) {
+ d.data.insert( { _id:i, i:i%10 } )
}
-bulkOp.execute();
-// Split the data into 3 chunks
+// Split the data into 3 chunks.
s.adminCommand( { split:"test.data", middle:{ _id:33 } } );
s.adminCommand( { split:"test.data", middle:{ _id:66 } } );
-// Migrate the middle chunk to another shard
-s.adminCommand({ movechunk: "test.data",
- find: { _id: 50 },
- to: s.getOther(s.getServer("test")).name });
+// Migrate the middle chunk to another shard.
+s.adminCommand( { movechunk:"test.data", find:{ _id:50 },
+ to:s.getOther( s.getServer( "test" ) ).name } );
// Check that we get results rather than an error
-var result = d.data.aggregate({$group: {_id: '$_id', i: {$first: '$i'}}},
- {$group: {_id: '$i', avg_id: {$avg: '$_id'}}},
- {$sort: {_id: 1}}).toArray();
+result = d.data.aggregate({$group: {_id: '$_id', i: {$first: '$i'}}},
+ {$group: {_id: '$i', avg_id: {$avg: '$_id'}}},
+ {$sort: {_id: 1}}).toArray();
expected = [
{
"_id" : 0,
@@ -78,5 +72,3 @@ expected = [
assert.eq(result, expected);
s.stop();
-
-})();
diff --git a/jstests/gle/gle_sharded_wc.js b/jstests/gle/gle_sharded_wc.js
index bdde00cab25..375f3787a23 100644
--- a/jstests/gle/gle_sharded_wc.js
+++ b/jstests/gle/gle_sharded_wc.js
@@ -1,7 +1,7 @@
+//
// Tests of sharded GLE enforcing write concern against operations in a cluster
// Basic sharded GLE operation is tested elsewhere.
-
-(function() {
+//
// Options for a cluster with two replica set shards, the first with two nodes the second with one
// This lets us try a number of GLE scenarios
@@ -128,5 +128,3 @@ assert.eq(coll.count({ _id : 1 }), 1);
jsTest.log( "DONE!" );
st.stop();
-
-})();
diff --git a/jstests/sharding/hash_shard_num_chunks.js b/jstests/sharding/hash_shard_num_chunks.js
index 01b140f1583..608e7c7674c 100644
--- a/jstests/sharding/hash_shard_num_chunks.js
+++ b/jstests/sharding/hash_shard_num_chunks.js
@@ -1,24 +1,21 @@
// Hash sharding with initial chunk count set.
-// @tags: [ hashed ]
-
-(function() {
-
-var s = new ShardingTest({ shards: 3, mongos: 1, verbose: 1 });
-s.stopBalancer();
+// @tags : [ hashed ]
+var s = new ShardingTest({ shards : 3, mongos : 1, verbose : 1 });
var dbname = "test";
var coll = "foo";
var db = s.getDB(dbname);
-
-assert.commandWorked(db.adminCommand({ enablesharding: dbname }));
+db.adminCommand({ enablesharding : dbname });
s.ensurePrimaryShard(dbname, 'shard0001');
-assert.commandWorked(db.adminCommand({ shardcollection: dbname + "." + coll,
- key: { a: "hashed" },
- numInitialChunks: 500 }));
+//for simplicity turn off balancer
+s.stopBalancer();
+var res = db.adminCommand({ shardcollection : dbname + "." + coll,
+ key : { a : "hashed" },
+ numInitialChunks : 500 });
+assert.eq(res.ok, 1, "shardcollection didn't work");
db.printShardingStatus();
-
var numChunks = s.config.chunks.count();
assert.eq(numChunks, 500 , "should be exactly 500 chunks");
@@ -26,16 +23,15 @@ var shards = s.config.shards.find();
shards.forEach(
// check that each shard has one third the numInitialChunks
function (shard){
- var numChunksOnShard = s.config.chunks.find({"shard": shard._id}).count();
+ var numChunksOnShard = s.config.chunks.find({"shard" : shard._id}).count();
assert.gte(numChunksOnShard, Math.floor(500/3));
}
);
-// Check that the collection gets dropped correctly (which doesn't happen if pre-splitting fails to
-// create the collection on all shards).
-res = db.runCommand({ "drop": coll });
+// Check that the collection gets dropped correctly (which doesn't happen if pre-splitting
+// fails to create the collection on all shards).
+res = db.runCommand({ "drop" : coll });
assert.eq(res.ok, 1, "couldn't drop empty, pre-split collection");
s.stop();
-})();
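
Note: the change above swaps `assert.commandWorked(...)` for a manual `res.ok` check. A small sketch of the two equivalent styles, based only on calls visible in this diff (illustrative, not part of the commit):

// Illustrative sketch -- two ways the shell asserts that an admin command succeeded.
var res = db.adminCommand({ shardcollection: "test.foo",
                            key: { a: "hashed" },
                            numInitialChunks: 500 });

// Manual check, as restored by this diff:
assert.eq(res.ok, 1, "shardcollection didn't work");

// assert.commandWorked wraps the same ok-field check, as removed by this diff:
assert.commandWorked(db.adminCommand({ enablesharding: "test" }));
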
diff --git a/jstests/sharding/sharded_profile.js b/jstests/sharding/sharded_profile.js
index f3ab4fdaa6d..77f927bdcf5 100644
--- a/jstests/sharding/sharded_profile.js
+++ b/jstests/sharding/sharded_profile.js
@@ -1,36 +1,36 @@
+//
// Tests whether profiling can trigger stale config errors and interfere with write batches
// SERVER-13413
+//
-(function() {
-
-var st = new ShardingTest({ shards: 1, mongos: 2 });
+var st = new ShardingTest({ shards : 1, mongos : 2 });
st.stopBalancer();
-var admin = st.s0.getDB('admin');
-var shards = st.s0.getCollection('config.shards').find().toArray();
-var coll = st.s0.getCollection('foo.bar');
+var mongos = st.s0;
+var admin = mongos.getDB( "admin" );
+var shards = mongos.getCollection( "config.shards" ).find().toArray();
+var coll = mongos.getCollection( "foo.bar" );
-assert(admin.runCommand({ enableSharding: coll.getDB() + '' }).ok);
-assert(admin.runCommand({ shardCollection: coll + '', key: { _id: 1 } }).ok);
+assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
+assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
st.printShardingStatus();
-jsTest.log('Turning on profiling on ' + st.shard0);
+jsTest.log( "Turning on profiling..." );
st.shard0.getDB(coll.getDB().toString()).setProfilingLevel(2);
-
var profileColl = st.shard0.getDB(coll.getDB().toString()).system.profile;
-var inserts = [{ _id: 0 }, { _id: 1 }, { _id: 2 }];
+var inserts = [{ _id : 0 }, { _id : 1 }, { _id : 2 }];
+var staleColl = st.s1.getCollection(coll.toString());
-assert.writeOK(st.s1.getCollection(coll.toString()).insert(inserts));
+assert.writeOK(staleColl.insert(inserts));
printjson(profileColl.find().toArray());
for (var i = 0; i < inserts.length; i++) {
- assert.neq(null, profileColl.findOne({ 'query._id': i }));
+ assert.neq(null, profileColl.findOne({ 'query._id' : i }));
}
+jsTest.log( "DONE!" );
st.stop();
-
-})();
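
Note: for readers unfamiliar with the profiler exercised by this test, a minimal sketch of turning it on for a shard's database and querying the resulting `system.profile` collection (illustrative only, not part of the commit; `st` is the ShardingTest handle from the test above):

// Illustrative sketch -- profile every operation on the shard's "foo" database
// and inspect the captured entries, as the test above does.
var shardDB = st.shard0.getDB("foo");
shardDB.setProfilingLevel(2);              // 2 = profile all operations
var profileColl = shardDB.system.profile;  // profiler output collection
printjson(profileColl.find({ "query._id": 0 }).toArray());
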
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index 23cbf463778..da6cbc196df 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -435,6 +435,8 @@ Status Chunk::multiSplit(OperationContext* txn, const vector<BSONObj>& m, BSONOb
uassert(13333, "can't split a chunk in that many parts", m.size() < maxSplitPoints);
uassert(13003, "can't split a chunk with only one distinct value", _min.woCompare(_max));
+ ScopedDbConnection conn(_getShardConnectionString());
+
BSONObjBuilder cmd;
cmd.append("splitChunk", _manager->getns());
cmd.append("keyPattern", _manager->getShardKeyPattern().toBSON());
@@ -451,7 +453,6 @@ Status Chunk::multiSplit(OperationContext* txn, const vector<BSONObj>& m, BSONOb
res = &dummy;
}
- ShardConnection conn(_getShardConnectionString(), "");
if (!conn->runCommand("admin", cmdObj, *res)) {
string msg(str::stream() << "splitChunk failed - cmd: " << cmdObj << " result: " << *res);
warning() << msg;
@@ -480,15 +481,17 @@ bool Chunk::moveAndCommit(OperationContext* txn,
log() << "moving chunk ns: " << _manager->getns() << " moving ( " << toString() << ") "
<< getShardId() << " -> " << toShardId;
+ const auto from = grid.shardRegistry()->getShard(getShardId());
+
BSONObjBuilder builder;
builder.append("moveChunk", _manager->getns());
- builder.append("from", _getShardConnectionString().toString());
+ builder.append("from", from->getConnString().toString());
{
const auto toShard = grid.shardRegistry()->getShard(toShardId);
builder.append("to", toShard->getConnString().toString());
}
// NEEDED FOR 2.0 COMPATIBILITY
- builder.append("fromShard", getShardId());
+ builder.append("fromShard", from->getId());
builder.append("toShard", toShardId);
///////////////////////////////
builder.append("min", _min);
@@ -512,7 +515,7 @@ bool Chunk::moveAndCommit(OperationContext* txn,
builder.append(LiteParsedQuery::cmdOptionMaxTimeMS, maxTimeMS);
builder.append("epoch", _manager->getVersion().epoch());
- ShardConnection fromconn(_getShardConnectionString(), "");
+ ScopedDbConnection fromconn(from->getConnString());
bool worked = fromconn->runCommand("admin", builder.done(), res);
fromconn.done();
@@ -623,9 +626,9 @@ bool Chunk::splitIfShould(OperationContext* txn, long dataWritten) const {
}
}
-ConnectionString Chunk::_getShardConnectionString() const {
+std::string Chunk::_getShardConnectionString() const {
const auto shard = grid.shardRegistry()->getShard(getShardId());
- return shard->getConnString();
+ return shard->getConnString().toString();
}
long Chunk::getPhysicalSize() const {
diff --git a/src/mongo/s/chunk.h b/src/mongo/s/chunk.h
index 822ac534380..1ec9a9447cb 100644
--- a/src/mongo/s/chunk.h
+++ b/src/mongo/s/chunk.h
@@ -261,7 +261,7 @@ private:
/**
* Returns the connection string for the shard on which this chunk resides.
*/
- ConnectionString _getShardConnectionString() const;
+ std::string _getShardConnectionString() const;
// if min/max key is pos/neg infinity
bool _minIsInf() const;
diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
index 084b63661f3..2c3802a8f3b 100644
--- a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
+++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
@@ -38,7 +38,6 @@
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/s/chunk_manager.h"
-#include "mongo/s/client/shard_connection.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
@@ -180,7 +179,6 @@ public:
grid.shardRegistry()->getConfigServerConnectionString().toString());
remoteCmdObjB.append(ClusterMergeChunksCommand::shardNameField(), firstChunk->getShardId());
-
BSONObj remoteResult;
// Throws, but handled at level above. Don't want to rewrap to preserve exception
@@ -193,7 +191,7 @@ public:
str::stream() << "Can't find shard for chunk: " << firstChunk->toString()));
}
- ShardConnection conn(shard->getConnString(), "");
+ ScopedDbConnection conn(shard->getConnString());
bool ok = conn->runCommand("admin", remoteCmdObjB.obj(), remoteResult);
conn.done();
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 4138c2242a9..269a6d5a5e7 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -573,6 +573,7 @@ public:
return false;
}
+ ScopedDbConnection connTo(toShardCS);
BSONObj res;
bool ok;
@@ -595,9 +596,7 @@ public:
}
try {
- ScopedDbConnection connTo(toShardCS);
ok = connTo->runCommand("admin", recvChunkStartBuilder.done(), res);
- connTo.done();
} catch (DBException& e) {
errmsg = str::stream() << "moveChunk could not contact to: shard " << toShardName
<< " to start transfer" << causedBy(e);
@@ -605,6 +604,8 @@ public:
return false;
}
+ connTo.done();
+
if (!ok) {
errmsg = "moveChunk failed to engage TO-shard in the data transfer: ";
verify(res["errmsg"].type());