author    Benety Goh <benety@mongodb.com>  2015-08-21 22:16:04 -0400
committer Benety Goh <benety@mongodb.com>  2015-08-21 22:16:04 -0400
commit    169a7ca486099a93e8d3ea19cc556dc2c55fcd0e (patch)
tree      f44b0a210cb157b291a45b93a86098748bfdfd65 /jstests
parent    045cd1070cae1e7827255850c2fe35194e48b24e (diff)
download  mongo-169a7ca486099a93e8d3ea19cc556dc2c55fcd0e.tar.gz
Revert "SERVER-19855 Use ShardConnection for migrations and splits"
This reverts commit fdc2aec174d2facc5edeacd2b7f4c078836f1dfb.
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/aggregation/bugs/server6118.js     | 42
-rw-r--r--  jstests/aggregation/bugs/server6179.js     | 40
-rw-r--r--  jstests/gle/gle_sharded_wc.js              |  6
-rw-r--r--  jstests/sharding/hash_shard_num_chunks.js  | 30
-rw-r--r--  jstests/sharding/sharded_profile.js        | 30
5 files changed, 64 insertions, 84 deletions
diff --git a/jstests/aggregation/bugs/server6118.js b/jstests/aggregation/bugs/server6118.js
index 7f622c568b2..2f2e7599ef8 100644
--- a/jstests/aggregation/bugs/server6118.js
+++ b/jstests/aggregation/bugs/server6118.js
@@ -1,41 +1,35 @@
// SERVER-6118: support for sharded sorts
-(function() {
-var s = new ShardingTest({ name: "aggregation_sort1", shards: 2, mongos: 1, verbose: 0 });
-s.stopBalancer();
-
-s.adminCommand({ enablesharding:"test" });
+// Set up a sharding test.
+s = new ShardingTest( "aggregation_sort1", 2, 0, 2 );
+s.adminCommand( { enablesharding:"test" } );
s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand({ shardcollection: "test.data", key:{ _id: 1 } });
+s.adminCommand( { shardcollection:"test.data", key:{ _id:1 } } );
-var d = s.getDB( "test" );
+// Test does its own balancing.
+s.stopBalancer();
-// Insert _id values 0 - 99
-var N = 100;
+d = s.getDB( "test" );
-var bulkOp = d.data.initializeOrderedBulkOp();
-for(var i = 0; i < N; ++i) {
- bulkOp.insert({ _id: i });
+// Insert _id values 0 - 99.
+N = 100;
+for( i = 0; i < N; ++i ) {
+ d.data.insert( { _id:i } )
}
-bulkOp.execute();
-// Split the data into 3 chunks
+// Split the data into 3 chunks.
s.adminCommand( { split:"test.data", middle:{ _id:33 } } );
s.adminCommand( { split:"test.data", middle:{ _id:66 } } );
-// Migrate the middle chunk to another shard
-s.adminCommand({ movechunk: "test.data",
- find: { _id: 50 },
- to: s.getOther(s.getServer("test")).name });
+// Migrate the middle chunk to another shard.
+s.adminCommand( { movechunk:"test.data", find:{ _id:50 },
+ to:s.getOther( s.getServer( "test" ) ).name } );
// Check that the results are in order.
-var result = d.data.aggregate({ $sort: { _id: 1 } }).toArray();
+result = d.data.aggregate( { $sort: { _id:1 } } ).toArray();
printjson(result);
-
-for(var i = 0; i < N; ++i) {
- assert.eq(i, result[i]._id);
+for( i = 0; i < N; ++i ) {
+ assert.eq( i, result[ i ]._id );
}
s.stop()
-
-})();
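
For context, the hunk above swaps the batched population (removed lines) back to per-document inserts. A minimal sketch of both patterns side by side, assuming the same `d` handle on the sharded `test` database as in the test:

// Pattern removed by this revert: queue all documents into one ordered
// bulk operation, so the 100 inserts reach the server in a single batch.
var bulkOp = d.data.initializeOrderedBulkOp();
for (var i = 0; i < 100; ++i) {
    bulkOp.insert({ _id: i });
}
bulkOp.execute();

// Pattern restored by this revert: one insert (one round trip) per document.
for (var i = 0; i < 100; ++i) {
    d.data.insert({ _id: i });
}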
diff --git a/jstests/aggregation/bugs/server6179.js b/jstests/aggregation/bugs/server6179.js
index 8af02645a5c..260fa38c983 100644
--- a/jstests/aggregation/bugs/server6179.js
+++ b/jstests/aggregation/bugs/server6179.js
@@ -1,37 +1,31 @@
// SERVER-6179: support for two $groups in sharded agg
-(function() {
-var s = new ShardingTest({ name: "aggregation_multiple_group", shards: 2, mongos: 1, verbose: 0 });
-s.stopBalancer();
-
-s.adminCommand({ enablesharding:"test" });
+// Set up a sharding test.
+s = new ShardingTest( "aggregation_multiple_group", 2, 0, 2 );
+s.adminCommand( { enablesharding:"test" } );
s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand({ shardcollection: "test.data", key:{ _id: 1 } });
-
-var d = s.getDB( "test" );
+s.adminCommand( { shardcollection:"test.data", key:{ _id:1 } } );
+s.stopBalancer()
-// Insert _id values 0 - 99
-var N = 100;
+d = s.getDB( "test" );
-var bulkOp = d.data.initializeOrderedBulkOp();
-for(var i = 0; i < N; ++i) {
- bulkOp.insert({ _id: i, i: i%10 });
+// Populate
+for( i = 0; i < 100; ++i ) {
+ d.data.insert( { _id:i, i:i%10 } )
}
-bulkOp.execute();
-// Split the data into 3 chunks
+// Split the data into 3 chunks.
s.adminCommand( { split:"test.data", middle:{ _id:33 } } );
s.adminCommand( { split:"test.data", middle:{ _id:66 } } );
-// Migrate the middle chunk to another shard
-s.adminCommand({ movechunk: "test.data",
- find: { _id: 50 },
- to: s.getOther(s.getServer("test")).name });
+// Migrate the middle chunk to another shard.
+s.adminCommand( { movechunk:"test.data", find:{ _id:50 },
+ to:s.getOther( s.getServer( "test" ) ).name } );
// Check that we get results rather than an error
-var result = d.data.aggregate({$group: {_id: '$_id', i: {$first: '$i'}}},
- {$group: {_id: '$i', avg_id: {$avg: '$_id'}}},
- {$sort: {_id: 1}}).toArray();
+result = d.data.aggregate({$group: {_id: '$_id', i: {$first: '$i'}}},
+ {$group: {_id: '$i', avg_id: {$avg: '$_id'}}},
+ {$sort: {_id: 1}}).toArray();
expected = [
{
"_id" : 0,
@@ -78,5 +72,3 @@ expected = [
assert.eq(result, expected);
s.stop();
-
-})();
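
Both sides of this hunk call `aggregate()` with the pipeline stages as separate arguments, a legacy shell form. A minimal sketch of the equivalent array form, assuming the same `test.data` collection populated above:

// Same two-$group pipeline, with the stages wrapped in a single array argument.
var result = d.data.aggregate([
    { $group: { _id: '$_id', i: { $first: '$i' } } },    // one document per _id
    { $group: { _id: '$i', avg_id: { $avg: '$_id' } } }, // regroup by i, average the _ids
    { $sort: { _id: 1 } }                                // deterministic order for assertions
]).toArray();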
diff --git a/jstests/gle/gle_sharded_wc.js b/jstests/gle/gle_sharded_wc.js
index bdde00cab25..375f3787a23 100644
--- a/jstests/gle/gle_sharded_wc.js
+++ b/jstests/gle/gle_sharded_wc.js
@@ -1,7 +1,7 @@
+//
// Tests of sharded GLE enforcing write concern against operations in a cluster
// Basic sharded GLE operation is tested elsewhere.
-
-(function() {
+//
// Options for a cluster with two replica set shards, the first with two nodes the second with one
// This lets us try a number of GLE scenarios
@@ -128,5 +128,3 @@ assert.eq(coll.count({ _id : 1 }), 1);
jsTest.log( "DONE!" );
st.stop();
-
-})();
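
The surrounding file exercises getLastError (GLE) write-concern enforcement against the cluster. For orientation, a minimal hedged sketch of one GLE round trip; the `st` ShardingTest handle comes from the file, while the `test.foo` namespace and write-concern values here are illustrative:

// Write through the mongos, then ask the same connection whether the
// preceding operation satisfied the requested write concern.
var testDB = st.s0.getDB('test');
testDB.foo.insert({ _id: 1 });
var gle = testDB.runCommand({ getLastError: 1, w: 'majority', wtimeout: 30000 });
assert.eq(null, gle.err, 'write failed to satisfy the requested write concern');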
diff --git a/jstests/sharding/hash_shard_num_chunks.js b/jstests/sharding/hash_shard_num_chunks.js
index 01b140f1583..608e7c7674c 100644
--- a/jstests/sharding/hash_shard_num_chunks.js
+++ b/jstests/sharding/hash_shard_num_chunks.js
@@ -1,24 +1,21 @@
// Hash sharding with initial chunk count set.
-// @tags: [ hashed ]
-
-(function() {
-
-var s = new ShardingTest({ shards: 3, mongos: 1, verbose: 1 });
-s.stopBalancer();
+// @tags : [ hashed ]
+var s = new ShardingTest({ shards : 3, mongos : 1, verbose : 1 });
var dbname = "test";
var coll = "foo";
var db = s.getDB(dbname);
-
-assert.commandWorked(db.adminCommand({ enablesharding: dbname }));
+db.adminCommand({ enablesharding : dbname });
s.ensurePrimaryShard(dbname, 'shard0001');
-assert.commandWorked(db.adminCommand({ shardcollection: dbname + "." + coll,
- key: { a: "hashed" },
- numInitialChunks: 500 }));
+//for simplicity turn off balancer
+s.stopBalancer();
+var res = db.adminCommand({ shardcollection : dbname + "." + coll,
+ key : { a : "hashed" },
+ numInitialChunks : 500 });
+assert.eq(res.ok, 1, "shardcollection didn't work");
db.printShardingStatus();
-
var numChunks = s.config.chunks.count();
assert.eq(numChunks, 500 , "should be exactly 500 chunks");
@@ -26,16 +23,15 @@ var shards = s.config.shards.find();
shards.forEach(
// check that each shard has one third the numInitialChunks
function (shard){
- var numChunksOnShard = s.config.chunks.find({"shard": shard._id}).count();
+ var numChunksOnShard = s.config.chunks.find({"shard" : shard._id}).count();
assert.gte(numChunksOnShard, Math.floor(500/3));
}
);
-// Check that the collection gets dropped correctly (which doesn't happen if pre-splitting fails to
-// create the collection on all shards).
-res = db.runCommand({ "drop": coll });
+// Check that the collection gets dropped correctly (which doesn't happen if pre-splitting
+// fails to create the collection on all shards).
+res = db.runCommand({ "drop" : coll });
assert.eq(res.ok, 1, "couldn't drop empty, pre-split collection");
s.stop();
-})();
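
The revert trades `assert.commandWorked()` for a manual check of the reply's `ok` field. The two styles are nearly interchangeable here; a minimal sketch of both applied to one captured reply, assuming the same `db` handle with admin access as in the test:

// Issue the command once and capture the reply document.
var res = db.adminCommand({ shardcollection: 'test.foo',
                            key: { a: 'hashed' },
                            numInitialChunks: 500 });

// Style removed by the revert: throws, embedding the full reply, on failure.
assert.commandWorked(res);

// Style restored by the revert: inspect the reply's ok field by hand.
assert.eq(res.ok, 1, "shardcollection didn't work");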
diff --git a/jstests/sharding/sharded_profile.js b/jstests/sharding/sharded_profile.js
index f3ab4fdaa6d..77f927bdcf5 100644
--- a/jstests/sharding/sharded_profile.js
+++ b/jstests/sharding/sharded_profile.js
@@ -1,36 +1,36 @@
+//
// Tests whether profiling can trigger stale config errors and interfere with write batches
// SERVER-13413
+//
-(function() {
-
-var st = new ShardingTest({ shards: 1, mongos: 2 });
+var st = new ShardingTest({ shards : 1, mongos : 2 });
st.stopBalancer();
-var admin = st.s0.getDB('admin');
-var shards = st.s0.getCollection('config.shards').find().toArray();
-var coll = st.s0.getCollection('foo.bar');
+var mongos = st.s0;
+var admin = mongos.getDB( "admin" );
+var shards = mongos.getCollection( "config.shards" ).find().toArray();
+var coll = mongos.getCollection( "foo.bar" );
-assert(admin.runCommand({ enableSharding: coll.getDB() + '' }).ok);
-assert(admin.runCommand({ shardCollection: coll + '', key: { _id: 1 } }).ok);
+assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
+assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
st.printShardingStatus();
-jsTest.log('Turning on profiling on ' + st.shard0);
+jsTest.log( "Turning on profiling..." );
st.shard0.getDB(coll.getDB().toString()).setProfilingLevel(2);
-
var profileColl = st.shard0.getDB(coll.getDB().toString()).system.profile;
-var inserts = [{ _id: 0 }, { _id: 1 }, { _id: 2 }];
+var inserts = [{ _id : 0 }, { _id : 1 }, { _id : 2 }];
+var staleColl = st.s1.getCollection(coll.toString());
-assert.writeOK(st.s1.getCollection(coll.toString()).insert(inserts));
+assert.writeOK(staleColl.insert(inserts));
printjson(profileColl.find().toArray());
for (var i = 0; i < inserts.length; i++) {
- assert.neq(null, profileColl.findOne({ 'query._id': i }));
+ assert.neq(null, profileColl.findOne({ 'query._id' : i }));
}
+jsTest.log( "DONE!" );
st.stop();
-
-})();
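
The test routes writes through a second mongos (`st.s1`) whose routing info may be stale, then checks that the shard's profiler recorded them despite the stale-config retry. For orientation, a minimal sketch of the profiler round trip on a plain database; the `profileDemo` name is illustrative:

// Level 2 profiles every operation into the capped system.profile collection.
var testDB = db.getSiblingDB('profileDemo');
testDB.setProfilingLevel(2);
testDB.bar.insert({ _id: 0 });
// Each profiled operation becomes one document with op/ns fields describing it.
var entry = testDB.system.profile.findOne({ op: 'insert', ns: 'profileDemo.bar' });
assert.neq(null, entry, 'expected the insert to be profiled');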