author     Charlie Swanson <charlie.swanson@mongodb.com>    2015-09-01 16:19:57 -0400
committer  Charlie Swanson <charlie.swanson@mongodb.com>    2015-09-09 13:14:48 -0400
commit     c15f4bb96d2ee86874582d45d1865e9358168e7e (patch)
tree       92c788863ae91a8b9c5801e1d536d7ef1a02ec05 /jstests/sharding
parent     f5e063d4785b0460ab41de8cc4b537e5e2151338 (diff)
download   mongo-c15f4bb96d2ee86874582d45d1865e9358168e7e.tar.gz
SERVER-18272 Update jstests to use allocatePort() instead of hard coding ports
Diffstat (limited to 'jstests/sharding')
-rw-r--r--  jstests/sharding/addshard2.js                                    |  47
-rw-r--r--  jstests/sharding/addshard3.js                                    |   6
-rw-r--r--  jstests/sharding/addshard4.js                                    |   4
-rw-r--r--  jstests/sharding/auth.js                                         | 100
-rw-r--r--  jstests/sharding/csrs_upgrade.js                                 |   6
-rw-r--r--  jstests/sharding/diffservers1.js                                 |  10
-rw-r--r--  jstests/sharding/names.js                                        |   4
-rw-r--r--  jstests/sharding/remove2.js                                      |   5
-rw-r--r--  jstests/sharding/sync_cluster_config/configdb_str.js             |   4
-rw-r--r--  jstests/sharding/sync_cluster_config/rs_stepdown_and_pooling.js  |   4
-rw-r--r--  jstests/sharding/sync_cluster_config/sync7.js                    |   2
-rw-r--r--  jstests/sharding/version1.js                                     |   4
-rw-r--r--  jstests/sharding/version2.js                                     |  17
13 files changed, 118 insertions, 95 deletions
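
Every hunk below applies the same recipe: drop the hard-coded port (startPort: 31200, localhost:31000, and so on) and either read the real port back from the running process or reserve a free one with allocatePort() for negative tests. A minimal sketch of the pattern, with hypothetical names (exampleRS, deadPort); allocatePort(), ReplSetTest, getHostName(), and getMaster() all appear in the hunks themselves:

// Old style: fixed ports collide when suites run in parallel.
//   var rs = new ReplSetTest({name: "exampleRS", nodes: 3, startPort: 31200});
//   var uri = "exampleRS/" + getHostName() + ":31200";

// New style: let the harness pick free ports, then read them back at runtime.
var rs = new ReplSetTest({name: "exampleRS", nodes: 3});
rs.startSet();
rs.initiate();
var uri = "exampleRS/" + getHostName() + ":" + rs.getMaster().port;

// For a port guaranteed to have nothing listening on it (negative tests),
// reserve one explicitly instead of guessing a constant:
var deadPort = allocatePort();
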
diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js
index e6ebac9ab5f..b582ff6ed7a 100644
--- a/jstests/sharding/addshard2.js
+++ b/jstests/sharding/addshard2.js
@@ -1,35 +1,34 @@
-// don't start any shards, yet
-s = new ShardingTest( "add_shard2", 1, 0, 1, {useHostname : true} );
-
+// Don't start any shards, yet
+var s = new ShardingTest("add_shard2", 1, 0, 1, {useHostname : true});
+// Start two new instances, which will be used for shards
var conn1 = MongoRunner.runMongod({useHostname: true});
var conn2 = MongoRunner.runMongod({useHostname: true});
-var rs1 = new ReplSetTest( { "name" : "add_shard2_rs1", nodes : 3 , startPort : 31200 } );
+var rs1 = new ReplSetTest( { "name" : "add_shard2_rs1", nodes : 3 } );
rs1.startSet();
rs1.initiate();
var master1 = rs1.getMaster();
-var rs2 = new ReplSetTest( { "name" : "add_shard2_rs2", nodes : 3 , startPort : 31203 } );
+var rs2 = new ReplSetTest( { "name" : "add_shard2_rs2", nodes : 3 } );
rs2.startSet();
rs2.initiate();
var master2 = rs2.getMaster();
// replica set with set name = 'config'
-var rs3 = new ReplSetTest({ 'name': 'config', nodes: 3, startPort: 31206 });
+var rs3 = new ReplSetTest({ 'name': 'config', nodes: 3 });
rs3.startSet();
rs3.initiate();
// replica set with set name = 'admin'
-var rs4 = new ReplSetTest({ 'name': 'admin', nodes: 3, startPort: 31209 });
+var rs4 = new ReplSetTest({ 'name': 'admin', nodes: 3 });
rs4.startSet();
rs4.initiate();
// replica set with configsvr: true should *not* be allowed to be added as a shard
-var rs5 = new ReplSetTest({ name: 'csrs',
- nodes: 3,
- startPort: 31212,
- nodeOptions: {configsvr: "", storageEngine: "wiredTiger" }});
+var rs5 = new ReplSetTest({name: 'csrs',
+ nodes: 3,
+ nodeOptions: {configsvr: "", storageEngine: "wiredTiger"}});
rs5.startSet();
var conf = rs5.getReplSetConfig();
conf.configsvr = true;
@@ -44,13 +43,16 @@ assert(shard, "shard wasn't found");
assert.eq("bar", shard._id, "shard has incorrect name");
// step 2. replica set
-assert(s.admin.runCommand({"addshard" : "add_shard2_rs1/"+getHostName()+":31200"}).ok, "failed to add shard in step 2");
+assert(s.admin.runCommand(
+ {"addshard" : "add_shard2_rs1/" + getHostName() + ":" + master1.port}).ok,
+ "failed to add shard in step 2");
shard = s.getDB("config").shards.findOne({"_id" : {"$nin" : ["shard0000", "bar"]}});
assert(shard, "shard wasn't found");
assert.eq("add_shard2_rs1", shard._id, "t2 name");
// step 3. replica set w/ name given
-assert(s.admin.runCommand({"addshard" : "add_shard2_rs2/"+getHostName()+":31203", "name" : "myshard"}).ok,
+assert(s.admin.runCommand({"addshard" : "add_shard2_rs2/" + getHostName() + ":" + master2.port,
+ "name" : "myshard"}).ok,
"failed to add shard in step 4");
shard = s.getDB("config").shards.findOne({"_id" : {"$nin" : ["shard0000", "bar", "add_shard2_rs1"]}});
assert(shard, "shard wasn't found");
@@ -66,28 +68,37 @@ assert.eq("shard0001", shard._id, "t4 name");
assert.eq(s.getDB("config").shards.count(), 5, "unexpected number of shards");
// step 5. replica set w/ a wrong host
-assert(!s.admin.runCommand({"addshard" : "add_shard2_rs2/NonExistingHost:31203"}).ok, "accepted bad hostname in step 5");
+var portWithoutHostRunning = allocatePort();
+assert(!s.admin.runCommand({
+ addshard: "add_shard2_rs2/NonExistingHost:" + portWithoutHostRunning
+ }).ok,
+ "accepted bad hostname in step 5");
// step 6. replica set w/ mixed wrong/right hosts
-assert(!s.admin.runCommand({"addshard" : "add_shard2_rs2/"+getHostName()+":31203,foo:9999"}).ok,
+assert(!s.admin.runCommand({
+ addshard: "add_shard2_rs2/" + getHostName() + ":" + master2.port +
+ ",foo:" + portWithoutHostRunning
+ }).ok,
"accepted bad hostname in step 6");
//
// SERVER-17231 Adding replica set w/ set name = 'config'
//
-var configReplURI = 'config/' + getHostName() + ':31206';
+var configReplURI = 'config/' + getHostName() + ':' + rs3.getMaster().port;
+
assert(!s.admin.runCommand({ 'addshard': configReplURI }).ok,
'accepted replica set shard with set name "config"');
// but we should be allowed to add that replica set using a different shard name
assert(s.admin.runCommand({ 'addshard': configReplURI, name: 'not_config' }).ok,
'unable to add replica set using valid replica set name');
+
shard = s.getDB('config').shards.findOne({ '_id': 'not_config' });
assert(shard, 'shard with name "not_config" not found');
//
// SERVER-17232 Try inserting into shard with name 'admin'
//
-assert(s.admin.runCommand({ 'addshard': 'admin/' + getHostName() + ':31209' }).ok,
+assert(s.admin.runCommand({ 'addshard': 'admin/' + getHostName() + ':' + rs4.getMaster().port}).ok,
'adding replica set with name "admin" should work');
var wRes = s.getDB('test').foo.insert({ x: 1 });
assert(!wRes.hasWriteError() && wRes.nInserted === 1,
@@ -101,4 +112,4 @@ rs1.stopSet();
rs2.stopSet();
rs3.stopSet();
rs4.stopSet();
-rs5.stopSet();
\ No newline at end of file
+rs5.stopSet();
diff --git a/jstests/sharding/addshard3.js b/jstests/sharding/addshard3.js
index aa5a21efa92..f8d43587fc0 100644
--- a/jstests/sharding/addshard3.js
+++ b/jstests/sharding/addshard3.js
@@ -1,9 +1,11 @@
-s = new ShardingTest( "add_shard3", 1 );
+(function() {
+var st = new ShardingTest("add_shard3", 1);
-var result = s.admin.runCommand({"addshard" : "localhost:31000"});
+var result = st.admin.runCommand({addshard: st.s.host});
printjson(result);
assert.eq(result.ok, 0, "don't add mongos as a shard");
+})();
diff --git a/jstests/sharding/addshard4.js b/jstests/sharding/addshard4.js
index ce459ac2824..885227a1d7c 100644
--- a/jstests/sharding/addshard4.js
+++ b/jstests/sharding/addshard4.js
@@ -2,7 +2,7 @@
s = new ShardingTest( "addshard4", 2 , 0 , 1 , {useHostname : true});
-r = new ReplSetTest({name : "addshard4", nodes : 3, startPort : 31100});
+var r = new ReplSetTest({name: "addshard4", nodes: 3});
r.startSet();
var config = r.getReplSetConfig();
@@ -31,7 +31,7 @@ var result = s.adminCommand({"addshard" : shardName});
printjson(result);
assert.eq(result, true);
-r = new ReplSetTest({name : "addshard42", nodes : 3, startPort : 31200});
+r = new ReplSetTest({name : "addshard42", nodes : 3});
r.startSet();
config = r.getReplSetConfig();
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index 4e519126ec1..d6d018719e5 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -1,31 +1,36 @@
-adminUser = {
+(function() {
+
+'use strict';
+
+var adminUser = {
db : "admin",
username : "foo",
password : "bar"
};
-testUser = {
+var testUser = {
db : "test",
username : "bar",
password : "baz"
};
-testUserReadOnly = {
+var testUserReadOnly = {
db : "test",
username : "sad",
password : "bat"
};
-function login(userObj , thingToUse ) {
- if ( ! thingToUse )
+function login(userObj, thingToUse) {
+ if (!thingToUse) {
thingToUse = s;
+ }
thingToUse.getDB(userObj.db).auth(userObj.username, userObj.password);
}
-function logout(userObj, thingToUse ) {
- if ( ! thingToUse )
+function logout(userObj, thingToUse) {
+ if (!thingToUse)
thingToUse = s;
s.getDB(userObj.db).runCommand({logout:1});
@@ -38,17 +43,6 @@ function getShardName(rsTest) {
return config._id+"/"+members.join(",");
}
-function setupTest() {
- var s = new ShardingTest( "auth1", 0 , 0 , 1 ,
- {
- rs: true,
- extraOptions : {"keyFile" : "jstests/libs/key1"},
- noChunkSize : true,
- enableBalancer:true
- } );
- return s;
-}
-
function runTest(s) {
print("adding user");
s.getDB(adminUser.db).createUser({user: adminUser.username,
@@ -60,15 +54,16 @@ function runTest(s) {
{ $set: { value : 1 }}, true ));
printjson(s.getDB("config").settings.find().toArray());
- print("restart mongos");
- MongoRunner.stopMongos(31000);
- var opts = { port : 31000, v : 2, configdb : s._configDB, keyFile : "jstests/libs/key1", chunkSize : 1 };
- var conn = startMongos( opts );
- s.s = s._mongos[0] = s["s0"] = conn;
+ print("restart mongos with different auth options");
+ s.restartMongos(0, { port: s.port,
+ v: 2,
+ configdb: s._configDB,
+ keyFile: "jstests/libs/key1",
+ chunkSize: 1 });
login(adminUser);
- d1 = new ReplSetTest({name : "d1", nodes : 3, startPort : 31100, useHostName : true });
+ var d1 = new ReplSetTest({name : "d1", nodes : 3, useHostName : true });
d1.startSet({keyFile : "jstests/libs/key2", verbose : 0});
d1.initiate();
@@ -99,12 +94,14 @@ function runTest(s) {
assert(thrown);
print("start rs w/correct key");
+
d1.stopSet();
d1.startSet({keyFile : "jstests/libs/key1", verbose : 0});
d1.initiate();
+
var master = d1.getMaster();
- print("adding shard w/auth "+shardName);
+ print("adding shard w/auth " + shardName);
result = s.getDB("admin").runCommand({addShard : shardName});
assert.eq(result.ok, 1, tojson(result));
@@ -125,12 +122,12 @@ function runTest(s) {
print("query try");
var e = assert.throws(function() {
- conn.getDB("foo").bar.findOne();
+ s.s.getDB("foo").bar.findOne();
});
printjson(e);
print("cmd try");
- assert.eq( 0, conn.getDB("foo").runCommand({listDatabases:1}).ok );
+ assert.eq(0, s.s.getDB("foo").runCommand({listDatabases:1}).ok);
print("insert try 1");
s.getDB("test").foo.insert({x:1});
@@ -144,7 +141,7 @@ function runTest(s) {
logout(testUser);
- d2 = new ReplSetTest({name : "d2", nodes : 3, startPort : 31200, useHostName : true });
+ var d2 = new ReplSetTest({name : "d2", nodes : 3, useHostName : true });
d2.startSet({keyFile : "jstests/libs/key1", verbose : 0});
d2.initiate();
d2.awaitSecondaryNodes();
@@ -218,7 +215,6 @@ function runTest(s) {
assert(false, "Number of docs found does not equal the number inserted. Missing docs: " + missingDocNumbers);
}
-
// We're only sure we aren't duplicating documents iff there's no balancing going on here
// This call also waits for any ongoing balancing to stop
s.stopBalancer(60000);
@@ -253,9 +249,10 @@ function runTest(s) {
login(testUser);
print( "testing map reduce" );
- /* sharded map reduce can be tricky since all components talk to each other.
- for example SERVER-4114 is triggered when 1 mongod connects to another for final reduce
- it's not properly tested here since addresses are localhost, which is more permissive */
+
+ // Sharded map reduce can be tricky since all components talk to each other. For example
+ // SERVER-4114 is triggered when 1 mongod connects to another for final reduce it's not
+ // properly tested here since addresses are localhost, which is more permissive.
var res = s.getDB("test").runCommand(
{mapreduce : "foo",
map : function() { emit(this.x, 1); },
@@ -265,18 +262,20 @@ function runTest(s) {
printjson(res);
assert.commandWorked(res);
- // check that dump doesn't get stuck with auth
- var x = runMongoProgram( "mongodump", "--host", "127.0.0.1:31000", "-d", testUser.db, "-u",
- testUser.username, "-p", testUser.password, "--authenticationMechanism",
- "SCRAM-SHA-1");
- print("result: "+x);
-
- // test read only users
+ // Check that dump doesn't get stuck with auth
+ var x = runMongoProgram("mongodump",
+ "--host", s.s.host,
+ "-d", testUser.db,
+ "-u", testUser.username,
+ "-p", testUser.password,
+ "--authenticationMechanism", "SCRAM-SHA-1");
+ print("result: " + x);
+ // Test read only users
print( "starting read only tests" );
- readOnlyS = new Mongo( s.getDB( "test" ).getMongo().host )
- readOnlyDB = readOnlyS.getDB( "test" );
+ var readOnlyS = new Mongo( s.getDB( "test" ).getMongo().host )
+ var readOnlyDB = readOnlyS.getDB( "test" );
print( " testing find that should fail" );
assert.throws( function(){ readOnlyDB.foo.findOne(); } )
@@ -296,6 +295,7 @@ function runTest(s) {
print("make sure currentOp/killOp fail");
assert.commandFailed(readOnlyDB.currentOp());
assert.commandFailed(readOnlyDB.killOp(123));
+
// fsyncUnlock doesn't work in mongos anyway, so no need check authorization for it
/*
broken because of SERVER-4156
@@ -307,20 +307,30 @@ function runTest(s) {
out:"blarg"
}));
*/
+
print( " testing logout (should succeed)" );
assert.commandWorked(readOnlyDB.runCommand({logout : 1}));
print("make sure currentOp/killOp fail again");
assert.commandFailed(readOnlyDB.currentOp());
assert.commandFailed(readOnlyDB.killOp(123));
- // fsyncUnlock doesn't work in mongos anyway, so no need check authorization for it
}
-var s = setupTest();
-if (s.getDB( "admin" ).runCommand( "buildInfo" ).bits < 64) {
- print("Skipping test on 32-bit platforms");
+var s = new ShardingTest("auth1", 0, 0, 1,
+ {
+ rs: true,
+ extraOptions: { "keyFile": "jstests/libs/key1" },
+ noChunkSize: true,
+ enableBalancer: true
+ });
+
+if (s.getDB('admin').runCommand('buildInfo').bits < 64) {
+ print('Skipping test on 32-bit platforms');
}
else {
runTest(s);
}
+
s.stop();
+
+})();
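
Note the mongos restart in the auth.js hunks above: the old code stopped the process by the literal port 31000 and relaunched it with startMongos({port: 31000, ...}). The replacement, s.restartMongos(0, {...}), reuses whatever port the harness originally allocated. A minimal sketch of the call shape, assuming s is the ShardingTest built in this file:

// Restart mongos 0 in place with new options, keeping its allocated port.
s.restartMongos(0, {
    port: s.port,               // the port the framework assigned, not 31000
    configdb: s._configDB,
    keyFile: "jstests/libs/key1",
    chunkSize: 1
});
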
diff --git a/jstests/sharding/csrs_upgrade.js b/jstests/sharding/csrs_upgrade.js
index b8b6b0d6de2..a94cd2bf4e0 100644
--- a/jstests/sharding/csrs_upgrade.js
+++ b/jstests/sharding/csrs_upgrade.js
@@ -207,9 +207,11 @@ var st;
var sconfig = Object.extend({}, st.s0.fullOptions, /* deep */ true);
delete sconfig.port;
sconfig.configdb = csrsName + "/" + csrs[0].name;
- assertCanSplit(startMongos(sconfig), "when mongos started with --configdb=" + sconfig.configdb);
+ assertCanSplit(MongoRunner.runMongos(sconfig),
+ "when mongos started with --configdb=" + sconfig.configdb);
sconfig.configdb = st.s0.fullOptions.configdb;
- assertCanSplit(startMongos(sconfig), "when mongos started with --configdb=" + sconfig.configdb);
+ assertCanSplit(MongoRunner.runMongos(sconfig),
+ "when mongos started with --configdb=" + sconfig.configdb);
assertCanSplit(st.s0, "on mongos that drove the upgrade");
assertCanSplit(st.s1, "on mongos that was previously unaware of the upgrade");
}());
diff --git a/jstests/sharding/diffservers1.js b/jstests/sharding/diffservers1.js
index 8a7f2bac0a2..0d477147be8 100644
--- a/jstests/sharding/diffservers1.js
+++ b/jstests/sharding/diffservers1.js
@@ -1,6 +1,4 @@
-
-
-s = new ShardingTest( "diffservers1" , 2 );
+var s = new ShardingTest( "diffservers1" , 2 );
assert.eq( 2 , s.config.shards.count() , "server count wrong" );
assert.eq( 2 , s._configServers[0].getDB( "config" ).shards.count() , "where are servers!" );
@@ -14,8 +12,10 @@ test1.save( { a : 3 } );
assert( 3 , test1.count() );
assert( ! s.admin.runCommand( { addshard: "sdd$%" } ).ok , "bad hostname" );
-assert( ! s.admin.runCommand( { addshard: "127.0.0.1:43415" } ).ok , "host not up" );
-assert( ! s.admin.runCommand( { addshard: "10.0.0.1:43415" } ).ok , "allowed shard in IP when config is localhost" );
+var portWithoutHostRunning = allocatePort();
+assert(!s.admin.runCommand({addshard: "127.0.0.1:" + portWithoutHostRunning}).ok, "host not up");
+assert(!s.admin.runCommand({ addshard: "10.0.0.1:" + portWithoutHostRunning}).ok,
+ "allowed shard in IP when config is localhost" );
s.stop();
diff --git a/jstests/sharding/names.js b/jstests/sharding/names.js
index ff775184116..17e98f82b30 100644
--- a/jstests/sharding/names.js
+++ b/jstests/sharding/names.js
@@ -2,8 +2,8 @@
var st = new ShardingTest( name = "test", shards = 0, verbose = 2, mongos = 2, other = { rs : true } )
-var rsA = new ReplSetTest({ nodes : 2, name : "rsA", startPort : 28000 })
-var rsB = new ReplSetTest({ nodes : 2, name : "rsB", startPort : 28010 })
+var rsA = new ReplSetTest({ nodes : 2, name : "rsA" })
+var rsB = new ReplSetTest({ nodes : 2, name : "rsB" })
rsA.startSet()
rsB.startSet()
diff --git a/jstests/sharding/remove2.js b/jstests/sharding/remove2.js
index 48bb95e1e95..74f760050db 100644
--- a/jstests/sharding/remove2.js
+++ b/jstests/sharding/remove2.js
@@ -6,9 +6,6 @@ seedString = function(replTest) {
};
removeShard = function(st, replTest) {
-
-
-
print( "Removing shard with name: " + replTest.name );
res = st.admin.runCommand( { removeshard: replTest.name } )
printjson(res);
@@ -192,7 +189,7 @@ print( "Sleeping for 20 seconds to let the other shard's ReplicaSetMonitor time
sleep( 20000 );
-var rst2 = new ReplSetTest({name : rst1.name, nodes : 2, startPort : rst1.startPort + 1500, useHostName : true});
+var rst2 = new ReplSetTest({name : rst1.name, nodes : 2, useHostName : true});
rst2.startSet();
rst2.initiate();
rst2.awaitReplication();
diff --git a/jstests/sharding/sync_cluster_config/configdb_str.js b/jstests/sharding/sync_cluster_config/configdb_str.js
index 6cd592bbda4..e7a5985836e 100644
--- a/jstests/sharding/sync_cluster_config/configdb_str.js
+++ b/jstests/sharding/sync_cluster_config/configdb_str.js
@@ -10,12 +10,12 @@ st.stopBalancer();
var badConfStr = st.c1.name + ',' + st.c0.name + ',' + st.c2.name;
-var otherMongos = MongoRunner.runMongos({ port: 30998, configdb: badConfStr });
+var otherMongos = MongoRunner.runMongos({configdb: badConfStr});
var configDB = otherMongos.getDB('config');
var res = configDB.user.insert({ x: 1 });
assert.writeError(res);
-MongoRunner.stopMongos(30998);
+MongoRunner.stopMongos(otherMongos.port);
st.stop();
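
The same idea for auxiliary mongos processes: omit the port option so MongoRunner allocates one, and shut the process down by the port recorded on the returned connection. A minimal sketch, where badConfStr stands in for the config string built in the test above:

// Start an extra mongos on an automatically allocated port.
var otherMongos = MongoRunner.runMongos({configdb: badConfStr});
assert.neq(null, otherMongos, "mongos failed to start");

// ... exercise otherMongos ...

// Stop it by the port it actually received, not a constant like 30998.
MongoRunner.stopMongos(otherMongos.port);
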
diff --git a/jstests/sharding/sync_cluster_config/rs_stepdown_and_pooling.js b/jstests/sharding/sync_cluster_config/rs_stepdown_and_pooling.js
index cd1efbbd8d6..d40cbf25e21 100644
--- a/jstests/sharding/sync_cluster_config/rs_stepdown_and_pooling.js
+++ b/jstests/sharding/sync_cluster_config/rs_stepdown_and_pooling.js
@@ -37,9 +37,7 @@ else {
conns.push(new Mongo(mongos.host));
conns[i].getCollection(coll + "").findOne();
}
-
- assert.eq(primary.port, 31100);
-
+
jsTest.log("Returning the connections back to the pool.");
for ( var i = 0; i < conns.length; i++ ) {
diff --git a/jstests/sharding/sync_cluster_config/sync7.js b/jstests/sharding/sync_cluster_config/sync7.js
index 65ac5b48d1b..33cf31bc899 100644
--- a/jstests/sharding/sync_cluster_config/sync7.js
+++ b/jstests/sharding/sync_cluster_config/sync7.js
@@ -13,7 +13,7 @@ s._configServers[1].getDB( "admin" ).runCommand( { _skewClockCommand : 1, skew :
// We need to start another mongos after skewing the clock, since the first mongos will have already
// tested the config servers (via the balancer) before we manually skewed them
-otherMongos = startMongos( { port : 30020, v : 2, configdb : s._configDB } );
+var otherMongos = MongoRunner.runMongos({v: 2, configdb: s._configDB});
// Initialize DB data
initDB = function(name) {
diff --git a/jstests/sharding/version1.js b/jstests/sharding/version1.js
index c7a8eb0d1a5..afe3f709fad 100644
--- a/jstests/sharding/version1.js
+++ b/jstests/sharding/version1.js
@@ -32,7 +32,7 @@ assert( a.runCommand({ setShardVersion: "alleyinsider.foo",
version: new Timestamp(2, 0),
authoritative: true,
shard: "shard0000",
- shardHost: "localhost:30000" }),
+ shardHost: s.s.host }),
"should have failed because version is config is 1|0" );
var epoch = s.getDB('config').chunks.findOne().lastmodEpoch;
@@ -42,7 +42,7 @@ assert.commandWorked( a.runCommand({ setShardVersion: "alleyinsider.foo",
versionEpoch: epoch,
authoritative: true,
shard: "shard0000",
- shardHost: "localhost:30000" }),
+ shardHost: s.s.host }),
"should have worked" );
assert( a.runCommand({ setShardVersion: "alleyinsider.foo",
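
In version1.js (and version2.js below), the fix passes setShardVersion an address taken from the live connection — s.s.host, a "host:port" string — rather than the "localhost:30000" literal that only held under the old fixed port layout. A one-line illustration, assuming s is the test's ShardingTest:

// The connection knows its own address, whatever port was allocated:
print(s.s.host);    // e.g. "localhost:20006", never assumed to be 30000
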
diff --git a/jstests/sharding/version2.js b/jstests/sharding/version2.js
index f41342dcb8a..5a375f89c21 100644
--- a/jstests/sharding/version2.js
+++ b/jstests/sharding/version2.js
@@ -14,13 +14,16 @@ assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s
assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.i, 0 );
var fooEpoch = s.getDB('config').chunks.findOne({ ns: 'alleyinsider.foo' }).lastmodEpoch;
-assert( a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- authoritative: true,
- version: new Timestamp(1, 0),
- versionEpoch: fooEpoch,
- shard: "shard0000",
- shardHost: "localhost:30000" }).ok == 1 );
+assert.commandWorked(
+ a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ authoritative: true,
+ version: new Timestamp(1, 0),
+ versionEpoch: fooEpoch,
+ shard: "shard0000",
+ shardHost: s.s.host,
+ }));
printjson( s.config.chunks.findOne() );