211 files changed, 915 insertions, 612 deletions
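The recurring pattern in this change: tests that used to hardcode shard IDs ("shard0000", "shard0001") now derive them from the ShardingTest fixture, and tests that still require standalone (non-replica-set) shards opt out via shardAsReplicaSet: false. A minimal sketch of the pattern follows, assuming the mongo shell test fixtures (ShardingTest, assert) are loaded; the database and collection names are illustrative only, not taken from the diff:

    // Keep the shards as standalone mongods (the opt-out used by tests below that
    // cannot yet run against replica-set shards).
    var st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
    assert.commandWorked(st.s0.adminCommand({enableSharding: "test"}));
    st.ensurePrimaryShard("test", st.shard1.shardName);  // was: 'shard0001'
    assert.commandWorked(st.s0.adminCommand({shardCollection: "test.coll", key: {_id: 1}}));
    assert.commandWorked(st.s0.adminCommand({split: "test.coll", middle: {_id: 0}}));
    assert.commandWorked(st.s0.adminCommand(
        {moveChunk: "test.coll", find: {_id: 1}, to: st.shard0.shardName}));  // was: 'shard0000'
    st.stop();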
diff --git a/jstests/aggregation/bugs/server6179.js b/jstests/aggregation/bugs/server6179.js
index d04e9a590c2..2e8a3445004 100644
--- a/jstests/aggregation/bugs/server6179.js
+++ b/jstests/aggregation/bugs/server6179.js
@@ -6,7 +6,7 @@ var s = new ShardingTest({shards: 2});
assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', 'shard0001');
+ s.ensurePrimaryShard('test', s.shard1.shardName);
assert.commandWorked(s.s0.adminCommand({shardcollection: "test.data", key: {_id: 1}}));
var d = s.getDB("test");
diff --git a/jstests/aggregation/bugs/server7781.js b/jstests/aggregation/bugs/server7781.js
index 4394fc4f3bb..5ed2d58a10e 100644
--- a/jstests/aggregation/bugs/server7781.js
+++ b/jstests/aggregation/bugs/server7781.js
@@ -153,7 +153,7 @@ var sharded = new ShardingTest({shards: 3, mongos: 1});
assert.commandWorked(sharded.s0.adminCommand({enablesharding: "test"}));
- sharded.ensurePrimaryShard('test', 'shard0001');
+ sharded.ensurePrimaryShard('test', sharded.shard1.shardName);
test(sharded.getDB('test'), true, '2d');
test(sharded.getDB('test'), true, '2dsphere');
diff --git a/jstests/aggregation/mongos_merge.js b/jstests/aggregation/mongos_merge.js
index 39d1055f971..56e4cd9f89e 100644
--- a/jstests/aggregation/mongos_merge.js
+++ b/jstests/aggregation/mongos_merge.js
@@ -37,9 +37,9 @@ assert.commandWorked(
mongosDB.adminCommand({setParameter: 1, internalQueryAlwaysMergeOnPrimaryShard: true}));
- // Enable sharding on the test DB and ensure its primary is shard0000.
+ // Enable sharding on the test DB and ensure its primary is shard0.
assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), "shard0000");
+ st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
// Shard the test collection on _id.
assert.commandWorked(
@@ -59,11 +59,11 @@ assert.commandWorked(
mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 100}}));
- // Move the [0, 100) and [100, MaxKey) chunks to shard0001.
+ // Move the [0, 100) and [100, MaxKey) chunks to shard1.
assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 50}, to: "shard0001"}));
+ {moveChunk: mongosColl.getFullName(), find: {_id: 50}, to: st.shard1.shardName}));
assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 150}, to: "shard0001"}));
+ {moveChunk: mongosColl.getFullName(), find: {_id: 150}, to: st.shard1.shardName}));
// Create a random geo co-ord generator for testing.
var georng = new GeoNearRandomTest(mongosColl);
diff --git a/jstests/aggregation/shard_targeting.js b/jstests/aggregation/shard_targeting.js
index 8b6241d77cf..e6f9d95a84a 100644
--- a/jstests/aggregation/shard_targeting.js
+++ b/jstests/aggregation/shard_targeting.js
@@ -47,9 +47,9 @@ assert.commandWorked(mongosDB.dropDatabase());
- // Enable sharding on the test DB and ensure its primary is shard0000.
+ // Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), "shard0000");
+ st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
// Shard the test collection on _id.
assert.commandWorked(
@@ -63,11 +63,11 @@ assert.commandWorked(
mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 100}}));
- // Move the [0, 100) and [100, MaxKey) chunks to shard0001.
+ // Move the [0, 100) and [100, MaxKey) chunks to st.shard1.shardName.
assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 50}, to: "shard0001"}));
+ {moveChunk: mongosColl.getFullName(), find: {_id: 50}, to: st.shard1.shardName}));
assert.commandWorked(mongosDB.adminCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: 150}, to: "shard0001"}));
+ {moveChunk: mongosColl.getFullName(), find: {_id: 150}, to: st.shard1.shardName}));
// Write one document into each of the chunks.
assert.writeOK(mongosColl.insert({_id: -150}));
@@ -96,7 +96,7 @@ let testName, outColl;
// Test that a range query is passed through if the chunks encompassed by the query all lie
- // on a single shard, in this case shard0000.
+ // on a single shard, in this case st.shard0.shardName.
testName = "agg_shard_targeting_range_single_shard_all_chunks_on_same_shard";
assert.eq(mongosColl
.aggregate([{$match: {_id: {$gte: -150, $lte: -50}}}].concat(splitPoint),
@@ -162,12 +162,14 @@
// Test that a passthrough will back out and split the pipeline if we try to target a single
// shard, get a stale config exception, and find that more than one shard is now involved.
- // Move the _id: [-100, 0) chunk from shard0000 to shard0001 via mongosForMove.
+ // Move the _id: [-100, 0) chunk from st.shard0.shardName to st.shard1.shardName via
+ // mongosForMove.
assert.commandWorked(mongosForMove.getDB("admin").runCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: -50}, to: "shard0001"}));
+ {moveChunk: mongosColl.getFullName(), find: {_id: -50}, to: st.shard1.shardName}));
// Run the same aggregation that targeted a single shard via the now-stale mongoS. It should
- // attempt to send the aggregation to shard0000, hit a stale config exception, split the
+ // attempt to send the aggregation to st.shard0.shardName, hit a stale config exception,
+ // split the
// pipeline and redispatch. We append an $_internalSplitPipeline stage in order to force a
// shard merge rather than a mongoS merge.
testName = "agg_shard_targeting_backout_passthrough_and_split_if_cache_is_stale";
@@ -180,22 +182,25 @@
2);
// Before the first dispatch:
- // - mongosForMove and shard0000 (the donor shard) are up to date.
- // - mongosForAgg and shard0001 are stale. mongosForAgg incorrectly believes that the
- // necessary data is all on shard0000.
+ // - mongosForMove and st.shard0.shardName (the donor shard) are up to date.
+ // - mongosForAgg and st.shard1.shardName are stale. mongosForAgg incorrectly believes that
+ // the
+ // necessary data is all on st.shard0.shardName.
// We therefore expect that:
// - mongosForAgg will throw a stale config error when it attempts to establish a
- // single-shard cursor on shard0000 (attempt 1).
+ // single-shard cursor on st.shard0.shardName (attempt 1).
// - mongosForAgg will back out, refresh itself, split the pipeline and redispatch to both
// shards.
- // - shard0001 will throw a stale config and refresh itself when the split pipeline is sent
+ // - st.shard1.shardName will throw a stale config and refresh itself when the split
+ // pipeline is sent
// to it (attempt 2).
// - mongosForAgg will back out, retain the split pipeline and redispatch (attempt 3).
// - The aggregation will succeed on the third dispatch.
// We confirm this behaviour via the following profiler results:
- // - One aggregation on shard0000 with a shard version exception (indicating that the mongoS
+ // - One aggregation on st.shard0.shardName with a shard version exception (indicating that
+ // the mongoS
// was stale).
profilerHasSingleMatchingEntryOrThrow({
profileDB: shard0DB,
@@ -207,7 +212,8 @@
}
});
- // - One aggregation on shard0001 with a shard version exception (indicating that the shard
+ // - One aggregation on st.shard1.shardName with a shard version exception (indicating that
+ // the shard
// was stale).
profilerHasSingleMatchingEntryOrThrow({
profileDB: shard1DB,
@@ -219,8 +225,10 @@
}
});
- // - At most two aggregations on shard0000 with no stale config exceptions. The first, if
- // present, is an aborted cursor created if the command reaches shard0000 before shard0001
+ // - At most two aggregations on st.shard0.shardName with no stale config exceptions. The
+ // first, if
+ // present, is an aborted cursor created if the command reaches st.shard0.shardName before
+ // st.shard1.shardName
// throws its stale config exception during attempt 2. The second profiler entry is from the
// aggregation which succeeded.
profilerHasAtLeastOneAtMostNumMatchingEntriesOrThrow({
@@ -234,7 +242,7 @@
maxExpectedMatches: 2
});
- // - One aggregation on shard0001 with no stale config exception.
+ // - One aggregation on st.shard1.shardName with no stale config exception.
profilerHasSingleMatchingEntryOrThrow({
profileDB: shard1DB,
filter: {
@@ -245,7 +253,8 @@
}
});
- // - One $mergeCursors aggregation on primary shard0000, since we eventually target both
+ // - One $mergeCursors aggregation on primary st.shard0.shardName, since we eventually
+ // target both
// shards after backing out the passthrough and splitting the pipeline.
profilerHasSingleMatchingEntryOrThrow({
profileDB: primaryShardDB,
@@ -259,9 +268,10 @@
// Test that a split pipeline will back out and reassemble the pipeline if we target
// multiple shards, get a stale config exception, and find that we can now target a single
// shard.
- // Move the _id: [-100, 0) chunk back from shard0001 to shard0000 via mongosForMove.
+ // Move the _id: [-100, 0) chunk back from st.shard1.shardName to st.shard0.shardName via
+ // mongosForMove.
assert.commandWorked(mongosForMove.getDB("admin").runCommand(
- {moveChunk: mongosColl.getFullName(), find: {_id: -50}, to: "shard0000"}));
+ {moveChunk: mongosColl.getFullName(), find: {_id: -50}, to: st.shard0.shardName}));
// Run the same aggregation via the now-stale mongoS. It should split the pipeline, hit a
// stale config exception, and reset to the original single-shard pipeline upon refresh. We
@@ -277,15 +287,17 @@
2);
// Before the first dispatch:
- // - mongosForMove and shard0001 (the donor shard) are up to date.
- // - mongosForAgg and shard0000 are stale. mongosForAgg incorrectly believes that the
+ // - mongosForMove and st.shard1.shardName (the donor shard) are up to date.
+ // - mongosForAgg and st.shard0.shardName are stale. mongosForAgg incorrectly believes that
+ // the
// necessary data is spread across both shards.
// We therefore expect that:
// - mongosForAgg will throw a stale config error when it attempts to establish a cursor on
- // shard0001 (attempt 1).
+ // st.shard1.shardName (attempt 1).
// - mongosForAgg will back out, refresh itself, coalesce the split pipeline into a single
- // pipeline and redispatch to shard0000.
- // - shard0000 will throw a stale config and refresh itself when the pipeline is sent to it
+ // pipeline and redispatch to st.shard0.shardName.
+ // - st.shard0.shardName will throw a stale config and refresh itself when the pipeline is
+ // sent to it
// (attempt 2).
// - mongosForAgg will back out, retain the single-shard pipeline and redispatch (attempt
// 3).
@@ -293,7 +305,8 @@
// We confirm this behaviour via the following profiler results:
- // - One aggregation on shard0001 with a shard version exception (indicating that the mongoS
+ // - One aggregation on st.shard1.shardName with a shard version exception (indicating that
+ // the mongoS
// was stale).
profilerHasSingleMatchingEntryOrThrow({
profileDB: shard1DB,
@@ -305,7 +318,8 @@
}
});
- // - One aggregation on shard0000 with a shard version exception (indicating that the shard
+ // - One aggregation on st.shard0.shardName with a shard version exception (indicating that
+ // the shard
// was stale).
profilerHasSingleMatchingEntryOrThrow({
profileDB: shard0DB,
@@ -317,8 +331,10 @@
}
});
- // - At most two aggregations on shard0000 with no stale config exceptions. The first, if
- // present, is an aborted cursor created if the command reaches shard0000 before shard0001
+ // - At most two aggregations on st.shard0.shardName with no stale config exceptions. The
+ // first, if
+ // present, is an aborted cursor created if the command reaches st.shard0.shardName before
+ // st.shard1.shardName
// throws its stale config exception during attempt 1. The second profiler entry is the
// aggregation which succeeded.
profilerHasAtLeastOneAtMostNumMatchingEntriesOrThrow({
@@ -332,8 +348,9 @@
maxExpectedMatches: 2
});
- // No $mergeCursors aggregation on primary shard0000, since after backing out the split
- // pipeline we eventually target only shard0000.
+ // No $mergeCursors aggregation on primary st.shard0.shardName, since after backing out the
+ // split
+ // pipeline we eventually target only st.shard0.shardName.
profilerHasZeroMatchingEntriesOrThrow({
profileDB: primaryShardDB,
filter: {
diff --git a/jstests/aggregation/sharded_agg_cleanup_on_error.js b/jstests/aggregation/sharded_agg_cleanup_on_error.js
index f8b3aad263d..2403a8e1d01 100644
--- a/jstests/aggregation/sharded_agg_cleanup_on_error.js
+++ b/jstests/aggregation/sharded_agg_cleanup_on_error.js
@@ -13,7 +13,8 @@
const kFailPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
const kFailpointOptions = {shouldCheckForInterrupt: true};
- const st = new ShardingTest({shards: 2});
+ // TODO: SERVER-33444 remove shardAsReplicaSet: false
+ const st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
const kDBName = "test";
const kDivideByZeroErrCode = 16608;
const mongosDB = st.s.getDB(kDBName);
diff --git a/jstests/aggregation/sources/collStats/shard_host_info.js b/jstests/aggregation/sources/collStats/shard_host_info.js
index 2d504d6a9b5..6151ec302fa 100644
--- a/jstests/aggregation/sources/collStats/shard_host_info.js
+++ b/jstests/aggregation/sources/collStats/shard_host_info.js
@@ -47,8 +47,8 @@
])
.toArray(),
[
- {_id: {shard: "shard0000", host: st.shard0.host}},
- {_id: {shard: "shard0001", host: st.shard1.host}},
+ {_id: {shard: st.shard0.shardName, host: st.rs0.getPrimary().host}},
+ {_id: {shard: st.shard1.shardName, host: st.rs1.getPrimary().host}},
]);
st.stop();
diff --git a/jstests/aggregation/sources/facet/use_cases.js b/jstests/aggregation/sources/facet/use_cases.js
index 465daa4b5ee..1295f638910 100644
--- a/jstests/aggregation/sources/facet/use_cases.js
+++ b/jstests/aggregation/sources/facet/use_cases.js
@@ -166,9 +166,10 @@
// Make sure there is a chunk on each shard, so that our aggregations are targeted to multiple
// shards.
assert.commandWorked(st.admin.runCommand({split: testNs, middle: {_id: nDocs / 2}}));
- assert.commandWorked(st.admin.runCommand({moveChunk: testNs, find: {_id: 0}, to: "shard0000"}));
assert.commandWorked(
- st.admin.runCommand({moveChunk: testNs, find: {_id: nDocs - 1}, to: "shard0001"}));
+ st.admin.runCommand({moveChunk: testNs, find: {_id: 0}, to: st.shard0.shardName}));
+ assert.commandWorked(
+ st.admin.runCommand({moveChunk: testNs, find: {_id: nDocs - 1}, to: st.shard1.shardName}));
doExecutionTest(st.s0);
diff --git a/jstests/aggregation/sources/graphLookup/sharded.js b/jstests/aggregation/sources/graphLookup/sharded.js
index b2b1b496ac6..b78649d5824 100644
--- a/jstests/aggregation/sources/graphLookup/sharded.js
+++ b/jstests/aggregation/sources/graphLookup/sharded.js
@@ -9,7 +9,7 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
var st = new ShardingTest({name: "aggregation_graph_lookup", shards: 2, mongos: 1}); st.adminCommand({enableSharding: "graphLookup"}); - st.ensurePrimaryShard("graphLookup", "shard0001"); + st.ensurePrimaryShard("graphLookup", st.shard1.shardName); st.adminCommand({shardCollection: "graphLookup.local", key: {_id: 1}}); var foreign = st.getDB("graphLookup").foreign; diff --git a/jstests/aggregation/testshard1.js b/jstests/aggregation/testshard1.js index 66737a12f6c..6ecd68bfc27 100644 --- a/jstests/aggregation/testshard1.js +++ b/jstests/aggregation/testshard1.js @@ -10,7 +10,7 @@ jsTestLog("Setting up sharded cluster"); shardedAggTest.adminCommand({enablesharding: "aggShard"}); db = shardedAggTest.getDB("aggShard"); assert.commandWorked(db.adminCommand({setParameter: 1, logComponentVerbosity: {network: 0}})); -shardedAggTest.ensurePrimaryShard('aggShard', 'shard0000'); +shardedAggTest.ensurePrimaryShard('aggShard', shardedAggTest.shard0.shardName); /* make sure its cleaned up */ db.ts1.drop(); diff --git a/jstests/auth/access_control_with_unreachable_configs.js b/jstests/auth/access_control_with_unreachable_configs.js index 644fd4c63cb..65b0f212242 100644 --- a/jstests/auth/access_control_with_unreachable_configs.js +++ b/jstests/auth/access_control_with_unreachable_configs.js @@ -4,13 +4,20 @@ // there are. var dopts = {smallfiles: "", nopreallocj: ""}; + +// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed. var st = new ShardingTest({ shards: 1, mongos: 1, config: 1, keyFile: 'jstests/libs/key1', useHostname: false, // Needed when relying on the localhost exception - other: {shardOptions: dopts, configOptions: dopts, mongosOptions: {verbose: 1}} + other: { + shardOptions: dopts, + configOptions: dopts, + mongosOptions: {verbose: 1}, + shardAsReplicaSet: false + } }); var mongos = st.s; var config = st.config0; diff --git a/jstests/auth/auth_mechanism_discovery.js b/jstests/auth/auth_mechanism_discovery.js index e613adff8c3..4c76049ce53 100644 --- a/jstests/auth/auth_mechanism_discovery.js +++ b/jstests/auth/auth_mechanism_discovery.js @@ -36,8 +36,13 @@ MongoRunner.stopMongod(m); // Test sharded. - const st = - new ShardingTest({shards: 1, mongos: 1, config: 1, other: {keyFile: 'jstests/libs/key1'}}); + // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed. + const st = new ShardingTest({ + shards: 1, + mongos: 1, + config: 1, + other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false} + }); runTest(st.s0); st.stop(); })(); diff --git a/jstests/auth/authentication_restrictions.js b/jstests/auth/authentication_restrictions.js index decd671c532..ad12fd33f35 100644 --- a/jstests/auth/authentication_restrictions.js +++ b/jstests/auth/authentication_restrictions.js @@ -200,6 +200,7 @@ rst.stopSet(); print("Testing sharded cluster"); + // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed. 
var st = new ShardingTest({
mongos: 2,
config: 3,
@@ -208,7 +209,8 @@
other: {
mongosOptions: {bind_ip_all: "", auth: null},
configOptions: {auth: null},
- shardOptions: {auth: null}
+ shardOptions: {auth: null},
+ shardAsReplicaSet: false
}
});
testConnection(st.s0,
diff --git a/jstests/auth/authentication_restrictions_role.js b/jstests/auth/authentication_restrictions_role.js
index 8e28756c398..41e3a133cab 100644
--- a/jstests/auth/authentication_restrictions_role.js
+++ b/jstests/auth/authentication_restrictions_role.js
@@ -398,6 +398,7 @@
rst.stopSet();
print("Testing sharded cluster");
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
var st = new ShardingTest({
mongos: 2,
config: 3,
@@ -406,7 +407,8 @@
other: {
mongosOptions: {bind_ip_all: "", auth: null},
configOptions: {auth: null},
- shardOptions: {auth: null}
+ shardOptions: {auth: null},
+ shardAsReplicaSet: false
}
});
testRestrictionCreationAndEnforcement(
diff --git a/jstests/auth/authz_modifications_access_control.js b/jstests/auth/authz_modifications_access_control.js
index 483a54be4f8..874135a2d3e 100644
--- a/jstests/auth/authz_modifications_access_control.js
+++ b/jstests/auth/authz_modifications_access_control.js
@@ -299,6 +299,8 @@
runTest(conn);
MongoRunner.stopMongod(conn);
jsTest.log('Test sharding');
-var st = new ShardingTest({shards: 2, config: 3, keyFile: 'jstests/libs/key1'});
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest(
+ {shards: 2, config: 3, keyFile: 'jstests/libs/key1', other: {shardAsReplicaSet: false}});
runTest(st.s);
st.stop();
diff --git a/jstests/auth/basic_role_auth.js b/jstests/auth/basic_role_auth.js
index fc4e39089ba..3203fec9cac 100644
--- a/jstests/auth/basic_role_auth.js
+++ b/jstests/auth/basic_role_auth.js
@@ -512,7 +512,9 @@
runTests(conn);
MongoRunner.stopMongod(conn);
jsTest.log('Test sharding');
-var st = new ShardingTest({shards: 1, keyFile: 'jstests/libs/key1'});
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st =
+ new ShardingTest({shards: 1, keyFile: 'jstests/libs/key1', other: {shardAsReplicaSet: false}});
runTests(st.s);
st.stop();
diff --git a/jstests/auth/clac_system_colls.js b/jstests/auth/clac_system_colls.js
index 3ac67e28291..b62a9df3e5a 100644
--- a/jstests/auth/clac_system_colls.js
+++ b/jstests/auth/clac_system_colls.js
@@ -60,6 +60,8 @@
runTest(conn.getDB("admin"));
MongoRunner.stopMongod(conn);
jsTest.log('Test sharding');
-var st = new ShardingTest({shards: 2, config: 3, keyFile: 'jstests/libs/key1'});
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest(
+ {shards: 2, config: 3, keyFile: 'jstests/libs/key1', other: {shardAsReplicaSet: false}});
runTest(st.s.getDB("admin"));
st.stop();
diff --git a/jstests/auth/commands_builtin_roles.js b/jstests/auth/commands_builtin_roles.js
index 14da86a505b..e0ac4786d10 100644
--- a/jstests/auth/commands_builtin_roles.js
+++ b/jstests/auth/commands_builtin_roles.js
@@ -153,7 +153,12 @@
authCommandsLib.runTests(conn, impls);
MongoRunner.stopMongod(conn);
// run all tests sharded
-conn = new ShardingTest(
- {shards: 2, mongos: 1, keyFile: "jstests/libs/key1", other: {shardOptions: opts}});
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+conn = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ keyFile: "jstests/libs/key1",
+ other: {shardOptions: opts, shardAsReplicaSet: false}
+});
authCommandsLib.runTests(conn, impls);
conn.stop();
diff --git a/jstests/auth/commands_user_defined_roles.js b/jstests/auth/commands_user_defined_roles.js
index 3c8de27192f..739f4eb7a4f 100644
--- a/jstests/auth/commands_user_defined_roles.js
+++ b/jstests/auth/commands_user_defined_roles.js
@@ -204,7 +204,12 @@
authCommandsLib.runTests(conn, impls);
MongoRunner.stopMongod(conn);
// run all tests sharded
-conn = new ShardingTest(
- {shards: 2, mongos: 1, keyFile: "jstests/libs/key1", other: {shardOptions: opts}});
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+conn = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ keyFile: "jstests/libs/key1",
+ other: {shardOptions: opts, shardAsReplicaSet: false}
+});
authCommandsLib.runTests(conn, impls);
conn.stop();
diff --git a/jstests/auth/getMore.js b/jstests/auth/getMore.js
index 569376740fb..ae6c58a8578 100644
--- a/jstests/auth/getMore.js
+++ b/jstests/auth/getMore.js
@@ -260,8 +260,13 @@
MongoRunner.stopMongod(conn);
// Run the test on a sharded cluster.
- let cluster = new ShardingTest(
- {shards: 1, mongos: 1, keyFile: "jstests/libs/key1", other: {shardOptions: {auth: ""}}});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ let cluster = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ keyFile: "jstests/libs/key1",
+ other: {shardOptions: {auth: ""}, shardAsReplicaSet: false}
+ });
runTest(cluster);
cluster.stop();
}());
diff --git a/jstests/auth/kill_cursors.js b/jstests/auth/kill_cursors.js
index 146f3253282..d8a9173c321 100644
--- a/jstests/auth/kill_cursors.js
+++ b/jstests/auth/kill_cursors.js
@@ -147,8 +147,13 @@
runTest(mongod);
MongoRunner.stopMongod(mongod);
- const st =
- new ShardingTest({shards: 1, mongos: 1, config: 1, other: {keyFile: 'jstests/libs/key1'}});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ const st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+ });
runTest(st.s0);
st.stop();
})();
diff --git a/jstests/auth/list_all_local_sessions.js b/jstests/auth/list_all_local_sessions.js
index 05914b2c2e6..a5c46d9c257 100644
--- a/jstests/auth/list_all_local_sessions.js
+++ b/jstests/auth/list_all_local_sessions.js
@@ -44,8 +44,13 @@
runListAllLocalSessionsTest(mongod);
MongoRunner.stopMongod(mongod);
- const st =
- new ShardingTest({shards: 1, mongos: 1, config: 1, other: {keyFile: 'jstests/libs/key1'}});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ const st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+ });
runListAllLocalSessionsTest(st.s0);
st.stop();
})();
diff --git a/jstests/auth/list_all_sessions.js b/jstests/auth/list_all_sessions.js
index 763178e9ac1..f3552006b58 100644
--- a/jstests/auth/list_all_sessions.js
+++ b/jstests/auth/list_all_sessions.js
@@ -51,8 +51,13 @@
runListAllSessionsTest(mongod);
MongoRunner.stopMongod(mongod);
- const st =
- new ShardingTest({shards: 1, mongos: 1, config: 1, other: {keyFile: 'jstests/libs/key1'}});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ const st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+ });
// Ensure that the sessions collection exists.
st.c0.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1}); diff --git a/jstests/auth/list_local_sessions.js b/jstests/auth/list_local_sessions.js index 40cc66d991e..4693d694666 100644 --- a/jstests/auth/list_local_sessions.js +++ b/jstests/auth/list_local_sessions.js @@ -60,8 +60,13 @@ runListLocalSessionsTest(mongod); MongoRunner.stopMongod(mongod); - const st = - new ShardingTest({shards: 1, mongos: 1, config: 1, other: {keyFile: 'jstests/libs/key1'}}); + // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed. + const st = new ShardingTest({ + shards: 1, + mongos: 1, + config: 1, + other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false} + }); runListLocalSessionsTest(st.s0); st.stop(); })(); diff --git a/jstests/auth/list_sessions.js b/jstests/auth/list_sessions.js index b130999a3ef..aa3eb0e627c 100644 --- a/jstests/auth/list_sessions.js +++ b/jstests/auth/list_sessions.js @@ -69,8 +69,13 @@ runListSessionsTest(mongod); MongoRunner.stopMongod(mongod); - const st = - new ShardingTest({shards: 1, mongos: 1, config: 1, other: {keyFile: 'jstests/libs/key1'}}); + // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed. + const st = new ShardingTest({ + shards: 1, + mongos: 1, + config: 1, + other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false} + }); runListSessionsTest(st.s0); st.stop(); })(); diff --git a/jstests/auth/mongos_cache_invalidation.js b/jstests/auth/mongos_cache_invalidation.js index 40b4caeb6b3..14571b7e3fc 100644 --- a/jstests/auth/mongos_cache_invalidation.js +++ b/jstests/auth/mongos_cache_invalidation.js @@ -9,6 +9,7 @@ var hasAuthzError = function(result) { assert.eq(authzErrorCode, result.code); }; +// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed. var st = new ShardingTest({ shards: 2, config: 3, @@ -17,7 +18,8 @@ var st = new ShardingTest({ {setParameter: "userCacheInvalidationIntervalSecs=5"}, {setParameter: "userCacheInvalidationIntervalSecs=600"} ], - keyFile: 'jstests/libs/key1' + keyFile: 'jstests/libs/key1', + other: {shardAsReplicaSet: false} }); st.s1.getDB('admin').createUser({user: 'root', pwd: 'pwd', roles: ['root']}); diff --git a/jstests/auth/pseudo_commands.js b/jstests/auth/pseudo_commands.js index 921c1c3f548..42f1996aa54 100644 --- a/jstests/auth/pseudo_commands.js +++ b/jstests/auth/pseudo_commands.js @@ -190,6 +190,8 @@ runTest(conn); MongoRunner.stopMongod(conn); jsTest.log('Test sharding'); -var st = new ShardingTest({shards: 2, config: 3, keyFile: 'jstests/libs/key1'}); +// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed. +var st = new ShardingTest( + {shards: 2, config: 3, keyFile: 'jstests/libs/key1', other: {shardAsReplicaSet: false}}); runTest(st.s); st.stop(); diff --git a/jstests/auth/resource_pattern_matching.js b/jstests/auth/resource_pattern_matching.js index 8173bb0e1a6..0fd30650982 100644 --- a/jstests/auth/resource_pattern_matching.js +++ b/jstests/auth/resource_pattern_matching.js @@ -242,6 +242,7 @@ rst.stopSet(); print('--- done with the rs tests ---'); print('--- sharding test ---'); +// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed. 
var st = new ShardingTest({
mongos: 2,
shard: 1,
@@ -249,7 +250,8 @@ var st = new ShardingTest({
other: {
mongosOptions: {'auth': null},
configOptions: {'auth': null},
- shardOptions: {'auth': null}
+ shardOptions: {'auth': null},
+ shardAsReplicaSet: false
}
});
run_tests(st.s0.getDB('admin'), st.s1.getDB('admin'));
diff --git a/jstests/auth/role_management_commands_edge_cases.js b/jstests/auth/role_management_commands_edge_cases.js
index 8adb7571692..390c90cc505 100644
--- a/jstests/auth/role_management_commands_edge_cases.js
+++ b/jstests/auth/role_management_commands_edge_cases.js
@@ -379,6 +379,8 @@
runTest(conn);
MongoRunner.stopMongod(conn);
jsTest.log('Test sharding');
-var st = new ShardingTest({shards: 2, config: 3, keyFile: 'jstests/libs/key1'});
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest(
+ {shards: 2, config: 3, keyFile: 'jstests/libs/key1', other: {shardAsReplicaSet: false}});
runTest(st.s);
st.stop();
diff --git a/jstests/auth/role_management_commands_sharded_wc_1.js b/jstests/auth/role_management_commands_sharded_wc_1.js
index 78ce948802b..417bad0fadf 100644
--- a/jstests/auth/role_management_commands_sharded_wc_1.js
+++ b/jstests/auth/role_management_commands_sharded_wc_1.js
@@ -3,8 +3,14 @@
load('jstests/auth/role_management_commands_lib.js');
- var st =
- new ShardingTest({shards: 2, config: 3, keyFile: 'jstests/libs/key1', useHostname: false});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ var st = new ShardingTest({
+ shards: 2,
+ config: 3,
+ keyFile: 'jstests/libs/key1',
+ useHostname: false,
+ other: {shardAsReplicaSet: false}
+ });
runAllRoleManagementCommandsTests(st.s, {w: 1});
st.stop();
})();
diff --git a/jstests/auth/role_management_commands_sharded_wc_majority.js b/jstests/auth/role_management_commands_sharded_wc_majority.js
index 19aa8e2c37e..ba44cf49dc3 100644
--- a/jstests/auth/role_management_commands_sharded_wc_majority.js
+++ b/jstests/auth/role_management_commands_sharded_wc_majority.js
@@ -3,8 +3,14 @@
load('jstests/auth/role_management_commands_lib.js');
- var st =
- new ShardingTest({shards: 2, config: 3, keyFile: 'jstests/libs/key1', useHostname: false});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ var st = new ShardingTest({
+ shards: 2,
+ config: 3,
+ keyFile: 'jstests/libs/key1',
+ useHostname: false,
+ other: {shardAsReplicaSet: false}
+ });
runAllRoleManagementCommandsTests(st.s, {w: 'majority', wtimeout: 60 * 1000});
st.stop();
})();
diff --git a/jstests/auth/user_defined_roles.js b/jstests/auth/user_defined_roles.js
index 017770cbad6..ffdca820d98 100644
--- a/jstests/auth/user_defined_roles.js
+++ b/jstests/auth/user_defined_roles.js
@@ -145,6 +145,8 @@
runTest(conn);
MongoRunner.stopMongod(conn);
jsTest.log('Test sharding');
-var st = new ShardingTest({shards: 2, config: 3, keyFile: 'jstests/libs/key1'});
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest(
+ {shards: 2, config: 3, keyFile: 'jstests/libs/key1', other: {shardAsReplicaSet: false}});
runTest(st.s);
st.stop();
diff --git a/jstests/auth/user_management_commands_edge_cases.js b/jstests/auth/user_management_commands_edge_cases.js
index 704043e24b3..5c0104226b7 100644
--- a/jstests/auth/user_management_commands_edge_cases.js
+++ b/jstests/auth/user_management_commands_edge_cases.js
@@ -282,6 +282,8 @@
runTest(conn);
MongoRunner.stopMongod(conn);
jsTest.log('Test sharding');
-var st = new ShardingTest({shards: 2, config: 3, keyFile: 'jstests/libs/key1'});
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+var st = new ShardingTest(
+ {shards: 2, config: 3, keyFile: 'jstests/libs/key1', other: {shardAsReplicaSet: false}});
runTest(st.s);
st.stop();
diff --git a/jstests/auth/user_management_commands_sharded_wc_1.js b/jstests/auth/user_management_commands_sharded_wc_1.js
index ff5bc0cfc43..6f1922d67f6 100644
--- a/jstests/auth/user_management_commands_sharded_wc_1.js
+++ b/jstests/auth/user_management_commands_sharded_wc_1.js
@@ -3,7 +3,9 @@
load('jstests/auth/user_management_commands_lib.js');
- var st = new ShardingTest({shards: 2, config: 3, keyFile: 'jstests/libs/key1'});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ var st = new ShardingTest(
+ {shards: 2, config: 3, keyFile: 'jstests/libs/key1', other: {shardAsReplicaSet: false}});
runAllUserManagementCommandsTests(st.s, {w: 1});
st.stop();
})();
diff --git a/jstests/auth/user_management_commands_sharded_wc_majority.js b/jstests/auth/user_management_commands_sharded_wc_majority.js
index a18bc70e96a..b7a9e04b22a 100644
--- a/jstests/auth/user_management_commands_sharded_wc_majority.js
+++ b/jstests/auth/user_management_commands_sharded_wc_majority.js
@@ -3,7 +3,9 @@
load('jstests/auth/user_management_commands_lib.js');
- var st = new ShardingTest({shards: 2, config: 3, keyFile: 'jstests/libs/key1'});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ var st = new ShardingTest(
+ {shards: 2, config: 3, keyFile: 'jstests/libs/key1', other: {shardAsReplicaSet: false}});
runAllUserManagementCommandsTests(st.s, {w: 'majority', wtimeout: 60 * 1000});
st.stop();
})();
diff --git a/jstests/auth/views_authz.js b/jstests/auth/views_authz.js
index 4134a983ce9..68c28ec03ea 100644
--- a/jstests/auth/views_authz.js
+++ b/jstests/auth/views_authz.js
@@ -142,8 +142,13 @@
MongoRunner.stopMongod(mongod);
// Run the test on a sharded cluster.
- let cluster = new ShardingTest(
- {shards: 1, mongos: 1, keyFile: "jstests/libs/key1", other: {shardOptions: {auth: ""}}});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ let cluster = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ keyFile: "jstests/libs/key1",
+ other: {shardOptions: {auth: ""}, shardAsReplicaSet: false}
+ });
runTest(cluster);
cluster.stop();
}());
diff --git a/jstests/gle/gle_sharded_write.js b/jstests/gle/gle_sharded_write.js
index f8789869164..159982fc575 100644
--- a/jstests/gle/gle_sharded_write.js
+++ b/jstests/gle/gle_sharded_write.js
@@ -155,7 +155,8 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
coll.insert([{_id: 1}, {_id: -1}]);
// Wait for write to be written to shards before shutting it down.
printjson(gle = coll.getDB().runCommand({getLastError: 1}));
- MongoRunner.stopMongod(st.shard0);
+ st.rs0.stopSet();
+
printjson(gle = coll.getDB().runCommand({getLastError: 1}));
// Should get an error about contacting dead host.
assert(!gle.ok);
diff --git a/jstests/libs/chunk_manipulation_util.js b/jstests/libs/chunk_manipulation_util.js
index ba69691ac2f..399874271ce 100644
--- a/jstests/libs/chunk_manipulation_util.js
+++ b/jstests/libs/chunk_manipulation_util.js
@@ -14,7 +14,7 @@ load('./jstests/libs/test_background_ops.js');
// shard key values of a chunk to move. Specify either the
// bounds field or the find field but not both.
// ns: Like 'dbName.collectionName'.
-// toShardId: Like 'shard0001'.
+// toShardId: Like st.shard1.shardName.
//
// Returns a join function; call it to wait for moveChunk to complete.
//
diff --git a/jstests/multiVersion/2_test_launching_cluster.js b/jstests/multiVersion/2_test_launching_cluster.js
index 14ec9149172..f26d3e78ac0 100644
--- a/jstests/multiVersion/2_test_launching_cluster.js
+++ b/jstests/multiVersion/2_test_launching_cluster.js
@@ -29,7 +29,8 @@ load('./jstests/multiVersion/libs/verify_versions.js');
mongosOptions: {binVersion: versionsToCheckMongos},
configOptions: {binVersion: versionsToCheckConfig},
shardOptions: {binVersion: versionsToCheck},
- enableBalancer: true
+ enableBalancer: true,
+ shardAsReplicaSet: false
}
});
diff --git a/jstests/multiVersion/balancer_multiVersion_detect.js b/jstests/multiVersion/balancer_multiVersion_detect.js
index 062eb6abed7..b6d8bd4bed6 100644
--- a/jstests/multiVersion/balancer_multiVersion_detect.js
+++ b/jstests/multiVersion/balancer_multiVersion_detect.js
@@ -8,7 +8,8 @@ var options = {
mongosOptions: {verbose: 1, useLogFiles: true},
configOptions: {},
shardOptions: {binVersion: ["latest", "last-stable"]},
- enableBalancer: true
+ enableBalancer: true,
+ other: {shardAsReplicaSet: false}
};
var st = new ShardingTest({shards: 3, mongos: 1, other: options});
@@ -18,7 +19,7 @@ var admin = mongos.getDB("admin");
var coll = mongos.getCollection("foo.bar");
printjson(admin.runCommand({enableSharding: coll.getDB() + ""}));
-st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
+st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
printjson(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
assert.soon(function() {
diff --git a/jstests/multiVersion/migration_between_mixed_FCV_mixed_version_mongods.js b/jstests/multiVersion/migration_between_mixed_FCV_mixed_version_mongods.js
index f2e5ef33ffe..fe78152e548 100644
--- a/jstests/multiVersion/migration_between_mixed_FCV_mixed_version_mongods.js
+++ b/jstests/multiVersion/migration_between_mixed_FCV_mixed_version_mongods.js
@@ -10,7 +10,8 @@
let st = new ShardingTest({
shards: [{binVersion: "latest"}, {binVersion: "last-stable"}],
- mongos: {binVersion: "latest"}
+ mongos: {binVersion: "latest"},
+ other: {shardAsReplicaSet: false},
});
let testDB = st.s.getDB("test");
diff --git a/jstests/multiVersion/migration_between_mixed_version_mongods.js b/jstests/multiVersion/migration_between_mixed_version_mongods.js
index 419310e9478..a2539d87ddd 100644
--- a/jstests/multiVersion/migration_between_mixed_version_mongods.js
+++ b/jstests/multiVersion/migration_between_mixed_version_mongods.js
@@ -20,7 +20,7 @@ load("./jstests/multiVersion/libs/verify_versions.js");
{binVersion: "latest"}
],
mongos: 1,
- other: {mongosOptions: {binVersion: "last-stable"}}
+ other: {mongosOptions: {binVersion: "last-stable"}, shardAsReplicaSet: false}
};
var st = new ShardingTest(options);
diff --git a/jstests/multiVersion/new_mongos_old_mongod_wire_version_clash.js b/jstests/multiVersion/new_mongos_old_mongod_wire_version_clash.js
index 3c8c3806d9c..9bd702323f4 100644
--- a/jstests/multiVersion/new_mongos_old_mongod_wire_version_clash.js
+++ b/jstests/multiVersion/new_mongos_old_mongod_wire_version_clash.js
@@ -26,6 +26,7 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
other: {
mongosOptions: {binVersion: 'last-stable'},
shardOptions: {binVersion: 'last-stable'},
+ shardAsReplicaSet: false
}
});
diff --git a/jstests/noPassthrough/auth_reject_mismatching_logical_times.js b/jstests/noPassthrough/auth_reject_mismatching_logical_times.js
index 64d7c13726b..804251c63a2 100644
--- a/jstests/noPassthrough/auth_reject_mismatching_logical_times.js
+++ b/jstests/noPassthrough/auth_reject_mismatching_logical_times.js
@@ -30,11 +30,12 @@
}
// Start the sharding test with auth on.
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
const st = new ShardingTest({
mongos: 1,
manualAddShard: true,
mongosWaitsForKeys: true,
- other: {keyFile: "jstests/libs/key1"}
+ other: {keyFile: "jstests/libs/key1", shardAsReplicaSet: false}
});
// Create admin user and authenticate as them.
diff --git a/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js b/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js
index 04eadff0242..008a208734a 100644
--- a/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js
+++ b/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js
@@ -2,7 +2,9 @@
// getMore will leave the cursor unaffected, so that a subsequent getMore by the original author
// will work.
(function() {
- const st = new ShardingTest({shards: 2, config: 1, other: {keyFile: "jstests/libs/key1"}});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ const st = new ShardingTest(
+ {shards: 2, config: 1, other: {keyFile: "jstests/libs/key1", shardAsReplicaSet: false}});
const kDBName = "test";
const adminDB = st.s.getDB('admin');
const testDB = st.s.getDB(kDBName);
diff --git a/jstests/noPassthrough/logical_session_cursor_checks.js b/jstests/noPassthrough/logical_session_cursor_checks.js
index d88fdc5f7f1..efd4f6cf815 100644
--- a/jstests/noPassthrough/logical_session_cursor_checks.js
+++ b/jstests/noPassthrough/logical_session_cursor_checks.js
@@ -74,8 +74,13 @@
};
function Sharding() {
- this.st = new ShardingTest(
- {shards: 1, config: 1, mongos: 1, other: {keyFile: 'jstests/libs/key1'}});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ this.st = new ShardingTest({
+ shards: 1,
+ config: 1,
+ mongos: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+ });
}
Sharding.prototype.stop = function() {
diff --git a/jstests/noPassthrough/unsupported_change_stream_deployments.js b/jstests/noPassthrough/unsupported_change_stream_deployments.js
index ab739ba064e..9ae6cfb44ef 100644
--- a/jstests/noPassthrough/unsupported_change_stream_deployments.js
+++ b/jstests/noPassthrough/unsupported_change_stream_deployments.js
@@ -28,8 +28,13 @@
assert.eq(0, MongoRunner.stopMongod(conn));
// Test a sharded cluster with standalone shards.
- const clusterWithStandalones = new ShardingTest(
- {shards: 2, other: {shardOptions: {enableMajorityReadConcern: ""}}, config: 1});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ const clusterWithStandalones = new ShardingTest({
+ shards: 2,
+ other: {shardOptions: {enableMajorityReadConcern: ""}},
+ config: 1,
+ shardAsReplicaSet: false
+ });
// Make sure the database exists before running any commands.
const mongosDB = clusterWithStandalones.getDB("test");
// enableSharding will create the db at the cluster level but not on the shards. $changeStream
diff --git a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
index 970eae90591..46838744251 100644
--- a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
+++ b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
@@ -3,7 +3,8 @@
// @tags: [requires_sharding]
//
-var st = new ShardingTest({shards: 2, mongos: 1});
+// TODO: SERVER-33444 remove shardAsReplicaSet: false
+var st = new ShardingTest({shards: 2, mongos: 1, other: {shardAsReplicaSet: false}});
var mongos = st.s0;
var coll = mongos.getCollection("foo.bar");
diff --git a/jstests/noPassthroughWithMongod/ttl_sharded.js b/jstests/noPassthroughWithMongod/ttl_sharded.js
index ccbca19d098..197832f6933 100644
--- a/jstests/noPassthroughWithMongod/ttl_sharded.js
+++ b/jstests/noPassthroughWithMongod/ttl_sharded.js
@@ -17,7 +17,7 @@ t = s.getDB(dbname).getCollection(coll);
// enable sharding of the collection. Only 1 chunk initially
s.adminCommand({enablesharding: dbname});
-s.ensurePrimaryShard(dbname, 'shard0001');
+s.ensurePrimaryShard(dbname, s.shard1.shardName);
s.adminCommand({shardcollection: ns, key: {_id: 1}});
// insert 24 docs, with timestamps at one hour intervals
diff --git a/jstests/readonly/lib/read_only_test.js b/jstests/readonly/lib/read_only_test.js
index 434a2b694a9..ab9db5e14cf 100644
--- a/jstests/readonly/lib/read_only_test.js
+++ b/jstests/readonly/lib/read_only_test.js
@@ -52,7 +52,13 @@ var StandaloneFixture, ShardedFixture, runReadOnlyTest, zip2, cycleN;
};
ShardedFixture.prototype.runLoadPhase = function runLoadPhase(test) {
- this.shardingTest = new ShardingTest({nopreallocj: true, mongos: 1, shards: this.nShards});
+ // TODO: SERVER-33444 remove shardAsReplicaSet: false
+ this.shardingTest = new ShardingTest({
+ nopreallocj: true,
+ mongos: 1,
+ shards: this.nShards,
+ other: {shardAsReplicaSet: false}
+ });
this.paths = this.shardingTest.getDBPaths();
diff --git a/jstests/sharding/SERVER-7379.js b/jstests/sharding/SERVER-7379.js
index a98161f101e..ed30be53bc6 100644
--- a/jstests/sharding/SERVER-7379.js
+++ b/jstests/sharding/SERVER-7379.js
@@ -1,7 +1,7 @@
var st = new ShardingTest({shards: 2});
st.adminCommand({enablesharding: "test"});
-st.ensurePrimaryShard('test', 'shard0001');
+st.ensurePrimaryShard('test', st.shard1.shardName);
st.adminCommand(
{shardcollection: "test.offerChange", key: {"categoryId": 1, "store": 1, "_id": 1}});
diff --git a/jstests/sharding/advance_cluster_time_action_type.js b/jstests/sharding/advance_cluster_time_action_type.js
index d01357ec338..fac7f803774 100644
--- a/jstests/sharding/advance_cluster_time_action_type.js
+++ b/jstests/sharding/advance_cluster_time_action_type.js
@@ -5,8 +5,16 @@
(function() {
"use strict";
- let st = new ShardingTest(
- {mongos: 1, config: 1, shards: 1, keyFile: 'jstests/libs/key1', mongosWaitsForKeys: true});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ let st = new ShardingTest({
+ mongos: 1,
+ config: 1,
+ shards: 1,
+ keyFile: 'jstests/libs/key1',
+ mongosWaitsForKeys: true,
+ other: {shardAsReplicaSet: false}
+ });
+
let adminDB = st.s.getDB('admin');
assert.commandWorked(adminDB.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]}));
diff --git a/jstests/sharding/advance_logical_time_with_valid_signature.js b/jstests/sharding/advance_logical_time_with_valid_signature.js
index ef2f5e44e81..59e8449fac1 100644
--- a/jstests/sharding/advance_logical_time_with_valid_signature.js
+++ b/jstests/sharding/advance_logical_time_with_valid_signature.js
@@ -13,8 +13,11 @@
st._configServers.forEach(function(configSvr) {
configSvr.disconnect(st.s1);
});
- st._connections.forEach(function(conn) {
- conn.disconnect(st.s1);
+
+ st._rsObjects.forEach(function(rsNodes) {
+ rsNodes.nodes.forEach(function(conn) {
+ conn.disconnect(st.s1);
+ });
});
let connectedDB = st.s0.getDB("test");
diff --git a/jstests/sharding/agg_sort.js b/jstests/sharding/agg_sort.js
index 2f2503e63e1..2aebb8e0ded 100644
--- a/jstests/sharding/agg_sort.js
+++ b/jstests/sharding/agg_sort.js
@@ -10,7 +10,7 @@
coll.drop();
assert.commandWorked(shardingTest.s0.adminCommand({enableSharding: db.getName()}));
- shardingTest.ensurePrimaryShard(db.getName(), 'shard0001');
+ shardingTest.ensurePrimaryShard(db.getName(), shardingTest.shard1.shardName);
assert.commandWorked(
shardingTest.s0.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
diff --git a/jstests/sharding/allow_partial_results.js b/jstests/sharding/allow_partial_results.js
index 58e5aab6e06..e7bc96ea151 100644
--- a/jstests/sharding/allow_partial_results.js
+++ b/jstests/sharding/allow_partial_results.js
@@ -12,7 +12,8 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
const collName = "foo";
const ns = dbName + "." + collName;
- const st = new ShardingTest({shards: 2});
+ // TODO: SERVER-33444 remove shardAsReplicaSet: false
+ const st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}});
jsTest.log("Insert some data.");
const nDocs = 100;
diff --git a/jstests/sharding/auth2.js b/jstests/sharding/auth2.js
index 41e18aa9c80..f3ac5caf1c7 100644
--- a/jstests/sharding/auth2.js
+++ b/jstests/sharding/auth2.js
@@ -1,9 +1,15 @@
(function() {
'use strict';
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
var st = new ShardingTest({
shards: 2,
- other: {chunkSize: 1, useHostname: true, keyFile: 'jstests/libs/key1'},
+ other: {
+ chunkSize: 1,
+ useHostname: true,
+ keyFile: 'jstests/libs/key1',
+ shardAsReplicaSet: false
+ },
});
var mongos = st.s;
diff --git a/jstests/sharding/authConnectionHook.js b/jstests/sharding/authConnectionHook.js
index 24b512b6f18..6655d4d5248 100644
--- a/jstests/sharding/authConnectionHook.js
+++ b/jstests/sharding/authConnectionHook.js
@@ -16,8 +16,16 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
'use strict';
- var st = new ShardingTest(
- {shards: 2, other: {keyFile: 'jstests/libs/key1', useHostname: true, chunkSize: 1}});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ var st = new ShardingTest({
+ shards: 2,
+ other: {
+ keyFile: 'jstests/libs/key1',
+ useHostname: true,
+ chunkSize: 1,
+ shardAsReplicaSet: false
+ }
+ });
var mongos = st.s;
var adminDB = mongos.getDB('admin');
diff --git a/jstests/sharding/auth_add_shard.js b/jstests/sharding/auth_add_shard.js
index c08b69b9afa..f898f5caecc 100644
--- a/jstests/sharding/auth_add_shard.js
+++ b/jstests/sharding/auth_add_shard.js
@@ -14,7 +14,9 @@
var adminUser = {db: "admin", username: "foo", password: "bar"};
// set up a 2 shard cluster with keyfile
- var st = new ShardingTest({shards: 1, mongos: 1, other: {keyFile: 'jstests/libs/key1'}});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ var st = new ShardingTest(
+ {shards: 1, mongos: 1, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
var mongos = st.s0;
var admin = mongos.getDB("admin");
diff --git a/jstests/sharding/auth_copydb.js b/jstests/sharding/auth_copydb.js
index 35f5295d0d3..1c732546b2b 100644
--- a/jstests/sharding/auth_copydb.js
+++ b/jstests/sharding/auth_copydb.js
@@ -3,7 +3,9 @@
'use strict';
load('jstests/libs/feature_compatibility_version.js');
- var st = new ShardingTest({shards: 1, mongos: 1, other: {keyFile: 'jstests/libs/key1'}});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ var st = new ShardingTest(
+ {shards: 1, mongos: 1, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
var mongos = st.s0;
var destAdminDB = mongos.getDB('admin');
var destTestDB = mongos.getDB('test');
diff --git a/jstests/sharding/auth_no_config_primary.js b/jstests/sharding/auth_no_config_primary.js
index e06527c738b..cb71ca0ef74 100644
--- a/jstests/sharding/auth_no_config_primary.js
+++ b/jstests/sharding/auth_no_config_primary.js
@@ -14,7 +14,9 @@ TestData.skipCheckDBHashes = true;
(function() {
'use strict';
- var st = new ShardingTest({shards: 1, other: {keyFile: 'jstests/libs/key1'}});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ var st = new ShardingTest(
+ {shards: 1, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
st.s.getDB('admin').createUser({user: 'root', pwd: 'pass', roles: ['root']});
st.s.getDB('admin').auth('root', 'pass');
diff --git a/jstests/sharding/auth_sharding_cmd_metadata.js b/jstests/sharding/auth_sharding_cmd_metadata.js
index 73dea60d1a7..352c31d199c 100644
--- a/jstests/sharding/auth_sharding_cmd_metadata.js
+++ b/jstests/sharding/auth_sharding_cmd_metadata.js
@@ -5,7 +5,9 @@
"use strict";
- var st = new ShardingTest({shards: 1, other: {keyFile: 'jstests/libs/key1'}});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ var st = new ShardingTest(
+ {shards: 1, other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}});
var adminUser = {db: "admin", username: "foo", password: "bar"};
diff --git a/jstests/sharding/auth_slaveok_routing.js b/jstests/sharding/auth_slaveok_routing.js
index 51cfd97f62f..c2cb71baf38 100644
--- a/jstests/sharding/auth_slaveok_routing.js
+++ b/jstests/sharding/auth_slaveok_routing.js
@@ -39,7 +39,9 @@
}
var rsOpts = {oplogSize: 50};
- var st = new ShardingTest({shards: 1, rs: rsOpts, other: {keyFile: 'jstests/libs/key1'}});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ var st = new ShardingTest(
+ {shards: 1, rs: rsOpts, other: {keyFile: 'jstests/libs/key1', ShardAsReplicaSet: false}});
var mongos = st.s;
var replTest = st.rs0;
diff --git a/jstests/sharding/authmr.js b/jstests/sharding/authmr.js
index 82f0b40b2fc..0f444f6208d 100644
--- a/jstests/sharding/authmr.js
+++ b/jstests/sharding/authmr.js
@@ -28,8 +28,13 @@
assert.writeOK(collection.insert(obj));
}
- var cluster = new ShardingTest(
- {name: "authmr", shards: 1, mongos: 1, other: {keyFile: "jstests/libs/key1"}});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ var cluster = new ShardingTest({
+ name: "authmr",
+ shards: 1,
+ mongos: 1,
+ other: {keyFile: "jstests/libs/key1", shardAsReplicaSet: false}
+ });
// Set up the test data.
(function() {
diff --git a/jstests/sharding/authwhere.js b/jstests/sharding/authwhere.js
index 71a62c896c0..95e0b0d7b45 100644
--- a/jstests/sharding/authwhere.js
+++ b/jstests/sharding/authwhere.js
@@ -28,8 +28,13 @@
assert.writeOK(collection.insert(obj));
}
- var cluster = new ShardingTest(
- {name: "authwhere", shards: 1, mongos: 1, other: {keyFile: 'jstests/libs/key1'}});
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ var cluster = new ShardingTest({
+ name: "authwhere",
+ shards: 1,
+ mongos: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+ });
// Set up the test data.
(function() {
diff --git a/jstests/sharding/auto_rebalance_parallel.js b/jstests/sharding/auto_rebalance_parallel.js
index c7078a6898a..c27fef1fa31 100644
--- a/jstests/sharding/auto_rebalance_parallel.js
+++ b/jstests/sharding/auto_rebalance_parallel.js
@@ -26,7 +26,7 @@
assert.commandWorked(st.splitAt(collName, {Key: 20}));
assert.commandWorked(st.splitAt(collName, {Key: 30}));
- // Move two of the chunks to shard0001 so we have option to do parallel balancing
+ // Move two of the chunks to st.shard1.shardName so we have option to do parallel balancing
assert.commandWorked(st.moveChunk(collName, {Key: 20}, st.shard1.shardName));
assert.commandWorked(st.moveChunk(collName, {Key: 30}, st.shard1.shardName));
diff --git a/jstests/sharding/auto_rebalance_parallel_replica_sets.js b/jstests/sharding/auto_rebalance_parallel_replica_sets.js
index 15215f3e38f..965219c56ff 100644
--- a/jstests/sharding/auto_rebalance_parallel_replica_sets.js
+++ b/jstests/sharding/auto_rebalance_parallel_replica_sets.js
@@ -26,7 +26,7 @@
assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 20}));
assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 30}));
- // Move two of the chunks to shard0001 so we have option to do parallel balancing
+ // Move two of the chunks to st.shard1.shardName so we have option to do parallel balancing
assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 20}, st.shard1.shardName));
assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 30}, st.shard1.shardName));
diff --git a/jstests/sharding/autosplit.js b/jstests/sharding/autosplit.js
index 52cec4ac859..42128b4ede0 100644
--- a/jstests/sharding/autosplit.js
+++ b/jstests/sharding/autosplit.js
@@ -12,7 +12,7 @@
});
assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
- s.ensurePrimaryShard('test', 'shard0001');
+ s.ensurePrimaryShard('test', s.shard1.shardName);
assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}}));
var bigString = "";
diff --git a/jstests/sharding/autosplit_with_balancer.js b/jstests/sharding/autosplit_with_balancer.js
index 7130925edf2..c6bce40aa9a 100644
--- a/jstests/sharding/autosplit_with_balancer.js
+++ b/jstests/sharding/autosplit_with_balancer.js
@@ -4,7 +4,7 @@
var s = new ShardingTest({shards: 2, mongos: 2, other: {enableAutoSplit: true}});
s.adminCommand({enablesharding: "test"});
- s.ensurePrimaryShard('test', 'shard0001');
+ s.ensurePrimaryShard('test', s.shard1.shardName);
s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
var bigString = "";
diff --git a/jstests/sharding/balancer_window.js b/jstests/sharding/balancer_window.js
index b9082134483..a60654b7d89 100644
--- a/jstests/sharding/balancer_window.js
+++ b/jstests/sharding/balancer_window.js
@@ -52,7 +52,7 @@
configDB.adminCommand({split: 'test.user', middle: {_id: x}});
}
- var shard0Chunks = configDB.chunks.find({ns: 'test.user', shard: 'shard0000'}).count();
+ var shard0Chunks = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
var startDate = new Date();
var hourMinStart = new HourAndMinute(startDate.getHours(), startDate.getMinutes());
@@ -70,7 +70,8 @@
st.awaitBalancerRound();
- var shard0ChunksAfter = configDB.chunks.find({ns: 'test.user', shard: 'shard0000'}).count();
+ var shard0ChunksAfter =
+ configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
assert.eq(shard0Chunks, shard0ChunksAfter);
assert.writeOK(configDB.settings.update(
@@ -85,7 +86,7 @@
st.awaitBalancerRound();
- shard0ChunksAfter = configDB.chunks.find({ns: 'test.user', shard: 'shard0000'}).count();
+ shard0ChunksAfter = configDB.chunks.find({ns: 'test.user', shard: st.shard0.shardName}).count();
assert.neq(shard0Chunks, shard0ChunksAfter);
st.stop();
diff --git a/jstests/sharding/basic_drop_coll.js b/jstests/sharding/basic_drop_coll.js
index 1f2c1609969..4ac0105ed57 100644
--- a/jstests/sharding/basic_drop_coll.js
+++ b/jstests/sharding/basic_drop_coll.js
@@ -20,17 +20,17 @@
// Test dropping a sharded collection.
assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
- st.ensurePrimaryShard('test', 'shard0000');
+ st.ensurePrimaryShard('test', st.shard0.shardName);
st.s.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
st.s.adminCommand({split: 'test.user', middle: {_id: 0}});
assert.commandWorked(
- st.s.adminCommand({moveChunk: 'test.user', find: {_id: 0}, to: 'shard0001'}));
+ st.s.adminCommand({moveChunk: 'test.user', find: {_id: 0}, to: st.shard1.shardName}));
assert.writeOK(testDB.user.insert({_id: 10}));
assert.writeOK(testDB.user.insert({_id: -10}));
- assert.neq(null, st.d0.getDB('test').user.findOne({_id: -10}));
- assert.neq(null, st.d1.getDB('test').user.findOne({_id: 10}));
+ assert.neq(null, st.shard0.getDB('test').user.findOne({_id: -10}));
+ assert.neq(null, st.shard1.getDB('test').user.findOne({_id: 10}));
var configDB = st.s.getDB('config');
var collDoc = configDB.collections.findOne({_id: 'test.user'});
@@ -41,8 +41,8 @@
assert.commandWorked(testDB.runCommand({drop: 'user'}));
- assert.eq(null, st.d0.getDB('test').user.findOne());
- assert.eq(null, st.d1.getDB('test').user.findOne());
+ assert.eq(null, st.shard0.getDB('test').user.findOne());
+ assert.eq(null, st.shard1.getDB('test').user.findOne());
// Call drop again to verify that the command is idempotent.
assert.commandWorked(testDB.runCommand({drop: 'user'})); diff --git a/jstests/sharding/basic_sharding_params.js b/jstests/sharding/basic_sharding_params.js index 1c1fb11cb11..3e71167cdd6 100644 --- a/jstests/sharding/basic_sharding_params.js +++ b/jstests/sharding/basic_sharding_params.js @@ -6,11 +6,12 @@ 'use strict'; function shardingTestUsingObjects() { + // TODO: SERVER-33444 remove shardAsReplicaSet: false var st = new ShardingTest({ - mongos: {s0: {verbose: 6}, s1: {verbose: 5}}, config: {c0: {verbose: 4}}, - shards: {d0: {verbose: 3}, rs1: {nodes: {d0: {verbose: 2}, a1: {verbose: 1}}}} + shards: {d0: {verbose: 3}, rs1: {nodes: {d0: {verbose: 2}, a1: {verbose: 1}}}}, + other: {shardAsReplicaSet: false} }); var s0 = st.s0; @@ -42,10 +43,12 @@ } function shardingTestUsingArrays() { + // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed. var st = new ShardingTest({ mongos: [{verbose: 5}, {verbose: 4}], config: [{verbose: 3}], - shards: [{verbose: 2}, {verbose: 1}] + shards: [{verbose: 2}, {verbose: 1}], + other: {shardAsReplicaSet: false} }); var s0 = st.s0; diff --git a/jstests/sharding/basic_split.js b/jstests/sharding/basic_split.js index 0fb2a570f2b..cb86e2d34b0 100644 --- a/jstests/sharding/basic_split.js +++ b/jstests/sharding/basic_split.js @@ -41,7 +41,7 @@ // Insert documents large enough to fill up a chunk, but do it directly in the shard in order // to bypass the auto-split logic. var kiloDoc = new Array(1024).join('x'); - var testDB = st.d0.getDB('test'); + var testDB = st.rs0.getPrimary().getDB('test'); var bulk = testDB.user.initializeUnorderedBulkOp(); for (var x = -1200; x < 1200; x++) { bulk.insert({_id: x, val: kiloDoc}); diff --git a/jstests/sharding/batch_write_command_sharded.js b/jstests/sharding/batch_write_command_sharded.js index a0d4b6356f1..cb3b4cd21d6 100644 --- a/jstests/sharding/batch_write_command_sharded.js +++ b/jstests/sharding/batch_write_command_sharded.js @@ -31,7 +31,7 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true; var coll = mongos.getCollection("foo.bar"); assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()})); - st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001'); + st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName); assert.commandWorked(admin.runCommand({shardCollection: coll.toString(), key: {_id: 1}})); // diff --git a/jstests/sharding/bouncing_count.js b/jstests/sharding/bouncing_count.js index ec2451adcba..4df4a735228 100644 --- a/jstests/sharding/bouncing_count.js +++ b/jstests/sharding/bouncing_count.js @@ -8,7 +8,8 @@ (function() { 'use strict'; - var st = new ShardingTest({shards: 10, mongos: 3}); + // TODO: SERVER-33444 remove shardAsReplicaSet: false + var st = new ShardingTest({shards: 10, mongos: 3, other: {shardAsReplicaSet: false}}); var mongosA = st.s0; var mongosB = st.s1; diff --git a/jstests/sharding/bulk_insert.js b/jstests/sharding/bulk_insert.js index c0cffeeb05f..02cb8e47226 100644 --- a/jstests/sharding/bulk_insert.js +++ b/jstests/sharding/bulk_insert.js @@ -2,7 +2,8 @@ (function() { 'use strict'; - var st = new ShardingTest({shards: 2, mongos: 2}); + // TODO: SERVER-33444 remove shardAsReplicaSet: false + var st = new ShardingTest({shards: 2, mongos: 2, other: {shardAsReplicaSet: false}}); var mongos = st.s; var staleMongos = st.s1; diff --git a/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js b/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js index bc22369bd8e..abdf387faa3 100644 --- 
a/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js +++ b/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js @@ -31,7 +31,7 @@ const shard0DB = st.shard0.getDB(jsTestName()); const shard1DB = st.shard1.getDB(jsTestName()); - // Enable sharding on the test DB and ensure its primary is shard0000. + // Enable sharding on the test DB and ensure its primary is st.shard0.shardName. assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()})); st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL()); @@ -43,7 +43,7 @@ assert.commandWorked( mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}})); - // Move the [0, MaxKey] chunk to shard0001. + // Move the [0, MaxKey] chunk to st.shard1.shardName. assert.commandWorked(mongosDB.adminCommand( {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()})); diff --git a/jstests/sharding/change_stream_invalidation.js b/jstests/sharding/change_stream_invalidation.js index f4904d5182e..396db586c4f 100644 --- a/jstests/sharding/change_stream_invalidation.js +++ b/jstests/sharding/change_stream_invalidation.js @@ -26,7 +26,7 @@ assert.commandWorked(mongosDB.dropDatabase()); - // Enable sharding on the test DB and ensure its primary is shard0000. + // Enable sharding on the test DB and ensure its primary is st.shard0.shardName. assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()})); st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL()); @@ -38,7 +38,7 @@ assert.commandWorked( mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}})); - // Move the [0, MaxKey] chunk to shard0001. + // Move the [0, MaxKey] chunk to st.shard1.shardName. assert.commandWorked(mongosDB.adminCommand( {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()})); diff --git a/jstests/sharding/change_stream_read_preference.js b/jstests/sharding/change_stream_read_preference.js index 8f1008996be..d60b35a84b7 100644 --- a/jstests/sharding/change_stream_read_preference.js +++ b/jstests/sharding/change_stream_read_preference.js @@ -29,7 +29,7 @@ const mongosDB = st.s0.getDB(dbName); const mongosColl = mongosDB[jsTestName()]; - // Enable sharding on the test DB and ensure its primary is shard0000. + // Enable sharding on the test DB and ensure its primary is st.shard0.shardName. assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()})); st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL()); @@ -41,7 +41,7 @@ assert.commandWorked( mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}})); - // Move the [0, MaxKey] chunk to shard0001. + // Move the [0, MaxKey] chunk to st.shard1.shardName. assert.commandWorked(mongosDB.adminCommand( {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()})); diff --git a/jstests/sharding/change_stream_remove_shard.js b/jstests/sharding/change_stream_remove_shard.js index 1cb8678e9f1..451df54ec47 100644 --- a/jstests/sharding/change_stream_remove_shard.js +++ b/jstests/sharding/change_stream_remove_shard.js @@ -33,7 +33,7 @@ assert.commandWorked(mongosDB.dropDatabase()); - // Enable sharding on the test DB and ensure its primary is shard0000. + // Enable sharding on the test DB and ensure its primary is st.shard0.shardName. 
assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()})); st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL()); diff --git a/jstests/sharding/change_stream_update_lookup_collation.js b/jstests/sharding/change_stream_update_lookup_collation.js index 707fe59708f..0cdd59cf131 100644 --- a/jstests/sharding/change_stream_update_lookup_collation.js +++ b/jstests/sharding/change_stream_update_lookup_collation.js @@ -27,7 +27,7 @@ const mongosDB = st.s0.getDB(jsTestName()); const mongosColl = mongosDB[jsTestName()]; - // Enable sharding on the test DB and ensure its primary is shard0000. + // Enable sharding on the test DB and ensure its primary is st.shard0.shardName. assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()})); st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL()); @@ -48,7 +48,7 @@ assert.commandWorked( mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {shardKey: "aBC"}})); - // Move the [MinKey, 'aBC') chunk to shard0001. + // Move the [MinKey, 'aBC') chunk to st.shard1.shardName. assert.commandWorked(mongosDB.adminCommand( {moveChunk: mongosColl.getFullName(), find: {shardKey: "ABC"}, to: st.rs1.getURL()})); diff --git a/jstests/sharding/change_streams.js b/jstests/sharding/change_streams.js index 404b73c72ef..92547fabe62 100644 --- a/jstests/sharding/change_streams.js +++ b/jstests/sharding/change_streams.js @@ -29,7 +29,7 @@ assert.commandWorked(mongosDB.dropDatabase()); - // Enable sharding on the test DB and ensure its primary is shard0000. + // Enable sharding on the test DB and ensure its primary is st.shard0.shardName. assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()})); st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL()); @@ -41,7 +41,7 @@ assert.commandWorked( mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}})); - // Move the [0, MaxKey) chunk to shard0001. + // Move the [0, MaxKey) chunk to st.shard1.shardName. assert.commandWorked(mongosDB.adminCommand( {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()})); @@ -138,7 +138,7 @@ // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey). assert.commandWorked( mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}})); - // Move the [0, MaxKey) chunk to shard0001. + // Move the [0, MaxKey) chunk to st.shard1.shardName. assert.commandWorked(mongosDB.adminCommand( {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()})); diff --git a/jstests/sharding/change_streams_shards_start_in_sync.js b/jstests/sharding/change_streams_shards_start_in_sync.js index 3ed7d3fe98a..efad4a2e382 100644 --- a/jstests/sharding/change_streams_shards_start_in_sync.js +++ b/jstests/sharding/change_streams_shards_start_in_sync.js @@ -33,7 +33,7 @@ const mongosDB = st.s0.getDB(jsTestName()); const mongosColl = mongosDB[jsTestName()]; - // Enable sharding on the test DB and ensure its primary is shard0000. + // Enable sharding on the test DB and ensure its primary is st.shard0.shardName. assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()})); st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL()); @@ -45,7 +45,7 @@ assert.commandWorked( mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}})); - // Move the [0, MaxKey) chunk to shard0001. + // Move the [0, MaxKey) chunk to st.shard1.shardName. 
assert.commandWorked(mongosDB.adminCommand( {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()})); diff --git a/jstests/sharding/cleanup_orphaned_auth.js b/jstests/sharding/cleanup_orphaned_auth.js index 834ad613a38..913bea9b1b7 100644 --- a/jstests/sharding/cleanup_orphaned_auth.js +++ b/jstests/sharding/cleanup_orphaned_auth.js @@ -19,8 +19,11 @@ doassert(finalMsg); } - var st = - new ShardingTest({auth: true, other: {keyFile: 'jstests/libs/key1', useHostname: false}}); + // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed. + var st = new ShardingTest({ + auth: true, + other: {keyFile: 'jstests/libs/key1', useHostname: false, shardAsReplicaSet: false} + }); var shardAdmin = st.shard0.getDB('admin'); shardAdmin.createUser( diff --git a/jstests/sharding/coll_epoch_test0.js b/jstests/sharding/coll_epoch_test0.js index 49fe99914a0..a8745cd3110 100644 --- a/jstests/sharding/coll_epoch_test0.js +++ b/jstests/sharding/coll_epoch_test0.js @@ -9,7 +9,7 @@ var coll = st.s.getCollection("foo.bar"); // First enable sharding admin.runCommand({enableSharding: coll.getDB() + ""}); -st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001'); +st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName); admin.runCommand({shardCollection: coll + "", key: {_id: 1}}); var primary = config.databases.find({_id: coll.getDB() + ""}).primary; diff --git a/jstests/sharding/coll_epoch_test2.js b/jstests/sharding/coll_epoch_test2.js index c2106ebefbe..d040e86930f 100644 --- a/jstests/sharding/coll_epoch_test2.js +++ b/jstests/sharding/coll_epoch_test2.js @@ -28,7 +28,7 @@ var shards = [st.shard0, st.shard1]; jsTest.log("Enabling sharding for the first time..."); admin.runCommand({enableSharding: coll.getDB() + ""}); -st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001'); +st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName); admin.runCommand({shardCollection: coll + "", key: {_id: 1}}); assert.writeOK(coll.insert({hello: "world"})); @@ -79,7 +79,7 @@ assert(droppedCollDoc.lastmodEpoch.equals(new ObjectId("000000000000000000000000 "epoch not zero: " + droppedCollDoc.lastmodEpoch); admin.runCommand({enableSharding: coll.getDB() + ""}); -st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001'); +st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName); admin.runCommand({shardCollection: coll + "", key: {_id: 1}}); var bulk = coll.initializeUnorderedBulkOp(); diff --git a/jstests/sharding/collation_targeting.js b/jstests/sharding/collation_targeting.js index c4dc22641c7..21740339d0f 100644 --- a/jstests/sharding/collation_targeting.js +++ b/jstests/sharding/collation_targeting.js @@ -11,7 +11,7 @@ var st = new ShardingTest({shards: 3}); var testDB = st.s.getDB("test"); assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()})); - st.ensurePrimaryShard(testDB.getName(), "shard0001"); + st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName); // Create a collection sharded on {a: 1}. Add 2dsphere index to test geoNear. var coll = testDB.getCollection("simple_collation"); @@ -21,22 +21,22 @@ assert.commandWorked(testDB.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}})); // Split the collection. 
- // shard0000: { "a" : { "$minKey" : 1 } } -->> { "a" : 10 } - // shard0001: { "a" : 10 } -->> { "a" : "a"} + // st.shard0.shardName: { "a" : { "$minKey" : 1 } } -->> { "a" : 10 } + // st.shard1.shardName: { "a" : 10 } -->> { "a" : "a"} // shard0002: { "a" : "a" } -->> { "a" : { "$maxKey" : 1 }} assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {a: 10}})); assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {a: "a"}})); - assert.commandWorked( - testDB.adminCommand({moveChunk: coll.getFullName(), find: {a: 1}, to: "shard0000"})); - assert.commandWorked( - testDB.adminCommand({moveChunk: coll.getFullName(), find: {a: "FOO"}, to: "shard0001"})); - assert.commandWorked( - testDB.adminCommand({moveChunk: coll.getFullName(), find: {a: "foo"}, to: "shard0002"})); + assert.commandWorked(testDB.adminCommand( + {moveChunk: coll.getFullName(), find: {a: 1}, to: st.shard0.shardName})); + assert.commandWorked(testDB.adminCommand( + {moveChunk: coll.getFullName(), find: {a: "FOO"}, to: st.shard1.shardName})); + assert.commandWorked(testDB.adminCommand( + {moveChunk: coll.getFullName(), find: {a: "foo"}, to: st.shard2.shardName})); // Put data on each shard. // Note that the balancer is off by default, so the chunks will stay put. - // shard0000: {a: 1} - // shard0001: {a: 100}, {a: "FOO"} + // st.shard0.shardName: {a: 1} + // st.shard1.shardName: {a: 100}, {a: "FOO"} // shard0002: {a: "foo"} // Include geo field to test geoNear. var a_1 = {_id: 0, a: 1, geo: {type: "Point", coordinates: [0, 0]}}; diff --git a/jstests/sharding/collation_targeting_inherited.js b/jstests/sharding/collation_targeting_inherited.js index 2729b64552d..b9e88059dc1 100644 --- a/jstests/sharding/collation_targeting_inherited.js +++ b/jstests/sharding/collation_targeting_inherited.js @@ -11,7 +11,7 @@ var st = new ShardingTest({shards: 3}); var testDB = st.s.getDB("test"); assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()})); - st.ensurePrimaryShard(testDB.getName(), "shard0001"); + st.ensurePrimaryShard(testDB.getName(), st.shard1.shardName); // Create a collection with a case-insensitive default collation sharded on {a: 1}. var collCaseInsensitive = testDB.getCollection("case_insensitive"); @@ -26,24 +26,24 @@ })); // Split the collection. - // shard0000: { "a" : { "$minKey" : 1 } } -->> { "a" : 10 } - // shard0001: { "a" : 10 } -->> { "a" : "a"} + // st.shard0.shardName: { "a" : { "$minKey" : 1 } } -->> { "a" : 10 } + // st.shard1.shardName: { "a" : 10 } -->> { "a" : "a"} // shard0002: { "a" : "a" } -->> { "a" : { "$maxKey" : 1 }} assert.commandWorked( testDB.adminCommand({split: collCaseInsensitive.getFullName(), middle: {a: 10}})); assert.commandWorked( testDB.adminCommand({split: collCaseInsensitive.getFullName(), middle: {a: "a"}})); assert.commandWorked(testDB.adminCommand( - {moveChunk: collCaseInsensitive.getFullName(), find: {a: 1}, to: "shard0000"})); + {moveChunk: collCaseInsensitive.getFullName(), find: {a: 1}, to: st.shard0.shardName})); assert.commandWorked(testDB.adminCommand( - {moveChunk: collCaseInsensitive.getFullName(), find: {a: "FOO"}, to: "shard0001"})); + {moveChunk: collCaseInsensitive.getFullName(), find: {a: "FOO"}, to: st.shard1.shardName})); assert.commandWorked(testDB.adminCommand( - {moveChunk: collCaseInsensitive.getFullName(), find: {a: "foo"}, to: "shard0002"})); + {moveChunk: collCaseInsensitive.getFullName(), find: {a: "foo"}, to: st.shard2.shardName})); // Put data on each shard. 
// Note that the balancer is off by default, so the chunks will stay put. - // shard0000: {a: 1} - // shard0001: {a: 100}, {a: "FOO"} + // st.shard0.shardName: {a: 1} + // st.shard1.shardName: {a: 100}, {a: "FOO"} // shard0002: {a: "foo"} // Include geo field to test geoNear. var a_1 = {_id: 0, a: 1, geo: {type: "Point", coordinates: [0, 0]}}; diff --git a/jstests/sharding/count1.js b/jstests/sharding/count1.js index e8783dac6a1..d3b2cf63b3e 100644 --- a/jstests/sharding/count1.js +++ b/jstests/sharding/count1.js @@ -30,7 +30,7 @@ // part 1 s.adminCommand({enablesharding: "test"}); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); s.adminCommand({shardcollection: "test.foo", key: {name: 1}}); var primary = s.getPrimaryShard("test").getDB("test"); diff --git a/jstests/sharding/count2.js b/jstests/sharding/count2.js index 22d4a80a175..d5bad76a246 100644 --- a/jstests/sharding/count2.js +++ b/jstests/sharding/count2.js @@ -4,7 +4,7 @@ var s2 = s1._mongos[1]; s1.adminCommand({enablesharding: "test"}); - s1.ensurePrimaryShard('test', 'shard0001'); + s1.ensurePrimaryShard('test', s1.shard1.shardName); s1.adminCommand({shardcollection: "test.foo", key: {name: 1}}); var db1 = s1.getDB("test").foo; diff --git a/jstests/sharding/create_idx_empty_primary.js b/jstests/sharding/create_idx_empty_primary.js index f8beffa7e52..1610c1fef44 100644 --- a/jstests/sharding/create_idx_empty_primary.js +++ b/jstests/sharding/create_idx_empty_primary.js @@ -6,14 +6,14 @@ var st = new ShardingTest({shards: 2}); assert.commandWorked(st.s.adminCommand({enablesharding: 'test'})); - st.ensurePrimaryShard('test', 'shard0001'); + st.ensurePrimaryShard('test', st.shard1.shardName); var testDB = st.s.getDB('test'); assert.commandWorked(testDB.adminCommand({shardcollection: 'test.user', key: {_id: 1}})); // Move only chunk out of primary shard. 
assert.commandWorked( - testDB.adminCommand({movechunk: 'test.user', find: {_id: 0}, to: 'shard0000'})); + testDB.adminCommand({movechunk: 'test.user', find: {_id: 0}, to: st.shard0.shardName})); assert.writeOK(testDB.user.insert({_id: 0})); @@ -23,10 +23,10 @@ var indexes = testDB.user.getIndexes(); assert.eq(2, indexes.length); - indexes = st.d0.getDB('test').user.getIndexes(); + indexes = st.rs0.getPrimary().getDB('test').user.getIndexes(); assert.eq(2, indexes.length); - indexes = st.d1.getDB('test').user.getIndexes(); + indexes = st.rs1.getPrimary().getDB('test').user.getIndexes(); assert.eq(2, indexes.length); st.stop(); diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js index cf5bfb27c1e..92e321eac27 100644 --- a/jstests/sharding/cursor1.js +++ b/jstests/sharding/cursor1.js @@ -8,7 +8,7 @@ // create a sharded 'test.foo', for the moment with just one chunk s.adminCommand({enablesharding: "test"}); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); s.adminCommand({shardcollection: "test.foo", key: {_id: 1}}); db = s.getDB("test"); diff --git a/jstests/sharding/cursor_timeout.js b/jstests/sharding/cursor_timeout.js index 690bda9aa4a..340868e06f4 100644 --- a/jstests/sharding/cursor_timeout.js +++ b/jstests/sharding/cursor_timeout.js @@ -26,6 +26,7 @@ const cursorMonitorFrequencySecs = 1; + // TODO: SERVER-33444 remove shardAsReplicaSet: false const st = new ShardingTest({ shards: 2, other: { @@ -42,7 +43,8 @@ cursorTimeoutMillis: mongosCursorTimeoutMs, clientCursorMonitorFrequencySecs: cursorMonitorFrequencySecs } - } + }, + shardAsReplicaSet: false }, enableBalancer: false }); @@ -55,7 +57,7 @@ const shardColl = mongod.getCollection(routerColl.getFullName()); assert.commandWorked(adminDB.runCommand({enableSharding: routerColl.getDB().getName()})); - st.ensurePrimaryShard(routerColl.getDB().getName(), 'shard0000'); + st.ensurePrimaryShard(routerColl.getDB().getName(), "shard0000"); assert.commandWorked( adminDB.runCommand({shardCollection: routerColl.getFullName(), key: {x: 1}})); assert.commandWorked(adminDB.runCommand({split: routerColl.getFullName(), middle: {x: 10}})); diff --git a/jstests/sharding/delete_during_migrate.js b/jstests/sharding/delete_during_migrate.js index 2e7f6220d75..04c3075b1f1 100644 --- a/jstests/sharding/delete_during_migrate.js +++ b/jstests/sharding/delete_during_migrate.js @@ -17,7 +17,7 @@ var ns = dbname + "." + coll; assert.commandWorked(st.s0.adminCommand({enablesharding: dbname})); - st.ensurePrimaryShard(dbname, 'shard0001'); + st.ensurePrimaryShard(dbname, st.shard1.shardName); var t = st.s0.getDB(dbname).getCollection(coll); diff --git a/jstests/sharding/enable_sharding_basic.js b/jstests/sharding/enable_sharding_basic.js index 7fd7426dfd2..fb0cbdbdb4a 100644 --- a/jstests/sharding/enable_sharding_basic.js +++ b/jstests/sharding/enable_sharding_basic.js @@ -8,8 +8,9 @@ var st = new ShardingTest({mongos: 2, shards: 2}); // enableSharding can run only on mongos. - assert.commandFailedWithCode(st.d0.getDB('admin').runCommand({enableSharding: 'db'}), - ErrorCodes.CommandNotFound); + assert.commandFailedWithCode( + st.rs0.getPrimary().getDB('admin').runCommand({enableSharding: 'db'}), + ErrorCodes.CommandNotFound); // enableSharding can run only against the admin database. 
assert.commandFailedWithCode(st.s0.getDB('test').runCommand({enableSharding: 'db'}), diff --git a/jstests/sharding/enforce_zone_policy.js b/jstests/sharding/enforce_zone_policy.js index bf3ce3d1a5a..7aac1aa02bf 100644 --- a/jstests/sharding/enforce_zone_policy.js +++ b/jstests/sharding/enforce_zone_policy.js @@ -6,7 +6,7 @@ var st = new ShardingTest({shards: 3, mongos: 1}); assert.commandWorked(st.s0.adminCommand({enablesharding: 'test'})); - st.ensurePrimaryShard('test', 'shard0001'); + st.ensurePrimaryShard('test', st.shard1.shardName); var testDB = st.s0.getDB('test'); var configDB = st.s0.getDB('config'); @@ -56,17 +56,18 @@ assertBalanceCompleteAndStable(checkClusterEvenlyBalanced, 'initial'); // Spread chunks correctly across zones - st.addShardTag('shard0000', 'a'); - st.addShardTag('shard0001', 'a'); + st.addShardTag(st.shard0.shardName, 'a'); + st.addShardTag(st.shard1.shardName, 'a'); st.addTagRange('test.foo', {_id: -100}, {_id: 100}, 'a'); - st.addShardTag('shard0002', 'b'); + st.addShardTag(st.shard2.shardName, 'b'); st.addTagRange('test.foo', {_id: MinKey}, {_id: -100}, 'b'); st.addTagRange('test.foo', {_id: 100}, {_id: MaxKey}, 'b'); assertBalanceCompleteAndStable(function() { - var chunksOnShard2 = - configDB.chunks.find({ns: 'test.foo', shard: 'shard0002'}).sort({min: 1}).toArray(); + var chunksOnShard2 = configDB.chunks.find({ns: 'test.foo', shard: st.shard2.shardName}) + .sort({min: 1}) + .toArray(); jsTestLog('Chunks on shard2: ' + tojson(chunksOnShard2)); @@ -83,18 +84,19 @@ st.removeTagRange('test.foo', {_id: MinKey}, {_id: -100}, 'b'); st.removeTagRange('test.foo', {_id: 100}, {_id: MaxKey}, 'b'); - st.removeShardTag('shard0001', 'a'); - st.removeShardTag('shard0002', 'b'); + st.removeShardTag(st.shard1.shardName, 'a'); + st.removeShardTag(st.shard2.shardName, 'b'); st.addTagRange('test.foo', {_id: MinKey}, {_id: MaxKey}, 'a'); assertBalanceCompleteAndStable(function() { var counts = st.chunkCounts('foo'); printjson(counts); - return counts['shard0000'] == 11 && counts['shard0001'] == 0 && counts['shard0002'] == 0; + return counts[st.shard0.shardName] == 11 && counts[st.shard1.shardName] == 0 && + counts[st.shard2.shardName] == 0; }, 'all chunks to zone a'); // Remove all zones and ensure collection is correctly redistributed - st.removeShardTag('shard0000', 'a'); + st.removeShardTag(st.shard0.shardName, 'a'); st.removeTagRange('test.foo', {_id: MinKey}, {_id: MaxKey}, 'a'); assertBalanceCompleteAndStable(checkClusterEvenlyBalanced, 'final'); diff --git a/jstests/sharding/error_during_agg_getmore.js b/jstests/sharding/error_during_agg_getmore.js index adcc6a6172b..d6f3f8a2f90 100644 --- a/jstests/sharding/error_during_agg_getmore.js +++ b/jstests/sharding/error_during_agg_getmore.js @@ -10,7 +10,7 @@ assert.commandWorked(mongosDB.dropDatabase()); - // Enable sharding on the test DB and ensure its primary is shard0000. + // Enable sharding on the test DB and ensure its primary is st.shard0.shardName. assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()})); st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName); @@ -22,7 +22,7 @@ assert.commandWorked( mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}})); - // Move the [0, MaxKey] chunk to shard0001. + // Move the [0, MaxKey] chunk to st.shard1.shardName. 
assert.commandWorked(mongosDB.adminCommand( {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.shard1.shardName})); @@ -34,7 +34,7 @@ // likely arrive after the response from shard 0, but not so long that the background cluster // client cleanup job will have been given a chance to run. const delayMillis = 100; - st.shard1.delayMessagesFrom(st.s, delayMillis); + st.rs1.getPrimary().delayMessagesFrom(st.s, delayMillis); const nTrials = 10; for (let i = 1; i < 10; ++i) { diff --git a/jstests/sharding/explain_cmd.js b/jstests/sharding/explain_cmd.js index 3293c167db1..0b5dcf8bf30 100644 --- a/jstests/sharding/explain_cmd.js +++ b/jstests/sharding/explain_cmd.js @@ -17,18 +17,18 @@ // Enable sharding. assert.commandWorked(db.adminCommand({enableSharding: db.getName()})); - st.ensurePrimaryShard(db.getName(), 'shard0001'); + st.ensurePrimaryShard(db.getName(), st.shard1.shardName); db.adminCommand({shardCollection: collSharded.getFullName(), key: {a: 1}}); // Pre-split the collection to ensure that both shards have chunks. Explicitly // move chunks since the balancer is disabled. - for (var i = 1; i <= 2; i++) { - assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: i}})); + assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: 1}})); + printjson(db.adminCommand( + {moveChunk: collSharded.getFullName(), find: {a: 1}, to: st.shard0.shardName})); - var shardName = "shard000" + (i - 1); - printjson( - db.adminCommand({moveChunk: collSharded.getFullName(), find: {a: i}, to: shardName})); - } + assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: 2}})); + printjson(db.adminCommand( + {moveChunk: collSharded.getFullName(), find: {a: 2}, to: st.shard1.shardName})); // Put data on each shard. for (var i = 0; i < 3; i++) { diff --git a/jstests/sharding/explain_find_and_modify_sharded.js b/jstests/sharding/explain_find_and_modify_sharded.js index d2370ee61ce..3066666c82d 100644 --- a/jstests/sharding/explain_find_and_modify_sharded.js +++ b/jstests/sharding/explain_find_and_modify_sharded.js @@ -20,18 +20,20 @@ assert.commandWorked(shardedColl.ensureIndex(shardKey)); // Enable sharding on the database and shard the collection. - // Use "shard0000" as the primary shard. + // Use "st.shard0.shardName" as the primary shard. assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()})); - st.ensurePrimaryShard(testDB.toString(), 'shard0000'); + st.ensurePrimaryShard(testDB.toString(), st.shard0.shardName); assert.commandWorked( testDB.adminCommand({shardCollection: shardedColl.getFullName(), key: shardKey})); // Split and move the chunks so that - // chunk { "a" : { "$minKey" : 1 } } -->> { "a" : 10 } is on shard0000 - // chunk { "a" : 10 } -->> { "a" : { "$maxKey" : 1 } } is on shard0001 + // chunk { "a" : { "$minKey" : 1 } } -->> { "a" : 10 } is on + // st.shard0.shardName + // chunk { "a" : 10 } -->> { "a" : { "$maxKey" : 1 } } is on + // st.shard1.shardName assert.commandWorked(testDB.adminCommand({split: shardedColl.getFullName(), middle: {a: 10}})); assert.commandWorked(testDB.adminCommand( - {moveChunk: shardedColl.getFullName(), find: {a: 10}, to: 'shard0001'})); + {moveChunk: shardedColl.getFullName(), find: {a: 10}, to: st.shard1.shardName})); var res; @@ -66,21 +68,23 @@ assert.eq(expectedStage, shardStage.shards[0][innerKey].stage); } - // Test that the explain command is routed to "shard0000" when targeting the lower chunk range. 
+ // Test that the explain command is routed to "st.shard0.shardName" when targeting the lower + // chunk range. res = testDB.runCommand({ explain: {findAndModify: collName, query: {a: 0}, update: {$inc: {b: 7}}, upsert: true}, verbosity: 'queryPlanner' }); assert.commandWorked(res); - assertExplainResult(res, 'queryPlanner', 'winningPlan', 'shard0000', 'UPDATE'); + assertExplainResult(res, 'queryPlanner', 'winningPlan', st.shard0.shardName, 'UPDATE'); - // Test that the explain command is routed to "shard0001" when targeting the higher chunk range. + // Test that the explain command is routed to "st.shard1.shardName" when targeting the higher + // chunk range. res = testDB.runCommand({ explain: {findAndModify: collName, query: {a: 20, c: 5}, remove: true}, verbosity: 'executionStats' }); assert.commandWorked(res); - assertExplainResult(res, 'executionStats', 'executionStages', 'shard0001', 'DELETE'); + assertExplainResult(res, 'executionStats', 'executionStages', st.shard1.shardName, 'DELETE'); st.stop(); })(); diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js index 254e919f237..0a31b5c94c9 100644 --- a/jstests/sharding/features1.js +++ b/jstests/sharding/features1.js @@ -3,7 +3,7 @@ var s = new ShardingTest({name: "features1", shards: 2, mongos: 1}); s.adminCommand({enablesharding: "test"}); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); // ---- can't shard system namespaces ---- diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js index e84290d046e..16d28c4d1ba 100644 --- a/jstests/sharding/features2.js +++ b/jstests/sharding/features2.js @@ -5,7 +5,7 @@ var s = new ShardingTest({name: "features2", shards: 2, mongos: 1}); s.adminCommand({enablesharding: "test"}); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); let a = s._connections[0].getDB("test"); let b = s._connections[1].getDB("test"); diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js index a6bd2e2b2d1..cee22543b7e 100644 --- a/jstests/sharding/features3.js +++ b/jstests/sharding/features3.js @@ -17,7 +17,7 @@ // shard test.foo and add a split point s.adminCommand({enablesharding: "test"}); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); s.adminCommand({shardcollection: "test.foo", key: {_id: 1}}); s.adminCommand({split: "test.foo", middle: {_id: numDocs / 2}}); @@ -45,8 +45,8 @@ assert.eq("test.foo", x.ns, "namespace mismatch"); assert(x.sharded, "collection is not sharded"); assert.eq(numDocs, x.count, "total count"); - assert.eq(numDocs / 2, x.shards.shard0000.count, "count on shard0000"); - assert.eq(numDocs / 2, x.shards.shard0001.count, "count on shard0001"); + assert.eq(numDocs / 2, x.shards[s.shard0.shardName].count, "count on " + s.shard0.shardName); + assert.eq(numDocs / 2, x.shards[s.shard1.shardName].count, "count on " + s.shard1.shardName); assert(x.totalIndexSize > 0); // insert one doc into a non-sharded collection @@ -145,7 +145,7 @@ // test fsync on admin db x = dbForTest._adminCommand("fsync"); assert(x.ok == 1, "fsync failed: " + tojson(x)); - if (x.all.shard0000 > 0) { + if (x.all[s.shard0.shardName] > 0) { assert(x.numFiles > 0, "fsync failed: " + tojson(x)); } diff --git a/jstests/sharding/find_and_modify_after_multi_write.js b/jstests/sharding/find_and_modify_after_multi_write.js index 15f54120706..749f999c54c 100644 --- a/jstests/sharding/find_and_modify_after_multi_write.js +++ 
b/jstests/sharding/find_and_modify_after_multi_write.js @@ -11,42 +11,42 @@ var testDB = st.s.getDB('test'); assert.commandWorked(testDB.adminCommand({enableSharding: 'test'})); - st.ensurePrimaryShard('test', 'shard0000'); + st.ensurePrimaryShard('test', st.shard0.shardName); assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}})); assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {x: 0}})); assert.commandWorked(testDB.adminCommand( - {moveChunk: 'test.user', find: {x: 0}, to: 'shard0001', _waitForDelete: true})); + {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true})); var testDB2 = st.s1.getDB('test'); testDB2.user.insert({x: 123456}); // Move chunk to bump version on a different mongos. assert.commandWorked(testDB.adminCommand( - {moveChunk: 'test.user', find: {x: 0}, to: 'shard0000', _waitForDelete: true})); + {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true})); // Issue a targetted findAndModify and check that it was upserted to the right shard. assert.commandWorked(testDB2.runCommand( {findAndModify: 'user', query: {x: 100}, update: {$set: {y: 1}}, upsert: true})); - assert.neq(null, st.d0.getDB('test').user.findOne({x: 100})); - assert.eq(null, st.d1.getDB('test').user.findOne({x: 100})); + assert.neq(null, st.rs0.getPrimary().getDB('test').user.findOne({x: 100})); + assert.eq(null, st.rs1.getPrimary().getDB('test').user.findOne({x: 100})); // At this point, s1 thinks the version of 'test.user' is 2, bounce it again so it gets // incremented to 3 assert.commandWorked(testDB.adminCommand( - {moveChunk: 'test.user', find: {x: 0}, to: 'shard0001', _waitForDelete: true})); + {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true})); assert.commandWorked(testDB2.runCommand( {findAndModify: 'user', query: {x: 200}, update: {$set: {y: 1}}, upsert: true})); - assert.eq(null, st.d0.getDB('test').user.findOne({x: 200})); - assert.neq(null, st.d1.getDB('test').user.findOne({x: 200})); + assert.eq(null, st.rs0.getPrimary().getDB('test').user.findOne({x: 200})); + assert.neq(null, st.rs1.getPrimary().getDB('test').user.findOne({x: 200})); // At this point, s0 thinks the version of 'test.user' is 3, bounce it again so it gets // incremented to 4 assert.commandWorked(testDB.adminCommand( - {moveChunk: 'test.user', find: {x: 0}, to: 'shard0000', _waitForDelete: true})); + {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true})); // Ensure that write commands with multi version do not reset the connection shard version // to @@ -56,8 +56,8 @@ assert.commandWorked(testDB2.runCommand( {findAndModify: 'user', query: {x: 300}, update: {$set: {y: 1}}, upsert: true})); - assert.neq(null, st.d0.getDB('test').user.findOne({x: 300})); - assert.eq(null, st.d1.getDB('test').user.findOne({x: 300})); + assert.neq(null, st.rs0.getPrimary().getDB('test').user.findOne({x: 300})); + assert.eq(null, st.rs1.getPrimary().getDB('test').user.findOne({x: 300})); st.stop(); }; diff --git a/jstests/sharding/find_getmore_cmd.js b/jstests/sharding/find_getmore_cmd.js index a7e4e712ff6..e688fc0f9ca 100644 --- a/jstests/sharding/find_getmore_cmd.js +++ b/jstests/sharding/find_getmore_cmd.js @@ -25,11 +25,11 @@ assert.commandWorked(coll.ensureIndex({b: "text"})); assert.commandWorked(db.adminCommand({enableSharding: db.getName()})); - st.ensurePrimaryShard(db.getName(), "shard0000"); + st.ensurePrimaryShard(db.getName(), st.shard0.shardName); 
db.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}}); assert.commandWorked(db.adminCommand({split: coll.getFullName(), middle: {_id: 0}})); assert.commandWorked( - db.adminCommand({moveChunk: coll.getFullName(), find: {_id: 1}, to: "shard0001"})); + db.adminCommand({moveChunk: coll.getFullName(), find: {_id: 1}, to: st.shard1.shardName})); // Find with no options. cmdRes = db.runCommand({find: coll.getName()}); diff --git a/jstests/sharding/findandmodify2.js b/jstests/sharding/findandmodify2.js index ab706a448c4..372d96c2795 100644 --- a/jstests/sharding/findandmodify2.js +++ b/jstests/sharding/findandmodify2.js @@ -5,7 +5,7 @@ assert.commandWorked(s.s0.adminCommand({enablesharding: "test"})); var db = s.getDB("test"); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); var primary = s.getPrimaryShard("test").getDB("test"); var secondary = s.getOther(primary).getDB("test"); diff --git a/jstests/sharding/forget_mr_temp_ns.js b/jstests/sharding/forget_mr_temp_ns.js index fd950bcf43c..d3403dd7fee 100644 --- a/jstests/sharding/forget_mr_temp_ns.js +++ b/jstests/sharding/forget_mr_temp_ns.js @@ -2,7 +2,8 @@ // Tests whether we forget M/R's temporary namespaces for sharded output // -var st = new ShardingTest({shards: 1, mongos: 1}); +// TODO: SERVER-33444 remove shardAsReplicaSet: false +var st = new ShardingTest({shards: 1, mongos: 1, other: {shardAsReplicaSet: false}}); var mongos = st.s0; var admin = mongos.getDB("admin"); diff --git a/jstests/sharding/fts_score_sort_sharded.js b/jstests/sharding/fts_score_sort_sharded.js index cf2c5769e5c..2145e987558 100644 --- a/jstests/sharding/fts_score_sort_sharded.js +++ b/jstests/sharding/fts_score_sort_sharded.js @@ -13,11 +13,11 @@ var cursor; // Pre-split collection: shard 0 takes {_id: {$lt: 0}}, shard 1 takes {_id: {$gte: 0}}. // assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()})); -st.ensurePrimaryShard(coll.getDB().toString(), "shard0000"); +st.ensurePrimaryShard(coll.getDB().toString(), st.shard0.shardName); assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {_id: 1}})); assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}})); assert.commandWorked( - admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: "shard0001"})); + admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: st.shard1.shardName})); // // Insert documents into collection and create text index. diff --git a/jstests/sharding/geo_near_random1.js b/jstests/sharding/geo_near_random1.js index 1f8633d0a06..260c37fea0d 100644 --- a/jstests/sharding/geo_near_random1.js +++ b/jstests/sharding/geo_near_random1.js @@ -15,18 +15,18 @@ load("jstests/libs/geo_near_random.js"); var test = new GeoNearRandomTest(testName, db); assert.commandWorked(s.s0.adminCommand({enablesharding: 'test'})); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); assert.commandWorked(s.s0.adminCommand({shardcollection: ('test.' + testName), key: {_id: 1}})); test.insertPts(50); - + var shardList = [s.shard0.shardName, s.shard1.shardName, s.shard2.shardName]; for (var i = (test.nPts / 10); i < test.nPts; i += (test.nPts / 10)) { assert.commandWorked(s.s0.adminCommand({split: ('test.' + testName), middle: {_id: i}})); try { assert.commandWorked(s.s0.adminCommand({ moveChunk: ('test.' 
+ testName), find: {_id: i - 1}, - to: ('shard000' + (i % 3)), + to: (shardList[i % 3]), _waitForDelete: true })); } catch (e) { diff --git a/jstests/sharding/geo_near_random2.js b/jstests/sharding/geo_near_random2.js index 320842acb17..0b874e5aafe 100644 --- a/jstests/sharding/geo_near_random2.js +++ b/jstests/sharding/geo_near_random2.js @@ -15,18 +15,18 @@ load("jstests/libs/geo_near_random.js"); var test = new GeoNearRandomTest(testName, db); assert.commandWorked(s.s0.adminCommand({enablesharding: 'test'})); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); assert.commandWorked(s.s0.adminCommand({shardcollection: ('test.' + testName), key: {_id: 1}})); test.insertPts(5000); - + var shardList = [s.shard0.shardName, s.shard1.shardName, s.shard2.shardName]; for (var i = (test.nPts / 10); i < test.nPts; i += (test.nPts / 10)) { assert.commandWorked(s.s0.adminCommand({split: ('test.' + testName), middle: {_id: i}})); try { assert.commandWorked(s.s0.adminCommand({ moveChunk: ('test.' + testName), find: {_id: i - 1}, - to: ('shard000' + (i % 3)), + to: shardList[i % 3], _waitForDelete: true })); } catch (e) { diff --git a/jstests/sharding/geo_near_sharded.js b/jstests/sharding/geo_near_sharded.js index 352f9c90b84..a92b579ef7e 100644 --- a/jstests/sharding/geo_near_sharded.js +++ b/jstests/sharding/geo_near_sharded.js @@ -46,9 +46,10 @@ tojson({sharded: sharded, indexType: indexType})); } - var st = new ShardingTest({shards: 3, mongos: 1}); + // TODO: SERVER-33444 remove shardAsReplicaSet: false + var st = new ShardingTest({shards: 3, mongos: 1, other: {shardAsReplicaSet: false}}); assert.commandWorked(st.s0.adminCommand({enablesharding: "test"})); - st.ensurePrimaryShard('test', 'shard0001'); + st.ensurePrimaryShard('test', st.shard1.shardName); test(st, st.getDB('test'), true, '2dsphere'); st.stop(); diff --git a/jstests/sharding/geo_near_sort.js b/jstests/sharding/geo_near_sort.js index 202b6044e4c..fa839a78551 100644 --- a/jstests/sharding/geo_near_sort.js +++ b/jstests/sharding/geo_near_sort.js @@ -8,13 +8,13 @@ const caseInsensitive = {locale: "en_US", strength: 2}; assert.commandWorked(st.s0.adminCommand({enableSharding: db.getName()})); - st.ensurePrimaryShard(db.getName(), "shard0000"); + st.ensurePrimaryShard(db.getName(), st.shard0.shardName); assert.commandWorked(st.s0.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}})); // Split the data into 2 chunks and move the chunk with _id > 0 to shard 1. assert.commandWorked(st.s0.adminCommand({split: coll.getFullName(), middle: {_id: 0}})); - assert.commandWorked( - st.s0.adminCommand({movechunk: coll.getFullName(), find: {_id: 1}, to: "shard0001"})); + assert.commandWorked(st.s0.adminCommand( + {movechunk: coll.getFullName(), find: {_id: 1}, to: st.shard1.shardName})); // Insert some documents. The sort order by distance from the origin is [-2, 1, -1, 2] (under 2d // or 2dsphere geometry). 
The sort order by {a: 1} under the case-insensitive collation is [2, diff --git a/jstests/sharding/hash_basic.js b/jstests/sharding/hash_basic.js index fa4947474a8..bffada99eef 100644 --- a/jstests/sharding/hash_basic.js +++ b/jstests/sharding/hash_basic.js @@ -4,7 +4,7 @@ var st = new ShardingTest({shards: 2, chunkSize: 1}); assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'})); - st.ensurePrimaryShard('test', 'shard0001'); + st.ensurePrimaryShard('test', st.shard1.shardName); assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 'hashed'}})); var configDB = st.s0.getDB('config'); diff --git a/jstests/sharding/hash_shard1.js b/jstests/sharding/hash_shard1.js index 21b69472e3c..4939cc53c99 100644 --- a/jstests/sharding/hash_shard1.js +++ b/jstests/sharding/hash_shard1.js @@ -8,7 +8,7 @@ var ns = dbname + "." + coll; var db = s.getDB(dbname); var t = db.getCollection(coll); db.adminCommand({enablesharding: dbname}); -s.ensurePrimaryShard(dbname, 'shard0001'); +s.ensurePrimaryShard(dbname, s.shard1.shardName); // for simplicity start by turning off balancer s.stopBalancer(); @@ -29,18 +29,18 @@ for (i = 0; i < numitems; i++) { assert.eq(t.find().count(), numitems, "count off after inserts"); printjson(t.find().explain()); -// find a chunk that's not on shard0000 -var chunk = s.config.chunks.findOne({shard: {$ne: "shard0000"}}); -assert.neq(chunk, null, "all chunks on shard0000!"); +// find a chunk that's not on s.shard0.shardName +var chunk = s.config.chunks.findOne({shard: {$ne: s.shard0.shardName}}); +assert.neq(chunk, null, "all chunks on s.shard0.shardName!"); printjson(chunk); // try to move the chunk using an invalid specification method. should fail. -var res = - db.adminCommand({movechunk: ns, find: {a: 0}, bounds: [chunk.min, chunk.max], to: "shard0000"}); +var res = db.adminCommand( + {movechunk: ns, find: {a: 0}, bounds: [chunk.min, chunk.max], to: s.shard0.shardName}); assert.eq(res.ok, 0, "moveChunk shouldn't work with invalid specification method"); // now move a chunk using the lower/upper bound method. should work. 
-var res = db.adminCommand({movechunk: ns, bounds: [chunk.min, chunk.max], to: "shard0000"}); +var res = db.adminCommand({movechunk: ns, bounds: [chunk.min, chunk.max], to: s.shard0.shardName}); printjson(res); assert.eq(res.ok, 1, "movechunk using lower/upper bound method didn't work "); @@ -49,7 +49,7 @@ assert.eq(t.find().itcount(), numitems, "count off after migrate"); printjson(t.find().explain()); // move a chunk using the find method -var res = db.adminCommand({movechunk: ns, find: {a: 2}, to: "shard0002"}); +var res = db.adminCommand({movechunk: ns, find: {a: 2}, to: s.shard2.shardName}); printjson(res); assert.eq(res.ok, 1, "movechunk using find query didn't work"); diff --git a/jstests/sharding/hash_shard_non_empty.js b/jstests/sharding/hash_shard_non_empty.js index 8ebf53cf7f2..865c103ce14 100644 --- a/jstests/sharding/hash_shard_non_empty.js +++ b/jstests/sharding/hash_shard_non_empty.js @@ -5,7 +5,7 @@ var dbname = "test"; var coll = "foo"; var db = s.getDB(dbname); db.adminCommand({enablesharding: dbname}); -s.ensurePrimaryShard('test', 'shard0001'); +s.ensurePrimaryShard('test', s.shard1.shardName); // for simplicity turn off balancer s.stopBalancer(); diff --git a/jstests/sharding/hash_shard_num_chunks.js b/jstests/sharding/hash_shard_num_chunks.js index 19bf3066de2..b551ad53df2 100644 --- a/jstests/sharding/hash_shard_num_chunks.js +++ b/jstests/sharding/hash_shard_num_chunks.js @@ -9,7 +9,7 @@ var db = s.getDB(dbname); assert.commandWorked(db.adminCommand({enablesharding: dbname})); - s.ensurePrimaryShard(dbname, 'shard0001'); + s.ensurePrimaryShard(dbname, s.shard1.shardName); assert.commandWorked(db.adminCommand( {shardcollection: dbname + "." + coll, key: {a: "hashed"}, numInitialChunks: 500})); diff --git a/jstests/sharding/hash_skey_split.js b/jstests/sharding/hash_skey_split.js index fe8cef3e0d3..d6d1fcf5c84 100644 --- a/jstests/sharding/hash_skey_split.js +++ b/jstests/sharding/hash_skey_split.js @@ -5,17 +5,18 @@ var configDB = st.s.getDB('config'); assert.commandWorked(configDB.adminCommand({enableSharding: 'test'})); - st.ensurePrimaryShard('test', 'shard0001'); + st.ensurePrimaryShard('test', st.shard1.shardName); assert.commandWorked(configDB.adminCommand( {shardCollection: 'test.user', key: {x: 'hashed'}, numInitialChunks: 2})); - var metadata = - st.d0.getDB('admin').runCommand({getShardVersion: 'test.user', fullMetadata: true}); + var metadata = st.rs0.getPrimary().getDB('admin').runCommand( + {getShardVersion: 'test.user', fullMetadata: true}); var chunks = metadata.metadata.chunks.length > 0 ? metadata.metadata.chunks : metadata.metadata.pending; assert(bsonWoCompare(chunks[0][0], chunks[0][1]) < 0, tojson(metadata)); - metadata = st.d1.getDB('admin').runCommand({getShardVersion: 'test.user', fullMetadata: true}); + metadata = st.rs1.getPrimary().getDB('admin').runCommand( + {getShardVersion: 'test.user', fullMetadata: true}); chunks = metadata.metadata.chunks.length > 0 ? metadata.metadata.chunks : metadata.metadata.pending; assert(bsonWoCompare(chunks[0][0], chunks[0][1]) < 0, tojson(metadata)); diff --git a/jstests/sharding/idhack_sharded.js b/jstests/sharding/idhack_sharded.js index 4767e579c75..6b9716ea608 100644 --- a/jstests/sharding/idhack_sharded.js +++ b/jstests/sharding/idhack_sharded.js @@ -7,11 +7,11 @@ var coll = st.s0.getCollection("test.foo"); // Pre-split collection: shard 0 takes {x: {$lt: 0}}, shard 1 takes {x: {$gte: 0}}. 
// assert.commandWorked(coll.getDB().adminCommand({enableSharding: coll.getDB().getName()})); -st.ensurePrimaryShard(coll.getDB().toString(), "shard0000"); +st.ensurePrimaryShard(coll.getDB().toString(), st.shard0.shardName); assert.commandWorked(coll.getDB().adminCommand({shardCollection: coll.getFullName(), key: {x: 1}})); assert.commandWorked(coll.getDB().adminCommand({split: coll.getFullName(), middle: {x: 0}})); assert.commandWorked(coll.getDB().adminCommand( - {moveChunk: coll.getFullName(), find: {x: 0}, to: "shard0001", _waitForDelete: true})); + {moveChunk: coll.getFullName(), find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true})); // // Test that idhack queries with projections that remove the shard key return correct results. diff --git a/jstests/sharding/in_memory_sort_limit.js b/jstests/sharding/in_memory_sort_limit.js index cd907921489..328ae508f5a 100644 --- a/jstests/sharding/in_memory_sort_limit.js +++ b/jstests/sharding/in_memory_sort_limit.js @@ -6,7 +6,7 @@ var st = new ShardingTest({shards: 2}); assert.commandWorked(st.s.adminCommand({enableSharding: 'test'})); - st.ensurePrimaryShard('test', 'shard0000'); + st.ensurePrimaryShard('test', st.shard0.shardName); // Make sure that at least 1 chunk is on another shard so that mongos doesn't treat this as a // single-shard query (which doesn't exercise the bug) diff --git a/jstests/sharding/index1.js b/jstests/sharding/index1.js index 9b57c3e43f8..482137a9d03 100644 --- a/jstests/sharding/index1.js +++ b/jstests/sharding/index1.js @@ -16,7 +16,7 @@ if (i == 0) { s.adminCommand({enablesharding: "" + coll._db}); - s.ensurePrimaryShard(coll.getDB().getName(), 'shard0001'); + s.ensurePrimaryShard(coll.getDB().getName(), s.shard1.shardName); } print("\n\n\n\n\nTest # " + i); diff --git a/jstests/sharding/json_schema.js b/jstests/sharding/json_schema.js index 25c4bdc0882..1c24f427eed 100644 --- a/jstests/sharding/json_schema.js +++ b/jstests/sharding/json_schema.js @@ -23,11 +23,11 @@ assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {_id: 0}})); assert.commandWorked(testDB.adminCommand({split: coll.getFullName(), middle: {_id: 100}})); - // Move the [0, 100) and [100, MaxKey) chunks to shard0001. - assert.commandWorked( - testDB.adminCommand({moveChunk: coll.getFullName(), find: {_id: 50}, to: "shard0001"})); - assert.commandWorked( - testDB.adminCommand({moveChunk: coll.getFullName(), find: {_id: 150}, to: "shard0001"})); + // Move the [0, 100) and [100, MaxKey) chunks to st.shard1.shardName. + assert.commandWorked(testDB.adminCommand( + {moveChunk: coll.getFullName(), find: {_id: 50}, to: st.shard1.shardName})); + assert.commandWorked(testDB.adminCommand( + {moveChunk: coll.getFullName(), find: {_id: 150}, to: st.shard1.shardName})); // Write one document into each of the chunks. 
assert.writeOK(coll.insert({_id: -150, a: 1})); diff --git a/jstests/sharding/jumbo1.js b/jstests/sharding/jumbo1.js index 4aed86feeae..8bcaf621a51 100644 --- a/jstests/sharding/jumbo1.js +++ b/jstests/sharding/jumbo1.js @@ -4,7 +4,7 @@ var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableAutoSplit: true}}); assert.commandWorked(s.s0.adminCommand({enablesharding: "test"})); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}})); var db = s.getDB("test"); @@ -36,7 +36,8 @@ function diff1() { var x = s.chunkCounts("foo"); printjson(x); - return Math.max(x.shard0000, x.shard0001) - Math.min(x.shard0000, x.shard0001); + return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) - + Math.min(x[s.shard0.shardName], x[s.shard1.shardName]); } assert.soon(function() { @@ -49,7 +50,8 @@ // Check that the jumbo chunk did not move, which shouldn't be possible. var jumboChunk = s.getDB('config').chunks.findOne({ns: 'test.foo', min: {$lte: {x: 0}}, max: {$gt: {x: 0}}}); - assert.eq('shard0001', jumboChunk.shard, 'jumbo chunk ' + tojson(jumboChunk) + ' was moved'); + assert.eq( + s.shard1.shardName, jumboChunk.shard, 'jumbo chunk ' + tojson(jumboChunk) + ' was moved'); // TODO: SERVER-26531 Make sure that balancer marked the first chunk as jumbo. // Assumption: balancer favors moving the lowest valued chunk out of a shard. // assert(jumboChunk.jumbo, tojson(jumboChunk)); diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js index 285647bf3cd..c671e691f94 100644 --- a/jstests/sharding/key_many.js +++ b/jstests/sharding/key_many.js @@ -72,7 +72,7 @@ var s = new ShardingTest({name: "key_many", shards: 2}); assert.commandWorked(s.s0.adminCommand({enableSharding: 'test'})); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); var db = s.getDB('test'); var primary = s.getPrimaryShard("test").getDB("test"); diff --git a/jstests/sharding/key_string.js b/jstests/sharding/key_string.js index a71120e06c3..c3fc654bf11 100644 --- a/jstests/sharding/key_string.js +++ b/jstests/sharding/key_string.js @@ -3,7 +3,7 @@ var s = new ShardingTest({name: "keystring", shards: 2}); s.adminCommand({enablesharding: "test"}); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); s.adminCommand({shardcollection: "test.foo", key: {name: 1}}); primary = s.getPrimaryShard("test").getDB("test"); diff --git a/jstests/sharding/kill_op_overflow.js b/jstests/sharding/kill_op_overflow.js index 2d30799b9aa..6ca5c236bab 100644 --- a/jstests/sharding/kill_op_overflow.js +++ b/jstests/sharding/kill_op_overflow.js @@ -6,7 +6,7 @@ "use strict"; var st = new ShardingTest({name: "shard1", shards: 1, mongos: 1}); - assert.commandFailed( - st.s.getDB("admin").runCommand({killOp: 1, op: "shard0000:99999999999999999999999"})); + assert.commandFailed(st.s.getDB("admin").runCommand( + {killOp: 1, op: st.shard0.shardName + ":99999999999999999999999"})); st.stop(); })(); diff --git a/jstests/sharding/kill_pinned_cursor.js b/jstests/sharding/kill_pinned_cursor.js index f636f981082..0ba46f59b8f 100644 --- a/jstests/sharding/kill_pinned_cursor.js +++ b/jstests/sharding/kill_pinned_cursor.js @@ -12,7 +12,8 @@ const kFailPointName = "waitAfterPinningCursorBeforeGetMoreBatch"; const kFailpointOptions = {shouldCheckForInterrupt: true}; - const st = new ShardingTest({shards: 2}); + // TODO: SERVER-33444 remove 
shardAsReplicaSet: false + const st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}}); const kDBName = "test"; const mongosDB = st.s.getDB(kDBName); const shard0DB = st.shard0.getDB(kDBName); diff --git a/jstests/sharding/large_chunk.js b/jstests/sharding/large_chunk.js index 1f224fc211a..786ac576a0d 100644 --- a/jstests/sharding/large_chunk.js +++ b/jstests/sharding/large_chunk.js @@ -20,7 +20,7 @@ // Turn on sharding on the 'test.foo' collection and generate a large chunk assert.commandWorked(s.s0.adminCommand({enablesharding: "test"})); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); var bigString = ""; while (bigString.length < 10000) { diff --git a/jstests/sharding/limit_push.js b/jstests/sharding/limit_push.js index 787576ba6ff..ec744207e97 100644 --- a/jstests/sharding/limit_push.js +++ b/jstests/sharding/limit_push.js @@ -14,7 +14,7 @@ // Shard the collection s.adminCommand({enablesharding: "test"}); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); s.adminCommand({shardcollection: "test.limit_push", key: {x: 1}}); // Now split the and move the data between the shards diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js index 61bbabd762a..d2035acdba2 100644 --- a/jstests/sharding/localhostAuthBypass.js +++ b/jstests/sharding/localhostAuthBypass.js @@ -92,7 +92,7 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true; var res = mongo.getDB("admin").runCommand({ moveChunk: "test.foo", find: {_id: 1}, - to: "shard0000" // Arbitrary shard. + to: st.shard0.shardName // Arbitrary shard. }); assert.commandFailedWithCode(res, authorizeErrorCode, "moveChunk"); assert.commandFailedWithCode(mongo.getDB("test").copyDatabase("admin", "admin2"), @@ -176,7 +176,7 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true; print("============ enabling sharding on test.foo."); mongo.getDB("admin").runCommand({enableSharding: "test"}); - shardingTest.ensurePrimaryShard('test', 'shard0001'); + shardingTest.ensurePrimaryShard('test', st.shard1.shardName); mongo.getDB("admin").runCommand({shardCollection: "test.foo", key: {_id: 1}}); var test = mongo.getDB("test"); @@ -186,6 +186,7 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true; }; var start = function() { + // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed. return new ShardingTest({ auth: "", shards: numShards, @@ -193,7 +194,8 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true; keyFile: keyfile, chunkSize: 1, useHostname: - false // Must use localhost to take advantage of the localhost auth bypass + false, // Must use localhost to take advantage of the localhost auth bypass + shardAsReplicaSet: false } }); }; diff --git a/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js b/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js index 3c89fd6b85c..bc69d734a78 100644 --- a/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js +++ b/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js @@ -26,7 +26,7 @@ assert.commandWorked(mongosDB.dropDatabase()); - // Enable sharding on the test DB and ensure its primary is shard0000. + // Enable sharding on the test DB and ensure its primary is st.shard0.shardName. 
assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()})); st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL()); diff --git a/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js b/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js index 613ca8f2b33..db8ac5ed31d 100644 --- a/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js +++ b/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js @@ -27,7 +27,7 @@ assert.commandWorked(mongosDB.dropDatabase()); - // Enable sharding on the test DB and ensure its primary is shard0000. + // Enable sharding on the test DB and ensure its primary is st.shard0.shardName. assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()})); st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL()); diff --git a/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js b/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js index 0f16c4928a1..a6dd9631dac 100644 --- a/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js +++ b/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js @@ -26,7 +26,7 @@ assert.commandWorked(mongosDB.dropDatabase()); - // Enable sharding on the test DB and ensure its primary is shard0000. + // Enable sharding on the test DB and ensure its primary is st.shard0.shardName. assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()})); st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL()); @@ -38,7 +38,7 @@ assert.commandWorked( mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}})); - // Move the [0, MaxKey) chunk to shard0001. + // Move the [0, MaxKey) chunk to st.shard1.shardName. assert.commandWorked(mongosDB.adminCommand( {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()})); @@ -78,7 +78,7 @@ // Split the [0, MaxKey) chunk into 2: [0, 500), [500, MaxKey). assert.commandWorked( mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 500}})); - // Move the [500, MaxKey) chunk back to shard0000. + // Move the [500, MaxKey) chunk back to st.shard0.shardName. 
assert.commandWorked(mongosDB.adminCommand( {moveChunk: mongosColl.getFullName(), find: {_id: 1000}, to: st.rs0.getURL()})); diff --git a/jstests/sharding/mapReduce_inSharded.js b/jstests/sharding/mapReduce_inSharded.js index 853cdb33bad..6737d5fec0c 100644 --- a/jstests/sharding/mapReduce_inSharded.js +++ b/jstests/sharding/mapReduce_inSharded.js @@ -14,7 +14,7 @@ {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}}); st.adminCommand({enablesharding: "mrShard"}); - st.ensurePrimaryShard('mrShard', 'shard0001'); + st.ensurePrimaryShard('mrShard', st.shard1.shardName); st.adminCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}}); var db = st.getDB("mrShard"); diff --git a/jstests/sharding/mapReduce_inSharded_outSharded.js b/jstests/sharding/mapReduce_inSharded_outSharded.js index d73eb517e98..d16fe3f9214 100644 --- a/jstests/sharding/mapReduce_inSharded_outSharded.js +++ b/jstests/sharding/mapReduce_inSharded_outSharded.js @@ -9,13 +9,17 @@ assert.eq(out.counts.output, 512, "output count is wrong"); }; - var st = new ShardingTest( - {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}}); + var st = new ShardingTest({ + shards: 2, + verbose: 1, + mongos: 1, + other: {chunkSize: 1, enableBalancer: true, shardAsReplicaSet: false} + }); var admin = st.s0.getDB('admin'); assert.commandWorked(admin.runCommand({enablesharding: "mrShard"})); - st.ensurePrimaryShard('mrShard', 'shard0001'); + st.ensurePrimaryShard('mrShard', st.shard1.shardName); assert.commandWorked( admin.runCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}})); diff --git a/jstests/sharding/mapReduce_nonSharded.js b/jstests/sharding/mapReduce_nonSharded.js index ad94004a68a..d194623c3e7 100644 --- a/jstests/sharding/mapReduce_nonSharded.js +++ b/jstests/sharding/mapReduce_nonSharded.js @@ -10,7 +10,7 @@ var st = new ShardingTest( {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}}); st.adminCommand({enablesharding: "mrShard"}); -st.ensurePrimaryShard('mrShard', 'shard0001'); +st.ensurePrimaryShard('mrShard', st.shard1.shardName); st.adminCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}}); var db = st.getDB("mrShard"); diff --git a/jstests/sharding/mapReduce_outSharded.js b/jstests/sharding/mapReduce_outSharded.js index 90452c0c186..3a95fb9aa65 100644 --- a/jstests/sharding/mapReduce_outSharded.js +++ b/jstests/sharding/mapReduce_outSharded.js @@ -6,11 +6,16 @@ var verifyOutput = function(out) { assert.eq(out.counts.output, 512, "output count is wrong"); }; -var st = new ShardingTest( - {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}}); +// TODO: SERVER-33444 remove shardAsReplicaSet: false +var st = new ShardingTest({ + shards: 2, + verbose: 1, + mongos: 1, + other: {chunkSize: 1, enableBalancer: true, shardAsReplicaSet: false} +}); st.adminCommand({enablesharding: "mrShard"}); -st.ensurePrimaryShard('mrShard', 'shard0001'); +st.ensurePrimaryShard('mrShard', st.shard1.shardName); st.adminCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}}); var db = st.getDB("mrShard"); diff --git a/jstests/sharding/max_time_ms_sharded.js b/jstests/sharding/max_time_ms_sharded.js index 0a66ce3bf60..353a3439642 100644 --- a/jstests/sharding/max_time_ms_sharded.js +++ b/jstests/sharding/max_time_ms_sharded.js @@ -39,11 +39,11 @@ // Pre-split collection: shard 0 takes {_id: {$lt: 0}}, shard 1 takes {_id: {$gte: 0}}. 
// assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()})); - st.ensurePrimaryShard(coll.getDB().toString(), "shard0000"); + st.ensurePrimaryShard(coll.getDB().toString(), st.shard0.shardName); assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {_id: 1}})); assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}})); assert.commandWorked( - admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: "shard0001"})); + admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: st.shard1.shardName})); // // Insert 100 documents into sharded collection, such that each shard owns 50. @@ -204,7 +204,7 @@ // res = admin.runCommand({ // moveChunk: coll.getFullName(), // find: {_id: 0}, - // to: "shard0000", + // to: st.shard0.shardName, // maxTimeMS: 1000 * 60 * 60 * 24 // }); // assert.commandFailed( @@ -217,7 +217,7 @@ // assert.commandWorked(admin.runCommand({ // moveChunk: coll.getFullName(), // find: {_id: 0}, - // to: "shard0000", + // to: st.shard0.shardName, // maxTimeMS: 1000 * 60 * 60 * 24 // }), // "expected moveChunk to not hit time limit in mongod"); diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js index 61328320e87..5cdc03d292e 100644 --- a/jstests/sharding/migrateBig.js +++ b/jstests/sharding/migrateBig.js @@ -6,7 +6,7 @@ assert.writeOK( s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true)); assert.commandWorked(s.s0.adminCommand({enablesharding: "test"})); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}})); var db = s.getDB("test"); diff --git a/jstests/sharding/migrateBig_balancer.js b/jstests/sharding/migrateBig_balancer.js index ed4b792c915..990b76019c0 100644 --- a/jstests/sharding/migrateBig_balancer.js +++ b/jstests/sharding/migrateBig_balancer.js @@ -7,8 +7,12 @@ (function() { "use strict"; - var st = - new ShardingTest({name: 'migrateBig_balancer', shards: 2, other: {enableBalancer: true}}); + // TODO: SERVER-33444 remove shardAsReplicaSet: false + var st = new ShardingTest({ + name: 'migrateBig_balancer', + shards: 2, + other: {enableBalancer: true, shardAsReplicaSet: false} + }); var mongos = st.s; var admin = mongos.getDB("admin"); @@ -16,7 +20,7 @@ var coll = db.getCollection("stuff"); assert.commandWorked(admin.runCommand({enablesharding: coll.getDB().getName()})); - st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001'); + st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName); var data = "x"; var nsq = 16; diff --git a/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js b/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js index 4bd880477ce..6af13213a46 100644 --- a/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js +++ b/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js @@ -5,7 +5,8 @@ (function() { "use strict"; - var st = new ShardingTest({shards: 1}); + // TODO: SERVER-33444 remove shardAsReplicaSet: false + var st = new ShardingTest({shards: 1, other: {shardAsReplicaSet: false}}); // Insert a recovery doc with non-zero minOpTimeUpdaters to simulate a migration // process that crashed in the middle of the critical section. 
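The shardAsReplicaSet workaround above repeats with the same shape wherever it appears in this patch; a minimal sketch of the pattern, assuming nothing beyond a default one-shard cluster, is:

    // TODO: SERVER-33444 remove shardAsReplicaSet: false
    var st = new ShardingTest({shards: 1, other: {shardAsReplicaSet: false}});

    // The shard runs as a standalone mongod, but its registered name is still
    // read from the fixture instead of being assumed to be "shard0000".
    jsTest.log("shard registered as: " + st.shard0.shardName);

    st.stop();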
@@ -13,7 +14,7 @@ var recoveryDoc = { _id: 'minOpTimeRecovery', configsvrConnectionString: st.configRS.getURL(), - shardName: 'shard0000', + shardName: st.shard0.shardName, minOpTime: {ts: Timestamp(0, 0), t: 0}, minOpTimeUpdaters: 2 }; diff --git a/jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js b/jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js index b28c3caed8b..b67f0bde73e 100644 --- a/jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js +++ b/jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js @@ -8,7 +8,7 @@ var testDB = st.s.getDB('test'); testDB.adminCommand({enableSharding: 'test'}); - st.ensurePrimaryShard('test', 'shard0000'); + st.ensurePrimaryShard('test', st.shard0.shardName); testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}); var priConn = st.configRS.getPrimary(); @@ -21,9 +21,9 @@ } }); - testDB.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: 'shard0001'}); + testDB.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName}); - var shardAdmin = st.d0.getDB('admin'); + var shardAdmin = st.rs0.getPrimary().getDB('admin'); var minOpTimeRecoveryDoc = shardAdmin.system.version.findOne({_id: 'minOpTimeRecovery'}); assert.neq(null, minOpTimeRecoveryDoc); diff --git a/jstests/sharding/mongos_shard_failure_tolerance.js b/jstests/sharding/mongos_shard_failure_tolerance.js index 0c189fd1a55..5443450f3bc 100644 --- a/jstests/sharding/mongos_shard_failure_tolerance.js +++ b/jstests/sharding/mongos_shard_failure_tolerance.js @@ -55,7 +55,7 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true; mongosConnIdle = new Mongo(st.s0.host); - MongoRunner.stopMongod(st.shard2); + st.rs2.stopSet(); jsTest.log("Testing active connection..."); @@ -99,8 +99,7 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true; mongosConnIdle = new Mongo(st.s0.host); - MongoRunner.stopMongod(st.shard1); - + st.rs1.stopSet(); jsTest.log("Testing active connection..."); assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1})); diff --git a/jstests/sharding/mongos_validate_writes.js b/jstests/sharding/mongos_validate_writes.js index 23994003be4..85b7dbb136f 100644 --- a/jstests/sharding/mongos_validate_writes.js +++ b/jstests/sharding/mongos_validate_writes.js @@ -19,7 +19,7 @@ var staleCollB = staleMongosB.getCollection(coll + ""); assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""})); - st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001'); + st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName); coll.ensureIndex({a: 1}); assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {a: 1}})); @@ -69,10 +69,11 @@ coll.ensureIndex({e: 1}); // Deletes need to be across two shards to trigger an error - this is probably an exceptional // case - st.ensurePrimaryShard(coll.getDB().getName(), 'shard0000'); + st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName); assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {e: 1}})); assert.commandWorked(admin.runCommand({split: coll + "", middle: {e: 0}})); - assert.commandWorked(admin.runCommand({moveChunk: coll + "", find: {e: 0}, to: "shard0001"})); + assert.commandWorked( + admin.runCommand({moveChunk: coll + "", find: {e: 0}, to: st.shard1.shardName})); // Make sure we can successfully remove, even though we have stale state assert.writeOK(coll.insert({e: "e"})); diff --git a/jstests/sharding/move_chunk_missing_idx.js 
b/jstests/sharding/move_chunk_missing_idx.js index 8125e8e6600..e52d95f722a 100644 --- a/jstests/sharding/move_chunk_missing_idx.js +++ b/jstests/sharding/move_chunk_missing_idx.js @@ -8,7 +8,7 @@ var st = new ShardingTest({shards: 2}); var testDB = st.s.getDB('test'); testDB.adminCommand({enableSharding: 'test'}); -st.ensurePrimaryShard(testDB.toString(), "shard0001"); +st.ensurePrimaryShard(testDB.toString(), st.shard1.shardName); testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}); // Test procedure: @@ -22,20 +22,22 @@ testDB.adminCommand({split: 'test.user', middle: {x: 0}}); testDB.adminCommand({split: 'test.user', middle: {x: 10}}); // Collection does not exist, no chunk, index missing case at destination case. -assert.commandWorked(testDB.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: 'shard0000'})); +assert.commandWorked( + testDB.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName})); // Drop index since last moveChunk created this. -st.d0.getDB('test').user.dropIndex({a: 1, b: 1}); +st.rs0.getPrimary().getDB('test').user.dropIndex({a: 1, b: 1}); // Collection exist but empty, index missing at destination case. -assert.commandWorked(testDB.adminCommand({moveChunk: 'test.user', find: {x: 10}, to: 'shard0000'})); +assert.commandWorked( + testDB.adminCommand({moveChunk: 'test.user', find: {x: 10}, to: st.shard0.shardName})); // Drop index since last moveChunk created this. -st.d0.getDB('test').user.dropIndex({a: 1, b: 1}); +st.rs0.getPrimary().getDB('test').user.dropIndex({a: 1, b: 1}); // Collection not empty, index missing at destination case. testDB.user.insert({x: 10}); assert.commandFailed( - testDB.adminCommand({moveChunk: 'test.user', find: {x: -10}, to: 'shard0000'})); + testDB.adminCommand({moveChunk: 'test.user', find: {x: -10}, to: st.shard0.shardName})); st.stop(); diff --git a/jstests/sharding/move_chunk_open_cursors.js b/jstests/sharding/move_chunk_open_cursors.js index a821aa0d549..fe0942f0558 100644 --- a/jstests/sharding/move_chunk_open_cursors.js +++ b/jstests/sharding/move_chunk_open_cursors.js @@ -18,7 +18,7 @@ assert.writeOK(bulk.execute()); // Make sure we know which shard will host the data to begin. 
- st.ensurePrimaryShard(dbName, "shard0000"); + st.ensurePrimaryShard(dbName, st.shard0.shardName); assert.commandWorked(st.admin.runCommand({enableSharding: dbName})); assert.commandWorked(st.admin.runCommand({shardCollection: testNs, key: {_id: 1}})); @@ -32,7 +32,7 @@ const aggCursor = new DBCommandCursor(coll.getDB(), aggResponse, getMoreBatchSize); assert(st.adminCommand({split: testNs, middle: {_id: nDocs / 2}})); - assert(st.adminCommand({moveChunk: testNs, find: {_id: nDocs - 1}, to: "shard0001"})); + assert(st.adminCommand({moveChunk: testNs, find: {_id: nDocs - 1}, to: st.shard1.shardName})); assert.eq( aggCursor.itcount(), @@ -44,7 +44,7 @@ coll.runCommand({find: collName, filter: {}, batchSize: getMoreBatchSize})); const findCursor = new DBCommandCursor(coll.getDB(), findResponse, getMoreBatchSize); assert(st.adminCommand({split: testNs, middle: {_id: nDocs / 4}})); - assert(st.adminCommand({moveChunk: testNs, find: {_id: 0}, to: "shard0001"})); + assert(st.adminCommand({moveChunk: testNs, find: {_id: 0}, to: st.shard1.shardName})); assert.eq( findCursor.itcount(), diff --git a/jstests/sharding/move_stale_mongos.js b/jstests/sharding/move_stale_mongos.js index 91647dbb3da..ab0643b128b 100644 --- a/jstests/sharding/move_stale_mongos.js +++ b/jstests/sharding/move_stale_mongos.js @@ -2,13 +2,14 @@ // Tests that stale mongoses can properly move chunks. // -var st = new ShardingTest({shards: 2, mongos: 2}); +// TODO: SERVER-33444 remove shardAsReplicaSet: false +var st = new ShardingTest({shards: 2, mongos: 2, other: {shardAsReplicaSet: false}}); var admin = st.s0.getDB('admin'); var testDb = 'test'; var testNs = 'test.foo'; assert.commandWorked(admin.runCommand({enableSharding: testDb})); -st.ensurePrimaryShard(testDb, st.shard0.shardName); +st.ensurePrimaryShard(testDb, st.shard0.name); assert.commandWorked(admin.runCommand({shardCollection: testNs, key: {_id: 1}})); var curShardIndex = 0; @@ -16,7 +17,7 @@ for (var i = 0; i < 100; i += 10) { assert.commandWorked(st.s0.getDB('admin').runCommand({split: testNs, middle: {_id: i}})); st.configRS.awaitLastOpCommitted(); // Ensure that other mongos sees the split var nextShardIndex = (curShardIndex + 1) % 2; - var toShard = (nextShardIndex == 0) ? st.shard0.shardName : st.shard1.shardName; + var toShard = (nextShardIndex == 0) ? 
st.shard0.name : st.shard1.name; assert.commandWorked(st.s1.getDB('admin').runCommand( {moveChunk: testNs, find: {_id: i + 5}, to: toShard, _waitForDelete: true})); curShardIndex = nextShardIndex; diff --git a/jstests/sharding/movechunk_include.js b/jstests/sharding/movechunk_include.js index 7c20f0d675f..285ec587682 100644 --- a/jstests/sharding/movechunk_include.js +++ b/jstests/sharding/movechunk_include.js @@ -6,7 +6,7 @@ function setupMoveChunkTest(st) { var testcoll = testdb.foo; st.adminCommand({enablesharding: "test"}); - st.ensurePrimaryShard('test', 'shard0001'); + st.ensurePrimaryShard('test', st.shard1.shardName); st.adminCommand({shardcollection: "test.foo", key: {_id: 1}}); var str = ""; @@ -33,6 +33,7 @@ function setupMoveChunkTest(st) { break; } } + var result = st.adminCommand({ movechunk: "test.foo", find: {_id: 1}, diff --git a/jstests/sharding/movechunk_parallel.js b/jstests/sharding/movechunk_parallel.js index 8e54d0ab551..37fddce75f4 100644 --- a/jstests/sharding/movechunk_parallel.js +++ b/jstests/sharding/movechunk_parallel.js @@ -27,7 +27,7 @@ load('./jstests/libs/chunk_manipulation_util.js'); assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 20})); assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 30})); - // Move two of the chunks to shard0001 so we have option to do parallel balancing + // Move two of the chunks to st.shard1.shardName so we have option to do parallel balancing assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 20}, st.shard1.shardName)); assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 30}, st.shard1.shardName)); diff --git a/jstests/sharding/movechunk_with_moveParanoia.js b/jstests/sharding/movechunk_with_moveParanoia.js index 740878507b2..18a2f31658f 100644 --- a/jstests/sharding/movechunk_with_moveParanoia.js +++ b/jstests/sharding/movechunk_with_moveParanoia.js @@ -1,10 +1,17 @@ /** * This test sets moveParanoia flag and then check that the directory is created with the moved data */ + +// TODO: SERVER-33444 remove shardAsReplicaSet: false var st = new ShardingTest({ shards: 2, mongos: 1, - other: {chunkSize: 1, enableAutoSplit: true, shardOptions: {moveParanoia: ""}} + other: { + chunkSize: 1, + enableAutoSplit: true, + shardOptions: {moveParanoia: ""}, + shardAsReplicaSet: false + } }); load("jstests/sharding/movechunk_include.js"); diff --git a/jstests/sharding/moveprimary_ignore_sharded.js b/jstests/sharding/moveprimary_ignore_sharded.js index 8bad709bd1d..69e6ed3b8e4 100644 --- a/jstests/sharding/moveprimary_ignore_sharded.js +++ b/jstests/sharding/moveprimary_ignore_sharded.js @@ -23,9 +23,9 @@ assert.writeOK(mongosA.getCollection("bar.coll2").insert({hello: "world"})); // Enable sharding printjson(adminA.runCommand({enableSharding: "foo"})); -st.ensurePrimaryShard('foo', 'shard0001'); +st.ensurePrimaryShard('foo', st.shard1.shardName); printjson(adminA.runCommand({enableSharding: "bar"})); -st.ensurePrimaryShard('bar', 'shard0000'); +st.ensurePrimaryShard('bar', st.shard0.shardName); // Setup three collections per-db // 0 : not sharded diff --git a/jstests/sharding/mrShardedOutput.js b/jstests/sharding/mrShardedOutput.js index b7bbcb1f344..b3981cacb36 100644 --- a/jstests/sharding/mrShardedOutput.js +++ b/jstests/sharding/mrShardedOutput.js @@ -4,7 +4,8 @@ // collection input twice the size of the first and outputs it to the new sharded // collection created in the first pass. 
-var st = new ShardingTest({shards: 2, other: {chunkSize: 1}}); +// TODO: SERVER-33444 remove shardAsReplicaSet: false +var st = new ShardingTest({shards: 2, other: {chunkSize: 1, shardAsReplicaSet: false}}); var config = st.getDB("config"); st.adminCommand({enablesharding: "test"}); diff --git a/jstests/sharding/mrShardedOutputAuth.js b/jstests/sharding/mrShardedOutputAuth.js index 93164e5e128..3dc02da7072 100644 --- a/jstests/sharding/mrShardedOutputAuth.js +++ b/jstests/sharding/mrShardedOutputAuth.js @@ -36,8 +36,13 @@ assert.eq(outputDb.numbers_out.count(), 0, "map/reduce should not have succeeded"); } - var st = new ShardingTest( - {name: "mrShardedOutputAuth", shards: 1, mongos: 1, other: {keyFile: 'jstests/libs/key1'}}); + // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed. + var st = new ShardingTest({ + name: "mrShardedOutputAuth", + shards: 1, + mongos: 1, + other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false} + }); // Setup the users to the input, output and admin databases var mongos = st.s; diff --git a/jstests/sharding/mr_and_agg_versioning.js b/jstests/sharding/mr_and_agg_versioning.js index e2d1c6f7869..765ba02ae16 100644 --- a/jstests/sharding/mr_and_agg_versioning.js +++ b/jstests/sharding/mr_and_agg_versioning.js @@ -10,7 +10,7 @@ var numKeys = 1000; st.s.adminCommand({enableSharding: dbName}); - st.ensurePrimaryShard(dbName, 'shard0000'); + st.ensurePrimaryShard(dbName, st.shard0.shardName); st.s.adminCommand({shardCollection: collName, key: {key: 1}}); // Load chunk data to the stale mongoses before moving a chunk @@ -20,7 +20,7 @@ staleMongos2.getCollection(collName).find().itcount(); st.s.adminCommand({split: collName, middle: {key: numKeys / 2}}); - st.s.adminCommand({moveChunk: collName, find: {key: 0}, to: 'shard0001'}); + st.s.adminCommand({moveChunk: collName, find: {key: 0}, to: st.shard1.shardName}); var bulk = st.s.getCollection(collName).initializeUnorderedBulkOp(); for (var i = 0; i < numDocs; i++) { diff --git a/jstests/sharding/mr_noscripting.js b/jstests/sharding/mr_noscripting.js index 6bf196c587e..d5781e8fcea 100644 --- a/jstests/sharding/mr_noscripting.js +++ b/jstests/sharding/mr_noscripting.js @@ -3,7 +3,8 @@ var shardOpts = [ {} // just use default params ]; -var st = new ShardingTest({shards: shardOpts, other: {nopreallocj: 1}}); +// TODO: SERVER-33444 remove shardAsReplicaSet: false +var st = new ShardingTest({shards: shardOpts, other: {nopreallocj: 1, shardAsReplicaSet: false}}); var mongos = st.s; st.shardColl('bar', {x: 1}); diff --git a/jstests/sharding/multi_mongos2.js b/jstests/sharding/multi_mongos2.js index 360bb629471..aef63e6b684 100644 --- a/jstests/sharding/multi_mongos2.js +++ b/jstests/sharding/multi_mongos2.js @@ -5,7 +5,7 @@ var st = new ShardingTest({shards: 2, mongos: 2}); assert.commandWorked(st.s0.adminCommand({enablesharding: "test"})); - st.ensurePrimaryShard('test', 'shard0001'); + st.ensurePrimaryShard('test', st.shard1.shardName); assert.commandWorked(st.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}})); // Test queries diff --git a/jstests/sharding/multi_mongos2a.js b/jstests/sharding/multi_mongos2a.js index e76dd547e85..f0e7a104e60 100644 --- a/jstests/sharding/multi_mongos2a.js +++ b/jstests/sharding/multi_mongos2a.js @@ -5,7 +5,7 @@ var st = new ShardingTest({shards: 2, mongos: 2}); assert.commandWorked(st.s0.adminCommand({enablesharding: "test"})); - st.ensurePrimaryShard('test', 'shard0001'); + st.ensurePrimaryShard('test', st.shard1.shardName); 
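The hardcoded 'shard0000'/'shard0001' strings throughout this patch give way to names resolved from the test fixture; a short sketch of that pattern, assuming only a plain two-shard ShardingTest, looks like:

    var s = new ShardingTest({shards: 2});

    assert.commandWorked(s.s0.adminCommand({enablesharding: 'test'}));
    // Resolve the shard id from the fixture instead of assuming 'shard0001'.
    s.ensurePrimaryShard('test', s.shard1.shardName);
    assert.commandWorked(s.s0.adminCommand({shardcollection: 'test.foo', key: {_id: 1}}));

    // moveChunk targets are resolved the same way.
    assert.commandWorked(s.s0.adminCommand({split: 'test.foo', middle: {_id: 0}}));
    assert.commandWorked(
        s.s0.adminCommand({moveChunk: 'test.foo', find: {_id: 1}, to: s.shard0.shardName}));

    s.stop();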
assert.commandWorked(st.s0.adminCommand({shardcollection: "test.foo", key: {num: 1}})); assert.writeOK(st.s0.getDB('test').existing.insert({_id: 1})); diff --git a/jstests/sharding/noUpdateButN1inAnotherCollection.js b/jstests/sharding/noUpdateButN1inAnotherCollection.js index a95ee5924a0..910717b811f 100644 --- a/jstests/sharding/noUpdateButN1inAnotherCollection.js +++ b/jstests/sharding/noUpdateButN1inAnotherCollection.js @@ -17,8 +17,8 @@ ns2 = "test.coll2"; adminSA = mongosA.getDB("admin"); adminSA.runCommand({enableSharding: "test"}); -adminSA.runCommand({moveprimary: "test", to: "shard0000"}); -adminSA.runCommand({moveprimary: "test2", to: "shard0001"}); +adminSA.runCommand({moveprimary: "test", to: s.shard0.shardName}); +adminSA.runCommand({moveprimary: "test2", to: s.shard1.shardName}); adminSA.runCommand({shardCollection: ns, key: {_id: 1}}); @@ -43,7 +43,7 @@ for (var i = 1; i < numDocs; i++) { debug("Inserted docs, now split chunks"); adminSA.runCommand({split: ns, find: {_id: 3}}); -adminSA.runCommand({movechunk: ns, find: {_id: 10}, to: "shard0001"}); +adminSA.runCommand({movechunk: ns, find: {_id: 10}, to: s.shard1.shardName}); var command = 'printjson(db.coll.update({ _id: 9 }, { $set: { a: "9" }}, true));'; diff --git a/jstests/sharding/parallel.js b/jstests/sharding/parallel.js index e9bd4c1f3ac..8247736793b 100644 --- a/jstests/sharding/parallel.js +++ b/jstests/sharding/parallel.js @@ -6,7 +6,7 @@ var s = new ShardingTest({name: "parallel", shards: numShards, mongos: 2}); s.adminCommand({enablesharding: "test"}); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); s.adminCommand({shardcollection: "test.foo", key: {_id: 1}}); var db = s.getDB("test"); diff --git a/jstests/sharding/prefix_shard_key.js b/jstests/sharding/prefix_shard_key.js index 71a7ef03090..d633669e28b 100644 --- a/jstests/sharding/prefix_shard_key.js +++ b/jstests/sharding/prefix_shard_key.js @@ -9,14 +9,15 @@ (function() { 'use strict'; - var s = new ShardingTest({shards: 2}); + // TODO: SERVER-33444 remove shardAsReplicaSet: false + var s = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}}); var db = s.getDB("test"); var admin = s.getDB("admin"); var config = s.getDB("config"); assert.commandWorked(s.s0.adminCommand({enablesharding: "test"})); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); //******************Part 1******************** diff --git a/jstests/sharding/presplit.js b/jstests/sharding/presplit.js index de9efe410a0..ec71924fc53 100644 --- a/jstests/sharding/presplit.js +++ b/jstests/sharding/presplit.js @@ -3,7 +3,7 @@ var s = new ShardingTest({name: "presplit", shards: 2, mongos: 1, other: {chunkSize: 1}}); s.adminCommand({enablesharding: "test"}); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); // Insert enough data in 'test.foo' to fill several chunks, if it was sharded.
bigString = ""; diff --git a/jstests/sharding/query_after_multi_write.js b/jstests/sharding/query_after_multi_write.js index a952484435c..4cfff22be4a 100644 --- a/jstests/sharding/query_after_multi_write.js +++ b/jstests/sharding/query_after_multi_write.js @@ -12,19 +12,19 @@ testDB.dropDatabase(); assert.commandWorked(testDB.adminCommand({enableSharding: 'test'})); - st.ensurePrimaryShard('test', 'shard0000'); + st.ensurePrimaryShard('test', st.shard0.shardName); assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}})); assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {x: 0}})); assert.commandWorked(testDB.adminCommand( - {moveChunk: 'test.user', find: {x: 0}, to: 'shard0001', _waitForDelete: true})); + {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true})); var testDB2 = st.s1.getDB('test'); testDB2.user.insert({x: 123456}); // Move chunk to bump version on a different mongos. assert.commandWorked(testDB.adminCommand( - {moveChunk: 'test.user', find: {x: 0}, to: 'shard0000', _waitForDelete: true})); + {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true})); // Issue a query and make sure it gets routed to the right shard. assert.neq(null, testDB2.user.findOne({x: 123456})); @@ -32,7 +32,7 @@ // At this point, s1 thinks the version of 'test.user' is 2, bounce it again so it gets // incremented to 3 assert.commandWorked(testDB.adminCommand( - {moveChunk: 'test.user', find: {x: 0}, to: 'shard0001', _waitForDelete: true})); + {moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName, _waitForDelete: true})); // Issue a query and make sure it gets routed to the right shard again. assert.neq(null, testDB2.user.findOne({x: 123456})); @@ -40,7 +40,7 @@ // At this point, s0 thinks the version of 'test.user' is 3, bounce it again so it gets // incremented to 4 assert.commandWorked(testDB.adminCommand( - {moveChunk: 'test.user', find: {x: 0}, to: 'shard0000', _waitForDelete: true})); + {moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName, _waitForDelete: true})); // Ensure that write commands with multi version do not reset the connection shard version // to diff --git a/jstests/sharding/remove1.js b/jstests/sharding/remove1.js index 27f6e1e4a75..d810c9380a0 100644 --- a/jstests/sharding/remove1.js +++ b/jstests/sharding/remove1.js @@ -5,7 +5,7 @@ var config = s.s0.getDB('config'); assert.commandWorked(s.s0.adminCommand({enableSharding: 'needToMove'})); - s.ensurePrimaryShard('needToMove', 'shard0000'); + s.ensurePrimaryShard('needToMove', s.shard0.shardName); // Returns an error when trying to remove a shard that doesn't exist. 
assert.commandFailedWithCode(s.s0.adminCommand({removeshard: "shardz"}), @@ -13,12 +13,12 @@ // First remove puts in draining mode, the second tells me a db needs to move, the third // actually removes - assert.commandWorked(s.s0.adminCommand({removeshard: "shard0000"})); + assert.commandWorked(s.s0.adminCommand({removeshard: s.shard0.shardName})); // Can't have more than one draining shard at a time - assert.commandFailedWithCode(s.s0.adminCommand({removeshard: "shard0001"}), + assert.commandFailedWithCode(s.s0.adminCommand({removeshard: s.shard1.shardName}), ErrorCodes.ConflictingOperationInProgress); - assert.eq(s.s0.adminCommand({removeshard: "shard0000"}).dbsToMove, + assert.eq(s.s0.adminCommand({removeshard: s.shard0.shardName}).dbsToMove, ['needToMove'], "didn't show db to move"); @@ -28,7 +28,7 @@ // removed s.awaitBalancerRound(); - var removeResult = assert.commandWorked(s.s0.adminCommand({removeshard: "shard0000"})); + var removeResult = assert.commandWorked(s.s0.adminCommand({removeshard: s.shard0.shardName})); assert.eq('completed', removeResult.state, 'Shard was not removed: ' + tojson(removeResult)); var existingShards = config.shards.find({}).toArray(); @@ -36,7 +36,7 @@ existingShards.length, "Removed server still appears in count: " + tojson(existingShards)); - assert.commandFailed(s.s0.adminCommand({removeshard: "shard0001"})); + assert.commandFailed(s.s0.adminCommand({removeshard: s.shard1.shardName})); // Should create a shard0002 shard var conn = MongoRunner.runMongod({}); diff --git a/jstests/sharding/remove3.js b/jstests/sharding/remove3.js index fdbaeb4d142..ab066f92f9d 100644 --- a/jstests/sharding/remove3.js +++ b/jstests/sharding/remove3.js @@ -5,7 +5,7 @@ var st = new ShardingTest({name: "remove_shard3", shards: 2, mongos: 2}); assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'})); - st.ensurePrimaryShard('TestDB', 'shard0000'); + st.ensurePrimaryShard('TestDB', st.shard0.shardName); assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll', key: {_id: 1}})); assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll', middle: {_id: 0}})); @@ -14,25 +14,25 @@ st.s0.getDB('TestDB').Coll.insert({_id: 1, value: 'Positive value'}); assert.commandWorked(st.s0.adminCommand( - {moveChunk: 'TestDB.Coll', find: {_id: 1}, to: 'shard0001', _waitForDelete: true})); + {moveChunk: 'TestDB.Coll', find: {_id: 1}, to: st.shard1.shardName, _waitForDelete: true})); // Make sure both mongos instances know of the latest metadata assert.eq(2, st.s0.getDB('TestDB').Coll.find({}).toArray().length); assert.eq(2, st.s1.getDB('TestDB').Coll.find({}).toArray().length); - // Remove shard0001 + // Remove st.shard1.shardName var removeRes; - removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: 'shard0001'})); + removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: st.shard1.shardName})); assert.eq('started', removeRes.state); - removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: 'shard0001'})); + removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: st.shard1.shardName})); assert.eq('ongoing', removeRes.state); - // Move the one chunk off shard0001 + // Move the one chunk off st.shard1.shardName assert.commandWorked(st.s0.adminCommand( - {moveChunk: 'TestDB.Coll', find: {_id: 1}, to: 'shard0000', _waitForDelete: true})); + {moveChunk: 'TestDB.Coll', find: {_id: 1}, to: st.shard0.shardName, _waitForDelete: true})); // Remove shard must succeed now - removeRes = 
assert.commandWorked(st.s0.adminCommand({removeShard: 'shard0001'})); + removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: st.shard1.shardName})); assert.eq('completed', removeRes.state); // Make sure both mongos instance refresh their metadata and do not reference the missing shard diff --git a/jstests/sharding/resume_change_stream.js b/jstests/sharding/resume_change_stream.js index 41a2426796a..e9a83adb561 100644 --- a/jstests/sharding/resume_change_stream.js +++ b/jstests/sharding/resume_change_stream.js @@ -34,7 +34,7 @@ assert.commandWorked(mongosDB.dropDatabase()); - // Enable sharding on the test DB and ensure its primary is shard0000. + // Enable sharding on the test DB and ensure its primary is st.shard0.shardName. assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()})); st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL()); @@ -46,7 +46,7 @@ assert.commandWorked( mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}})); - // Move the [0, MaxKey] chunk to shard0001. + // Move the [0, MaxKey] chunk to st.shard1.shardName. assert.commandWorked(mongosDB.adminCommand( {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()})); @@ -143,7 +143,7 @@ assert.commandWorked( mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {shardKey: 50}})); - // Move the [50, MaxKey] chunk to shard0001. + // Move the [50, MaxKey] chunk to st.shard1.shardName. assert.commandWorked(mongosDB.adminCommand( {moveChunk: mongosColl.getFullName(), find: {shardKey: 51}, to: st.rs1.getURL()})); diff --git a/jstests/sharding/return_partial_shards_down.js b/jstests/sharding/return_partial_shards_down.js index c3e0f3f2e27..37ecb758653 100644 --- a/jstests/sharding/return_partial_shards_down.js +++ b/jstests/sharding/return_partial_shards_down.js @@ -5,7 +5,9 @@ // Checking UUID consistency involves talking to shards, but this test shuts down shards. 
TestData.skipCheckingUUIDsConsistentAcrossCluster = true; -var st = new ShardingTest({shards: 3, mongos: 1, other: {mongosOptions: {verbose: 2}}}); +// TODO: SERVER-33444 remove shardAsReplicaSet: false +var st = new ShardingTest( + {shards: 3, mongos: 1, other: {mongosOptions: {verbose: 2}, shardAsReplicaSet: false}}); // Stop balancer, we're doing our own manual chunk distribution st.stopBalancer(); diff --git a/jstests/sharding/server_status.js b/jstests/sharding/server_status.js index ebc7fb6e42b..770300174b8 100644 --- a/jstests/sharding/server_status.js +++ b/jstests/sharding/server_status.js @@ -37,7 +37,7 @@ var mongosServerStatus = testDB.adminCommand({serverStatus: 1}); checkShardingServerStatus(mongosServerStatus); - var mongodServerStatus = st.d0.getDB('admin').runCommand({serverStatus: 1}); + var mongodServerStatus = st.rs0.getPrimary().getDB('admin').runCommand({serverStatus: 1}); checkShardingServerStatus(mongodServerStatus); st.stop(); diff --git a/jstests/sharding/shard1.js b/jstests/sharding/shard1.js index de962489afe..a5dfd4aca1c 100644 --- a/jstests/sharding/shard1.js +++ b/jstests/sharding/shard1.js @@ -20,7 +20,7 @@ assert.commandFailed(s.s0.adminCommand(shardCommand)); assert.commandWorked(s.s0.adminCommand({enablesharding: "test"})); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); assert.eq(3, db.foo.find().length(), "after partitioning count failed"); diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js index a380e0b10c2..b0a2c0c72c8 100644 --- a/jstests/sharding/shard3.js +++ b/jstests/sharding/shard3.js @@ -8,7 +8,7 @@ db = s.getDB("test"); s.adminCommand({enablesharding: "test"}); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); s.adminCommand({shardcollection: "test.foo", key: {num: 1}}); // Ensure that the second mongos will see the movePrimary @@ -163,7 +163,7 @@ // ---- retry commands SERVER-1471 ---- s.adminCommand({enablesharding: "test2"}); - s.ensurePrimaryShard('test2', 'shard0000'); + s.ensurePrimaryShard('test2', s.shard0.shardName); s.adminCommand({shardcollection: "test2.foo", key: {num: 1}}); dba = s.getDB("test2"); dbb = s2.getDB("test2"); diff --git a/jstests/sharding/shard4.js b/jstests/sharding/shard4.js index 76b9394cb19..b43bd702555 100644 --- a/jstests/sharding/shard4.js +++ b/jstests/sharding/shard4.js @@ -5,7 +5,7 @@ s = new ShardingTest({name: "shard4", shards: 2, mongos: 2}); s2 = s._mongos[1]; s.adminCommand({enablesharding: "test"}); -s.ensurePrimaryShard('test', 'shard0001'); +s.ensurePrimaryShard('test', s.shard1.shardName); s.adminCommand({shardcollection: "test.foo", key: {num: 1}}); if (s.configRS) { // Ensure that the second mongos will see the movePrimary diff --git a/jstests/sharding/shard5.js b/jstests/sharding/shard5.js index c4f05d610cd..e165c8ae5d0 100644 --- a/jstests/sharding/shard5.js +++ b/jstests/sharding/shard5.js @@ -8,7 +8,7 @@ s.stopBalancer(); s2 = s._mongos[1]; s.adminCommand({enablesharding: "test"}); -s.ensurePrimaryShard('test', 'shard0001'); +s.ensurePrimaryShard('test', s.shard1.shardName); s.adminCommand({shardcollection: "test.foo", key: {num: 1}}); if (s.configRS) { // Ensure that the second mongos will see the movePrimary diff --git a/jstests/sharding/shard6.js b/jstests/sharding/shard6.js index 03c4b9523bf..5fa07ea1301 100644 --- a/jstests/sharding/shard6.js +++ b/jstests/sharding/shard6.js @@ -6,7 +6,7 @@ var s = new ShardingTest({name: "shard6", shards: 2}); s.adminCommand({enablesharding: 
"test"}); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); s.adminCommand({shardcollection: "test.data", key: {num: 1}}); var version = s.getDB("admin").runCommand({buildinfo: 1}).versionArray; diff --git a/jstests/sharding/shard7.js b/jstests/sharding/shard7.js index 0f7f7acc28b..5ec1801b7c4 100644 --- a/jstests/sharding/shard7.js +++ b/jstests/sharding/shard7.js @@ -8,7 +8,7 @@ c = db['foo']; c.drop(); s.adminCommand({enablesharding: '' + db}); -s.ensurePrimaryShard(db.getName(), 'shard0001'); +s.ensurePrimaryShard(db.getName(), s.shard1.shardName); s.adminCommand({shardcollection: '' + c, key: {a: 1, b: 1}}); // Check query operation with some satisfiable and unsatisfiable queries. diff --git a/jstests/sharding/shard_config_db_collections.js b/jstests/sharding/shard_config_db_collections.js index d666f3c8d43..3a2e3f0590e 100644 --- a/jstests/sharding/shard_config_db_collections.js +++ b/jstests/sharding/shard_config_db_collections.js @@ -21,7 +21,7 @@ assert.eq(0, config.databases.count({"_id": "config"})); // Test that you cannot set the primary shard for config (not even to 'config') - assert.commandFailed(admin.runCommand({movePrimary: 'config', to: 'shard0000'})); + assert.commandFailed(admin.runCommand({movePrimary: 'config', to: st.shard0.shardName})); assert.commandFailed(admin.runCommand({movePrimary: 'config', to: 'config'})); st.stop(); @@ -79,10 +79,10 @@ // Insertion and retrieval assert.commandWorked(st.splitAt("config.sharded", {_id: 0})); - assert.commandWorked( - admin.runCommand({moveChunk: "config.sharded", find: {_id: -10}, to: "shard0000"})); - assert.commandWorked( - admin.runCommand({moveChunk: "config.sharded", find: {_id: 10}, to: "shard0001"})); + assert.commandWorked(admin.runCommand( + {moveChunk: "config.sharded", find: {_id: -10}, to: st.shard0.shardName})); + assert.commandWorked(admin.runCommand( + {moveChunk: "config.sharded", find: {_id: 10}, to: st.shard1.shardName})); assert.writeOK(config.sharded.insert({_id: -10})); assert.writeOK(config.sharded.insert({_id: 10})); @@ -135,10 +135,10 @@ assertNoChunksOnConfig(); // Test that we can move chunks between two non-config shards - assert.commandWorked( - admin.runCommand({moveChunk: "config.sharded", find: {_id: 40}, to: "shard0001"})); - assert.commandWorked( - admin.runCommand({moveChunk: "config.sharded", find: {_id: 40}, to: "shard0000"})); + assert.commandWorked(admin.runCommand( + {moveChunk: "config.sharded", find: {_id: 40}, to: st.shard1.shardName})); + assert.commandWorked(admin.runCommand( + {moveChunk: "config.sharded", find: {_id: 40}, to: st.shard0.shardName})); assertNoChunksOnConfig(); st.stop(); diff --git a/jstests/sharding/shard_existing.js b/jstests/sharding/shard_existing.js index 754d2a8bea8..1c8415662a7 100644 --- a/jstests/sharding/shard_existing.js +++ b/jstests/sharding/shard_existing.js @@ -24,7 +24,7 @@ assert.lte(totalSize, dataSize); s.adminCommand({enablesharding: "test"}); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); var res = s.adminCommand({shardcollection: "test.data", key: {_id: 1}}); printjson(res); diff --git a/jstests/sharding/shard_key_immutable.js b/jstests/sharding/shard_key_immutable.js index bbc2a259e51..6e8c787393b 100644 --- a/jstests/sharding/shard_key_immutable.js +++ b/jstests/sharding/shard_key_immutable.js @@ -45,7 +45,7 @@ var st = new ShardingTest({shards: 2}); st.adminCommand({enablesharding: "test"}); -st.ensurePrimaryShard('test', 'shard0001'); 
+st.ensurePrimaryShard('test', st.shard1.shardName); st.adminCommand({shardcollection: "test.col0", key: {a: 1, b: 1}}); st.adminCommand({shardcollection: "test.col1", key: {'x.a': 1}}); diff --git a/jstests/sharding/shard_keycount.js b/jstests/sharding/shard_keycount.js index ae4c1d58574..78ac1c3fb6f 100644 --- a/jstests/sharding/shard_keycount.js +++ b/jstests/sharding/shard_keycount.js @@ -16,7 +16,7 @@ // Enable sharding on DB assert.commandWorked(s.s0.adminCommand({enablesharding: dbName})); - s.ensurePrimaryShard(dbName, 'shard0001'); + s.ensurePrimaryShard(dbName, s.shard1.shardName); // Enable sharding on collection assert.commandWorked(s.s0.adminCommand({shardcollection: ns, key: {_id: 1}})); diff --git a/jstests/sharding/shard_kill_and_pooling.js b/jstests/sharding/shard_kill_and_pooling.js index aba43f8e2e7..b7386f63f88 100644 --- a/jstests/sharding/shard_kill_and_pooling.js +++ b/jstests/sharding/shard_kill_and_pooling.js @@ -53,7 +53,7 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true; var exitCode = killWith === 9 ? MongoRunner.EXIT_SIGKILL : MongoRunner.EXIT_CLEAN; - MongoRunner.stopMongod(st.shard0, killWith, {allowedExitCode: exitCode}); + st.rs0.stopSet(killWith, false, {allowedExitCode: exitCode}); jsTest.log("Restart shard..."); diff --git a/jstests/sharding/shard_targeting.js b/jstests/sharding/shard_targeting.js index f1d52768f3c..62d0f3fa88e 100644 --- a/jstests/sharding/shard_targeting.js +++ b/jstests/sharding/shard_targeting.js @@ -8,7 +8,7 @@ var s = new ShardingTest({shards: 2}); assert.commandWorked(s.s0.adminCommand({enablesharding: "test"})); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); var db = s.getDB("test"); var res; diff --git a/jstests/sharding/shard_with_special_db_names.js b/jstests/sharding/shard_with_special_db_names.js index adb632dd09e..75f0ea19bb5 100644 --- a/jstests/sharding/shard_with_special_db_names.js +++ b/jstests/sharding/shard_with_special_db_names.js @@ -6,12 +6,12 @@ var specialNS = specialDB + ".special"; assert.commandWorked(s.s0.adminCommand({enablesharding: "test"})); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); assert.commandWorked(s.s0.adminCommand({shardcollection: "test.data", key: {num: 1}})); // Test that the database will not complain "cannot have 2 database names that differs on case" assert.commandWorked(s.s0.adminCommand({enablesharding: specialDB})); - s.ensurePrimaryShard(specialDB, 'shard0000'); + s.ensurePrimaryShard(specialDB, s.shard0.shardName); assert.commandWorked(s.s0.adminCommand({shardcollection: specialNS, key: {num: 1}})); var exists = s.getDB("config").collections.find({_id: specialNS}).itcount(); diff --git a/jstests/sharding/sharded_limit_batchsize.js b/jstests/sharding/sharded_limit_batchsize.js index 997d9b61acd..7d47db46e7d 100644 --- a/jstests/sharding/sharded_limit_batchsize.js +++ b/jstests/sharding/sharded_limit_batchsize.js @@ -91,11 +91,11 @@ // Enable sharding and pre-split the sharded collection. 
assert.commandWorked(db.adminCommand({enableSharding: db.getName()})); - st.ensurePrimaryShard(db.getName(), "shard0000"); + st.ensurePrimaryShard(db.getName(), st.shard0.shardName); db.adminCommand({shardCollection: shardedCol.getFullName(), key: {_id: 1}}); assert.commandWorked(db.adminCommand({split: shardedCol.getFullName(), middle: {_id: 0}})); - assert.commandWorked( - db.adminCommand({moveChunk: shardedCol.getFullName(), find: {_id: 0}, to: "shard0001"})); + assert.commandWorked(db.adminCommand( + {moveChunk: shardedCol.getFullName(), find: {_id: 0}, to: st.shard1.shardName})); // Write 10 documents to shard 0, and 10 documents to shard 1 inside the sharded collection. // Write 20 documents which all go to the primary shard in the unsharded collection. diff --git a/jstests/sharding/sharding_balance1.js b/jstests/sharding/sharding_balance1.js index 4213b47d6d7..413a7194c22 100644 --- a/jstests/sharding/sharding_balance1.js +++ b/jstests/sharding/sharding_balance1.js @@ -4,7 +4,7 @@ var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableBalancer: true}}); assert.commandWorked(s.s0.adminCommand({enablesharding: "test"})); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); var db = s.getDB("test"); @@ -28,12 +28,13 @@ function diff1() { var x = s.chunkCounts("foo"); printjson(x); - return Math.max(x.shard0000, x.shard0001) - Math.min(x.shard0000, x.shard0001); + return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) - + Math.min(x[s.shard0.shardName], x[s.shard1.shardName]); } function sum() { var x = s.chunkCounts("foo"); - return x.shard0000 + x.shard0001; + return x[s.shard0.shardName] + x[s.shard1.shardName]; } assert.lt(20, diff1(), "big differential here"); diff --git a/jstests/sharding/sharding_balance2.js b/jstests/sharding/sharding_balance2.js index e72bcdba955..b2befe5dd22 100644 --- a/jstests/sharding/sharding_balance2.js +++ b/jstests/sharding/sharding_balance2.js @@ -64,7 +64,7 @@ s.awaitBalancerRound(); var chunkCounts = s.chunkCounts('foo', 'test'); - assert.eq(0, chunkCounts.shard0001); + assert.eq(0, chunkCounts[s.rs1.name]); s.stop(); })(); diff --git a/jstests/sharding/sharding_balance3.js b/jstests/sharding/sharding_balance3.js index fa4ad9e96d1..155403e0b7c 100644 --- a/jstests/sharding/sharding_balance3.js +++ b/jstests/sharding/sharding_balance3.js @@ -10,7 +10,7 @@ }); s.adminCommand({enablesharding: "test"}); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); s.config.settings.find().forEach(printjson); @@ -35,7 +35,8 @@ function diff1() { var x = s.chunkCounts("foo"); printjson(x); - return Math.max(x.shard0000, x.shard0001) - Math.min(x.shard0000, x.shard0001); + return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) - + Math.min(x[s.shard0.shardName], x[s.shard1.shardName]); } assert.lt(10, diff1()); diff --git a/jstests/sharding/sharding_balance4.js b/jstests/sharding/sharding_balance4.js index e5c6e1056b5..75a30b62b9a 100644 --- a/jstests/sharding/sharding_balance4.js +++ b/jstests/sharding/sharding_balance4.js @@ -146,7 +146,8 @@ if (Math.random() > .999) printjson(x); - return Math.max(x.shard0000, x.shard0001) - Math.min(x.shard0000, x.shard0001); + return Math.max(x[s.shard0.shardName], x[s.shard1.shardName]) - + Math.min(x[s.shard0.shardName], x[s.shard1.shardName]); } assert.lt(20, diff1(), "initial load"); diff --git a/jstests/sharding/sharding_migrate_cursor1.js b/jstests/sharding/sharding_migrate_cursor1.js index 
71858ef833d..fe90a05ec46 100644 --- a/jstests/sharding/sharding_migrate_cursor1.js +++ b/jstests/sharding/sharding_migrate_cursor1.js @@ -15,7 +15,7 @@ s.adminCommand({enablesharding: "test"}); db = s.getDB("test"); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); t = db.foo; bigString = ""; diff --git a/jstests/sharding/sharding_system_namespaces.js b/jstests/sharding/sharding_system_namespaces.js index 851a1d06582..9d0886105dc 100644 --- a/jstests/sharding/sharding_system_namespaces.js +++ b/jstests/sharding/sharding_system_namespaces.js @@ -39,7 +39,7 @@ if (Array.contains(storageEngines, "wiredTiger")) { checkCollectionOptions(db); assert.commandWorked(db.adminCommand({enableSharding: 'test'})); - st.ensurePrimaryShard('test', 'shard0001'); + st.ensurePrimaryShard('test', st.shard1.shardName); assert.commandWorked(db.adminCommand({shardCollection: coll + '', key: {x: 1}})); coll.insert({x: 0}); diff --git a/jstests/sharding/sort1.js b/jstests/sharding/sort1.js index 26491d46ded..dc423ac14d7 100644 --- a/jstests/sharding/sort1.js +++ b/jstests/sharding/sort1.js @@ -4,7 +4,7 @@ var s = new ShardingTest({name: "sort1", shards: 2, mongos: 2}); s.adminCommand({enablesharding: "test"}); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); s.adminCommand({shardcollection: "test.data", key: {'sub.num': 1}}); var db = s.getDB("test"); diff --git a/jstests/sharding/split_against_shard_with_invalid_split_points.js b/jstests/sharding/split_against_shard_with_invalid_split_points.js index 5add17c87a8..8817e524daf 100644 --- a/jstests/sharding/split_against_shard_with_invalid_split_points.js +++ b/jstests/sharding/split_against_shard_with_invalid_split_points.js @@ -28,8 +28,9 @@ }); }; - assert.commandFailedWithCode(callSplit(st.d0.getDB('admin'), {x: MinKey}, {x: 0}, [{x: 2}]), - ErrorCodes.InvalidOptions); + assert.commandFailedWithCode( + callSplit(st.rs0.getPrimary().getDB('admin'), {x: MinKey}, {x: 0}, [{x: 2}]), + ErrorCodes.InvalidOptions); var chunksAfter = st.s.getDB('config').chunks.find().toArray(); assert.eq(chunksBefore, diff --git a/jstests/sharding/ssv_config_check.js b/jstests/sharding/ssv_config_check.js index 969e915aaea..cba03476cb0 100644 --- a/jstests/sharding/ssv_config_check.js +++ b/jstests/sharding/ssv_config_check.js @@ -13,7 +13,7 @@ testDB.user.insert({x: 1}); - var directConn = new Mongo(st.d0.host); + var directConn = new Mongo(st.rs0.getPrimary().host); var adminDB = directConn.getDB('admin'); var configStr = adminDB.runCommand({getShardVersion: 'test.user'}).configServer; diff --git a/jstests/sharding/startup_with_all_configs_down.js b/jstests/sharding/startup_with_all_configs_down.js index cef9ec46327..72ff9238bfc 100644 --- a/jstests/sharding/startup_with_all_configs_down.js +++ b/jstests/sharding/startup_with_all_configs_down.js @@ -23,7 +23,8 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true; MongoRunner.runMongod(shard); } - var st = new ShardingTest({shards: 2}); + // TODO: SERVER-33444 remove shardAsReplicaSet: false + var st = new ShardingTest({shards: 2, other: {shardAsReplicaSet: false}}); jsTestLog("Setting up initial data"); @@ -32,12 +33,12 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true; } assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'})); - st.ensurePrimaryShard('test', 'shard0000'); + st.ensurePrimaryShard('test', st.shard0.shardName); assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.foo', key: {_id: 1}})); 
assert.commandWorked(st.s0.adminCommand({split: 'test.foo', find: {_id: 50}})); assert.commandWorked( - st.s0.adminCommand({moveChunk: 'test.foo', find: {_id: 75}, to: 'shard0001'})); + st.s0.adminCommand({moveChunk: 'test.foo', find: {_id: 75}, to: st.shard1.shardName})); // Make sure the pre-existing mongos already has the routing information loaded into memory assert.eq(100, st.s.getDB('test').foo.find().itcount()); diff --git a/jstests/sharding/stats.js b/jstests/sharding/stats.js index 3f3a1c75439..991bf3840d1 100644 --- a/jstests/sharding/stats.js +++ b/jstests/sharding/stats.js @@ -5,7 +5,7 @@ s.adminCommand({enablesharding: "test"}); db = s.getDB("test"); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); function numKeys(o) { var num = 0; @@ -52,14 +52,22 @@ assert.eq(db.foo.count(), x.count, "coll total count match"); assert.eq(2, x.nchunks, "coll chunk num"); assert.eq(2, numKeys(x.shards), "coll shard num"); - assert.eq(N / 2, x.shards.shard0000.count, "coll count on shard0000 expected"); - assert.eq(N / 2, x.shards.shard0001.count, "coll count on shard0001 expected"); - assert.eq(a.foo.count(), x.shards.shard0000.count, "coll count on shard0000 match"); - assert.eq(b.foo.count(), x.shards.shard0001.count, "coll count on shard0001 match"); - assert(!x.shards.shard0000.indexDetails, - 'indexDetails should not be present in shard0000: ' + tojson(x.shards.shard0000)); - assert(!x.shards.shard0001.indexDetails, - 'indexDetails should not be present in shard0001: ' + tojson(x.shards.shard0001)); + assert.eq( + N / 2, x.shards[s.shard0.shardName].count, "coll count on s.shard0.shardName expected"); + assert.eq( + N / 2, x.shards[s.shard1.shardName].count, "coll count on s.shard1.shardName expected"); + assert.eq(a.foo.count(), + x.shards[s.shard0.shardName].count, + "coll count on s.shard0.shardName match"); + assert.eq(b.foo.count(), + x.shards[s.shard1.shardName].count, + "coll count on s.shard1.shardName match"); + assert(!x.shards[s.shard0.shardName].indexDetails, + 'indexDetails should not be present in s.shard0.shardName: ' + + tojson(x.shards[s.shard0.shardName])); + assert(!x.shards[s.shard1.shardName].indexDetails, + 'indexDetails should not be present in s.shard1.shardName: ' + + tojson(x.shards[s.shard1.shardName])); a_extras = a.stats().objects - a.foo.count(); // things like system.namespaces and system.indexes @@ -72,10 +80,16 @@ assert.eq(N + (a_extras + b_extras), x.objects, "db total count expected"); assert.eq(2, numKeys(x.raw), "db shard num"); - assert.eq((N / 2) + a_extras, x.raw[s.shard0.name].objects, "db count on shard0000 expected"); - assert.eq((N / 2) + b_extras, x.raw[s.shard1.name].objects, "db count on shard0001 expected"); - assert.eq(a.stats().objects, x.raw[s.shard0.name].objects, "db count on shard0000 match"); - assert.eq(b.stats().objects, x.raw[s.shard1.name].objects, "db count on shard0001 match"); + assert.eq((N / 2) + a_extras, + x.raw[s.shard0.name].objects, + "db count on s.shard0.shardName expected"); + assert.eq((N / 2) + b_extras, + x.raw[s.shard1.name].objects, + "db count on s.shard1.shardName expected"); + assert.eq( + a.stats().objects, x.raw[s.shard0.name].objects, "db count on s.shard0.shardName match"); + assert.eq( + b.stats().objects, x.raw[s.shard1.name].objects, "db count on s.shard1.shardName match"); /* Test db.stat() and db.collection.stat() scaling */ diff --git a/jstests/sharding/tag_auto_split.js b/jstests/sharding/tag_auto_split.js index ddee6706544..ac80c057d85 100644 --- 
a/jstests/sharding/tag_auto_split.js +++ b/jstests/sharding/tag_auto_split.js @@ -5,13 +5,13 @@ var s = new ShardingTest({shards: 2, mongos: 1}); assert.commandWorked(s.s0.adminCommand({enablesharding: "test"})); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}})); assert.eq(1, s.config.chunks.find({"ns": "test.foo"}).itcount()); - s.addShardTag("shard0000", "a"); - s.addShardTag("shard0000", "b"); + s.addShardTag(s.shard0.shardName, "a"); + s.addShardTag(s.shard0.shardName, "b"); s.addTagRange("test.foo", {_id: 5}, {_id: 10}, "a"); s.addTagRange("test.foo", {_id: 10}, {_id: 15}, "b"); diff --git a/jstests/sharding/tag_auto_split_partial_key.js b/jstests/sharding/tag_auto_split_partial_key.js index 51ddd69cc78..35f1c6c7b65 100644 --- a/jstests/sharding/tag_auto_split_partial_key.js +++ b/jstests/sharding/tag_auto_split_partial_key.js @@ -5,13 +5,13 @@ var s = new ShardingTest({shards: 2, mongos: 1}); assert.commandWorked(s.s0.adminCommand({enablesharding: "test"})); - s.ensurePrimaryShard('test', 'shard0001'); + s.ensurePrimaryShard('test', s.shard1.shardName); assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1, a: 1}})); assert.eq(1, s.config.chunks.find({"ns": "test.foo"}).itcount()); - s.addShardTag("shard0000", "a"); - s.addShardTag("shard0000", "b"); + s.addShardTag(s.shard0.shardName, "a"); + s.addShardTag(s.shard0.shardName, "b"); s.addTagRange("test.foo", {_id: 5}, {_id: 10}, "a"); s.addTagRange("test.foo", {_id: 10}, {_id: 15}, "b"); diff --git a/jstests/sharding/tag_range.js b/jstests/sharding/tag_range.js index fba599e896b..3cb99e6ab9f 100644 --- a/jstests/sharding/tag_range.js +++ b/jstests/sharding/tag_range.js @@ -5,7 +5,7 @@ const st = new ShardingTest({shards: 2, mongos: 1}); assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'})); - st.ensurePrimaryShard('test', 'shard0001'); + st.ensurePrimaryShard('test', st.shard1.shardName); assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.tag_range', key: {_id: 1}})); function countTags(num, message) { @@ -14,9 +14,9 @@ assert.eq(1, st.config.chunks.count({"ns": "test.tag_range"})); - st.addShardTag('shard0000', 'a'); - st.addShardTag('shard0000', 'b'); - st.addShardTag('shard0000', 'c'); + st.addShardTag(st.shard0.shardName, 'a'); + st.addShardTag(st.shard0.shardName, 'b'); + st.addShardTag(st.shard0.shardName, 'c'); // add two ranges, verify the additions diff --git a/jstests/sharding/time_zone_info_mongos.js b/jstests/sharding/time_zone_info_mongos.js index 510ab82874e..5ed4dd9866b 100644 --- a/jstests/sharding/time_zone_info_mongos.js +++ b/jstests/sharding/time_zone_info_mongos.js @@ -30,7 +30,7 @@ assert.eq(conn, null, "expected launching mongos with bad timezone rules to fail"); assert.neq(-1, rawMongoProgramOutput().indexOf("Failed global initialization")); - // Enable sharding on the test DB and ensure its primary is shard0000. + // Enable sharding on the test DB and ensure its primary is st.shard0.shardName. assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()})); st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL()); @@ -42,7 +42,7 @@ assert.commandWorked( mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}})); - // Move the [0, MaxKey) chunk to shard0001. + // Move the [0, MaxKey) chunk to st.shard1.shardName. 
diff --git a/jstests/sharding/top_chunk_autosplit.js b/jstests/sharding/top_chunk_autosplit.js
index f3f7486d098..560bc21d071 100644
--- a/jstests/sharding/top_chunk_autosplit.js
+++ b/jstests/sharding/top_chunk_autosplit.js
@@ -126,6 +126,16 @@
 var highChunkInserts = {value: 1000, inc: 1};
 var lowChunk = 1;
 var highChunk = -1;
+// Main
+var dbName = "TopChunkDB";
+var collName = "coll";
+
+var st = shardSetup(
+    {name: "topchunk", shards: 4, chunkSize: 1, other: {enableAutoSplit: true}}, dbName, collName);
+var db = st.getDB(dbName);
+var coll = db[collName];
+var configDB = st.s.getDB('config');
+
 // Test objects:
 // name - name of test
 // lowOrHigh - 1 for low top chunk, -1 for high top chunk
@@ -146,12 +156,12 @@ var tests = [
         // Test auto-split on the "low" top chunk to another tagged shard
         name: "low top chunk with tag move",
         lowOrHigh: lowChunk,
-        movedToShard: "shard0002",
+        movedToShard: st.rs2.name,
         shards: [
-            {name: "shard0000", range: lowChunkRange, chunks: 20, tags: ["NYC"]},
-            {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
-            {name: "shard0002", range: highChunkRange, chunks: 5, tags: ["NYC"]},
-            {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]},
+            {name: st.rs0.name, range: lowChunkRange, chunks: 20, tags: ["NYC"]},
+            {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
+            {name: st.rs2.name, range: highChunkRange, chunks: 5, tags: ["NYC"]},
+            {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]},
         ],
         tagRanges: [
             {range: lowChunkTagRange, tag: "NYC"},
@@ -165,12 +175,12 @@
         // Test auto-split on the "low" top chunk to same tagged shard
        name: "low top chunk with tag no move",
        lowOrHigh: lowChunk,
-        movedToShard: "shard0000",
+        movedToShard: st.rs0.name,
        shards: [
-            {name: "shard0000", range: lowChunkRange, chunks: 5, tags: ["NYC"]},
-            {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
-            {name: "shard0002", range: highChunkRange, chunks: 20, tags: ["NYC"]},
-            {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]},
+            {name: st.rs0.name, range: lowChunkRange, chunks: 5, tags: ["NYC"]},
+            {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
+            {name: st.rs2.name, range: highChunkRange, chunks: 20, tags: ["NYC"]},
+            {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]},
        ],
        tagRanges: [
            {range: lowChunkTagRange, tag: "NYC"},
@@ -184,12 +194,12 @@
        // Test auto-split on the "low" top chunk to another shard
        name: "low top chunk no tag move",
        lowOrHigh: lowChunk,
-        movedToShard: "shard0003",
+        movedToShard: st.rs3.name,
        shards: [
-            {name: "shard0000", range: lowChunkRange, chunks: 20},
-            {name: "shard0001", range: midChunkRange1, chunks: 20},
-            {name: "shard0002", range: highChunkRange, chunks: 5},
-            {name: "shard0003", range: midChunkRange2, chunks: 1}
+            {name: st.rs0.name, range: lowChunkRange, chunks: 20},
+            {name: st.rs1.name, range: midChunkRange1, chunks: 20},
+            {name: st.rs2.name, range: highChunkRange, chunks: 5},
+            {name: st.rs3.name, range: midChunkRange2, chunks: 1}
        ],
        inserts: lowChunkInserts
    },
@@ -197,12 +207,12 @@
        // Test auto-split on the "high" top chunk to another tagged shard
        name: "high top chunk with tag move",
        lowOrHigh: highChunk,
-        movedToShard: "shard0000",
+        movedToShard: st.rs0.name,
        shards: [
-            {name: "shard0000", range: lowChunkRange, chunks: 5, tags: ["NYC"]},
-            {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
-            {name: "shard0002", range: highChunkRange, chunks: 20, tags: ["NYC"]},
-            {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]}
+            {name: st.rs0.name, range: lowChunkRange, chunks: 5, tags: ["NYC"]},
+            {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
+            {name: st.rs2.name, range: highChunkRange, chunks: 20, tags: ["NYC"]},
+            {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]}
        ],
        tagRanges: [
            {range: lowChunkTagRange, tag: "NYC"},
@@ -216,12 +226,12 @@
        // Test auto-split on the "high" top chunk to another shard
        name: "high top chunk no tag move",
        lowOrHigh: highChunk,
-        movedToShard: "shard0003",
+        movedToShard: st.rs3.name,
        shards: [
-            {name: "shard0000", range: lowChunkRange, chunks: 5},
-            {name: "shard0001", range: midChunkRange1, chunks: 20},
-            {name: "shard0002", range: highChunkRange, chunks: 20},
-            {name: "shard0003", range: midChunkRange2, chunks: 1}
+            {name: st.rs0.name, range: lowChunkRange, chunks: 5},
+            {name: st.rs1.name, range: midChunkRange1, chunks: 20},
+            {name: st.rs2.name, range: highChunkRange, chunks: 20},
+            {name: st.rs3.name, range: midChunkRange2, chunks: 1}
        ],
        inserts: highChunkInserts
    },
@@ -229,12 +239,12 @@
        // Test auto-split on the "high" top chunk to same tagged shard
        name: "high top chunk with tag no move",
        lowOrHigh: highChunk,
-        movedToShard: "shard0002",
+        movedToShard: st.rs2.name,
        shards: [
-            {name: "shard0000", range: lowChunkRange, chunks: 20, tags: ["NYC"]},
-            {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
-            {name: "shard0002", range: highChunkRange, chunks: 5, tags: ["NYC"]},
-            {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]}
+            {name: st.rs0.name, range: lowChunkRange, chunks: 20, tags: ["NYC"]},
+            {name: st.rs1.name, range: midChunkRange1, chunks: 20, tags: ["SF"]},
+            {name: st.rs2.name, range: highChunkRange, chunks: 5, tags: ["NYC"]},
+            {name: st.rs3.name, range: midChunkRange2, chunks: 1, tags: ["SF"]}
        ],
        tagRanges: [
            {range: lowChunkTagRange, tag: "NYC"},
@@ -248,73 +258,19 @@
        // Test auto-split on the "high" top chunk to same shard
        name: "high top chunk no tag no move",
        lowOrHigh: highChunk,
-        movedToShard: "shard0002",
-        shards: [
-            {name: "shard0000", range: lowChunkRange, chunks: 20},
-            {name: "shard0001", range: midChunkRange1, chunks: 20},
-            {name: "shard0002", range: highChunkRange, chunks: 1},
-            {name: "shard0003", range: midChunkRange2, chunks: 5}
-        ],
-        inserts: highChunkInserts
-    },
-];
-
-var singleNodeTests = [
-    {
-        // Test auto-split on the "low" top chunk on single node shard
-        name: "single node shard - low top chunk",
-        lowOrHigh: lowChunk,
-        movedToShard: "shard0000",
-        shards: [{name: "shard0000", range: lowChunkRange, chunks: 2}],
-        inserts: lowChunkInserts
-    },
-    {
-        // Test auto-split on the "high" top chunk on single node shard
-        name: "single node shard - high top chunk",
-        lowOrHigh: highChunk,
-        movedToShard: "shard0000",
-        shards: [{name: "shard0000", range: highChunkRange, chunks: 2}],
-        inserts: highChunkInserts
-    },
-];
-
-var maxSizeTests = [
-    {
-        // Test auto-split on the "low" top chunk with maxSize on destination shard
-        name: "maxSize - low top chunk",
-        lowOrHigh: lowChunk,
-        movedToShard: "shard0000",
-        shards: [
-            {name: "shard0000", range: lowChunkRange, chunks: 10},
-            {name: "shard0001", range: highChunkRange, chunks: 1}
-        ],
-        inserts: lowChunkInserts
-    },
-    {
-        // Test auto-split on the "high" top chunk with maxSize on destination shard
-        name: "maxSize - high top chunk",
-        lowOrHigh: highChunk,
-        movedToShard: "shard0000",
+        movedToShard: st.rs2.name,
        shards: [
-            {name: "shard0000", range: highChunkRange, chunks: 10},
-            {name: "shard0001", range: lowChunkRange, chunks: 1}
+            {name: st.rs0.name, range: lowChunkRange, chunks: 20},
+            {name: st.rs1.name, range: midChunkRange1, chunks: 20},
+            {name: st.rs2.name, range: highChunkRange, chunks: 1},
+            {name: st.rs3.name, range: midChunkRange2, chunks: 5}
        ],
        inserts: highChunkInserts
    },
 ];
-// Main
-var dbName = "TopChunkDB";
-var collName = "coll";
-
-var st = shardSetup(
-    {name: "topchunk", shards: 4, chunkSize: 1, other: {enableAutoSplit: true}}, dbName, collName);
-var db = st.getDB(dbName);
-var coll = db[collName];
-var configDB = st.s.getDB('config');
-
 assert.commandWorked(db.adminCommand({enableSharding: dbName}));
-st.ensurePrimaryShard(dbName, 'shard0000');
+st.ensurePrimaryShard(dbName, st.rs0.name);
 // Execute all test objects
 for (var i = 0; i < tests.length; i++) {
@@ -332,7 +288,26 @@
 coll = db[collName];
 configDB = st.s.getDB('config');
 assert.commandWorked(db.adminCommand({enableSharding: dbName}));
-st.ensurePrimaryShard(dbName, 'shard0000');
+st.ensurePrimaryShard(dbName, st.rs0.name);
+
+var singleNodeTests = [
+    {
+        // Test auto-split on the "low" top chunk on single node shard
+        name: "single node shard - low top chunk",
+        lowOrHigh: lowChunk,
+        movedToShard: st.rs0.name,
+        shards: [{name: st.rs0.name, range: lowChunkRange, chunks: 2}],
+        inserts: lowChunkInserts
+    },
+    {
+        // Test auto-split on the "high" top chunk on single node shard
+        name: "single node shard - high top chunk",
+        lowOrHigh: highChunk,
+        movedToShard: st.rs0.name,
+        shards: [{name: st.rs0.name, range: highChunkRange, chunks: 2}],
+        inserts: highChunkInserts
+    },
+];
 // Execute all test objects
 for (var i = 0; i < singleNodeTests.length; i++) {
@@ -355,16 +330,43 @@
 db = st.getDB(dbName);
 coll = db[collName];
 configDB = st.s.getDB('config');
-// maxSize on shard0000 - 5MB, on shard0001 - 1MB
+var maxSizeTests = [
+    {
+        // Test auto-split on the "low" top chunk with maxSize on destination shard
+        name: "maxSize - low top chunk",
+        lowOrHigh: lowChunk,
+        movedToShard: st.rs0.name,
+        shards: [
+            {name: st.rs0.name, range: lowChunkRange, chunks: 10},
+            {name: st.rs1.name, range: highChunkRange, chunks: 1}
+        ],
+        inserts: lowChunkInserts
+    },
+    {
+        // Test auto-split on the "high" top chunk with maxSize on destination shard
+        name: "maxSize - high top chunk",
+        lowOrHigh: highChunk,
+        movedToShard: st.rs0.name,
+        shards: [
+            {name: st.rs0.name, range: highChunkRange, chunks: 10},
+            {name: st.rs1.name, range: lowChunkRange, chunks: 1}
+        ],
+        inserts: highChunkInserts
+    },
+];
+
+// maxSize on st.rs0.name - 5MB, on st.rs1.name - 1MB
 assert.commandWorked(db.adminCommand({addshard: st.getConnNames()[0], maxSize: 5}));
 assert.commandWorked(db.adminCommand({addshard: st.getConnNames()[1], maxSize: 1}));
 // SERVER-17070 Auto split moves to shard node running WiredTiger, if exceeding maxSize
 var unsupported = ["wiredTiger", "rocksdb", "inMemory", "ephemeralForTest"];
-if (unsupported.indexOf(st.d0.adminCommand({serverStatus: 1}).storageEngine.name) == -1 &&
-    unsupported.indexOf(st.d1.adminCommand({serverStatus: 1}).storageEngine.name) == -1) {
+if (unsupported.indexOf(st.rs0.getPrimary().adminCommand({serverStatus: 1}).storageEngine.name) ==
+        -1 &&
+    unsupported.indexOf(st.rs1.getPrimary().adminCommand({serverStatus: 1}).storageEngine.name) ==
+        -1) {
    assert.commandWorked(db.adminCommand({enableSharding: dbName}));
-    st.ensurePrimaryShard(dbName, 'shard0000');
+    st.ensurePrimaryShard(dbName, st.rs0.name);
    // Execute all test objects
    for (var i = 0; i < maxSizeTests.length; i++) {
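The relocation of the setup block above is an ordering constraint rather than new behaviour: the test arrays now embed st.rs0.name and friends, which only exist after shardSetup() has started the cluster. A hedged sketch of the constraint (shardSetup, dbName and collName as used by this test; the print is illustrative only):

// st must exist before any object literal that mentions st.rsN.name is evaluated.
var st = shardSetup(
    {name: "topchunk", shards: 4, chunkSize: 1, other: {enableAutoSplit: true}}, dbName, collName);
print("first shard is " + st.rs0.name);  // assigned at startup, e.g. "topchunk-rs0"

var tests = [{name: "example", movedToShard: st.rs0.name}];  // safe: st is already defined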
diff --git a/jstests/sharding/top_chunk_split.js b/jstests/sharding/top_chunk_split.js
index c792ed81df3..5aeeb14ddfd 100644
--- a/jstests/sharding/top_chunk_split.js
+++ b/jstests/sharding/top_chunk_split.js
@@ -18,7 +18,7 @@
     var shardVersion = [res.version, res.versionEpoch];
     return db.runCommand({
         splitChunk: 'test.user',
-        from: 'shard0000',
+        from: st.shard0.shardName,
         min: minKey,
         max: maxKey,
         keyPattern: {x: 1},
@@ -137,7 +137,7 @@
     }
     // run test
-    test(st.d0.getDB('admin'));
+    test(st.rs0.getPrimary().getDB('admin'));
     // teardown
     testDB.user.drop();
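With shards started as replica sets, the old st.d0/st.d1 standalone handles used above go away; a shard is reached through its ReplSetTest member instead. A minimal sketch of the handle change, assuming an st created by one of the tests in this patch:

// Before: a standalone shard handle.
// var adminDB = st.d0.getDB('admin');

// After: ask the shard's replica set for its primary and talk to that node.
var adminDB = st.rs0.getPrimary().getDB('admin');
var engine = adminDB.adminCommand({serverStatus: 1}).storageEngine.name;
print('shard0 primary runs ' + engine);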
diff --git a/jstests/sharding/unowned_doc_filtering.js b/jstests/sharding/unowned_doc_filtering.js
index 2c12ef4b0cf..54a9ad8ce27 100644
--- a/jstests/sharding/unowned_doc_filtering.js
+++ b/jstests/sharding/unowned_doc_filtering.js
@@ -6,6 +6,10 @@
  * A restarted standalone will lose all data when using an ephemeral storage engine.
  * @tags: [requires_persistence]
  */
+
+// This test shuts down shards.
+TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
+
 (function() {
     "use strict";
@@ -14,7 +18,7 @@
     var testDB = st.s.getDB('test');
     assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
-    st.ensurePrimaryShard('test', 'shard0000');
+    st.ensurePrimaryShard('test', st.shard0.shardName);
     assert.commandWorked(testDB.adminCommand({shardCollection: 'test.foo', key: {x: 1}}));
     var inserts = [];
@@ -25,14 +29,14 @@
     assert.commandWorked(testDB.adminCommand({split: 'test.foo', find: {x: 50}}));
     assert.commandWorked(
-        testDB.adminCommand({moveChunk: 'test.foo', find: {x: 100}, to: 'shard0001'}));
+        testDB.adminCommand({moveChunk: 'test.foo', find: {x: 100}, to: st.shard1.shardName}));
     // Insert some documents directly into the shards into chunks not owned by that shard.
-    st.d0.getDB('test').foo.insert({x: 100});
-    st.d1.getDB('test').foo.insert({x: 0});
+    st.rs0.getPrimary().getDB('test').foo.insert({x: 100});
+    st.rs1.getPrimary().getDB('test').foo.insert({x: 0});
-    st.restartMongod(0);
-    st.restartMongod(1);
+    st.rs0.restart(0);
+    st.rs1.restart(1);
     var fooCount;
     for (var retries = 0; retries <= 2; retries++) {
diff --git a/jstests/sharding/update_immutable_fields.js b/jstests/sharding/update_immutable_fields.js
index ba936808fdd..b2a422a98cb 100644
--- a/jstests/sharding/update_immutable_fields.js
+++ b/jstests/sharding/update_immutable_fields.js
@@ -10,7 +10,7 @@
     var shard0 = st.shard0;
     assert.commandWorked(config.adminCommand({enableSharding: coll.getDB() + ""}));
-    st.ensurePrimaryShard(coll.getDB().getName(), 'shard0000');
+    st.ensurePrimaryShard(coll.getDB().getName(), st.shard0.shardName);
     assert.commandWorked(config.adminCommand({shardCollection: "" + coll, key: {a: 1}}));
     var getDirectShardedConn = function(st, collName) {
@@ -27,7 +27,7 @@
         authoritative: true,
         configdb: configConnStr,
         version: maxChunk.lastmod,
-        shard: 'shard0000',
+        shard: st.shard0.shardName,
         versionEpoch: maxChunk.lastmodEpoch
     };
diff --git a/jstests/sharding/update_sharded.js b/jstests/sharding/update_sharded.js
index 3b30d6936a9..d08d498c613 100644
--- a/jstests/sharding/update_sharded.js
+++ b/jstests/sharding/update_sharded.js
@@ -5,7 +5,7 @@
     var s = new ShardingTest({name: "auto1", shards: 2, mongos: 1});
     s.adminCommand({enablesharding: "test"});
-    s.ensurePrimaryShard('test', 'shard0001');
+    s.ensurePrimaryShard('test', s.shard1.shardName);
     // repeat same tests with hashed shard key, to ensure identical behavior
     s.adminCommand({shardcollection: "test.update0", key: {key: 1}});
diff --git a/jstests/sharding/upsert_sharded.js b/jstests/sharding/upsert_sharded.js
index 9ee8f72d1bc..5d3dde7a5cd 100644
--- a/jstests/sharding/upsert_sharded.js
+++ b/jstests/sharding/upsert_sharded.js
@@ -12,7 +12,7 @@
     var coll = mongos.getCollection("foo.bar");
     assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
-    st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
+    st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
     var upsertedResult = function(query, expr) {
         coll.remove({});
diff --git a/jstests/sharding/user_flags_sharded.js b/jstests/sharding/user_flags_sharded.js
index caf5280d185..d9c718315fc 100644
--- a/jstests/sharding/user_flags_sharded.js
+++ b/jstests/sharding/user_flags_sharded.js
@@ -47,7 +47,8 @@
     // other shard to create the collection on that shard
     s.adminCommand({enablesharding: dbname});
     s.adminCommand({shardcollection: ns, key: {_id: 1}});
-    s.adminCommand({moveChunk: ns, find: {_id: 1}, to: "shard0000", _waitForDelete: true});
+    s.adminCommand(
+        {moveChunk: ns, find: {_id: 1}, to: s.shard0.shardName, _waitForDelete: true});
     print("*************** Collection Stats On Other Shard ************");
     var shard2 = s._connections[0].getDB(dbname);
diff --git a/jstests/sharding/version1.js b/jstests/sharding/version1.js
index 0df7ae3cb86..c8a361f72ed 100644
--- a/jstests/sharding/version1.js
+++ b/jstests/sharding/version1.js
@@ -39,7 +39,7 @@
         configdb: s._configDB,
         version: new Timestamp(2, 0),
         authoritative: true,
-        shard: "shard0000",
+        shard: s.shard0.shardName,
         shardHost: s.s.host
     }),
               "should have failed because version is config is 1|0");
@@ -51,7 +51,7 @@
         version: new Timestamp(1, 0),
         versionEpoch: epoch,
         authoritative: true,
-        shard: "shard0000",
+        shard: s.shard0.shardName,
         shardHost: s.s.host
     }),
               "should have worked");
diff --git a/jstests/sharding/version2.js b/jstests/sharding/version2.js
index d0719457cbe..89b919f7ce4 100644
--- a/jstests/sharding/version2.js
+++ b/jstests/sharding/version2.js
@@ -22,7 +22,7 @@
         authoritative: true,
         version: new Timestamp(1, 0),
         versionEpoch: fooEpoch,
-        shard: "shard0000",
+        shard: s.shard0.shardName,
         shardHost: s.s.host,
     }));
@@ -34,7 +34,7 @@
         1);
     // From a different client
-    var a2 = connect(`mongodb://${s._connections[0].name}/admin`);
+    var a2 = connect(`mongodb://${s.rs0.getPrimary().name}/admin`);
     assert.eq(
         a2.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.t,
@@ -54,7 +54,7 @@
         configdb: s._configDB,
         version: new Timestamp(1, 0),
         versionEpoch: barEpoch,
-        shard: 'shard0000',
+        shard: s.shard0.shardName,
         authoritative: true
     }),
               "setShardVersion bar temp");
diff --git a/jstests/sharding/views.js b/jstests/sharding/views.js
index 4f066eaf72e..27325338025 100644
--- a/jstests/sharding/views.js
+++ b/jstests/sharding/views.js
@@ -45,12 +45,12 @@
     let coll = db.getCollection("coll");
     assert.commandWorked(config.adminCommand({enableSharding: db.getName()}));
-    st.ensurePrimaryShard(db.getName(), "shard0000");
+    st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
     assert.commandWorked(config.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}}));
     assert.commandWorked(mongos.adminCommand({split: coll.getFullName(), middle: {a: 6}}));
     assert.commandWorked(
-        db.adminCommand({moveChunk: coll.getFullName(), find: {a: 25}, to: "shard0001"}));
+        db.adminCommand({moveChunk: coll.getFullName(), find: {a: 25}, to: st.shard1.shardName}));
     for (let i = 0; i < 10; ++i) {
         assert.writeOK(coll.insert({a: i}));
diff --git a/jstests/sharding/write_commands_sharding_state.js b/jstests/sharding/write_commands_sharding_state.js
index bdc4c235f71..479a3c1f687 100644
--- a/jstests/sharding/write_commands_sharding_state.js
+++ b/jstests/sharding/write_commands_sharding_state.js
@@ -5,13 +5,15 @@
 (function() {
     'use strict';
-    var st = new ShardingTest({name: "write_commands", mongos: 2, shards: 2});
+    // TODO: SERVER-33444 remove shardAsReplicaSet: false
+    var st = new ShardingTest(
+        {name: "write_commands", mongos: 2, shards: 2, other: {shardAsReplicaSet: false}});
     var dbTestName = 'WriteCommandsTestDB';
     var collName = dbTestName + '.TestColl';
     assert.commandWorked(st.s0.adminCommand({enablesharding: dbTestName}));
-    st.ensurePrimaryShard(dbTestName, 'shard0000');
+    st.ensurePrimaryShard(dbTestName, st.shard0.shardName);
     assert.commandWorked(
         st.s0.adminCommand({shardCollection: collName, key: {Key: 1}, unique: true}));
@@ -22,11 +24,11 @@
     printjson(st.config.getSiblingDB('config').chunks.find().toArray());
-    // Move 10 and 20 to shard00001
+    // Move 10 and 20 to st.shard1.shardName
     assert.commandWorked(st.s0.adminCommand(
-        {moveChunk: collName, find: {Key: 19}, to: 'shard0001', _waitForDelete: true}));
+        {moveChunk: collName, find: {Key: 19}, to: st.shard1.shardName, _waitForDelete: true}));
     assert.commandWorked(st.s0.adminCommand(
-        {moveChunk: collName, find: {Key: 21}, to: 'shard0001', _waitForDelete: true}));
+        {moveChunk: collName, find: {Key: 21}, to: st.shard1.shardName, _waitForDelete: true}));
     printjson(st.config.getSiblingDB('config').chunks.find().toArray());
@@ -46,9 +48,9 @@
     assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({Key: 11}).count());
     assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({Key: 21}).count());
-    // Move chunk [0, 19] to shard0000 and make sure the documents are correctly placed
+    // Move chunk [0, 19] to st.shard0.shardName and make sure the documents are correctly placed
     assert.commandWorked(st.s0.adminCommand(
-        {moveChunk: collName, find: {Key: 19}, _waitForDelete: true, to: 'shard0000'}));
+        {moveChunk: collName, find: {Key: 19}, _waitForDelete: true, to: st.shard0.shardName}));
     printjson(st.config.getSiblingDB('config').chunks.find().toArray());
     printjson(st.d0.getDB(dbTestName).TestColl.find({}).toArray());
@@ -61,7 +63,8 @@
     // Now that both mongod shards are restarted, they don't know yet that they are part of a
     // sharded
     // cluster until they get a setShardVerion command. Mongos instance s1 has stale metadata and
-    // doesn't know that chunk with key 19 has moved to shard0000 so it will send it to shard0001 at
+    // doesn't know that chunk with key 19 has moved to st.shard0.shardName so it will send it to
+    // st.shard1.shardName at
     // first.
     //
     // Shard0001 would only send back a stale config exception if it receives a setShardVersion
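The hunks above all make the same substitution, so a rough summary may help; this fragment is illustrative rather than taken from the patch, and the test.foo namespace is an assumption:

// Hard-coded ids such as "shard0000" only match clusters whose shards happen to
// register under those names. st.shardN.shardName always matches the running
// cluster (for replica-set shards it is typically the set name).
st.ensurePrimaryShard('test', st.shard0.shardName);
assert.commandWorked(st.s0.adminCommand(
    {moveChunk: 'test.foo', find: {_id: 0}, to: st.shard1.shardName, _waitForDelete: true}));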
assert.neq(null, testDB_s0.user.findOne({x: 10})); - checkShardMajorVersion(st.d0, 0); - checkShardMajorVersion(st.d1, 0); + checkShardMajorVersion(st.rs0.getPrimary(), 0); + checkShardMajorVersion(st.rs1.getPrimary(), 0); /////////////////////////////////////////////////////// // Test 2 shards with 1 chunk @@ -92,7 +92,7 @@ testDB_s1.user.insert({x: 1}); testDB_s1.user.insert({x: -11}); assert.commandWorked( - testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: -1}, to: 'shard0000'})); + testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: -1}, to: st.shard0.shardName})); st.configRS.awaitLastOpCommitted(); @@ -106,21 +106,21 @@ // // mongos2: 2|0|a - checkShardMajorVersion(st.d0, 0); - checkShardMajorVersion(st.d1, 2); + checkShardMajorVersion(st.rs0.getPrimary(), 0); + checkShardMajorVersion(st.rs1.getPrimary(), 2); - // mongos2 still thinks that { x: 1 } belong to shard0000, but should be able to + // mongos2 still thinks that { x: 1 } belong to st.shard0.shardName, but should be able to // refresh it's metadata correctly. assert.neq(null, testDB_s2.user.findOne({x: 1})); - checkShardMajorVersion(st.d0, 2); - checkShardMajorVersion(st.d1, 2); + checkShardMajorVersion(st.rs0.getPrimary(), 2); + checkShardMajorVersion(st.rs1.getPrimary(), 2); // Set shard metadata to 2|0|b assert.neq(null, testDB_s2.user.findOne({x: -11})); - checkShardMajorVersion(st.d0, 2); - checkShardMajorVersion(st.d1, 2); + checkShardMajorVersion(st.rs0.getPrimary(), 2); + checkShardMajorVersion(st.rs1.getPrimary(), 2); // Official config: // shard0: 2|0|b, [-inf, 0) @@ -132,7 +132,7 @@ // // mongos3: 2|0|a - // 4th mongos still thinks that { x: 1 } belong to shard0000, but should be able to + // 4th mongos still thinks that { x: 1 } belong to st.shard0.shardName, but should be able to // refresh it's metadata correctly. assert.neq(null, testDB_s3.user.findOne({x: 1})); @@ -143,8 +143,8 @@ // Set mongos0 to version 0|0|0 testDB_s0.user.drop(); - checkShardMajorVersion(st.d0, 0); - checkShardMajorVersion(st.d1, 0); + checkShardMajorVersion(st.rs0.getPrimary(), 0); + checkShardMajorVersion(st.rs1.getPrimary(), 0); assert.eq(null, testDB_s0.user.findOne({x: 1})); @@ -155,7 +155,7 @@ testDB_s1.user.insert({x: 1}); assert.commandWorked( - testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: 'shard0000'})); + testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard0.shardName})); st.configRS.awaitLastOpCommitted(); @@ -169,14 +169,14 @@ // // mongos0: 0|0|0 - checkShardMajorVersion(st.d0, 0); - checkShardMajorVersion(st.d1, 0); + checkShardMajorVersion(st.rs0.getPrimary(), 0); + checkShardMajorVersion(st.rs1.getPrimary(), 0); // 1st mongos thinks that collection is unshareded and will attempt to query primary shard. 
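Only the call sites of checkShardMajorVersion appear in these hunks; its body lies outside the diff. A plausible reconstruction, offered as an assumption rather than the test's actual code, following how version2.js reads the major version out of getShardVersion:

var checkShardMajorVersion = function(conn, expectedVersion) {
    // The t component of the returned Timestamp is the major (chunk placement) version.
    var res = conn.adminCommand({getShardVersion: 'test.user'});
    assert.commandWorked(res);
    assert.eq(expectedVersion, res.global.t);
};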
diff --git a/jstests/slow1/sharding_multiple_collections.js b/jstests/slow1/sharding_multiple_collections.js
index ef223161231..4f4c5aca00d 100644
--- a/jstests/slow1/sharding_multiple_collections.js
+++ b/jstests/slow1/sharding_multiple_collections.js
@@ -4,7 +4,7 @@
     var s = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
     assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
-    s.ensurePrimaryShard('test', 'shard0001');
+    s.ensurePrimaryShard('test', s.shard1.shardName);
     var S = "";
     while (S.length < 500) {
diff --git a/jstests/ssl/libs/ssl_helpers.js b/jstests/ssl/libs/ssl_helpers.js
index ee5d4f05a92..23703683aa4 100644
--- a/jstests/ssl/libs/ssl_helpers.js
+++ b/jstests/ssl/libs/ssl_helpers.js
@@ -109,11 +109,12 @@ function mixedShardTest(options1, options2, shouldSucceed) {
     // Start ShardingTest with enableBalancer because ShardingTest attempts to turn
     // off the balancer otherwise, which it will not be authorized to do if auth is enabled.
     // Once SERVER-14017 is fixed the "enableBalancer" line can be removed.
+    // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
     var st = new ShardingTest({
         mongos: [options1],
         config: [options1],
         shards: [options1, options2],
-        other: {enableBalancer: true}
+        other: {enableBalancer: true, shardAsReplicaSet: false}
     });
     // Create admin user in case the options include auth
@@ -131,8 +132,8 @@ function mixedShardTest(options1, options2, shouldSucceed) {
         var r = st.adminCommand({enableSharding: "test"});
         assert.eq(r, true, "error enabling sharding for this configuration");
-        st.ensurePrimaryShard("test", "shard0000");
-        r = st.adminCommand({movePrimary: 'test', to: 'shard0001'});
+        st.ensurePrimaryShard("test", st.shard0.shardName);
+        r = st.adminCommand({movePrimary: 'test', to: st.shard1.shardName});
         assert.eq(r, true, "error movePrimary failed for this configuration");
         var db1 = st.getDB("test");
@@ -150,7 +151,8 @@ function mixedShardTest(options1, options2, shouldSucceed) {
         assert.eq(128, db1.col.count(), "error retrieving documents from cluster");
         // Test shards talking to each other
-        r = st.getDB('test').adminCommand({moveChunk: 'test.col', find: {_id: 0}, to: 'shard0000'});
+        r = st.getDB('test').adminCommand(
+            {moveChunk: 'test.col', find: {_id: 0}, to: st.shard0.shardName});
         assert(r.ok, "error moving chunks: " + tojson(r));
         db1.col.remove({});
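For suites that cannot yet run their shards as replica sets, the opt-out travels through ShardingTest's other block; a minimal sketch of the shape (the TODO mirrors the ones added in this patch):

// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
var st = new ShardingTest({
    shards: 2,
    mongos: 1,
    other: {shardAsReplicaSet: false}  // keep shards as standalone mongod processes
});
// ... test body ...
st.stop();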
diff --git a/jstests/ssl/sharding_with_x509.js b/jstests/ssl/sharding_with_x509.js
index 621bfb1468a..e1dc5ca822d 100644
--- a/jstests/ssl/sharding_with_x509.js
+++ b/jstests/ssl/sharding_with_x509.js
@@ -15,6 +15,7 @@
     // Start ShardingTest with enableBalancer because ShardingTest attempts to turn off the balancer
     // otherwise, which it will not be authorized to do. Once SERVER-14017 is fixed the
     // "enableBalancer" line could be removed.
+    // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
     var st = new ShardingTest({
         shards: 2,
         mongos: 1,
@@ -23,7 +24,8 @@
             configOptions: x509_options,
             mongosOptions: x509_options,
             rsOptions: x509_options,
-            shardOptions: x509_options
+            shardOptions: x509_options,
+            shardAsReplicaSet: false
         }
     });
diff --git a/jstests/ssl/ssl_without_ca.js b/jstests/ssl/ssl_without_ca.js
index cbc3aaa37f1..93658ef5ca8 100644
--- a/jstests/ssl/ssl_without_ca.js
+++ b/jstests/ssl/ssl_without_ca.js
@@ -42,11 +42,17 @@
 assert.isnull(conn, "server started with x509 clusterAuthMode but no CA file");
 jsTest.log("Assert mongos doesn\'t start with CA file missing and clusterAuthMode=x509.");
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
 assert.throws(function() {
     new ShardingTest({
         shards: 1,
         mongos: 1,
         verbose: 2,
-        other: {configOptions: sslParams, mongosOptions: sslParams, shardOptions: sslParams}
+        other: {
+            configOptions: sslParams,
+            mongosOptions: sslParams,
+            shardOptions: sslParams,
+            shardAsReplicaSet: false
+        }
     });
 }, [], "mongos started with x509 clusterAuthMode but no CA file");
diff --git a/jstests/ssl/x509_client.js b/jstests/ssl/x509_client.js
index e6d6899c07e..09c546e2cb9 100644
--- a/jstests/ssl/x509_client.js
+++ b/jstests/ssl/x509_client.js
@@ -92,6 +92,7 @@
 MongoRunner.stopMongod(mongo);
 print("2. Testing x.509 auth to mongos");
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
 var st = new ShardingTest({
     shards: 1,
     mongos: 1,
@@ -101,6 +102,7 @@ var st = new ShardingTest({
         mongosOptions: x509_options,
         shardOptions: x509_options,
         useHostname: false,
+        shardAsReplicaSet: false
     }
 });
diff --git a/jstests/tool/dumprestore9.js b/jstests/tool/dumprestore9.js
index 9123c75bf04..2fb63f70889 100644
--- a/jstests/tool/dumprestore9.js
+++ b/jstests/tool/dumprestore9.js
@@ -21,7 +21,7 @@ if (0) {
     s.adminCommand({
         enablesharding: "aaa"
     });  // Make this db alphabetically before 'config' so it gets restored first
-    s.ensurePrimaryShard('aaa', 'shard0001');
+    s.ensurePrimaryShard('aaa', s.shard1.shardName);
     s.adminCommand({shardcollection: "aaa.foo", key: {x: 1}});
     db = s.getDB("aaa");
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index c7fd9b52fbb..bb5530ebf79 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -58,6 +58,8 @@
  *       rs: same as above
  *       chunkSize: same as above
  *       keyFile {string}: the location of the keyFile
+ *       shardAsReplicaSet {boolean}: if true, start shards as 2 node replica sets. default
+ *          is true.
  *
  *     shardOptions {Object}: same as the shards property above.
  *        Can be used to specify options that are common all shards.
@@ -821,7 +823,6 @@
      */
     this.restartMongod = function(n, opts, beforeRestartCallback) {
         var mongod;
-
         if (otherParams.useBridge) {
             mongod = unbridgedConnections[n];
         } else {
@@ -1026,6 +1027,8 @@
     var mongosVerboseLevel = otherParams.hasOwnProperty('verbose') ? otherParams.verbose : 1;
     var numMongos = otherParams.hasOwnProperty('mongos') ? otherParams.mongos : 1;
     var numConfigs = otherParams.hasOwnProperty('config') ? otherParams.config : 3;
+    var startShardsAsRS =
+        otherParams.hasOwnProperty('shardAsReplicaSet') ? otherParams.shardAsReplicaSet : true;
     // Default enableBalancer to false.
     otherParams.enableBalancer =
@@ -1118,7 +1121,7 @@ var ShardingTest = function(params) {
     // Start the MongoD servers (shards)
     for (var i = 0; i < numShards; i++) {
-        if (otherParams.rs || otherParams["rs" + i]) {
+        if (otherParams.rs || otherParams["rs" + i] || startShardsAsRS) {
             var setName = testName + "-rs" + i;
             var rsDefaults = {
@@ -1129,14 +1132,27 @@ var ShardingTest = function(params) {
                 pathOpts: Object.merge(pathOpts, {shard: i}),
             };
-            rsDefaults = Object.merge(rsDefaults, otherParams.rs);
-            rsDefaults = Object.merge(rsDefaults, otherParams.rsOptions);
-            rsDefaults = Object.merge(rsDefaults, otherParams["rs" + i]);
-            rsDefaults.nodes = rsDefaults.nodes || otherParams.numReplicas;
+            if (otherParams.rs || otherParams["rs" + i]) {
+                if (otherParams.rs) {
+                    rsDefaults = Object.merge(rsDefaults, otherParams.rs);
+                }
+                if (otherParams["rs" + i]) {
+                    rsDefaults = Object.merge(rsDefaults, otherParams["rs" + i]);
+                }
+                rsDefaults = Object.merge(rsDefaults, otherParams.rsOptions);
+                rsDefaults.nodes = rsDefaults.nodes || otherParams.numReplicas;
+            }
+
             var rsSettings = rsDefaults.settings;
             delete rsDefaults.settings;
-            var numReplicas = rsDefaults.nodes || 3;
+            // If both rs and startShardsAsRS are specified, the number of nodes
+            // in the rs field should take priority.
+            if (otherParams.rs || otherParams["rs" + i]) {
+                var numReplicas = rsDefaults.nodes || 3;
+            } else if (startShardsAsRS) {
+                var numReplicas = 2;
+            }
             delete rsDefaults.nodes;
             var protocolVersion = rsDefaults.protocolVersion;
@@ -1242,7 +1258,7 @@ var ShardingTest = function(params) {
     // Do replication on replica sets if required
     for (var i = 0; i < numShards; i++) {
-        if (!otherParams.rs && !otherParams["rs" + i]) {
+        if (!otherParams.rs && !otherParams["rs" + i] && !startShardsAsRS) {
             continue;
         }
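With the branches above, the shard node count resolves differently depending on whether the caller asked for replica-set shards explicitly; a hedged illustration of the resulting defaults:

// No rs options: shards still come up as replica sets (new default), but with 2 nodes each.
var st1 = new ShardingTest({shards: 1});

// Explicit rs options win: rs.nodes (or the old default of 3) is used instead.
var st2 = new ShardingTest({shards: 1, rs: {nodes: 3}});

st1.stop();
st2.stop();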