 buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough_with_catalog_shard.yml | 1
 buildscripts/resmokeconfig/suites/sharding_catalog_shard.yml | 1
 buildscripts/resmokeconfig/suites/sharding_jscore_passthrough_with_catalog_shard.yml | 1
 jstests/auth/check_metadata_consistency.js | 2
 jstests/sharding/after_cluster_time.js | 3
 jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js | 74
 jstests/sharding/awaitable_hello_primary_failures.js | 16
 jstests/sharding/change_streams_primary_shard_unaware.js | 7
 jstests/sharding/conn_pool_stats.js | 4
 jstests/sharding/delete_range_deletion_tasks_on_stepup_after_drop_collection.js | 16
 jstests/sharding/exhaust_hello_topology_changes.js | 2
 jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js | 10
 jstests/sharding/linearizable_read_concern.js | 11
 jstests/sharding/live_shard_logical_initial_sync.js | 6
 jstests/sharding/migration_coordinator_abort_failover.js | 4
 jstests/sharding/migration_coordinator_shutdown_in_critical_section.js | 4
 jstests/sharding/migration_recovers_unfinished_migrations.js | 4
 jstests/sharding/migration_server_status.js | 12
 jstests/sharding/query/explain_agg_read_pref.js | 4
 jstests/sharding/read_committed_lookup.js | 3
 jstests/sharding/resharding_nonblocking_coordinator_rebuild.js | 4
 jstests/sharding/shard_identity_config_update.js | 78
 jstests/sharding/shard_insert_getlasterror_w2.js | 7
 jstests/sharding/shard_removal_triggers_catalog_cache_invalidation.js | 33
 jstests/sharding/sharding_index_catalog_API.js | 3
 jstests/sharding/sharding_index_catalog_upgrade_downgrade.js | 3
 jstests/sharding/sharding_non_transaction_snapshot_aggregate.js | 3
 jstests/sharding/sharding_non_transaction_snapshot_read.js | 7
 jstests/sharding/test_resharding_test_fixture_shutdown_retry_needed.js | 5
 jstests/sharding/transient_txn_error_labels.js | 8
 jstests/sharding/transient_txn_error_labels_with_write_concern.js | 8
 jstests/sharding/txn_commit_optimizations_for_read_only_shards.js | 6
 jstests/sharding/txn_single_write_shard_failover.js | 3
 jstests/sharding/txn_two_phase_commit_server_status.js | 5
 jstests/sharding/updateOne_without_shard_key/cluster_query_without_shard_key_basic.js | 7
 src/mongo/shell/shardingtest.js | 3
 36 files changed, 234 insertions(+), 134 deletions(-)
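
A note on the recurring pattern: instead of excluding tests with the old temporary_catalog_shard_incompatible tag, most files below either keep a permanent catalog_shard_incompatible tag (with a comment explaining why) or branch on TestData.catalogShard at runtime, e.g. promoting the config server with transitionToCatalogShard, letting ShardingTest choose the config node count, or keeping config nodes restartable so teardown checks still find a config server. The sketch below is a composite of those patterns for illustration only; the ShardingTest options and node indexes are assumed, not taken from any single file in this diff.

    // Composite sketch (illustrative; not part of this patch).
    const st = new ShardingTest({
        shards: {rs0: {nodes: 2}},
        // Let ShardingTest pick the config node count; in catalog shard suites the
        // config server is backed by shard0's replica set.
        config: TestData.catalogShard ? undefined : 1,
    });

    if (TestData.catalogShard) {
        // shard0 doubles as the config server, so stop the node with forRestart and
        // bring it back before st.stop() so teardown checks can reach a config server.
        st.rs0.stop(0, undefined, undefined, {forRestart: true});
        st.rs0.start(0, undefined, true /* restart */);
    } else {
        st.rs0.stop(0);
    }

    st.stop();
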
diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough_with_catalog_shard.yml b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough_with_catalog_shard.yml
index 11c8f5e1a83..0ff3e20cc11 100644
--- a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough_with_catalog_shard.yml
+++ b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough_with_catalog_shard.yml
@@ -81,7 +81,6 @@ selector:
# system.profile collection doesn't exist on mongos.
- requires_profiling
- catalog_shard_incompatible
- - temporary_catalog_shard_incompatible
executor:
archive:
diff --git a/buildscripts/resmokeconfig/suites/sharding_catalog_shard.yml b/buildscripts/resmokeconfig/suites/sharding_catalog_shard.yml
index b988b4581fd..f0405d6c585 100644
--- a/buildscripts/resmokeconfig/suites/sharding_catalog_shard.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_catalog_shard.yml
@@ -7,7 +7,6 @@ selector:
- jstests/sharding/**/libs/**/*.js
exclude_with_any_tags:
- catalog_shard_incompatible
- - temporary_catalog_shard_incompatible
executor:
archive:
diff --git a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough_with_catalog_shard.yml b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough_with_catalog_shard.yml
index 9550b5331d9..b0f0085f5b0 100644
--- a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough_with_catalog_shard.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough_with_catalog_shard.yml
@@ -45,7 +45,6 @@ selector:
# system.profile collection doesn't exist on mongos.
- requires_profiling
- catalog_shard_incompatible
- - temporary_catalog_shard_incompatible
executor:
archive:
diff --git a/jstests/auth/check_metadata_consistency.js b/jstests/auth/check_metadata_consistency.js
index 84dd5f89e88..9a94c0e5e72 100644
--- a/jstests/auth/check_metadata_consistency.js
+++ b/jstests/auth/check_metadata_consistency.js
@@ -5,7 +5,7 @@
* featureFlagCheckMetadataConsistency,
* requires_fcv_70,
* # TODO SERVER-74445: Remove tag once the command will be compatible with catalog shard
- * temporary_catalog_shard_incompatible,
+ * catalog_shard_incompatible,
* ]
*/
diff --git a/jstests/sharding/after_cluster_time.js b/jstests/sharding/after_cluster_time.js
index 690c1f8b50b..05e2b73cca2 100644
--- a/jstests/sharding/after_cluster_time.js
+++ b/jstests/sharding/after_cluster_time.js
@@ -31,6 +31,9 @@ rst.initiate();
// Start the sharding test and add the majority read concern enabled replica set.
const st = new ShardingTest({manualAddShard: true});
+if (TestData.catalogShard) {
+ assert.commandWorked(st.s.adminCommand({transitionToCatalogShard: 1}));
+}
assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
const testDB = st.s.getDB("test");
diff --git a/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js b/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
index 7ba54f04eba..450625491e6 100644
--- a/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
+++ b/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
@@ -1,7 +1,9 @@
/**
* Shuts down config server and shard replica set nodes one by one and ensures correct behaviour.
*
- * @tags: [temporary_catalog_shard_incompatible]
+ * Restarts the config server, which requires persistence so restarted nodes can rejoin their
+ * original replica set and run shutdown hooks.
+ * @tags: [requires_persistence]
*/
// Checking UUID and index consistency involves talking to the config servers, which are shut down
@@ -14,52 +16,78 @@ TestData.skipCheckShardFilteringMetadata = true;
(function() {
'use strict';
-var st = new ShardingTest({shards: {rs0: {nodes: 2}}});
+var st = new ShardingTest({
+ shards: {
+ rs0: {nodes: 2},
+ },
+ config: 3
+});
// The default read concern is local, which is incompatible with secondary reads when the primary is
// down.
st.s.adminCommand({setDefaultRWConcern: 1, defaultReadConcern: {level: "available"}});
+let count = 0;
jsTest.log('Config nodes up: 3 of 3, shard nodes up: 2 of 2: ' +
'Insert test data to work with');
assert.commandWorked(st.s0.getDB('TestDB').TestColl.update(
{_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
-assert.eq([{_id: 0, count: 1}], st.s0.getDB('TestDB').TestColl.find().toArray());
+count += 1;
+assert.eq([{_id: 0, count}], st.s0.getDB('TestDB').TestColl.find().toArray());
jsTest.log('Config nodes up: 2 of 3, shard nodes up: 2 of 2: ' +
'Inserts and queries must work');
-st.configRS.stop(0);
+st.configRS.stop(0, undefined, undefined, {forRestart: true});
st.restartMongos(0);
assert.commandWorked(st.s0.getDB('TestDB').TestColl.update(
{_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
-assert.eq([{_id: 0, count: 2}], st.s0.getDB('TestDB').TestColl.find().toArray());
+count += 1;
+assert.eq([{_id: 0, count}], st.s0.getDB('TestDB').TestColl.find().toArray());
-jsTest.log('Config nodes up: 1 of 3, shard nodes up: 2 of 2: ' +
- 'Inserts and queries must work');
-st.configRS.stop(1);
-st.restartMongos(0);
-assert.commandWorked(st.s0.getDB('TestDB').TestColl.update(
- {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
-assert.eq([{_id: 0, count: 3}], st.s0.getDB('TestDB').TestColl.find().toArray());
+if (!TestData.catalogShard) {
+ // For a catalog shard, the config server is the shard, so we can't have a different number up.
+ jsTest.log('Config nodes up: 1 of 3, shard nodes up: 2 of 2: ' +
+ 'Inserts and queries must work');
+ st.configRS.stop(1, undefined, undefined, {forRestart: true});
+ st.restartMongos(0);
+ assert.commandWorked(st.s0.getDB('TestDB').TestColl.update(
+ {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}}));
+ count += 1;
+ assert.eq([{_id: 0, count}], st.s0.getDB('TestDB').TestColl.find().toArray());
+}
jsTest.log('Config nodes up: 1 of 3, shard nodes up: 1 of 2: ' +
'Only queries will work (no shard primary)');
st.rs0.stop(0);
st.restartMongos(0);
st.s0.setSecondaryOk();
-assert.eq([{_id: 0, count: 3}], st.s0.getDB('TestDB').TestColl.find().toArray());
+assert.eq([{_id: 0, count}], st.s0.getDB('TestDB').TestColl.find().toArray());
-jsTest.log('Config nodes up: 1 of 3, shard nodes up: 0 of 2: ' +
- 'MongoS must start, but no operations will work (no shard nodes available)');
-st.rs0.stop(1);
-st.restartMongos(0);
-assert.throws(function() {
- st.s0.getDB('TestDB').TestColl.find().toArray();
-});
+if (!TestData.catalogShard) {
+ // For a catalog shard, the config server is the shard, so we can't have a different number up.
+ jsTest.log('Config nodes up: 1 of 3, shard nodes up: 0 of 2: ' +
+ 'MongoS must start, but no operations will work (no shard nodes available)');
+ st.rs0.stop(1);
+ st.restartMongos(0);
+ assert.throws(function() {
+ st.s0.getDB('TestDB').TestColl.find().toArray();
+ });
+}
jsTest.log('Config nodes up: 0 of 3, shard nodes up: 0 of 2: ' +
'Metadata cannot be loaded at all, no operations will work');
-st.configRS.stop(1);
+if (!TestData.catalogShard) {
+ st.configRS.stop(2);
+} else {
+ st.configRS.stop(1, undefined, undefined, {forRestart: true});
+ // Restart mongos while a config server is still up.
+ st.restartMongos(0);
+ // After taking down the last config/shard node, no user data operations will work.
+ st.configRS.stop(2, undefined, undefined, {forRestart: true});
+ assert.throws(function() {
+ st.s0.getDB('TestDB').TestColl.find().toArray();
+ });
+}
// Instead of restarting mongos, ensure it has no metadata
assert.commandWorked(st.s0.adminCommand({flushRouterConfig: 1}));
@@ -83,7 +111,7 @@ for (var i = 0; i < 2; i++) {
}
}
-// Restart one config server node to ensure that teardown checks may be executed
-st.restartConfigServer(0);
+// Restart two config server nodes to ensure that teardown checks may be executed
+st.restartAllConfigServers();
st.stop();
}());
diff --git a/jstests/sharding/awaitable_hello_primary_failures.js b/jstests/sharding/awaitable_hello_primary_failures.js
index fdc43ad07fb..8000ca5b3d8 100644
--- a/jstests/sharding/awaitable_hello_primary_failures.js
+++ b/jstests/sharding/awaitable_hello_primary_failures.js
@@ -2,7 +2,9 @@
* Test to assert that the RSM behaves correctly when contacting the primary node fails in various
* ways.
*
- * @tags: [temporary_catalog_shard_incompatible]
+ * Restarts the config server in catalog shard suites, which requires persistence so restarted nodes
+ * can rejoin their original replica set and run shutdown hooks.
+ * @tags: [requires_persistence]
*/
// Checking UUID consistency and orphans involves talking to a shard node, which in this test is
@@ -64,8 +66,18 @@ awaitRSClientHosts(mongos, {host: rsPrimary.name}, {ok: true, ismaster: true});
// Shutdown the primary node. The RSM should mark the node as down.
jsTestLog("Shutting down primary node.");
-st.rs0.stop(0);
+if (TestData.catalogShard) {
+ st.rs0.stop(0, undefined, undefined, {forRestart: true});
+} else {
+ st.rs0.stop(0);
+}
awaitRSClientHosts(mongos, {host: rsPrimary.name}, {ok: false});
+if (TestData.catalogShard) {
+ // Shard0 is the config server in catalog shard mode, so restart it for the ShardingTest
+ // shutdown hooks.
+ st.rs0.start(0, undefined, true /* restart */);
+}
+
st.stop();
}());
diff --git a/jstests/sharding/change_streams_primary_shard_unaware.js b/jstests/sharding/change_streams_primary_shard_unaware.js
index a3775770129..22e7eb7cfb4 100644
--- a/jstests/sharding/change_streams_primary_shard_unaware.js
+++ b/jstests/sharding/change_streams_primary_shard_unaware.js
@@ -9,7 +9,6 @@
// requires_majority_read_concern,
// requires_persistence,
// uses_change_streams,
-// temporary_catalog_shard_incompatible,
// ]
(function() {
"use strict";
@@ -36,7 +35,11 @@ const st = new ShardingTest({
rs: {
nodes: 1,
// Use a higher frequency for periodic noops to speed up the test.
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true},
+ setParameter: {
+ periodicNoopIntervalSecs: 1,
+ writePeriodicNoops: true,
+ enableShardedIndexConsistencyCheck: false
+ },
},
other: {configOptions: nodeOptions}
});
diff --git a/jstests/sharding/conn_pool_stats.js b/jstests/sharding/conn_pool_stats.js
index a34710be3e5..f9d5b9e89ee 100644
--- a/jstests/sharding/conn_pool_stats.js
+++ b/jstests/sharding/conn_pool_stats.js
@@ -1,7 +1,9 @@
/**
* Tests for the connPoolStats command.
*
- * @tags: [requires_fcv_63, temporary_catalog_shard_incompatible]
+ * Incompatible because it makes assertions about the specific number of connections used, which
+ * don't account for background activity on a config server.
+ * @tags: [requires_fcv_63, catalog_shard_incompatible]
*/
load("jstests/libs/fail_point_util.js");
load("jstests/libs/conn_pool_helpers.js");
diff --git a/jstests/sharding/delete_range_deletion_tasks_on_stepup_after_drop_collection.js b/jstests/sharding/delete_range_deletion_tasks_on_stepup_after_drop_collection.js
index 22dcf02724a..0e077cbe508 100644
--- a/jstests/sharding/delete_range_deletion_tasks_on_stepup_after_drop_collection.js
+++ b/jstests/sharding/delete_range_deletion_tasks_on_stepup_after_drop_collection.js
@@ -41,7 +41,8 @@ const dbName = "test";
const collName = "foo";
const ns = dbName + "." + collName;
-let st = new ShardingTest({shards: 2});
+let st = new ShardingTest(
+ {shards: 2, configOptions: {setParameter: {enableShardedIndexConsistencyCheck: false}}});
let testColl = st.s.getDB(dbName).getCollection(collName);
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
@@ -67,9 +68,7 @@ moveChunkHangAtStep5FailPoint.wait();
donorReplSetTest.freeze(donorPrimary);
moveChunkHangAtStep5FailPoint.off();
-if (!TestData.catalogShard) {
- moveChunkThread.join();
-}
+moveChunkThread.join();
metadataRefreshFailPoint.wait();
donorReplSetTest.unfreeze(donorPrimary);
@@ -85,15 +84,6 @@ assert.eq(1, getNumRangeDeletionDocs(recipientShard, ns));
testColl.drop();
metadataRefreshFailPoint.off();
-if (TestData.catalogShard) {
- // In catalog shard mode, the migration won't finish until after we finish migration recovery,
- // which is blocked by the fail point until we disable it above.
- //
- // SERVER-74446: Investigate why this only happens in catalog shard mode and if its safe to
- // ignore by changing the test.
- moveChunkThread.join();
-}
-
jsTest.log("Wait for the recipient to delete the range deletion task doc");
assert.soon(() => {
return 0 == getNumRangeDeletionDocs(recipientShard, ns);
diff --git a/jstests/sharding/exhaust_hello_topology_changes.js b/jstests/sharding/exhaust_hello_topology_changes.js
index 6e5647bc5d6..b3ba45d8861 100644
--- a/jstests/sharding/exhaust_hello_topology_changes.js
+++ b/jstests/sharding/exhaust_hello_topology_changes.js
@@ -6,7 +6,7 @@
* hit. A replica set node should send a response to the mongos as soon as it processes a topology
* change, so "immediately"/"quickly" can vary - we specify 5 seconds in this test ('timeoutMS').
*
- * @tags: [requires_streamable_rsm, temporary_catalog_shard_incompatible]
+ * @tags: [requires_streamable_rsm]
*/
// This test shuts down a shard's node and because of this consistency checking
diff --git a/jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js b/jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js
index feebe8ccc67..e3bd67dfc01 100644
--- a/jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js
+++ b/jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js
@@ -5,7 +5,6 @@
* @tags: [
* # The SBE plan cache was enabled by default in 6.3.
* requires_fcv_63,
- * temporary_catalog_shard_incompatible,
* ]
*/
@@ -97,7 +96,14 @@ assert.commandWorked(mongos.adminCommand({enableSharding: dbName}));
st.shard0.adminCommand(
{_flushRoutingTableCacheUpdates: collA.getFullName(), syncFromConfig: true});
- assertPlanCacheSizeForColl(collA.getFullName(), 0);
+ if (TestData.catalogShard) {
+ // Refining a shard key runs a "noop" find on the refined namespace, which runs locally on
+ // the config server without a shard version, so it generates a plan cache entry on collA
+ // that is not cleared.
+ assertPlanCacheSizeForColl(collA.getFullName(), 1);
+ } else {
+ assertPlanCacheSizeForColl(collA.getFullName(), 0);
+ }
assertPlanCacheSizeForColl(collB.getFullName(), 1);
})();
diff --git a/jstests/sharding/linearizable_read_concern.js b/jstests/sharding/linearizable_read_concern.js
index 98724828edd..9e23463f72f 100644
--- a/jstests/sharding/linearizable_read_concern.js
+++ b/jstests/sharding/linearizable_read_concern.js
@@ -19,8 +19,6 @@
* document. This test is mainly trying to ensure that system behavior is
* reasonable when executing linearizable reads in a sharded cluster, so as to
* exercise possible (invalid) user behavior.
- *
- * @tags: [temporary_catalog_shard_incompatible]
*/
load("jstests/replsets/rslib.js");
@@ -38,10 +36,9 @@ var testName = "linearizable_read_concern";
var st = new ShardingTest({
name: testName,
- shards: 2,
other: {rs0: {nodes: 3}, rs1: {nodes: 3}, useBridge: true},
mongos: 1,
- config: 1,
+ config: TestData.catalogShard ? undefined : 1,
enableBalancer: false
});
@@ -126,5 +123,11 @@ var result = testDB.runReadCommand({
});
assert.commandFailedWithCode(result, ErrorCodes.MaxTimeMSExpired);
+if (TestData.catalogShard) {
+ // Reconnect so the config server is available for shutdown hooks.
+ secondaries[0].reconnect(primary);
+ secondaries[1].reconnect(primary);
+}
+
st.stop();
})();
diff --git a/jstests/sharding/live_shard_logical_initial_sync.js b/jstests/sharding/live_shard_logical_initial_sync.js
index 9fd41c33e3d..bec176292bf 100644
--- a/jstests/sharding/live_shard_logical_initial_sync.js
+++ b/jstests/sharding/live_shard_logical_initial_sync.js
@@ -3,8 +3,7 @@
* shards using logical initial sync.
*
* We control our own failovers, and we also need the RSM to react reasonably quickly to those.
- * @tags: [does_not_support_stepdowns, requires_streamable_rsm,
- * temporary_catalog_shard_incompatible]
+ * @tags: [does_not_support_stepdowns, requires_streamable_rsm]
*/
(function() {
@@ -15,7 +14,8 @@ load("jstests/sharding/libs/sharding_state_test.js");
const st = new ShardingTest({config: 1, shards: {rs0: {nodes: 1}}});
const rs = st.rs0;
-const newNode = ShardingStateTest.addReplSetNode({replSet: rs, serverTypeFlag: "shardsvr"});
+const serverTypeFlag = TestData.catalogShard ? "configsvr" : "shardsvr";
+const newNode = ShardingStateTest.addReplSetNode({replSet: rs, serverTypeFlag});
jsTestLog("Checking sharding state before failover.");
ShardingStateTest.checkShardingState(st);
diff --git a/jstests/sharding/migration_coordinator_abort_failover.js b/jstests/sharding/migration_coordinator_abort_failover.js
index aca052bf42d..cda648db59e 100644
--- a/jstests/sharding/migration_coordinator_abort_failover.js
+++ b/jstests/sharding/migration_coordinator_abort_failover.js
@@ -4,8 +4,8 @@
*
* Assumes a donor stepdown will trigger a failover migration response, but if donor is catalog
* shard, it will trigger a full retry from mongos, which leads to a successful retry despite the
- * original interrupted attempt correctly failing. See if the test can be reworked.
- * @tags: [temporary_catalog_shard_incompatible]
+ * original interrupted attempt correctly failing.
+ * @tags: [catalog_shard_incompatible]
*/
// This test induces failovers on shards.
diff --git a/jstests/sharding/migration_coordinator_shutdown_in_critical_section.js b/jstests/sharding/migration_coordinator_shutdown_in_critical_section.js
index 1a39a9b43c3..f8a1be859a1 100644
--- a/jstests/sharding/migration_coordinator_shutdown_in_critical_section.js
+++ b/jstests/sharding/migration_coordinator_shutdown_in_critical_section.js
@@ -3,11 +3,13 @@
* _configsvrEnsureChunkVersionIsGreaterThan and while the node is forcing a filtering metadata
* refresh.
*
+ * Shuts down a donor shard which leads mongos to retry if the donor is also the config server, and
+ * this can fail waiting for read preference if the shard is slow to recover.
* @tags: [
* does_not_support_stepdowns,
* # Require persistence to restart nodes
* requires_persistence,
- * temporary_catalog_shard_incompatible,
+ * catalog_shard_incompatible,
* ]
*/
diff --git a/jstests/sharding/migration_recovers_unfinished_migrations.js b/jstests/sharding/migration_recovers_unfinished_migrations.js
index 7203e3540a6..06ee681454c 100644
--- a/jstests/sharding/migration_recovers_unfinished_migrations.js
+++ b/jstests/sharding/migration_recovers_unfinished_migrations.js
@@ -7,7 +7,6 @@
* # that migration by sending a new `moveChunk` command to the donor shard causing the test to
* # hang.
* does_not_support_stepdowns,
- * temporary_catalog_shard_incompatible,
* ]
*/
(function() {
@@ -27,6 +26,7 @@ const nodeOptions = {
// the shards that would interfere with the migration recovery interleaving this test requires.
var st = new ShardingTest({
shards: {rs0: {nodes: 2}, rs1: {nodes: 1}},
+ config: 3,
other: {configOptions: nodeOptions, enableBalancer: false}
});
let staticMongod = MongoRunner.runMongod({});
@@ -62,7 +62,7 @@ const rs0Secondary = st.rs0.getSecondary();
let hangInEnsureChunkVersionIsGreaterThanInterruptibleFailpoint =
configureFailPoint(rs0Secondary, "hangInEnsureChunkVersionIsGreaterThanInterruptible");
-assert.commandWorked(st.rs0.getPrimary().adminCommand({replSetStepDown: 60, force: true}));
+st.rs0.stepUp(rs0Secondary);
joinMoveChunk1();
migrationCommitNetworkErrorFailpoint.off();
skipShardFilteringMetadataRefreshFailpoint.off();
diff --git a/jstests/sharding/migration_server_status.js b/jstests/sharding/migration_server_status.js
index ab73e053b82..39c1330866b 100644
--- a/jstests/sharding/migration_server_status.js
+++ b/jstests/sharding/migration_server_status.js
@@ -2,7 +2,7 @@
* Tests that serverStatus includes a migration status when called on the source shard of an active
* migration.
*
- * @tags: [requires_fcv_63, temporary_catalog_shard_incompatible]
+ * @tags: [requires_fcv_63]
*/
load('./jstests/libs/chunk_manipulation_util.js');
@@ -141,7 +141,12 @@ assertMigrationStatusOnServerStatus(shard0ServerStatus,
{"_id": 0},
{"_id": {"$maxKey": 1}},
coll + "");
-assertSessionMigrationStatusSource(shard0ServerStatus, 2400, 2600);
+// Background metadata operations on the config server can throw off the count, so just assert the
+// fields are present for a catalog shard.
+const expectedEntriesMigrated = TestData.catalogShard ? undefined : 2400;
+const expectedEntriesSkipped = TestData.catalogShard ? undefined : 2600;
+assertSessionMigrationStatusSource(
+ shard0ServerStatus, expectedEntriesMigrated, expectedEntriesSkipped);
// Destination shard should have the correct server status
shard1ServerStatus = st.shard1.getDB('admin').runCommand({serverStatus: 1});
@@ -153,7 +158,8 @@ assertMigrationStatusOnServerStatus(shard1ServerStatus,
{"_id": 0},
{"_id": {"$maxKey": 1}},
coll + "");
-assertSessionMigrationStatusDestination(shard1ServerStatus, 2400);
+assertSessionMigrationStatusDestination(
+ shard1ServerStatus, expectedEntriesMigrated, expectedEntriesSkipped);
unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted);
diff --git a/jstests/sharding/query/explain_agg_read_pref.js b/jstests/sharding/query/explain_agg_read_pref.js
index 7ecdc123018..da3ad99ad10 100644
--- a/jstests/sharding/query/explain_agg_read_pref.js
+++ b/jstests/sharding/query/explain_agg_read_pref.js
@@ -1,7 +1,5 @@
/**
* Tests that readPref applies on an explain for an aggregation command.
- *
- * @tags: [temporary_catalog_shard_incompatible]
*/
(function() {
"use strict";
@@ -36,7 +34,7 @@ assert.commandWorked(mongosDB.dropDatabase());
const coll = mongosDB.getCollection("coll");
assert.commandWorked(config.adminCommand({enableSharding: mongosDB.getName()}));
-st.ensurePrimaryShard(mongosDB.getName(), "agg_explain_readPref-rs0");
+st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName);
const rs0Primary = st.rs0.getPrimary();
const rs0Secondary = st.rs0.getSecondary();
const rs1Primary = st.rs1.getPrimary();
diff --git a/jstests/sharding/read_committed_lookup.js b/jstests/sharding/read_committed_lookup.js
index 0bbf1cfbf79..89093521afa 100644
--- a/jstests/sharding/read_committed_lookup.js
+++ b/jstests/sharding/read_committed_lookup.js
@@ -39,6 +39,9 @@ let shardSecondary = rst.getSecondary();
let st = new ShardingTest({
manualAddShard: true,
});
+if (TestData.catalogShard) {
+ assert.commandWorked(st.s.adminCommand({transitionToCatalogShard: 1}));
+}
// The default WC is majority and this test can't satisfy majority writes.
assert.commandWorked(st.s.adminCommand(
{setDefaultRWConcern: 1, defaultWriteConcern: {w: 1}, writeConcern: {w: "majority"}}));
diff --git a/jstests/sharding/resharding_nonblocking_coordinator_rebuild.js b/jstests/sharding/resharding_nonblocking_coordinator_rebuild.js
index c005f1e0e77..3bf4ef08fc1 100644
--- a/jstests/sharding/resharding_nonblocking_coordinator_rebuild.js
+++ b/jstests/sharding/resharding_nonblocking_coordinator_rebuild.js
@@ -2,8 +2,8 @@
* Tests that resharding participants do not block replication while waiting for the
* ReshardingCoordinatorService to be rebuilt.
*
- * Looks like a test incompatibility, but should be verified and maybe rework the test.
- * @tags: [temporary_catalog_shard_incompatible]
+ * TODO SERVER-75885: Investigate if this test can be enabled or reworked.
+ * @tags: [catalog_shard_incompatible]
*/
(function() {
"use strict";
diff --git a/jstests/sharding/shard_identity_config_update.js b/jstests/sharding/shard_identity_config_update.js
index b0b018b5b29..6d0d3458b26 100644
--- a/jstests/sharding/shard_identity_config_update.js
+++ b/jstests/sharding/shard_identity_config_update.js
@@ -1,10 +1,7 @@
/**
* Tests that the config server connection string in the shard identity document of both the
* primary and secondary will get updated whenever the config server membership changes.
- *
- * Shuts down the first shard but expects the config server to still be up. See if we can rework to
- * get coverage in catalog shard mode.
- * @tags: [requires_persistence, temporary_catalog_shard_incompatible]
+ * @tags: [requires_persistence]
*/
// Checking UUID consistency involves talking to a shard node, which in this test is shutdown
@@ -15,9 +12,11 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
load('jstests/replsets/rslib.js');
-var st = new ShardingTest({shards: {rs0: {nodes: 2}}});
+const notInMultiversionTest = !jsTestOptions().shardMixedBinVersions &&
+ jsTestOptions().mongosBinVersion !== "last-lts" &&
+ jsTestOptions().mongosBinVersion !== "last-continuous";
-var shardPri = st.rs0.getPrimary();
+var st = new ShardingTest({shards: {rs0: {nodes: 2}}});
// Note: Adding new replica set member by hand because of SERVER-24011.
@@ -65,20 +64,45 @@ assert.soon(function() {
return checkConfigStrUpdated(st.rs0.getPrimary(), expectedConfigStr);
});
-var secConn = st.rs0.getSecondary();
-secConn.setSecondaryOk();
-assert.soon(function() {
- return checkConfigStrUpdated(secConn, expectedConfigStr);
+st.rs0.getSecondaries().forEach(secConn => {
+ secConn.setSecondaryOk();
+ assert.soon(function() {
+ return checkConfigStrUpdated(secConn, expectedConfigStr);
+ });
});
+// Config servers in 7.0 also maintain the connection string in their shard identity document.
+// TODO SERVER-75391: Always run config server assertions.
+if (notInMultiversionTest) {
+ assert.soon(function() {
+ return checkConfigStrUpdated(st.configRS.getPrimary(), expectedConfigStr);
+ });
+
+ st.configRS.getSecondaries().forEach(secConn => {
+ secConn.setSecondaryOk();
+ assert.soon(function() {
+ return checkConfigStrUpdated(secConn, expectedConfigStr);
+ });
+ });
+
+ newNode.setSecondaryOk();
+ assert.soon(function() {
+ return checkConfigStrUpdated(newNode, expectedConfigStr);
+ });
+}
+
//
// Remove the newly added member from the config replSet while the shards are down.
// Check that the shard identity document will be updated with the new replSet connection
// string when they come back up.
//
-st.rs0.stop(0);
-st.rs0.stop(1);
+// We can't reconfigure the config server if some nodes are down, so skip in catalog shard mode and
+// just verify all nodes update the config string eventually.
+if (!TestData.catalogShard) {
+ st.rs0.stop(0);
+ st.rs0.stop(1);
+}
MongoRunner.stopMongod(newNode);
@@ -88,8 +112,10 @@ replConfig.members.pop();
reconfig(st.configRS, replConfig);
-st.rs0.restart(0, {shardsvr: ''});
-st.rs0.restart(1, {shardsvr: ''});
+if (!TestData.catalogShard) {
+ st.rs0.restart(0, {shardsvr: ''});
+ st.rs0.restart(1, {shardsvr: ''});
+}
st.rs0.waitForPrimary();
st.rs0.awaitSecondaryNodes();
@@ -98,11 +124,27 @@ assert.soon(function() {
return checkConfigStrUpdated(st.rs0.getPrimary(), origConfigConnStr);
});
-secConn = st.rs0.getSecondary();
-secConn.setSecondaryOk();
-assert.soon(function() {
- return checkConfigStrUpdated(secConn, origConfigConnStr);
+st.rs0.getSecondaries().forEach(secConn => {
+ secConn.setSecondaryOk();
+ assert.soon(function() {
+ return checkConfigStrUpdated(secConn, origConfigConnStr);
+ });
});
+// Config servers in 7.0 also maintain the connection string in their shard identity document.
+// TODO SERVER-75391: Always run config server assertions.
+if (notInMultiversionTest) {
+ assert.soon(function() {
+ return checkConfigStrUpdated(st.configRS.getPrimary(), origConfigConnStr);
+ });
+
+ st.configRS.getSecondaries().forEach(secConn => {
+ secConn.setSecondaryOk();
+ assert.soon(function() {
+ return checkConfigStrUpdated(secConn, origConfigConnStr);
+ });
+ });
+}
+
st.stop();
})();
diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js
index 12e2c24c8ec..146e1d8ffbf 100644
--- a/jstests/sharding/shard_insert_getlasterror_w2.js
+++ b/jstests/sharding/shard_insert_getlasterror_w2.js
@@ -1,6 +1,5 @@
// replica set as solo shard
// TODO: Add assertion code that catches hang
-// @tags: [temporary_catalog_shard_incompatible]
// The UUID and index check must be able to contact the shard primaries, but this test manually
// stops 2/3 nodes of a replica set.
@@ -45,7 +44,11 @@ var mongosConn = shardingTest.s;
var testDB = mongosConn.getDB(testDBName);
// Add replSet1 as only shard
-assert.commandWorked(mongosConn.adminCommand({addshard: replSet1.getURL()}));
+if (!TestData.catalogShard) {
+ assert.commandWorked(mongosConn.adminCommand({addshard: replSet1.getURL()}));
+} else {
+ assert.commandWorked(mongosConn.adminCommand({transitionToCatalogShard: 1}));
+}
// Enable sharding on test db and its collection foo
assert.commandWorked(mongosConn.getDB('admin').runCommand({enablesharding: testDBName}));
diff --git a/jstests/sharding/shard_removal_triggers_catalog_cache_invalidation.js b/jstests/sharding/shard_removal_triggers_catalog_cache_invalidation.js
index d2e40b75b1a..b35ebe11c89 100644
--- a/jstests/sharding/shard_removal_triggers_catalog_cache_invalidation.js
+++ b/jstests/sharding/shard_removal_triggers_catalog_cache_invalidation.js
@@ -1,8 +1,6 @@
/**
* Tests that shard removal triggers an update of the catalog cache so that routers don't continue
* to target shards that have been removed.
- *
- * @tags: [temporary_catalog_shard_incompatible]
*/
(function() {
'use strict';
@@ -69,8 +67,13 @@ const dbName = 'TestDB';
// Remove shard0.
removeShard(st, st.shard0.shardName);
- // Stop the replica set so that future requests to this shard will be unsuccessful.
- st.rs0.stopSet();
+ // Stop the replica set so that future requests to this shard will be unsuccessful. Skip this
+ // step for a catalog shard, since the config server must be up for the second router to
+ // refresh. The default read concern is local, so the router should eventually target a shard
+ // with chunks.
+ if (!TestData.catalogShard) {
+ st.rs0.stopSet();
+ }
// Ensure that s1, the router which did not run removeShard, eventually stops targeting chunks
// for the sharded collection which previously resided on a shard that no longer exists.
@@ -128,21 +131,15 @@ const dbName = 'TestDB';
// Remove shard0. We need assert.soon since chunks in the sessions collection may need to be
// migrated off by the balancer.
- assert.soon(() => {
- const removeRes = st.s0.adminCommand({removeShard: st.shard0.shardName});
- if (!removeRes.ok && removeRes.code === ErrorCodes.ShardNotFound) {
- // If the config server primary steps down after removing the config.shards doc for the
- // shard being removed but before completing the _configsvrRemoveShard command, the
- // mongos would retry the command on the new config server primary which would not find
- // the removed shard in its ShardRegistry causing the command to fail with
- // ShardNotFound.
- return true;
- }
- return removeRes.state === 'completed';
- });
+ removeShard(st, st.shard0.shardName);
- // Stop the replica set so that future requests to this shard will be unsuccessful.
- st.rs0.stopSet();
+ // Stop the replica set so that future requests to this shard will be unsuccessful. Skip this
+ // step for a catalog shard, since the config server must be up for the second router to
+ // refresh. The default read concern is local, so the router should eventually target a shard
+ // with chunks.
+ if (!TestData.catalogShard) {
+ st.rs0.stopSet();
+ }
// Ensure that s1, the router which did not run removeShard, eventually stops targeting data for
// the unsharded collection which previously had as primary a shard that no longer exists.
diff --git a/jstests/sharding/sharding_index_catalog_API.js b/jstests/sharding/sharding_index_catalog_API.js
index 40cb0de806b..0709b48c59e 100644
--- a/jstests/sharding/sharding_index_catalog_API.js
+++ b/jstests/sharding/sharding_index_catalog_API.js
@@ -1,10 +1,11 @@
/**
* Tests that the global indexes API correctly creates and drops an index from the catalog.
*
+ * TODO SERVER-75274: Enable with a catalog shard.
* @tags: [
* multiversion_incompatible,
* featureFlagGlobalIndexesShardingCatalog,
- * temporary_catalog_shard_incompatible,
+ * catalog_shard_incompatible,
* ]
*/
diff --git a/jstests/sharding/sharding_index_catalog_upgrade_downgrade.js b/jstests/sharding/sharding_index_catalog_upgrade_downgrade.js
index 5c1a06ddd4d..6822c260753 100644
--- a/jstests/sharding/sharding_index_catalog_upgrade_downgrade.js
+++ b/jstests/sharding/sharding_index_catalog_upgrade_downgrade.js
@@ -2,8 +2,9 @@
* Tests that the global indexes collections are dropped on FCV downgrade and recreated after
* upgrading.
*
+ * TODO SERVER-75274: Enable with a catalog shard.
* @tags: [multiversion_incompatible, featureFlagGlobalIndexesShardingCatalog,
- * requires_fcv_70, temporary_catalog_shard_incompatible]
+ * requires_fcv_70, catalog_shard_incompatible]
*/
(function() {
diff --git a/jstests/sharding/sharding_non_transaction_snapshot_aggregate.js b/jstests/sharding/sharding_non_transaction_snapshot_aggregate.js
index ce97d916494..ef8f7d27413 100644
--- a/jstests/sharding/sharding_non_transaction_snapshot_aggregate.js
+++ b/jstests/sharding/sharding_non_transaction_snapshot_aggregate.js
@@ -5,7 +5,6 @@
* @tags: [
* requires_majority_read_concern,
* requires_persistence,
- * temporary_catalog_shard_incompatible,
* ]
*/
@@ -42,7 +41,7 @@ const st = new ShardingTest({
rs2: {nodes: 2},
},
mongos: 1,
- config: 1,
+ config: TestData.catalogShard ? undefined : 1,
other: {configOptions: nodeOptions, rsOptions: nodeOptions}
});
// Config sharded collections.
diff --git a/jstests/sharding/sharding_non_transaction_snapshot_read.js b/jstests/sharding/sharding_non_transaction_snapshot_read.js
index 9c93e9d6082..48d12e289b8 100644
--- a/jstests/sharding/sharding_non_transaction_snapshot_read.js
+++ b/jstests/sharding/sharding_non_transaction_snapshot_read.js
@@ -4,7 +4,6 @@
* @tags: [
* requires_majority_read_concern,
* requires_persistence,
- * temporary_catalog_shard_incompatible,
* ]
*/
@@ -36,7 +35,7 @@ let shardingScenarios = {
setUp: function() {
const st = new ShardingTest({
mongos: 1,
- config: 1,
+ config: TestData.catalogShard ? undefined : 1,
shards: {rs0: {nodes: 2}},
other: {configOptions: nodeOptions, rsOptions: nodeOptions}
});
@@ -54,7 +53,7 @@ let shardingScenarios = {
rs2: {nodes: 2},
},
mongos: 1,
- config: 1,
+ config: TestData.catalogShard ? undefined : 1,
other: {configOptions: nodeOptions, rsOptions: nodeOptions}
});
setUpAllScenarios(st);
@@ -98,7 +97,7 @@ let shardingScenarios = {
rs2: {nodes: 2},
},
mongos: 1,
- config: 1,
+ config: TestData.catalogShard ? undefined : 1,
other: {configOptions: nodeOptions, rsOptions: nodeOptions}
});
setUpAllScenarios(st);
diff --git a/jstests/sharding/test_resharding_test_fixture_shutdown_retry_needed.js b/jstests/sharding/test_resharding_test_fixture_shutdown_retry_needed.js
index c727b227145..528b51b1513 100644
--- a/jstests/sharding/test_resharding_test_fixture_shutdown_retry_needed.js
+++ b/jstests/sharding/test_resharding_test_fixture_shutdown_retry_needed.js
@@ -4,10 +4,13 @@
* Verifies that the background thread running the reshardCollection command will retry when mongos
* reports an error caused by a network error from the primary shard.
*
+ * Incompatible with a catalog shard because it uses a sequence of fail points to test the
+ * resharding test fixture, which doesn't work when the first shard is the config server. This only
+ * tests the testing fixture, so it wouldn't add meaningful coverage for a catalog shard.
* @tags: [
* requires_persistence,
* uses_atclustertime,
- * temporary_catalog_shard_incompatible,
+ * catalog_shard_incompatible,
* ]
*/
(function() {
diff --git a/jstests/sharding/transient_txn_error_labels.js b/jstests/sharding/transient_txn_error_labels.js
index 036f8136e61..5bdf614935f 100644
--- a/jstests/sharding/transient_txn_error_labels.js
+++ b/jstests/sharding/transient_txn_error_labels.js
@@ -2,7 +2,6 @@
* Test TransientTransactionErrors error label in transactions.
* @tags: [
* uses_transactions,
- * temporary_catalog_shard_incompatible,
* ]
*/
@@ -17,8 +16,11 @@ const collName = "no_error_labels_outside_txn";
// We are testing coordinateCommitTransaction, which requires the nodes to be started with
// --shardsvr.
-const st = new ShardingTest(
- {config: 1, mongos: 1, shards: {rs0: {nodes: [{}, {rsConfig: {priority: 0}}]}}});
+const st = new ShardingTest({
+ config: TestData.catalogShard ? undefined : 1,
+ mongos: 1,
+ shards: {rs0: {nodes: [{}, {rsConfig: {priority: 0}}]}}
+});
const primary = st.rs0.getPrimary();
const secondary = st.rs0.getSecondary();
diff --git a/jstests/sharding/transient_txn_error_labels_with_write_concern.js b/jstests/sharding/transient_txn_error_labels_with_write_concern.js
index db59d5d6c81..dfd34df385c 100644
--- a/jstests/sharding/transient_txn_error_labels_with_write_concern.js
+++ b/jstests/sharding/transient_txn_error_labels_with_write_concern.js
@@ -2,7 +2,6 @@
* Test TransientTransactionError error label for commands in transactions with write concern.
* @tags: [
* uses_transactions,
- * temporary_catalog_shard_incompatible,
* ]
*/
(function() {
@@ -17,8 +16,11 @@ const collName = "transient_txn_error_labels_with_write_concern";
// We are testing coordinateCommitTransaction, which requires the nodes to be started with
// --shardsvr.
-const st = new ShardingTest(
- {config: 1, mongos: 1, shards: {rs0: {nodes: [{}, {rsConfig: {priority: 0}}]}}});
+const st = new ShardingTest({
+ config: TestData.catalogShard ? undefined : 1,
+ mongos: 1,
+ shards: {rs0: {nodes: [{}, {rsConfig: {priority: 0}}]}}
+});
const rst = st.rs0;
const primary = rst.getPrimary();
diff --git a/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js b/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js
index 159662b1849..2749b88dd73 100644
--- a/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js
+++ b/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js
@@ -6,8 +6,7 @@
* no failures, a participant having failed over, a participant being unable to satisfy the client's
* writeConcern, and an invalid client writeConcern.
*
- * @tags: [requires_fcv_70, uses_transactions, uses_multi_shard_transaction,
- * temporary_catalog_shard_incompatible]
+ * @tags: [requires_fcv_70, uses_transactions, uses_multi_shard_transaction]
*/
(function() {
@@ -64,7 +63,7 @@ TestData.transactionLifetimeLimitSeconds = 30;
let st = new ShardingTest({
shards: 3,
// Create shards with more than one node because we test for writeConcern majority failing.
- config: 1,
+ config: TestData.catalogShard ? undefined : 1,
other: {
mongosOptions: {
verbose: 3,
@@ -346,6 +345,7 @@ const failureModes = {
},
};
+clearRawMongoProgramOutput();
for (const failureModeName in failureModes) {
for (const type in transactionTypes) {
const lsid = getLSID();
diff --git a/jstests/sharding/txn_single_write_shard_failover.js b/jstests/sharding/txn_single_write_shard_failover.js
index a582622ed33..83c96251fdc 100644
--- a/jstests/sharding/txn_single_write_shard_failover.js
+++ b/jstests/sharding/txn_single_write_shard_failover.js
@@ -13,7 +13,6 @@
* @tags: [
* uses_multi_shard_transaction,
* uses_transactions,
- * temporary_catalog_shard_incompatible,
* ]
*/
@@ -32,7 +31,7 @@ const ns2 = db2Name + "." + coll2Name;
const st = new ShardingTest({
shards: {rs0: {nodes: 2}, rs1: {nodes: 1}},
- config: 1,
+ config: TestData.catalogShard ? undefined : 1,
other: {
mongosOptions: {verbose: 3},
}
diff --git a/jstests/sharding/txn_two_phase_commit_server_status.js b/jstests/sharding/txn_two_phase_commit_server_status.js
index 2525e16ee86..aee129a1f0b 100644
--- a/jstests/sharding/txn_two_phase_commit_server_status.js
+++ b/jstests/sharding/txn_two_phase_commit_server_status.js
@@ -1,5 +1,4 @@
// Basic test that the two-phase commit coordinator metrics fields appear in serverStatus output.
-// @tags: [temporary_catalog_shard_incompatible]
(function() {
"use strict";
@@ -7,7 +6,9 @@ const st = new ShardingTest({shards: 1});
const res = assert.commandWorked(st.shard0.adminCommand({serverStatus: 1}));
assert.neq(null, res.twoPhaseCommitCoordinator);
-assert.eq(0, res.twoPhaseCommitCoordinator.totalCreated);
+// A catalog shard will have run config server metadata transactions, which are single shard but
+// create a two phase commit coordinator.
+assert.eq(TestData.catalogShard ? 1 : 0, res.twoPhaseCommitCoordinator.totalCreated);
assert.eq(0, res.twoPhaseCommitCoordinator.totalStartedTwoPhaseCommit);
assert.eq(0, res.twoPhaseCommitCoordinator.totalCommittedTwoPhaseCommit);
assert.eq(0, res.twoPhaseCommitCoordinator.totalAbortedTwoPhaseCommit);
diff --git a/jstests/sharding/updateOne_without_shard_key/cluster_query_without_shard_key_basic.js b/jstests/sharding/updateOne_without_shard_key/cluster_query_without_shard_key_basic.js
index 325e37815df..a0ab4991f66 100644
--- a/jstests/sharding/updateOne_without_shard_key/cluster_query_without_shard_key_basic.js
+++ b/jstests/sharding/updateOne_without_shard_key/cluster_query_without_shard_key_basic.js
@@ -7,7 +7,6 @@
* @tags: [
* requires_fcv_63,
* featureFlagUpdateOneWithoutShardKey,
- * temporary_catalog_shard_incompatible,
* ]
*/
(function() {
@@ -191,11 +190,9 @@ function testCommandShardedCollectionOnMultipleShards(testCase) {
// _id, but the way the test is structured, the _id and the shard key have the same value when
// inserted.
if (res.targetDoc["_id"] < splitPoint) {
- let hostname = st.shard0.host.split("/")[0];
- assert.eq(res.shardId, hostname);
+ assert.eq(res.shardId, st.shard0.shardName);
} else {
- let hostname = st.shard1.host.split("/")[0];
- assert.eq(res.shardId, hostname);
+ assert.eq(res.shardId, st.shard1.shardName);
}
// Check that no modifications were made to the documents.
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index e85329bbeb0..431d8441e86 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -1192,7 +1192,8 @@ var ShardingTest = function(params) {
var numShards = otherParams.hasOwnProperty('shards') ? otherParams.shards : 2;
var mongosVerboseLevel = otherParams.hasOwnProperty('verbose') ? otherParams.verbose : 1;
var numMongos = otherParams.hasOwnProperty('mongos') ? otherParams.mongos : 1;
- const usedDefaultNumConfigs = !otherParams.hasOwnProperty('config');
+ const usedDefaultNumConfigs =
+ !otherParams.hasOwnProperty('config') || otherParams.config === undefined;
var numConfigs = otherParams.hasOwnProperty('config') ? otherParams.config : 3;
let isCatalogShardMode =