-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_auth_catalog_shard.yml | 55
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_catalog_shard.yml | 21
-rw-r--r--  etc/evergreen.yml | 2
-rw-r--r--  etc/evergreen_yml_components/definitions.yml | 16
-rw-r--r--  jstests/libs/catalog_shard_util.js | 23
-rw-r--r--  jstests/sharding/addshard2.js | 15
-rw-r--r--  jstests/sharding/addshard5.js | 8
-rw-r--r--  jstests/sharding/addshard6.js | 2
-rw-r--r--  jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js | 2
-rw-r--r--  jstests/sharding/allow_partial_results.js | 2
-rw-r--r--  jstests/sharding/allow_partial_results_nshards.js | 1
-rw-r--r--  jstests/sharding/analyze_shard_key/analyze_shard_key_basic.js | 2
-rw-r--r--  jstests/sharding/analyze_shard_key/configure_query_analyzer_basic.js | 6
-rw-r--r--  jstests/sharding/analyze_shard_key/refresh_sample_rates_sharded.js | 2
-rw-r--r--  jstests/sharding/arbiters_do_not_use_cluster_time.js | 3
-rw-r--r--  jstests/sharding/auth.js | 2
-rw-r--r--  jstests/sharding/authCommands.js | 17
-rw-r--r--  jstests/sharding/auth_secondaryok_routing.js | 11
-rw-r--r--  jstests/sharding/auth_sharding_cmd_metadata.js | 8
-rw-r--r--  jstests/sharding/awaitable_hello_primary_failures.js | 2
-rw-r--r--  jstests/sharding/basic_sharding_params.js | 1
-rw-r--r--  jstests/sharding/batch_write_command_sharded.js | 2
-rw-r--r--  jstests/sharding/change_stream_error_label.js | 1
-rw-r--r--  jstests/sharding/change_stream_no_shards.js | 5
-rw-r--r--  jstests/sharding/change_streams_primary_shard_unaware.js | 2
-rw-r--r--  jstests/sharding/check_metadata_consistency.js | 3
-rw-r--r--  jstests/sharding/chunks_onCurrentShardSince.js | 8
-rw-r--r--  jstests/sharding/cleanup_orphaned_auth.js | 13
-rw-r--r--  jstests/sharding/clone_catalog_data.js | 15
-rw-r--r--  jstests/sharding/commands_that_write_accept_wc_configRS.js | 3
-rw-r--r--  jstests/sharding/compact_structured_encryption_data_coordinator.js | 2
-rw-r--r--  jstests/sharding/compound_hashed_shard_key_presplitting.js | 3
-rw-r--r--  jstests/sharding/config_rs_no_primary.js | 16
-rw-r--r--  jstests/sharding/config_settings_schema_upgrade_downgrade.js | 7
-rw-r--r--  jstests/sharding/configsvr_retries_createindex_on_stale_config.js | 1
-rw-r--r--  jstests/sharding/conn_pool_stats.js | 2
-rw-r--r--  jstests/sharding/conversion_of_replica_set_to_sharded_cluster.js | 2
-rw-r--r--  jstests/sharding/convert_to_and_from_sharded.js | 2
-rw-r--r--  jstests/sharding/count_config_servers.js | 3
-rw-r--r--  jstests/sharding/delete_range_deletion_tasks_on_stepup_after_drop_collection.js | 13
-rw-r--r--  jstests/sharding/exhaust_hello_topology_changes.js | 2
-rw-r--r--  jstests/sharding/global_index_sharding_catalog_API.js | 6
-rw-r--r--  jstests/sharding/global_index_sharding_catalog_collection_upgrade_downgrade.js | 2
-rw-r--r--  jstests/sharding/health_monitor/config_server_health_observer_crash.js | 1
-rw-r--r--  jstests/sharding/implicit_default_write_concern_add_shard.js | 5
-rw-r--r--  jstests/sharding/index_and_collection_option_propagation.js | 3
-rw-r--r--  jstests/sharding/ingress_handshake_and_auth_metrics_mongos.js | 2
-rw-r--r--  jstests/sharding/internal_txns/end_sessions.js | 18
-rw-r--r--  jstests/sharding/internal_txns/internal_client_restrictions.js | 9
-rw-r--r--  jstests/sharding/internal_txns/kill_sessions.js | 2
-rw-r--r--  jstests/sharding/internal_txns/partial_index.js | 9
-rw-r--r--  jstests/sharding/internal_txns/sessions.js | 2
-rw-r--r--  jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js | 1
-rw-r--r--  jstests/sharding/key_rotation.js | 3
-rw-r--r--  jstests/sharding/lagged_config_secondary.js | 2
-rw-r--r--  jstests/sharding/libs/chunk_bounds_util.js | 5
-rw-r--r--  jstests/sharding/libs/mongos_api_params_util.js | 78
-rw-r--r--  jstests/sharding/linearizable_read_concern.js | 2
-rw-r--r--  jstests/sharding/listDatabases.js | 1
-rw-r--r--  jstests/sharding/live_shard_logical_initial_sync.js | 3
-rw-r--r--  jstests/sharding/localhostAuthBypass.js | 4
-rw-r--r--  jstests/sharding/merge_all_chunks_on_shard.js | 1
-rw-r--r--  jstests/sharding/merge_with_chunk_migrations.js | 4
-rw-r--r--  jstests/sharding/merge_with_drop_shard.js | 2
-rw-r--r--  jstests/sharding/migration_coordinator_abort_failover.js | 5
-rw-r--r--  jstests/sharding/migration_coordinator_shutdown_in_critical_section.js | 1
-rw-r--r--  jstests/sharding/migration_recovers_unfinished_migrations.js | 1
-rw-r--r--  jstests/sharding/migration_server_status.js | 2
-rw-r--r--  jstests/sharding/mongod_returns_no_cluster_time_without_keys.js | 8
-rw-r--r--  jstests/sharding/mongos_no_replica_set_refresh.js | 1
-rw-r--r--  jstests/sharding/move_chunk_interrupt_postimage.js | 2
-rw-r--r--  jstests/sharding/names.js | 8
-rw-r--r--  jstests/sharding/nonreplicated_uuids_on_shardservers.js | 2
-rw-r--r--  jstests/sharding/primary_config_server_blackholed_from_mongos.js | 2
-rw-r--r--  jstests/sharding/printShardingStatus.js | 4
-rw-r--r--  jstests/sharding/query/aggregation_currentop.js | 31
-rw-r--r--  jstests/sharding/query/current_op_no_shards.js | 5
-rw-r--r--  jstests/sharding/query/explain_agg_read_pref.js | 2
-rw-r--r--  jstests/sharding/query/owning_shard_expression.js | 2
-rw-r--r--  jstests/sharding/query/view_rewrite.js | 2
-rw-r--r--  jstests/sharding/read_after_optime.js | 3
-rw-r--r--  jstests/sharding/read_write_concern_defaults_commands_api.js | 8
-rw-r--r--  jstests/sharding/read_write_concern_defaults_propagation.js | 4
-rw-r--r--  jstests/sharding/reconfig_fails_no_cwwc_set_sharding.js | 11
-rw-r--r--  jstests/sharding/remove2.js | 2
-rw-r--r--  jstests/sharding/repl_monitor_refresh.js | 2
-rw-r--r--  jstests/sharding/replmonitor_bad_seed.js | 11
-rw-r--r--  jstests/sharding/resharding_change_stream_namespace_filtering.js | 3
-rw-r--r--  jstests/sharding/resharding_disallow_drop.js | 1
-rw-r--r--  jstests/sharding/resharding_nonblocking_coordinator_rebuild.js | 3
-rw-r--r--  jstests/sharding/resharding_retryable_writes.js | 4
-rw-r--r--  jstests/sharding/resharding_size_estimate.js | 1
-rw-r--r--  jstests/sharding/retryable_writes.js | 2
-rw-r--r--  jstests/sharding/return_partial_shards_down.js | 8
-rw-r--r--  jstests/sharding/sessions_collection_auto_healing.js | 2
-rw-r--r--  jstests/sharding/set_cluster_parameter.js | 3
-rw-r--r--  jstests/sharding/set_fcv_logging.js | 6
-rw-r--r--  jstests/sharding/set_fcv_to_downgrading_fast.js | 9
-rw-r--r--  jstests/sharding/shard_collection_config_db.js | 3
-rw-r--r--  jstests/sharding/shard_identity_config_update.js | 5
-rw-r--r--  jstests/sharding/shard_insert_getlasterror_w2.js | 1
-rw-r--r--  jstests/sharding/shard_removal_triggers_catalog_cache_invalidation.js | 2
-rw-r--r--  jstests/sharding/sharding_non_transaction_snapshot_aggregate.js | 1
-rw-r--r--  jstests/sharding/sharding_non_transaction_snapshot_read.js | 1
-rw-r--r--  jstests/sharding/sharding_rs2.js | 1
-rw-r--r--  jstests/sharding/single_shard_transaction_with_arbiter.js | 2
-rw-r--r--  jstests/sharding/snapshot_reads_target_at_point_in_time.js | 1
-rw-r--r--  jstests/sharding/test_resharding_test_fixture_shutdown_retry_needed.js | 1
-rw-r--r--  jstests/sharding/transient_txn_error_labels.js | 1
-rw-r--r--  jstests/sharding/transient_txn_error_labels_with_write_concern.js | 1
-rw-r--r--  jstests/sharding/txn_addingParticipantParameter.js | 23
-rw-r--r--  jstests/sharding/txn_commit_optimizations_for_read_only_shards.js | 2
-rw-r--r--  jstests/sharding/txn_single_write_shard_failover.js | 1
-rw-r--r--  jstests/sharding/txn_two_phase_commit_server_status.js | 1
-rw-r--r--  jstests/sharding/unique_index_on_shardservers.js | 1
-rw-r--r--  jstests/sharding/updateOne_without_shard_key/cluster_query_without_shard_key_basic.js | 6
-rw-r--r--  jstests/sharding/use_rsm_data_for_cs.js | 1
-rw-r--r--  jstests/sharding/warm_up_connection_pool.js | 12
-rw-r--r--  src/mongo/shell/shardingtest.js | 28
119 files changed, 590 insertions, 137 deletions
diff --git a/buildscripts/resmokeconfig/suites/sharding_auth_catalog_shard.yml b/buildscripts/resmokeconfig/suites/sharding_auth_catalog_shard.yml
new file mode 100644
index 00000000000..3fc54b940a8
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/sharding_auth_catalog_shard.yml
@@ -0,0 +1,55 @@
+# Section that is ignored by resmoke.py.
+config_variables:
+- &keyFile jstests/libs/authTestsKey
+- &keyFileData Thiskeyisonlyforrunningthesuitewithauthenticationdontuseitinanytestsdirectly
+
+test_kind: js_test
+
+selector:
+ roots:
+ - jstests/sharding/**/*.js
+ exclude_files:
+ - jstests/sharding/**/libs/**/*.js
+ # Skip any tests that run with auth explicitly.
+ - jstests/sharding/*[aA]uth*.js
+ - jstests/sharding/query/*[aA]uth*.js
+ - jstests/sharding/change_streams/*[aA]uth*.js
+
+ - jstests/sharding/advance_cluster_time_action_type.js
+ - jstests/sharding/query/aggregation_currentop.js
+ - jstests/sharding/internal_txns/internal_client_restrictions.js
+ - jstests/sharding/kill_sessions.js
+ # Skip these additional tests when running with auth enabled.
+ - jstests/sharding/parallel.js
+ # Skip the testcases that do not have auth bypass when running ops in parallel.
+ - jstests/sharding/migration_ignore_interrupts_1.js # SERVER-21713
+ - jstests/sharding/migration_ignore_interrupts_2.js # SERVER-21713
+ - jstests/sharding/migration_server_status.js # SERVER-21713
+ - jstests/sharding/migration_sets_fromMigrate_flag.js # SERVER-21713
+ - jstests/sharding/migration_with_source_ops.js # SERVER-21713
+ - jstests/sharding/movechunk_parallel.js # SERVER-21713
+ - jstests/sharding/migration_critical_section_concurrency.js # SERVER-21713
+ # Runs with auth enabled.
+ - jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
+ # Skip because this suite implicitly authenticates as __system, which allows bypassing user write
+ # blocking.
+ - jstests/sharding/set_user_write_block_mode.js
+ exclude_with_any_tags:
+ - catalog_shard_incompatible
+ - temporary_catalog_shard_incompatible
+
+executor:
+ archive:
+ tests:
+ - jstests/sharding/*reshard*.js
+ config:
+ shell_options:
+ global_vars:
+ TestData:
+ auth: true
+ authMechanism: SCRAM-SHA-256
+ catalogShard: true
+ keyFile: *keyFile
+ keyFileData: *keyFileData
+ roleGraphInvalidationIsFatal: true
+ nodb: ''
diff --git a/buildscripts/resmokeconfig/suites/sharding_catalog_shard.yml b/buildscripts/resmokeconfig/suites/sharding_catalog_shard.yml
new file mode 100644
index 00000000000..472ffd0ae87
--- /dev/null
+++ b/buildscripts/resmokeconfig/suites/sharding_catalog_shard.yml
@@ -0,0 +1,21 @@
+test_kind: js_test
+
+selector:
+ roots:
+ - jstests/sharding/**/*.js
+ exclude_files:
+ - jstests/sharding/**/libs/**/*.js
+ exclude_with_any_tags:
+ - catalog_shard_incompatible
+ - temporary_catalog_shard_incompatible
+
+executor:
+ archive:
+ tests:
+ - jstests/sharding/*reshard*.js
+ config:
+ shell_options:
+ global_vars:
+ TestData:
+ catalogShard: true
+ nodb: ''
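
Both new suites run the existing jstests/sharding tests with TestData.catalogShard set to true, and the test changes further down branch on that flag. Below is a minimal illustrative sketch (not part of this commit) of the pattern those tests follow: in catalog shard mode the config server already acts as a shard, so tests that previously asked for zero shards need one, and config.shards always contains an entry for the config server.

// Sketch only: how a jstest consumes the TestData.catalogShard flag set by these suites.
const st = new ShardingTest({
    // The config server doubles as a shard, so "no shards" isn't possible in catalog shard mode.
    shards: TestData.catalogShard ? 1 : 0,
    mongos: 1,
});
if (TestData.catalogShard) {
    // There is always a config.shards entry for the config server acting as a shard.
    assert.neq(null, st.s.getDB('config').shards.findOne({_id: "config"}));
}
st.stop();
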
diff --git a/etc/evergreen.yml b/etc/evergreen.yml
index 89bc718374f..060607ab41e 100644
--- a/etc/evergreen.yml
+++ b/etc/evergreen.yml
@@ -1576,6 +1576,8 @@ buildvariants:
- name: .concurrency .large !.ubsan !.no_txns !.debug_only
distros:
- rhel80-medium
+ - name: sharding_catalog_shard_gen
+ - name: sharding_auth_catalog_shard_gen
- name: concurrency_sharded_with_catalog_shard_gen
distros:
- rhel80-medium
diff --git a/etc/evergreen_yml_components/definitions.yml b/etc/evergreen_yml_components/definitions.yml
index be5c1491088..10b165f676e 100644
--- a/etc/evergreen_yml_components/definitions.yml
+++ b/etc/evergreen_yml_components/definitions.yml
@@ -6355,6 +6355,14 @@ tasks:
use_large_distro: "true"
- <<: *gen_task_template
+ name: sharding_catalog_shard_gen
+ tags: []
+ commands:
+ - func: "generate resmoke tasks"
+ vars:
+ use_large_distro: "true"
+
+- <<: *gen_task_template
name: sharding_multiversion_gen
tags: ["random_multiversion_ds", "multiversion"]
commands:
@@ -6409,6 +6417,14 @@ tasks:
use_large_distro: "true"
- <<: *gen_task_template
+ name: sharding_auth_catalog_shard_gen
+ tags: []
+ commands:
+ - func: "generate resmoke tasks"
+ vars:
+ use_large_distro: "true"
+
+- <<: *gen_task_template
name: sharding_auth_audit_gen
tags: ["sharding", "auth", "audit", "non_live_record", "no_debug_mode"]
commands:
diff --git a/jstests/libs/catalog_shard_util.js b/jstests/libs/catalog_shard_util.js
index 9369f614ab1..d6daad0385a 100644
--- a/jstests/libs/catalog_shard_util.js
+++ b/jstests/libs/catalog_shard_util.js
@@ -9,7 +9,30 @@ var CatalogShardUtil = (function() {
st.configRS.getPrimary(), "CatalogShard", undefined /* user */, true /* ignoreFCV */);
}
+ function transitionToDedicatedConfigServer(st, timeout) {
+ if (timeout == undefined) {
+ timeout = 10 * 60 * 1000; // 10 minutes
+ }
+
+ assert.soon(function() {
+ const res = st.s.adminCommand({transitionToDedicatedConfigServer: 1});
+ if (!res.ok && res.code === ErrorCodes.ShardNotFound) {
+ // If the config server primary steps down right after removing the config.shards
+ // doc for the shard but before responding with "state": "completed", the mongos
+ // would retry the _configsvrTransitionToDedicatedConfigServer command against the
+ // new config server primary, which would not find the removed shard in its
+ // ShardRegistry if it has done a ShardRegistry reload after the config.shards doc
+ // for the shard was removed. This would cause the command to fail with
+ // ShardNotFound.
+ return true;
+ }
+ assert.commandWorked(res);
+ return res.state == 'completed';
+ }, "failed to transition to dedicated config server within " + timeout + "ms", timeout);
+ }
+
return {
isEnabledIgnoringFCV,
+ transitionToDedicatedConfigServer,
};
})();
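
The new CatalogShardUtil.transitionToDedicatedConfigServer helper polls the transitionToDedicatedConfigServer command until it reports "completed", treating a post-removal ShardNotFound as success so a config server failover mid-transition doesn't fail the test. A hedged usage sketch follows; the helper and the transitionToCatalogShard command come from this diff, while the surrounding ShardingTest setup and balancer handling are assumptions.

// Sketch only: round-trip the config server out of and back into catalog shard mode.
load("jstests/libs/catalog_shard_util.js");

// Assumes `st` is a ShardingTest started with TestData.catalogShard = true and the balancer
// running so the config shard's chunks can drain off it.
CatalogShardUtil.transitionToDedicatedConfigServer(st, 5 * 60 * 1000 /* timeout in ms */);

// Later, promote the config server back into a catalog shard.
assert.commandWorked(st.s.adminCommand({transitionToCatalogShard: 1}));
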
diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js
index 3bc524550a5..6f8e6d8ae49 100644
--- a/jstests/sharding/addshard2.js
+++ b/jstests/sharding/addshard2.js
@@ -39,15 +39,20 @@ const assertAddShardFailed = function(res, shardName) {
// If a shard name was specified in the addShard, make sure no shard with its name shows up
// in config.shards.
if (shardName) {
- assert.eq(
- null,
- st.s.getDB('config').shards.findOne({_id: shardName}),
- "addShard for " + shardName + " reported failure, but shard shows up in config.shards");
+ if (TestData.catalogShard && shardName === "config") {
+ // In catalog shard mode there's always an entry for config for the config server.
+ assert.neq(null, st.s.getDB('config').shards.findOne({_id: shardName}));
+ } else {
+ assert.eq(null,
+ st.s.getDB('config').shards.findOne({_id: shardName}),
+ "addShard for " + shardName +
+ " reported failure, but shard shows up in config.shards");
+ }
}
};
const st = new ShardingTest({
- shards: 0,
+ shards: TestData.catalogShard ? 1 : 0,
mongos: 1,
});
diff --git a/jstests/sharding/addshard5.js b/jstests/sharding/addshard5.js
index 0f826aab738..adfc574be99 100644
--- a/jstests/sharding/addshard5.js
+++ b/jstests/sharding/addshard5.js
@@ -31,6 +31,14 @@ assert.commandWorked(mongos.adminCommand(
assert.commandWorked(mongos.adminCommand(
{moveChunk: coll + '', find: {_id: 0}, to: st.shard0.shardName, _waitForDelete: true}));
+// Guarantee the sessions collection chunk isn't on shard1.
+assert.commandWorked(mongos.adminCommand({
+ moveChunk: "config.system.sessions",
+ find: {_id: 0},
+ to: st.shard0.shardName,
+ _waitForDelete: true
+}));
+
// Drop and re-add shard with the same name but a new host.
removeShard(st, st.shard1.shardName);
diff --git a/jstests/sharding/addshard6.js b/jstests/sharding/addshard6.js
index b69350e76b6..f04a91a4661 100644
--- a/jstests/sharding/addshard6.js
+++ b/jstests/sharding/addshard6.js
@@ -21,7 +21,7 @@ var assertAddShardFailed = function(res, shardName) {
};
var st = new ShardingTest({
- shards: 0,
+ shards: TestData.catalogShard ? 1 : 0,
mongos: 1,
});
diff --git a/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js b/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
index e60c35ee15b..7ba54f04eba 100644
--- a/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
+++ b/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js
@@ -1,5 +1,7 @@
/**
* Shuts down config server and shard replica set nodes one by one and ensures correct behaviour.
+ *
+ * @tags: [temporary_catalog_shard_incompatible]
*/
// Checking UUID and index consistency involves talking to the config servers, which are shut down
diff --git a/jstests/sharding/allow_partial_results.js b/jstests/sharding/allow_partial_results.js
index d8c8d8aeb82..e96331904a5 100644
--- a/jstests/sharding/allow_partial_results.js
+++ b/jstests/sharding/allow_partial_results.js
@@ -1,6 +1,8 @@
/**
* Tests that the 'allowPartialResults' option to find is respected, and that aggregation does not
* accept the 'allowPartialResults' option.
+ *
+ * @tags: [temporary_catalog_shard_incompatible]
*/
// This test shuts down a shard.
diff --git a/jstests/sharding/allow_partial_results_nshards.js b/jstests/sharding/allow_partial_results_nshards.js
index 74b1b72c91b..ebad306982e 100644
--- a/jstests/sharding/allow_partial_results_nshards.js
+++ b/jstests/sharding/allow_partial_results_nshards.js
@@ -4,6 +4,7 @@
* @tags: [
* requires_replication,
* requires_sharding,
+ * temporary_catalog_shard_incompatible,
* ]
*/
diff --git a/jstests/sharding/analyze_shard_key/analyze_shard_key_basic.js b/jstests/sharding/analyze_shard_key/analyze_shard_key_basic.js
index adf23a2c3c8..72dc63cde94 100644
--- a/jstests/sharding/analyze_shard_key/analyze_shard_key_basic.js
+++ b/jstests/sharding/analyze_shard_key/analyze_shard_key_basic.js
@@ -1,7 +1,7 @@
/**
* Tests support for the analyzeShardKey command.
*
- * @tags: [requires_fcv_63, featureFlagAnalyzeShardKey]
+ * @tags: [requires_fcv_63, featureFlagAnalyzeShardKey, temporary_catalog_shard_incompatible]
*/
(function() {
"use strict";
diff --git a/jstests/sharding/analyze_shard_key/configure_query_analyzer_basic.js b/jstests/sharding/analyze_shard_key/configure_query_analyzer_basic.js
index 9ad086d466d..2e1a23f9a0c 100644
--- a/jstests/sharding/analyze_shard_key/configure_query_analyzer_basic.js
+++ b/jstests/sharding/analyze_shard_key/configure_query_analyzer_basic.js
@@ -98,7 +98,11 @@ function testExistingCollection(conn, ns) {
configSecondaries.forEach(node => {
testNotSupported(node, ErrorCodes.NotWritablePrimary);
});
- testNotSupported(shard0Primary, ErrorCodes.IllegalOperation);
+ if (!TestData.catalogShard) {
+ // If there's a catalog shard, shard0 will be the config server and can accept
+ // configureQueryAnalyzer.
+ testNotSupported(shard0Primary, ErrorCodes.IllegalOperation);
+ }
shard0Secondaries.forEach(node => {
testNotSupported(node, ErrorCodes.NotWritablePrimary);
});
diff --git a/jstests/sharding/analyze_shard_key/refresh_sample_rates_sharded.js b/jstests/sharding/analyze_shard_key/refresh_sample_rates_sharded.js
index 3c7e17a20f0..8dd73a0ddb1 100644
--- a/jstests/sharding/analyze_shard_key/refresh_sample_rates_sharded.js
+++ b/jstests/sharding/analyze_shard_key/refresh_sample_rates_sharded.js
@@ -2,7 +2,7 @@
* Tests that the _refreshQueryAnalyzerConfiguration command is only supported on the config
* server's primary and that it returns correct sample rates.
*
- * @tags: [requires_fcv_63, featureFlagAnalyzeShardKey]
+ * @tags: [requires_fcv_63, featureFlagAnalyzeShardKey, temporary_catalog_shard_incompatible]
*/
(function() {
"use strict";
diff --git a/jstests/sharding/arbiters_do_not_use_cluster_time.js b/jstests/sharding/arbiters_do_not_use_cluster_time.js
index 70f3469679f..2874649f3f6 100644
--- a/jstests/sharding/arbiters_do_not_use_cluster_time.js
+++ b/jstests/sharding/arbiters_do_not_use_cluster_time.js
@@ -1,5 +1,8 @@
/**
* Tests that arbiters do not gossip clusterTime or operationTime.
+ *
+ * A config server can't have arbiter nodes.
+ * @tags: [catalog_shard_incompatible]
*/
(function() {
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index b64b36efb94..5e6b9f35717 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -52,7 +52,7 @@ function getShardName(rsTest) {
var s = new ShardingTest({
name: "auth",
mongos: 1,
- shards: 0,
+ shards: TestData.catalogShard ? 1 : 0,
other: {keyFile: "jstests/libs/key1", chunkSize: 1, enableAutoSplit: false},
});
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index d60590e53b9..145686a796f 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -23,6 +23,12 @@ var st = new ShardingTest({
other: {keyFile: 'jstests/libs/key1', useHostname: false, chunkSize: 2},
});
+// This test relies on shard1 having no chunks in config.system.sessions.
+authutil.asCluster(st.s, "jstests/libs/key1", function() {
+ assert.commandWorked(st.s.adminCommand(
+ {moveChunk: "config.system.sessions", find: {_id: 0}, to: st.shard0.shardName}));
+});
+
var mongos = st.s;
var adminDB = mongos.getDB('admin');
var configDB = mongos.getDB('config');
@@ -50,8 +56,12 @@ var authenticatedConn = new Mongo(mongos.host);
authenticatedConn.getDB('admin').auth(rwUser, password);
// Add user to shards to prevent localhost connections from having automatic full access
-st.rs0.getPrimary().getDB('admin').createUser(
- {user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}, {w: 3, wtimeout: 30000});
+if (!TestData.catalogShard) {
+ // In catalog shard mode, the first shard is the config server, so the user we made via mongos
+ // already used up this shard's localhost bypass.
+ st.rs0.getPrimary().getDB('admin').createUser(
+ {user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}, {w: 3, wtimeout: 30000});
+}
st.rs1.getPrimary().getDB('admin').createUser(
{user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}, {w: 3, wtimeout: 30000});
@@ -213,7 +223,8 @@ var checkAdminOps = function(hasAuth) {
checkCommandSucceeded(adminDB, {ismaster: 1});
checkCommandSucceeded(adminDB, {hello: 1});
checkCommandSucceeded(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
- var chunk = findChunksUtil.findOneChunkByNs(configDB, 'test.foo', {shard: st.rs0.name});
+ var chunk =
+ findChunksUtil.findOneChunkByNs(configDB, 'test.foo', {shard: st.shard0.shardName});
checkCommandSucceeded(
adminDB,
{moveChunk: 'test.foo', find: chunk.min, to: st.rs1.name, _waitForDelete: true});
diff --git a/jstests/sharding/auth_secondaryok_routing.js b/jstests/sharding/auth_secondaryok_routing.js
index 3a7b1771141..cdf3a5ce643 100644
--- a/jstests/sharding/auth_secondaryok_routing.js
+++ b/jstests/sharding/auth_secondaryok_routing.js
@@ -55,10 +55,13 @@ var nodeCount = replTest.nodes.length;
var adminDB = mongos.getDB('admin');
adminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles});
adminDB.auth('user', 'password');
-var priAdminDB = replTest.getPrimary().getDB('admin');
-replTest.getPrimary().waitForClusterTime(60);
-priAdminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles},
- {w: 3, wtimeout: 30000});
+if (!TestData.catalogShard) {
+ // In catalog shard mode, creating this user above also created it on the first shard.
+ var priAdminDB = replTest.getPrimary().getDB('admin');
+ replTest.getPrimary().waitForClusterTime(60);
+ priAdminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles},
+ {w: 3, wtimeout: 30000});
+}
coll.drop();
coll.setSecondaryOk();
diff --git a/jstests/sharding/auth_sharding_cmd_metadata.js b/jstests/sharding/auth_sharding_cmd_metadata.js
index 548f65ee01a..2219815bf5d 100644
--- a/jstests/sharding/auth_sharding_cmd_metadata.js
+++ b/jstests/sharding/auth_sharding_cmd_metadata.js
@@ -29,7 +29,15 @@ const shardAdminDB = st.rs0.getPrimary().getDB('admin');
const shardTestDB = st.rs0.getPrimary().getDB('test');
// ConfigOpTime can't be advanced from external clients
+if (TestData.catalogShard) {
+ // We've already used up the localhost bypass in catalog shard mode, so we have to log in to
+ // create the user below.
+ shardAdminDB.auth('foo', 'bar');
+}
shardAdminDB.createUser({user: 'user', pwd: 'pwd', roles: jsTest.adminUserRoles});
+if (TestData.catalogShard) {
+ shardAdminDB.logout();
+}
shardAdminDB.auth('user', 'pwd');
const newTimestamp = Timestamp(getConfigOpTime().getTime() + 1000, 0);
assert.commandWorked(shardTestDB.runCommand({ping: 1, $configTime: newTimestamp}));
diff --git a/jstests/sharding/awaitable_hello_primary_failures.js b/jstests/sharding/awaitable_hello_primary_failures.js
index 09ff7aa8f5a..fdc43ad07fb 100644
--- a/jstests/sharding/awaitable_hello_primary_failures.js
+++ b/jstests/sharding/awaitable_hello_primary_failures.js
@@ -1,6 +1,8 @@
/**
* Test to assert that the RSM behaves correctly when contacting the primary node fails in various
* ways.
+ *
+ * @tags: [temporary_catalog_shard_incompatible]
*/
// Checking UUID consistency and orphans involves talking to a shard node, which in this test is
diff --git a/jstests/sharding/basic_sharding_params.js b/jstests/sharding/basic_sharding_params.js
index 9b53182ef1e..8ab7c585147 100644
--- a/jstests/sharding/basic_sharding_params.js
+++ b/jstests/sharding/basic_sharding_params.js
@@ -1,5 +1,6 @@
/**
* Test of complex sharding initialization
+ * @tags: [temporary_catalog_shard_incompatible]
*/
(function() {
diff --git a/jstests/sharding/batch_write_command_sharded.js b/jstests/sharding/batch_write_command_sharded.js
index d2b0a2ddfd9..b7a8c792dbf 100644
--- a/jstests/sharding/batch_write_command_sharded.js
+++ b/jstests/sharding/batch_write_command_sharded.js
@@ -14,7 +14,7 @@ TestData.skipCheckShardFilteringMetadata = true;
(function() {
"use strict";
-var st = new ShardingTest({shards: 2});
+var st = new ShardingTest({shards: 2, config: 3});
jsTest.log("Starting sharding batch write tests...");
diff --git a/jstests/sharding/change_stream_error_label.js b/jstests/sharding/change_stream_error_label.js
index d087152ca54..d2dad6a061f 100644
--- a/jstests/sharding/change_stream_error_label.js
+++ b/jstests/sharding/change_stream_error_label.js
@@ -4,6 +4,7 @@
* @tags: [
* requires_sharding,
* uses_change_streams,
+ * temporary_catalog_shard_incompatible,
* ]
*/
(function() {
diff --git a/jstests/sharding/change_stream_no_shards.js b/jstests/sharding/change_stream_no_shards.js
index 901473e221f..e3ad7921b82 100644
--- a/jstests/sharding/change_stream_no_shards.js
+++ b/jstests/sharding/change_stream_no_shards.js
@@ -1,6 +1,9 @@
/**
* Test that running a $changeStream aggregation on a cluster with no shards returns an empty result
* set with a cursorID of zero.
+ *
+ * Requires no shards so there can't be a catalog shard.
+ * @tags: [catalog_shard_incompatible]
*/
(function() {
const st = new ShardingTest({shards: 0});
@@ -36,4 +39,4 @@ assert.docEq([], nonCsCmdRes.cursor.firstBatch);
assert.eq(nonCsCmdRes.cursor.id, 0);
st.stop();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/sharding/change_streams_primary_shard_unaware.js b/jstests/sharding/change_streams_primary_shard_unaware.js
index a94f9187b7b..a3775770129 100644
--- a/jstests/sharding/change_streams_primary_shard_unaware.js
+++ b/jstests/sharding/change_streams_primary_shard_unaware.js
@@ -3,11 +3,13 @@
//
// This test triggers a compiler bug that causes a crash when compiling with optimizations on, see
// SERVER-36321.
+//
// @tags: [
// denylist_from_rhel_67_s390x,
// requires_majority_read_concern,
// requires_persistence,
// uses_change_streams,
+// temporary_catalog_shard_incompatible,
// ]
(function() {
"use strict";
diff --git a/jstests/sharding/check_metadata_consistency.js b/jstests/sharding/check_metadata_consistency.js
index 8fc02e77dde..eae7b9ccbbe 100644
--- a/jstests/sharding/check_metadata_consistency.js
+++ b/jstests/sharding/check_metadata_consistency.js
@@ -1,7 +1,8 @@
/*
* Tests to validate the correct behaviour of checkMetadataConsistency command.
*
- * @tags: [featureFlagCheckMetadataConsistency]
+ * TODO SERVER-74445: Fix cluster level checkMetadataConsistency command with a catalog shard.
+ * @tags: [featureFlagCheckMetadataConsistency, temporary_catalog_shard_incompatible]
*/
(function() {
diff --git a/jstests/sharding/chunks_onCurrentShardSince.js b/jstests/sharding/chunks_onCurrentShardSince.js
index 70ce6a5382a..97ef8e010d3 100644
--- a/jstests/sharding/chunks_onCurrentShardSince.js
+++ b/jstests/sharding/chunks_onCurrentShardSince.js
@@ -154,9 +154,11 @@ const chunksColl = st.config.chunks;
const testDB = st.s.getDB(jsTestName());
/* Perform tests */
-upgradeFCVTest(st, chunksColl, testDB);
-moveAndMergeChunksTest(st, chunksColl, testDB);
-splitChunksTest(st, chunksColl, testDB);
+if (!TestData.catalogShard) {
+ upgradeFCVTest(st, chunksColl, testDB);
+ moveAndMergeChunksTest(st, chunksColl, testDB);
+ splitChunksTest(st, chunksColl, testDB);
+}
st.stop();
})();
diff --git a/jstests/sharding/cleanup_orphaned_auth.js b/jstests/sharding/cleanup_orphaned_auth.js
index cb4490ec00b..dd6b384a276 100644
--- a/jstests/sharding/cleanup_orphaned_auth.js
+++ b/jstests/sharding/cleanup_orphaned_auth.js
@@ -26,8 +26,13 @@ function assertUnauthorized(res, msg) {
var st = new ShardingTest({auth: true, other: {keyFile: 'jstests/libs/key1', useHostname: false}});
var shardAdmin = st.shard0.getDB('admin');
-shardAdmin.createUser({user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']});
-shardAdmin.auth('admin', 'x');
+if (!TestData.catalogShard) {
+ // In catalog shard mode, this will create a user on the config server, which we already do
+ // below.
+ shardAdmin.createUser(
+ {user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']});
+ shardAdmin.auth('admin', 'x');
+}
var mongos = st.s0;
var mongosAdmin = mongos.getDB('admin');
@@ -42,7 +47,9 @@ assert.commandWorked(
mongosAdmin.runCommand({shardCollection: coll.getFullName(), key: {_id: 'hashed'}}));
// cleanupOrphaned requires auth as admin user.
-assert.commandWorked(shardAdmin.logout());
+if (!TestData.catalogShard) {
+ assert.commandWorked(shardAdmin.logout());
+}
assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
var fooDB = st.shard0.getDB('foo');
diff --git a/jstests/sharding/clone_catalog_data.js b/jstests/sharding/clone_catalog_data.js
index f0ed140eda0..0d198472609 100644
--- a/jstests/sharding/clone_catalog_data.js
+++ b/jstests/sharding/clone_catalog_data.js
@@ -164,15 +164,24 @@
const isCatalogShardEnabled = CatalogShardUtil.isEnabledIgnoringFCV(st);
- // Check that the command fails when attempting to run on a config server that doesn't support
- // catalog shard mode.
- if (isCatalogShardEnabled) {
+ if (TestData.catalogShard) {
+ // The config server is a shard and already has collections for the database.
+ assert.commandFailedWithCode(st.configRS.getPrimary().adminCommand({
+ _shardsvrCloneCatalogData: 'test',
+ from: fromShard.host,
+ writeConcern: {w: "majority"}
+ }),
+ ErrorCodes.NamespaceExists);
+ } else if (isCatalogShardEnabled) {
+ // The config server is dedicated but supports catalog shard mode, so it can accept sharded
+ // commands.
assert.commandWorked(st.configRS.getPrimary().adminCommand({
_shardsvrCloneCatalogData: 'test',
from: fromShard.host,
writeConcern: {w: "majority"}
}));
} else {
+ // A dedicated config server that doesn't support catalog shard mode cannot run the command.
assert.commandFailedWithCode(st.configRS.getPrimary().adminCommand({
_shardsvrCloneCatalogData: 'test',
from: fromShard.host,
diff --git a/jstests/sharding/commands_that_write_accept_wc_configRS.js b/jstests/sharding/commands_that_write_accept_wc_configRS.js
index c34b3d40d59..992b2a6748a 100644
--- a/jstests/sharding/commands_that_write_accept_wc_configRS.js
+++ b/jstests/sharding/commands_that_write_accept_wc_configRS.js
@@ -9,7 +9,8 @@
*
* This test is labeled resource intensive because its total io_write is 70MB compared to a median
* of 5MB across all sharding tests in wiredTiger.
- * @tags: [resource_intensive]
+ *
+ * @tags: [resource_intensive, temporary_catalog_shard_incompatible]
*/
load('jstests/libs/write_concern_util.js');
load('jstests/multiVersion/libs/auth_helpers.js');
diff --git a/jstests/sharding/compact_structured_encryption_data_coordinator.js b/jstests/sharding/compact_structured_encryption_data_coordinator.js
index fac06a85889..ab933bbcef6 100644
--- a/jstests/sharding/compact_structured_encryption_data_coordinator.js
+++ b/jstests/sharding/compact_structured_encryption_data_coordinator.js
@@ -1,5 +1,5 @@
// Basic test that the CompactStructuredEncryptionDataCoordinator runs.
-// @tags: [requires_sharding,requires_fcv_60]
+// @tags: [requires_sharding,requires_fcv_60, temporary_catalog_shard_incompatible]
(function() {
'use strict';
diff --git a/jstests/sharding/compound_hashed_shard_key_presplitting.js b/jstests/sharding/compound_hashed_shard_key_presplitting.js
index 6fdb8abc001..d288eba14b0 100644
--- a/jstests/sharding/compound_hashed_shard_key_presplitting.js
+++ b/jstests/sharding/compound_hashed_shard_key_presplitting.js
@@ -4,6 +4,7 @@
*
* @tags: [
* multiversion_incompatible,
+ * temporary_catalog_shard_incompatible,
* ]
*/
(function() {
@@ -350,4 +351,4 @@ assert.commandWorked(db.adminCommand(
checkValidChunks(db.coll, shardKey, [6, 2, 1]);
st.stop();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/sharding/config_rs_no_primary.js b/jstests/sharding/config_rs_no_primary.js
index 135249ee3e1..c14e60f7269 100644
--- a/jstests/sharding/config_rs_no_primary.js
+++ b/jstests/sharding/config_rs_no_primary.js
@@ -13,6 +13,7 @@ TestData.skipCheckShardFilteringMetadata = true;
var st = new ShardingTest({
shards: 1,
+ config: 3,
other: {
c0: {}, // Make sure 1st config server is primary
c1: {rsConfig: {priority: 0}},
@@ -37,9 +38,18 @@ assert.neq(null, mongos2);
var testOps = function(mongos) {
jsTestLog("Doing ops that don't require metadata writes and thus should succeed against: " +
mongos);
- var initialCount = mongos.getDB('test').foo.count();
- assert.commandWorked(mongos.getDB('test').foo.insert({a: 1}));
- assert.eq(initialCount + 1, mongos.getDB('test').foo.count());
+ if (TestData.catalogShard) {
+ // In catalog shard mode there's also only one shard node up with no primary, so just verify
+ // we can still do ops on a secondary that don't require metadata.
+ mongos.setSecondaryOk(true);
+ assert.eq(1, mongos.getDB('test').foo.count());
+ mongos.setSecondaryOk(false);
+ } else {
+ var initialCount = mongos.getDB('test').foo.count();
+ // This insert needs a primary, which is only available outside catalog shard mode.
+ assert.commandWorked(mongos.getDB('test').foo.insert({a: 1}));
+ assert.eq(initialCount + 1, mongos.getDB('test').foo.count());
+ }
assert.throws(function() {
mongos.getDB('config').shards.findOne();
diff --git a/jstests/sharding/config_settings_schema_upgrade_downgrade.js b/jstests/sharding/config_settings_schema_upgrade_downgrade.js
index 6562cc9b94b..865ff3214cc 100644
--- a/jstests/sharding/config_settings_schema_upgrade_downgrade.js
+++ b/jstests/sharding/config_settings_schema_upgrade_downgrade.js
@@ -4,7 +4,12 @@
* Tests that a schema is added to the config.settings collection on upgrade and removed on
* downgrade.
*
- * @tags: [multiversion_incompatible, featureFlagConfigSettingsSchema, does_not_support_stepdowns]
+ * @tags: [
+ * multiversion_incompatible,
+ * featureFlagConfigSettingsSchema,
+ * does_not_support_stepdowns,
+ * temporary_catalog_shard_incompatible,
+ * ]
*/
(function() {
'use strict';
diff --git a/jstests/sharding/configsvr_retries_createindex_on_stale_config.js b/jstests/sharding/configsvr_retries_createindex_on_stale_config.js
index 74b82be8344..60a20c63197 100644
--- a/jstests/sharding/configsvr_retries_createindex_on_stale_config.js
+++ b/jstests/sharding/configsvr_retries_createindex_on_stale_config.js
@@ -1,6 +1,7 @@
/**
* Verifies creating the logical sessions collection TTL index retries on stale version errors.
* @tags: [
+ * temporary_catalog_shard_incompatible,
* ]
*/
diff --git a/jstests/sharding/conn_pool_stats.js b/jstests/sharding/conn_pool_stats.js
index 09189821d13..a34710be3e5 100644
--- a/jstests/sharding/conn_pool_stats.js
+++ b/jstests/sharding/conn_pool_stats.js
@@ -1,7 +1,7 @@
/**
* Tests for the connPoolStats command.
*
- * @tags: [requires_fcv_63]
+ * @tags: [requires_fcv_63, temporary_catalog_shard_incompatible]
*/
load("jstests/libs/fail_point_util.js");
load("jstests/libs/conn_pool_helpers.js");
diff --git a/jstests/sharding/conversion_of_replica_set_to_sharded_cluster.js b/jstests/sharding/conversion_of_replica_set_to_sharded_cluster.js
index 6c5933c381c..c49b295d7ba 100644
--- a/jstests/sharding/conversion_of_replica_set_to_sharded_cluster.js
+++ b/jstests/sharding/conversion_of_replica_set_to_sharded_cluster.js
@@ -307,7 +307,7 @@ checkCRUDCommands(rst0.getPrimary().getDB(dbName));
checkDDLCommands(rst0.getPrimary().getDB(DDLDbName));
let st = new ShardingTest({
- shards: 0,
+ shards: TestData.catalogShard ? 1 : 0,
mongos: 1,
});
diff --git a/jstests/sharding/convert_to_and_from_sharded.js b/jstests/sharding/convert_to_and_from_sharded.js
index 340580b5dbf..b26e9e94931 100644
--- a/jstests/sharding/convert_to_and_from_sharded.js
+++ b/jstests/sharding/convert_to_and_from_sharded.js
@@ -1,7 +1,7 @@
/**
* Test that a replica set member can process basic CRUD operations after switching from being
* a shardsvr and back to non shardsvr.
- * @tags: [requires_persistence]
+ * @tags: [requires_persistence, temporary_catalog_shard_incompatible]
*/
(function() {
"use strict";
diff --git a/jstests/sharding/count_config_servers.js b/jstests/sharding/count_config_servers.js
index d73a4198e7e..b127273b05e 100644
--- a/jstests/sharding/count_config_servers.js
+++ b/jstests/sharding/count_config_servers.js
@@ -13,7 +13,8 @@ TestData.skipCheckShardFilteringMetadata = true;
(function() {
"use strict";
-var st = new ShardingTest({name: 'sync_conn_cmd', shards: 0});
+var st =
+ new ShardingTest({name: 'sync_conn_cmd', shards: TestData.catalogShard ? 1 : 0, config: 3});
st.s.setSecondaryOk();
var configDB = st.config;
diff --git a/jstests/sharding/delete_range_deletion_tasks_on_stepup_after_drop_collection.js b/jstests/sharding/delete_range_deletion_tasks_on_stepup_after_drop_collection.js
index a3a80d47a4b..22dcf02724a 100644
--- a/jstests/sharding/delete_range_deletion_tasks_on_stepup_after_drop_collection.js
+++ b/jstests/sharding/delete_range_deletion_tasks_on_stepup_after_drop_collection.js
@@ -67,7 +67,9 @@ moveChunkHangAtStep5FailPoint.wait();
donorReplSetTest.freeze(donorPrimary);
moveChunkHangAtStep5FailPoint.off();
-moveChunkThread.join();
+if (!TestData.catalogShard) {
+ moveChunkThread.join();
+}
metadataRefreshFailPoint.wait();
donorReplSetTest.unfreeze(donorPrimary);
@@ -83,6 +85,15 @@ assert.eq(1, getNumRangeDeletionDocs(recipientShard, ns));
testColl.drop();
metadataRefreshFailPoint.off();
+if (TestData.catalogShard) {
+ // In catalog shard mode, the migration won't finish until after we finish migration recovery,
+ // which is blocked by the fail point until we disable it above.
+ //
+ // SERVER-74446: Investigate why this only happens in catalog shard mode and if it's safe to
+ // ignore by changing the test.
+ moveChunkThread.join();
+}
+
jsTest.log("Wait for the recipient to delete the range deletion task doc");
assert.soon(() => {
return 0 == getNumRangeDeletionDocs(recipientShard, ns);
diff --git a/jstests/sharding/exhaust_hello_topology_changes.js b/jstests/sharding/exhaust_hello_topology_changes.js
index b3ba45d8861..6e5647bc5d6 100644
--- a/jstests/sharding/exhaust_hello_topology_changes.js
+++ b/jstests/sharding/exhaust_hello_topology_changes.js
@@ -6,7 +6,7 @@
* hit. A replica set node should send a response to the mongos as soon as it processes a topology
* change, so "immediately"/"quickly" can vary - we specify 5 seconds in this test ('timeoutMS').
*
- * @tags: [requires_streamable_rsm]
+ * @tags: [requires_streamable_rsm, temporary_catalog_shard_incompatible]
*/
// This test shuts down a shard's node and because of this consistency checking
diff --git a/jstests/sharding/global_index_sharding_catalog_API.js b/jstests/sharding/global_index_sharding_catalog_API.js
index 8c631ac79c4..40cb0de806b 100644
--- a/jstests/sharding/global_index_sharding_catalog_API.js
+++ b/jstests/sharding/global_index_sharding_catalog_API.js
@@ -1,7 +1,11 @@
/**
* Tests that the global indexes API correctly creates and drops an index from the catalog.
*
- * @tags: [multiversion_incompatible, featureFlagGlobalIndexesShardingCatalog]
+ * @tags: [
+ * multiversion_incompatible,
+ * featureFlagGlobalIndexesShardingCatalog,
+ * temporary_catalog_shard_incompatible,
+ * ]
*/
(function() {
diff --git a/jstests/sharding/global_index_sharding_catalog_collection_upgrade_downgrade.js b/jstests/sharding/global_index_sharding_catalog_collection_upgrade_downgrade.js
index c172f40d855..75782689a39 100644
--- a/jstests/sharding/global_index_sharding_catalog_collection_upgrade_downgrade.js
+++ b/jstests/sharding/global_index_sharding_catalog_collection_upgrade_downgrade.js
@@ -3,7 +3,7 @@
* upgrading.
*
* @tags: [multiversion_incompatible, featureFlagGlobalIndexesShardingCatalog,
- * featureFlagDowngradingToUpgrading]
+ * featureFlagDowngradingToUpgrading, temporary_catalog_shard_incompatible]
*/
(function() {
diff --git a/jstests/sharding/health_monitor/config_server_health_observer_crash.js b/jstests/sharding/health_monitor/config_server_health_observer_crash.js
index 51d20521b2f..bfa79366944 100644
--- a/jstests/sharding/health_monitor/config_server_health_observer_crash.js
+++ b/jstests/sharding/health_monitor/config_server_health_observer_crash.js
@@ -44,6 +44,7 @@ var st = new ShardingTest({
shards: 1,
mongos: [mongosParams, {}],
other: {useBridge: true},
+ config: 3,
});
assert.commandWorked(st.s0.adminCommand(
diff --git a/jstests/sharding/implicit_default_write_concern_add_shard.js b/jstests/sharding/implicit_default_write_concern_add_shard.js
index fdb095fe264..2797ea2ec10 100644
--- a/jstests/sharding/implicit_default_write_concern_add_shard.js
+++ b/jstests/sharding/implicit_default_write_concern_add_shard.js
@@ -1,6 +1,9 @@
/**
* Tests adding shard to sharded cluster will fail if the implicitDefaultWriteConcern is
* w:1 and CWWC is not set.
+ *
+ * For some reason fails in the check shard filtering metadata hook when shutting down the cluster.
+ * @tags: [temporary_catalog_shard_incompatible]
*/
(function() {
@@ -38,7 +41,7 @@ function testAddShard(CWWCSet, isPSASet, fixAddShard) {
shardServer.initiate();
const st = new ShardingTest({
- shards: 0,
+ shards: TestData.catalogShard ? 1 : 0,
mongos: 1,
});
var admin = st.getDB('admin');
diff --git a/jstests/sharding/index_and_collection_option_propagation.js b/jstests/sharding/index_and_collection_option_propagation.js
index 0b37e7ecd82..425f4c393ff 100644
--- a/jstests/sharding/index_and_collection_option_propagation.js
+++ b/jstests/sharding/index_and_collection_option_propagation.js
@@ -5,6 +5,9 @@
* - If called on a sharded collection, the request is broadcast to shards with chunks.
*
* This test verifies this behavior.
+ *
+ * Shuts down shard0, which also shuts down the config server. See if the test can be reworked.
+ * @tags: [temporary_catalog_shard_incompatible]
*/
// This test shuts down a shard's node and because of this consistency checking
diff --git a/jstests/sharding/ingress_handshake_and_auth_metrics_mongos.js b/jstests/sharding/ingress_handshake_and_auth_metrics_mongos.js
index f0d463bd290..2a06943ee99 100644
--- a/jstests/sharding/ingress_handshake_and_auth_metrics_mongos.js
+++ b/jstests/sharding/ingress_handshake_and_auth_metrics_mongos.js
@@ -9,7 +9,7 @@
load('jstests/libs/ingress_handshake_metrics_helpers.js');
let runTest = (connectionHealthLoggingOn) => {
- let st = new ShardingTest({shards: 0, other: {auth: ''}});
+ let st = new ShardingTest({shards: TestData.catalogShard ? 1 : 0, other: {auth: ''}});
let conn = st.s;
jsTestLog("Setting up users and test data.");
diff --git a/jstests/sharding/internal_txns/end_sessions.js b/jstests/sharding/internal_txns/end_sessions.js
index 59e1a144480..ff08aa34c43 100644
--- a/jstests/sharding/internal_txns/end_sessions.js
+++ b/jstests/sharding/internal_txns/end_sessions.js
@@ -42,7 +42,7 @@ const parentLsid = {
id: sessionUUID
};
-const kInternalTxnNumber = NumberLong(0);
+const kInternalTxnNumber = NumberLong(50123);
let numTransactionsCollEntries = 0;
let numImageCollEntries = 0;
@@ -66,9 +66,12 @@ assert.commandWorked(testDB.adminCommand(
{commitTransaction: 1, lsid: childLsid0, txnNumber: kInternalTxnNumber, autocommit: false}));
numTransactionsCollEntries++;
-assert.eq(numTransactionsCollEntries, transactionsCollOnPrimary.find().itcount());
+// Use a filter to skip transactions from internal metadata operations if we're running in catalog
+// shard mode.
+assert.eq(numTransactionsCollEntries,
+ transactionsCollOnPrimary.find({txnNum: kInternalTxnNumber}).itcount());
-const parentTxnNumber1 = NumberLong(1);
+const parentTxnNumber1 = NumberLong(55123);
assert.commandWorked(testDB.runCommand({
update: kCollName,
@@ -99,7 +102,9 @@ assert.commandWorked(testDB.adminCommand(
numTransactionsCollEntries++;
numImageCollEntries++;
-assert.eq(numTransactionsCollEntries, transactionsCollOnPrimary.find().itcount());
+assert.eq(numTransactionsCollEntries,
+ transactionsCollOnPrimary.find({txnNum: {$in: [kInternalTxnNumber, parentTxnNumber1]}})
+ .itcount());
assert.eq(numImageCollEntries, imageCollOnPrimary.find().itcount());
assert.commandWorked(shard0Primary.adminCommand({refreshLogicalSessionCacheNow: 1}));
@@ -115,8 +120,9 @@ jsTest.log(
"Verify that the config.transactions entries and config.image_collection got reaped " +
"since the config.system.sessions entry for the parent session had already been deleted");
assert.eq(0,
- transactionsCollOnPrimary.find().itcount(),
- tojson(transactionsCollOnPrimary.find().toArray()));
+ transactionsCollOnPrimary.find({txnNum: {$in: [kInternalTxnNumber, parentTxnNumber1]}})
+ .itcount(),
+ tojson(transactionsCollOnPrimary.find({txnNum: kInternalTxnNumber}).toArray()));
assert.eq(0, imageCollOnPrimary.find().itcount());
st.stop();
diff --git a/jstests/sharding/internal_txns/internal_client_restrictions.js b/jstests/sharding/internal_txns/internal_client_restrictions.js
index 7db1f28375a..fcb93eaf1e7 100644
--- a/jstests/sharding/internal_txns/internal_client_restrictions.js
+++ b/jstests/sharding/internal_txns/internal_client_restrictions.js
@@ -78,8 +78,8 @@ jsTestLog("Verify internal session and txnRetryCounter require internal privileg
// Auth as a user with enough privileges to read from any collection, but not to identify as an
// internal client.
const shardDB = st.rs0.getPrimary().getDB("admin");
-shardDB.createUser({user: "shardAdmin", pwd: "password", roles: jsTest.adminUserRoles});
-assert(shardDB.auth("shardAdmin", "password"));
+shardDB.createUser({user: "admin", pwd: "password", roles: jsTest.adminUserRoles});
+assert(shardDB.auth("admin", "password"));
verifyTxnRetryCounterForExternalClients(shardDB, {expectFail: true});
verifyInternalSessionsForExternalClients(shardDB, {expectFail: true});
@@ -90,7 +90,10 @@ jsTestLog("Verify internal session and txnRetryCounter require internal privileg
// Auth as a user with enough privileges to read from any collection, but not to identify as an
// internal client.
const mongosDB = st.s.getDB("admin");
-mongosDB.createUser({user: "admin", pwd: "password", roles: jsTest.adminUserRoles});
+if (!TestData.catalogShard) {
+ // In catalog shard mode, the user made on the shard above is also a cluster global user.
+ mongosDB.createUser({user: "admin", pwd: "password", roles: jsTest.adminUserRoles});
+}
assert(mongosDB.auth("admin", "password"));
verifyTxnRetryCounterForExternalClients(mongosDB, {expectFail: true});
diff --git a/jstests/sharding/internal_txns/kill_sessions.js b/jstests/sharding/internal_txns/kill_sessions.js
index b0afc95cfb6..605d2d9079b 100644
--- a/jstests/sharding/internal_txns/kill_sessions.js
+++ b/jstests/sharding/internal_txns/kill_sessions.js
@@ -1,7 +1,7 @@
/*
* Tests running killSessions to kill internal sessions on both mongos and mongod.
*
- * @tags: [requires_fcv_60, uses_transactions]
+ * @tags: [requires_fcv_60, uses_transactions, temporary_catalog_shard_incompatible]
*/
(function() {
'use strict';
diff --git a/jstests/sharding/internal_txns/partial_index.js b/jstests/sharding/internal_txns/partial_index.js
index b9c5462aaa4..d2c5eee5946 100644
--- a/jstests/sharding/internal_txns/partial_index.js
+++ b/jstests/sharding/internal_txns/partial_index.js
@@ -93,6 +93,15 @@ function runTest(st, alwaysCreateFeatureFlagEnabled) {
});
}
+ if (TestData.catalogShard) {
+ // A config server does internal txns, so clear the transaction table to make sure it's
+ // empty before dropping the index, otherwise it can't be recreated automatically. Disable
+ // implicit sessions we can directly write to config.transactions.
+ TestData.disableImplicitSessions = true;
+ assert.commandWorked(st.rs0.getPrimary().getCollection(kConfigTxnNs).remove({}));
+ TestData.disableImplicitSessions = false;
+ }
+
// If the collection is empty and the index does not exist, we should always create the partial
// index on stepup,
indexRecreationTest(true /* expectRecreateAfterDrop */);
diff --git a/jstests/sharding/internal_txns/sessions.js b/jstests/sharding/internal_txns/sessions.js
index 1c3bfc41fa6..27f890f14d8 100644
--- a/jstests/sharding/internal_txns/sessions.js
+++ b/jstests/sharding/internal_txns/sessions.js
@@ -1,7 +1,7 @@
/*
* Tests basic support for internal sessions.
*
- * @tags: [requires_fcv_60, uses_transactions]
+ * @tags: [requires_fcv_60, uses_transactions, temporary_catalog_shard_incompatible]
*/
(function() {
'use strict';
diff --git a/jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js b/jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js
index ffc349fa76b..feebe8ccc67 100644
--- a/jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js
+++ b/jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js
@@ -5,6 +5,7 @@
* @tags: [
* # The SBE plan cache was enabled by default in 6.3.
* requires_fcv_63,
+ * temporary_catalog_shard_incompatible,
* ]
*/
diff --git a/jstests/sharding/key_rotation.js b/jstests/sharding/key_rotation.js
index 2c3dafebea3..ee49f7cc7f6 100644
--- a/jstests/sharding/key_rotation.js
+++ b/jstests/sharding/key_rotation.js
@@ -5,7 +5,8 @@
* - manual key rotation is possible by deleting existing keys and restarting the cluster.
*
* Manual key rotation requires restarting a shard, so a persistent storage engine is necessary.
- * @tags: [requires_persistence]
+ * Shuts down shard0, which also shuts down the config server. See if the test can be reworked.
+ * @tags: [requires_persistence, temporary_catalog_shard_incompatible]
*/
// This test restarts a shard replica set, potentially changing the primary node, while
diff --git a/jstests/sharding/lagged_config_secondary.js b/jstests/sharding/lagged_config_secondary.js
index b17f32e1fb2..2327769d539 100644
--- a/jstests/sharding/lagged_config_secondary.js
+++ b/jstests/sharding/lagged_config_secondary.js
@@ -1,6 +1,8 @@
/**
* Test that mongos times out when the config server replica set only contains nodes that
* are behind the majority opTime.
+ *
+ * @tags: [temporary_catalog_shard_incompatible]
*/
load("jstests/libs/write_concern_util.js");
diff --git a/jstests/sharding/libs/chunk_bounds_util.js b/jstests/sharding/libs/chunk_bounds_util.js
index 40d11d00d0a..d891e98827b 100644
--- a/jstests/sharding/libs/chunk_bounds_util.js
+++ b/jstests/sharding/libs/chunk_bounds_util.js
@@ -80,8 +80,9 @@ var chunkBoundsUtil = (function() {
*/
let _getShard = function(st, shardName) {
for (let i = 0; i < st._connections.length; i++) {
- if (st["rs" + i].name == shardName) {
- return st._connections[i];
+ const conn = st._connections[i];
+ if (conn.shardName === shardName) {
+ return conn;
}
}
};
diff --git a/jstests/sharding/libs/mongos_api_params_util.js b/jstests/sharding/libs/mongos_api_params_util.js
index 19c240124d9..d4c7f183ccf 100644
--- a/jstests/sharding/libs/mongos_api_params_util.js
+++ b/jstests/sharding/libs/mongos_api_params_util.js
@@ -11,6 +11,7 @@ let MongosAPIParametersUtil = (function() {
load('jstests/sharding/libs/remove_shard_util.js');
load('jstests/sharding/libs/sharded_transactions_helpers.js');
load('jstests/libs/auto_retry_transaction_in_sharding.js');
+ load('jstests/libs/catalog_shard_util.js');
// TODO SERVER-50144 Remove this and allow orphan checking.
// This test calls removeShard which can leave docs in config.rangeDeletions in state "pending",
@@ -56,7 +57,8 @@ let MongosAPIParametersUtil = (function() {
["permittedInTxn", true],
["permittedOnShardedCollection", true],
["requiresShardedCollection", false],
- ["requiresCommittedReads", false]]) {
+ ["requiresCommittedReads", false],
+ ["requiresCatalogShardEnabled", false]]) {
if (testCase.hasOwnProperty(propertyName)) {
assert(typeof testCase[propertyName] === "boolean",
`${propertyName} must be a boolean: ${tojson(testCase)}`);
@@ -66,6 +68,7 @@ let MongosAPIParametersUtil = (function() {
}
assert(testCase.shardCommandName ? typeof (testCase.shardCommandName) === "string" : true);
+ assert(testCase.shardPrimary ? typeof (testCase.shardPrimary) === "function" : true);
assert(testCase.configServerCommandName
? typeof (testCase.configServerCommandName) === "string"
: true);
@@ -84,6 +87,13 @@ let MongosAPIParametersUtil = (function() {
assert.commandWorked(st.stopBalancer());
}
+ function awaitTransitionToDedicatedConfigServer() {
+ assert.commandWorked(st.startBalancer());
+ st.awaitBalancerRound();
+ CatalogShardUtil.transitionToDedicatedConfigServer(st);
+ assert.commandWorked(st.stopBalancer());
+ }
+
// Each test case is potentially run with any combination of API parameters, in
// sharded/unsharded collection, inside or outside of a multi-document transaction. The "db"
// database is dropped and recreated between test cases, so most tests don't need custom setUp
@@ -140,13 +150,32 @@ let MongosAPIParametersUtil = (function() {
runsAgainstAdminDb: true,
configServerCommandName: "_configsvrAddShard",
shardCommandName: "_addShard",
+ shardPrimary: () => {
+ return st.rs1.getPrimary();
+ },
permittedInTxn: false,
setUp: () => {
// Remove shard0 so we can add it back.
assert.commandWorked(st.s0.getDB("db").dropDatabase());
- awaitRemoveShard(st.shard0.shardName);
+ awaitRemoveShard(st.shard1.shardName);
},
- command: () => ({addShard: st.rs0.getURL()})
+ command: () => ({addShard: st.rs1.getURL()})
+ }
+ },
+ {
+ commandName: "transitionToCatalogShard",
+ run: {
+ inAPIVersion1: false,
+ runsAgainstAdminDb: true,
+ configServerCommandName: "_configsvrTransitionToCatalogShard",
+ permittedInTxn: false,
+ requiresCatalogShardEnabled: true,
+ setUp: () => {
+ // Remove shard0 so we can add it back.
+ assert.commandWorked(st.s0.getDB("db").dropDatabase());
+ awaitTransitionToDedicatedConfigServer();
+ },
+ command: () => ({transitionToCatalogShard: 1})
}
},
{
@@ -772,13 +801,13 @@ let MongosAPIParametersUtil = (function() {
context.thread.start();
const adminDb = st.s0.getDB("admin");
- jsTestLog(`Waiting for "find" on "${st.rs0.name}" ` +
+ jsTestLog(`Waiting for "find" on "${st.shard0.shardName}" ` +
`with comment ${uuidStr} in currentOp`);
assert.soon(() => {
const filter = {
"command.find": "collection",
"command.comment": uuidStr,
- shard: st.rs0.name
+ shard: st.shard0.shardName
};
const inprog = adminDb.currentOp(filter).inprog;
if (inprog.length === 1) {
@@ -1017,12 +1046,29 @@ let MongosAPIParametersUtil = (function() {
runsAgainstAdminDb: true,
configServerCommandName: "_configsvrRemoveShard",
permittedInTxn: false,
- command: () => ({removeShard: st.shard0.shardName}),
+ command: () => ({removeShard: st.shard1.shardName}),
cleanUp: () => {
// Wait for the shard to be removed completely before re-adding it.
- awaitRemoveShard(st.shard0.shardName);
+ awaitRemoveShard(st.shard1.shardName);
assert.commandWorked(st.s0.getDB("admin").runCommand(
- {addShard: st.rs0.getURL(), name: st.shard0.shardName}));
+ {addShard: st.rs1.getURL(), name: st.shard1.shardName}));
+ }
+ }
+ },
+ {
+ commandName: "transitionToDedicatedConfigServer",
+ run: {
+ inAPIVersion1: false,
+ runsAgainstAdminDb: true,
+ configServerCommandName: "_configsvrTransitionToDedicatedConfigServer",
+ permittedInTxn: false,
+ requiresCatalogShardEnabled: true,
+ command: () => ({transitionToDedicatedConfigServer: 1}),
+ cleanUp: () => {
+ // Wait for the shard to be removed completely before re-adding it.
+ awaitTransitionToDedicatedConfigServer(st.shard0.shardName);
+ assert.commandWorked(
+ st.s0.getDB("admin").runCommand({transitionToCatalogShard: 1}));
}
}
},
@@ -1412,6 +1458,8 @@ let MongosAPIParametersUtil = (function() {
assert.commandWorked(st.rs0.getPrimary().adminCommand({serverStatus: 1}))
.storageEngine.supportsCommittedReads;
+ const isCatalogShardEnabled = CatalogShardUtil.isEnabledIgnoringFCV(st);
+
(() => {
// Validate test cases for all commands. Ensure there is at least one test case for every
// mongos command, and that the test cases are well formed.
@@ -1526,6 +1574,9 @@ let MongosAPIParametersUtil = (function() {
if (!supportsCommittedReads && runOrExplain.requiresCommittedReads)
continue;
+ if (!isCatalogShardEnabled && runOrExplain.requiresCatalogShardEnabled)
+ continue;
+
if (apiParameters.apiStrict && !runOrExplain.inAPIVersion1)
continue;
@@ -1553,7 +1604,8 @@ let MongosAPIParametersUtil = (function() {
st.s.getDB("db")["collection"].insert({_id: 0}, {writeConcern: {w: "majority"}}));
const configPrimary = st.configRS.getPrimary();
- const shardZeroPrimary = st.rs0.getPrimary();
+ const shardPrimary =
+ runOrExplain.shardPrimary ? runOrExplain.shardPrimary() : st.rs0.getPrimary();
const context = {apiParameters: apiParameters};
const commandDbName = runOrExplain.runsAgainstAdminDb ? "admin" : "db";
@@ -1577,7 +1629,7 @@ let MongosAPIParametersUtil = (function() {
Object.assign(Object.assign({}, commandBody), apiParameters);
assert.commandWorked(configPrimary.adminCommand({clearLog: "global"}));
- assert.commandWorked(shardZeroPrimary.adminCommand({clearLog: "global"}));
+ assert.commandWorked(shardPrimary.adminCommand({clearLog: "global"}));
const message =
`[${i + 1} of ${testInstances.length}]: command ${tojson(commandWithAPIParams)}` +
` ${shardedCollection ? "sharded" : "unsharded"},` +
@@ -1587,7 +1639,7 @@ let MongosAPIParametersUtil = (function() {
flushRoutersAndRefreshShardMetadata(st, {ns: "db.collection"});
jsTestLog(`Running ${message}`);
- setLogVerbosity([configPrimary, shardZeroPrimary, st.rs1.getPrimary()],
+ setLogVerbosity([configPrimary, st.rs0.getPrimary(), st.rs1.getPrimary()],
{"command": {"verbosity": 2}});
const res = context.db.runCommand(commandWithAPIParams);
@@ -1619,10 +1671,10 @@ let MongosAPIParametersUtil = (function() {
if (shardCommandName) {
jsTestLog(`Check for ${shardCommandName} in shard server's log`);
- checkPrimaryLog(shardZeroPrimary, shardCommandName, apiParameters);
+ checkPrimaryLog(shardPrimary, shardCommandName, apiParameters);
}
- setLogVerbosity([configPrimary, shardZeroPrimary, st.rs1.getPrimary()],
+ setLogVerbosity([configPrimary, st.rs0.getPrimary(), st.rs1.getPrimary()],
{"command": {"verbosity": 0}});
st.s0.getDB("db").runCommand({dropDatabase: 1});
diff --git a/jstests/sharding/linearizable_read_concern.js b/jstests/sharding/linearizable_read_concern.js
index bfd1563fd3d..98724828edd 100644
--- a/jstests/sharding/linearizable_read_concern.js
+++ b/jstests/sharding/linearizable_read_concern.js
@@ -19,6 +19,8 @@
* document. This test is mainly trying to ensure that system behavior is
* reasonable when executing linearizable reads in a sharded cluster, so as to
* exercise possible (invalid) user behavior.
+ *
+ * @tags: [temporary_catalog_shard_incompatible]
*/
load("jstests/replsets/rslib.js");
diff --git a/jstests/sharding/listDatabases.js b/jstests/sharding/listDatabases.js
index fe34dbe0aa7..7a9358c5a23 100644
--- a/jstests/sharding/listDatabases.js
+++ b/jstests/sharding/listDatabases.js
@@ -1,3 +1,4 @@
+// @tags: [temporary_catalog_shard_incompatible]
(function() {
'use strict';
var test = new ShardingTest({shards: 1, mongos: 1, other: {chunkSize: 1}});
diff --git a/jstests/sharding/live_shard_logical_initial_sync.js b/jstests/sharding/live_shard_logical_initial_sync.js
index ffecb60d802..9fd41c33e3d 100644
--- a/jstests/sharding/live_shard_logical_initial_sync.js
+++ b/jstests/sharding/live_shard_logical_initial_sync.js
@@ -3,7 +3,8 @@
* shards using logical initial sync.
*
* We control our own failovers, and we also need the RSM to react reasonably quickly to those.
- * @tags: [does_not_support_stepdowns, requires_streamable_rsm]
+ * @tags: [does_not_support_stepdowns, requires_streamable_rsm,
+ * temporary_catalog_shard_incompatible]
*/
(function() {
diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js
index 3576e68c517..aa9668e252e 100644
--- a/jstests/sharding/localhostAuthBypass.js
+++ b/jstests/sharding/localhostAuthBypass.js
@@ -27,7 +27,9 @@ var createUser = function(mongo) {
};
var addUsersToEachShard = function(st) {
- for (var i = 0; i < numShards; i++) {
+ // In catalog shard mode, skip the first shard because it is also the config server and will
+ // already have had the user created on it through mongos.
+ for (var i = TestData.catalogShard ? 1 : 0; i < numShards; i++) {
print("============ adding a user to shard " + i);
var d = st["shard" + i];
d.getDB("admin").createUser({user: username, pwd: password, roles: jsTest.adminUserRoles});
diff --git a/jstests/sharding/merge_all_chunks_on_shard.js b/jstests/sharding/merge_all_chunks_on_shard.js
index 402f5edb09f..b0c452fba33 100644
--- a/jstests/sharding/merge_all_chunks_on_shard.js
+++ b/jstests/sharding/merge_all_chunks_on_shard.js
@@ -5,6 +5,7 @@
* featureFlagAutoMerger,
* # Balancer is stopped when stepping down
* does_not_support_stepdowns,
+ * temporary_catalog_shard_incompatible,
* ]
*/
(function() {
diff --git a/jstests/sharding/merge_with_chunk_migrations.js b/jstests/sharding/merge_with_chunk_migrations.js
index 461fe57cf8d..53b87cbafd2 100644
--- a/jstests/sharding/merge_with_chunk_migrations.js
+++ b/jstests/sharding/merge_with_chunk_migrations.js
@@ -1,5 +1,9 @@
// Tests that the $merge aggregation stage is resilient to chunk migrations in both the source and
// output collection during execution.
+//
+// Fails while waiting for a specific aggregation to appear in currentOp; that check likely no
+// longer matches once the cluster topology changes.
+// @tags: [temporary_catalog_shard_incompatible]
(function() {
'use strict';
diff --git a/jstests/sharding/merge_with_drop_shard.js b/jstests/sharding/merge_with_drop_shard.js
index 41c5dafb105..c06f5866b40 100644
--- a/jstests/sharding/merge_with_drop_shard.js
+++ b/jstests/sharding/merge_with_drop_shard.js
@@ -1,5 +1,7 @@
// Tests that the $merge aggregation stage is resilient to drop shard in both the source and
// output collection during execution.
+//
+// @tags: [temporary_catalog_shard_incompatible]
(function() {
'use strict';
diff --git a/jstests/sharding/migration_coordinator_abort_failover.js b/jstests/sharding/migration_coordinator_abort_failover.js
index d7d31a9ce0b..aca052bf42d 100644
--- a/jstests/sharding/migration_coordinator_abort_failover.js
+++ b/jstests/sharding/migration_coordinator_abort_failover.js
@@ -1,6 +1,11 @@
/**
* Tests that a donor resumes coordinating a migration if it fails over after creating the
* migration coordinator document but before deleting it.
+ *
+ * Assumes a donor stepdown will trigger a failover migration response, but if the donor is the
+ * catalog shard, the stepdown instead triggers a full retry from mongos, which succeeds despite
+ * the original interrupted attempt correctly failing. TODO: see if the test can be reworked.
+ * @tags: [temporary_catalog_shard_incompatible]
*/
// This test induces failovers on shards.
diff --git a/jstests/sharding/migration_coordinator_shutdown_in_critical_section.js b/jstests/sharding/migration_coordinator_shutdown_in_critical_section.js
index 43f00e19537..1a39a9b43c3 100644
--- a/jstests/sharding/migration_coordinator_shutdown_in_critical_section.js
+++ b/jstests/sharding/migration_coordinator_shutdown_in_critical_section.js
@@ -7,6 +7,7 @@
* does_not_support_stepdowns,
* # Require persistence to restart nodes
* requires_persistence,
+ * temporary_catalog_shard_incompatible,
* ]
*/
diff --git a/jstests/sharding/migration_recovers_unfinished_migrations.js b/jstests/sharding/migration_recovers_unfinished_migrations.js
index 75fbdf5dc63..7203e3540a6 100644
--- a/jstests/sharding/migration_recovers_unfinished_migrations.js
+++ b/jstests/sharding/migration_recovers_unfinished_migrations.js
@@ -7,6 +7,7 @@
* # that migration by sending a new `moveChunk` command to the donor shard causing the test to
* # hang.
* does_not_support_stepdowns,
+ * temporary_catalog_shard_incompatible,
* ]
*/
(function() {
diff --git a/jstests/sharding/migration_server_status.js b/jstests/sharding/migration_server_status.js
index 3d643434e1b..f39efd75309 100644
--- a/jstests/sharding/migration_server_status.js
+++ b/jstests/sharding/migration_server_status.js
@@ -2,7 +2,7 @@
* Tests that serverStatus includes a migration status when called on the source shard of an active
* migration.
*
- * @tags: [requires_fcv_63]
+ * @tags: [requires_fcv_63, temporary_catalog_shard_incompatible]
*/
load('./jstests/libs/chunk_manipulation_util.js');
diff --git a/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js b/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
index 56485684825..b1a5c5c22d8 100644
--- a/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
+++ b/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js
@@ -56,7 +56,15 @@ adminDB.auth(adminUser.username, adminUser.password);
assert(st.s.getDB("admin").system.keys.count() >= 2);
let priRSConn = st.rs0.getPrimary().getDB("admin");
+if (TestData.catalogShard) {
+ // In catalog shard mode we've already used up the localhost exception on the first shard, so we
+ // have to auth to create the user below.
+ priRSConn.auth(adminUser.username, adminUser.password);
+}
priRSConn.createUser({user: rUser.username, pwd: rUser.password, roles: ["root"]});
+if (TestData.catalogShard) {
+ priRSConn.logout();
+}
priRSConn.auth(rUser.username, rUser.password);
// use assert.soon since it's possible the shard primary may not have refreshed
diff --git a/jstests/sharding/mongos_no_replica_set_refresh.js b/jstests/sharding/mongos_no_replica_set_refresh.js
index 7cd42e3e947..227d9855f24 100644
--- a/jstests/sharding/mongos_no_replica_set_refresh.js
+++ b/jstests/sharding/mongos_no_replica_set_refresh.js
@@ -1,4 +1,5 @@
// Tests that mongos and the shard discover changes to the shard's replica set membership.
+// @tags: [temporary_catalog_shard_incompatible]
load("jstests/replsets/rslib.js");
(function() {
diff --git a/jstests/sharding/move_chunk_interrupt_postimage.js b/jstests/sharding/move_chunk_interrupt_postimage.js
index 69537bb9ce0..ef45b888c0c 100644
--- a/jstests/sharding/move_chunk_interrupt_postimage.js
+++ b/jstests/sharding/move_chunk_interrupt_postimage.js
@@ -10,7 +10,7 @@ load("jstests/sharding/libs/create_sharded_collection_util.js");
load("jstests/libs/fail_point_util.js");
load('jstests/libs/parallel_shell_helpers.js');
-const st = new ShardingTest({mongos: 1, config: 1, shards: 2, rs: {nodes: 2}});
+const st = new ShardingTest({mongos: 1, shards: 2, rs: {nodes: 2}});
const interruptBeforeProcessingPrePostImageOriginatingOpFP =
configureFailPoint(st.rs1.getPrimary(), "interruptBeforeProcessingPrePostImageOriginatingOp");
diff --git a/jstests/sharding/names.js b/jstests/sharding/names.js
index aa20b4ead0a..bbae6cbe7c8 100644
--- a/jstests/sharding/names.js
+++ b/jstests/sharding/names.js
@@ -2,7 +2,7 @@
(function() {
'use strict';
-var st = new ShardingTest({shards: 0, mongos: 1});
+var st = new ShardingTest({shards: TestData.catalogShard ? 1 : 0, mongos: 1});
var rsA = new ReplSetTest({nodes: 2, name: "rsA", nodeOptions: {shardsvr: ""}});
var rsB = new ReplSetTest({nodes: 2, name: "rsB", nodeOptions: {shardsvr: ""}});
@@ -24,7 +24,7 @@ printjson(config.shards.find().toArray());
assert.commandWorked(mongos.adminCommand({addShard: rsB.getURL(), name: rsA.name}));
printjson(config.shards.find().toArray());
-assert.eq(2, config.shards.count(), "Error adding a shard");
+assert.eq(TestData.catalogShard ? 3 : 2, config.shards.count(), "Error adding a shard");
assert.eq(rsB.getURL(), config.shards.findOne({_id: rsA.name})["host"], "Wrong host for shard rsA");
assert.eq(rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB");
@@ -34,7 +34,7 @@ assert.commandWorked(mongos.adminCommand({removeshard: rsA.name}),
var res =
assert.commandWorked(mongos.adminCommand({removeshard: rsA.name}), "failed to remove shard");
-assert.eq(1,
+assert.eq(TestData.catalogShard ? 2 : 1,
config.shards.count(),
"Shard was not removed: " + res + "; Shards: " + tojson(config.shards.find().toArray()));
assert.eq(
@@ -44,7 +44,7 @@ assert.eq(
assert.commandWorked(mongos.adminCommand({addShard: rsB.getURL(), name: rsA.name}));
printjson(config.shards.find().toArray());
-assert.eq(2, config.shards.count(), "Error re-adding a shard");
+assert.eq(TestData.catalogShard ? 3 : 2, config.shards.count(), "Error re-adding a shard");
assert.eq(
rsB.getURL(), config.shards.findOne({_id: rsA.name})["host"], "Wrong host for shard rsA 3");
assert.eq(
diff --git a/jstests/sharding/nonreplicated_uuids_on_shardservers.js b/jstests/sharding/nonreplicated_uuids_on_shardservers.js
index 27836de0db1..5e97df80ed7 100644
--- a/jstests/sharding/nonreplicated_uuids_on_shardservers.js
+++ b/jstests/sharding/nonreplicated_uuids_on_shardservers.js
@@ -1,6 +1,6 @@
// SERVER-32255 This test ensures a node started with --shardsvr and added to a replica set receives
// UUIDs upon re-initiation.
-// @tags: [multiversion_incompatible]
+// @tags: [multiversion_incompatible, temporary_catalog_shard_incompatible]
(function() {
"use strict";
load("jstests/libs/check_uuids.js");
diff --git a/jstests/sharding/primary_config_server_blackholed_from_mongos.js b/jstests/sharding/primary_config_server_blackholed_from_mongos.js
index 080bc63057e..79c97676a34 100644
--- a/jstests/sharding/primary_config_server_blackholed_from_mongos.js
+++ b/jstests/sharding/primary_config_server_blackholed_from_mongos.js
@@ -11,7 +11,7 @@ TestData.skipCheckingIndexesConsistentAcrossCluster = true;
TestData.skipCheckOrphans = true;
TestData.skipCheckShardFilteringMetadata = true;
-var st = new ShardingTest({shards: 2, mongos: 1, useBridge: true});
+var st = new ShardingTest({shards: 2, mongos: 1, useBridge: true, config: 3});
var testDB = st.s.getDB('BlackHoleDB');
var configDB = st.s.getDB('config');
diff --git a/jstests/sharding/printShardingStatus.js b/jstests/sharding/printShardingStatus.js
index 951556fbfdd..e6d8783b55b 100644
--- a/jstests/sharding/printShardingStatus.js
+++ b/jstests/sharding/printShardingStatus.js
@@ -103,7 +103,9 @@ config.getCollectionInfos().forEach(function(c) {
"system.indexBuilds",
"system.preimages",
"system.change_collection",
- "cache.chunks.config.system.sessions"
+ "cache.chunks.config.system.sessions",
+ "system.sessions",
+ "system.sharding_ddl_coordinators",
].includes(c.name)) {
return;
}
diff --git a/jstests/sharding/query/aggregation_currentop.js b/jstests/sharding/query/aggregation_currentop.js
index e9dc5f04328..af80a65af01 100644
--- a/jstests/sharding/query/aggregation_currentop.js
+++ b/jstests/sharding/query/aggregation_currentop.js
@@ -84,7 +84,11 @@ function createUsers(conn) {
}
// Create necessary users at both cluster and shard-local level.
-createUsers(shardConn);
+if (!TestData.catalogShard) {
+ // In catalog shard mode, the first shard is the config server, so creating the users via mongos
+ // below will also create them on the shard.
+ createUsers(shardConn);
+}
createUsers(mongosConn);
// Create a test database and some dummy data on rs0.
@@ -94,7 +98,7 @@ for (let i = 0; i < 5; i++) {
assert.commandWorked(clusterTestDB.test.insert({_id: i, a: i}));
}
-st.ensurePrimaryShard(clusterTestDB.getName(), shardRS.name);
+st.ensurePrimaryShard(clusterTestDB.getName(), st.shard0.shardName);
// Restarts a replset with a different set of parameters. Explicitly set the keyFile to null,
// since if ReplSetTest#stopSet sees a keyFile property, it attempts to auth before dbhash
@@ -417,7 +421,7 @@ function runCommonTests(conn, curOpSpec) {
if (isRemoteShardCurOp) {
assert.docEq(expectedStages, explainPlan.splitPipeline.shardsPart);
for (let i = 0; i < stParams.shards; i++) {
- let shardName = st["rs" + i].name;
+ let shardName = st["shard" + i].shardName;
assert.docEq(expectedStages, explainPlan.shards[shardName].stages);
}
} else if (isLocalMongosCurOp) {
@@ -474,6 +478,17 @@ assert.commandFailedWithCode(clusterAdminDB.currentOp({$ownOps: true}), ErrorCod
assert(clusterAdminDB.logout());
assert(clusterAdminDB.auth("user_inprog", "pwd"));
+const expectedOutput = TestData.catalogShard ?
+[
+ {_id: {shard: "aggregation_currentop-rs1", host: st.rs1.getPrimary().host}},
+ {_id: {shard: "aggregation_currentop-rs2", host: st.rs2.getPrimary().host}},
+ {_id: {shard: "config", host: st.rs0.getPrimary().host}}
+] :
+[
+ {_id: {shard: "aggregation_currentop-rs0", host: st.rs0.getPrimary().host}},
+ {_id: {shard: "aggregation_currentop-rs1", host: st.rs1.getPrimary().host}},
+ {_id: {shard: "aggregation_currentop-rs2", host: st.rs2.getPrimary().host}}
+];
assert.eq(clusterAdminDB
.aggregate([
{$currentOp: {allUsers: true, idleConnections: true}},
@@ -481,11 +496,7 @@ assert.eq(clusterAdminDB
{$sort: {_id: 1}}
])
.toArray(),
- [
- {_id: {shard: "aggregation_currentop-rs0", host: st.rs0.getPrimary().host}},
- {_id: {shard: "aggregation_currentop-rs1", host: st.rs1.getPrimary().host}},
- {_id: {shard: "aggregation_currentop-rs2", host: st.rs2.getPrimary().host}}
- ]);
+ expectedOutput);
// Test that a $currentOp pipeline with {localOps:true} returns operations from the mongoS
// itself rather than the shards.
@@ -871,7 +882,7 @@ runNoAuthTests(mongosConn, {localOps: true});
//
// Take the replica set out of the cluster.
-shardConn = restartReplSet(st.rs0, {shardsvr: null});
+shardConn = restartReplSet(st.rs1, {shardsvr: null});
shardTestDB = shardConn.getDB(jsTestName());
shardAdminDB = shardConn.getDB("admin");
@@ -932,6 +943,6 @@ assert.commandWorked(shardAdminDB.killOp(op.opid));
awaitShell();
// Add the shard back into the replset so that it can be validated by st.stop().
-shardConn = restartReplSet(st.rs0, {shardsvr: ""});
+shardConn = restartReplSet(st.rs1, {shardsvr: ""});
st.stop();
})();
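
The expectedOutput split above is needed because shard0 registers under the name "config" in catalog shard mode. Where a test only needs the shard identifiers, a topology-agnostic sketch like the following avoids hard-coding replica set names; the plain two-shard ShardingTest is assumed for illustration and is not part of the patch.

const st = new ShardingTest({shards: 2});

// st.shard0.shardName resolves to "config" when TestData.catalogShard is set, and to the replica
// set name (e.g. "<testName>-rs0") otherwise, so assertions built from shardName hold in both
// topologies.
const expectedShards = [st.shard0.shardName, st.shard1.shardName].sort();
jsTestLog("Expecting $currentOp results from shards: " + tojson(expectedShards));

st.stop();
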
diff --git a/jstests/sharding/query/current_op_no_shards.js b/jstests/sharding/query/current_op_no_shards.js
index 992b68f9f63..d9af6d3cf19 100644
--- a/jstests/sharding/query/current_op_no_shards.js
+++ b/jstests/sharding/query/current_op_no_shards.js
@@ -1,6 +1,9 @@
/**
* Test that running a $currentOp aggregation on a cluster with no shards returns an empty result
* set, and does not cause the mongoS floating point failure described in SERVER-30084.
+ *
+ * Requires a cluster with no shards, so it cannot run with a catalog shard.
+ * @tags: [catalog_shard_incompatible]
*/
(function() {
const st = new ShardingTest({shards: 0});
@@ -14,4 +17,4 @@ assert.eq(adminDB.aggregate([{$currentOp: {}}]).itcount(), 0);
assert.eq(adminDB.currentOp().inprog.length, 0);
st.stop();
-})();
\ No newline at end of file
+})();
diff --git a/jstests/sharding/query/explain_agg_read_pref.js b/jstests/sharding/query/explain_agg_read_pref.js
index 1735c1a1138..7ecdc123018 100644
--- a/jstests/sharding/query/explain_agg_read_pref.js
+++ b/jstests/sharding/query/explain_agg_read_pref.js
@@ -1,5 +1,7 @@
/**
* Tests that readPref applies on an explain for an aggregation command.
+ *
+ * @tags: [temporary_catalog_shard_incompatible]
*/
(function() {
"use strict";
diff --git a/jstests/sharding/query/owning_shard_expression.js b/jstests/sharding/query/owning_shard_expression.js
index 5bbd7721196..fd0707ca584 100644
--- a/jstests/sharding/query/owning_shard_expression.js
+++ b/jstests/sharding/query/owning_shard_expression.js
@@ -2,7 +2,7 @@
* Tests that $_internalOwningShard expression correctly computes the shard id the document belongs
* to, while executing on mongod.
*
- * @tags: [requires_fcv_63]
+ * @tags: [requires_fcv_63, temporary_catalog_shard_incompatible]
*/
(function() {
"use strict";
diff --git a/jstests/sharding/query/view_rewrite.js b/jstests/sharding/query/view_rewrite.js
index f105f9f2e71..3c27db5e400 100644
--- a/jstests/sharding/query/view_rewrite.js
+++ b/jstests/sharding/query/view_rewrite.js
@@ -1,6 +1,8 @@
/**
* Tests that query options are not dropped by mongos when a query against a view is rewritten as an
* aggregation against the underlying collection.
+ *
+ * @tags: [temporary_catalog_shard_incompatible]
*/
(function() {
"use strict";
diff --git a/jstests/sharding/read_after_optime.js b/jstests/sharding/read_after_optime.js
index 9dddcea1594..0936a3f560b 100644
--- a/jstests/sharding/read_after_optime.js
+++ b/jstests/sharding/read_after_optime.js
@@ -3,7 +3,8 @@
(function() {
'use strict';
-var shardingTest = new ShardingTest({shards: 0});
+var shardingTest = new ShardingTest({shards: TestData.catalogShard ? 1 : 0});
+
assert(shardingTest.configRS, 'this test requires config servers to run in CSRS mode');
var configReplSetTest = shardingTest.configRS;
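
The shards: TestData.catalogShard ? 1 : 0 adjustment above follows the same reasoning as names.js and reconfig_fails_no_cwwc_set_sharding.js: with a catalog shard the config server is itself a shard, so a cluster can never start with zero shards and count-based assertions need a one-shard offset. A minimal sketch of that pattern, illustrative only and not part of the patch:

const st = new ShardingTest({shards: TestData.catalogShard ? 1 : 0, mongos: 1});

// In catalog shard mode the config server already appears in config.shards, so the baseline
// shard count is 1 instead of 0.
const baseline = TestData.catalogShard ? 1 : 0;
assert.eq(baseline, st.s.getDB("config").shards.count());

st.stop();
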
diff --git a/jstests/sharding/read_write_concern_defaults_commands_api.js b/jstests/sharding/read_write_concern_defaults_commands_api.js
index f1236364b8b..3d8764993e5 100644
--- a/jstests/sharding/read_write_concern_defaults_commands_api.js
+++ b/jstests/sharding/read_write_concern_defaults_commands_api.js
@@ -343,7 +343,7 @@ jsTestLog("Testing standalone replica set with implicit default write concern {w
jsTestLog("Testing sharded cluster with implicit default write concern majority...");
{
- let st = new ShardingTest({shards: 1, rs: {nodes: 2}});
+ let st = new ShardingTest({shards: 2, rs: {nodes: 2}});
// Mongos succeeds.
verifyDefaultState(st.s, true /* isImplicitDefaultWCMajority */);
@@ -351,12 +351,12 @@ jsTestLog("Testing sharded cluster with implicit default write concern majority.
verifyDefaultRWCommandsInvalidInput(st.s);
// Shard node fails.
- verifyDefaultRWCommandsFailWithCode(st.rs0.getPrimary(), {failureCode: 51301});
- assert.commandFailedWithCode(st.rs0.getSecondary().adminCommand({getDefaultRWConcern: 1}),
+ verifyDefaultRWCommandsFailWithCode(st.rs1.getPrimary(), {failureCode: 51301});
+ assert.commandFailedWithCode(st.rs1.getSecondary().adminCommand({getDefaultRWConcern: 1}),
51301);
// Secondaries fail setDefaultRWConcern before executing the command.
assert.commandFailedWithCode(
- st.rs0.getSecondary().adminCommand(
+ st.rs1.getSecondary().adminCommand(
{setDefaultRWConcern: 1, defaultReadConcern: {level: "local"}}),
ErrorCodes.NotWritablePrimary);
diff --git a/jstests/sharding/read_write_concern_defaults_propagation.js b/jstests/sharding/read_write_concern_defaults_propagation.js
index 518a8ab47e8..3bee0ebcc2a 100644
--- a/jstests/sharding/read_write_concern_defaults_propagation.js
+++ b/jstests/sharding/read_write_concern_defaults_propagation.js
@@ -10,7 +10,9 @@ var st = new ShardingTest({
other: {
rs: true,
rs0: {nodes: 1},
- }
+ },
+ // The config server needs enough nodes to allow a custom write concern default of w:2
+ config: 3,
});
const mongosAndConfigNodes = [st.s0, st.s1, st.s2, ...st.configRS.nodes];
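
The config: 3 bump above matters because in catalog shard mode the config replica set doubles as shard0, so a custom cluster-wide default of w:2 has to be satisfiable by the config nodes themselves. The sketch below shows the kind of default being exercised; the exact topology is an assumption for illustration, not the test's own setup.

const st = new ShardingTest({shards: 1, mongos: 1, config: 3, rs: {nodes: 2}});

// With three config nodes, a w:2 cluster-wide default stays satisfiable even when the config
// server is also a shard.
assert.commandWorked(
    st.s.adminCommand({setDefaultRWConcern: 1, defaultWriteConcern: {w: 2}}));

st.stop();
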
diff --git a/jstests/sharding/reconfig_fails_no_cwwc_set_sharding.js b/jstests/sharding/reconfig_fails_no_cwwc_set_sharding.js
index a805b82883a..15eebaac2c2 100644
--- a/jstests/sharding/reconfig_fails_no_cwwc_set_sharding.js
+++ b/jstests/sharding/reconfig_fails_no_cwwc_set_sharding.js
@@ -2,7 +2,14 @@
* Test that a reconfig for a shard that would change the implicit default write concern to w:1
* fails if CWWC is not set.
*
- * @tags: [requires_majority_read_concern, requires_persistence, requires_fcv_51]
+ * Temporarily incompatible with catalog shard mode because the test hits a sharding metadata
+ * hook failure on cluster shutdown.
+ * @tags: [
+ * requires_majority_read_concern,
+ * requires_persistence,
+ * requires_fcv_51,
+ * temporary_catalog_shard_incompatible,
+ * ]
*/
(function() {
@@ -80,7 +87,7 @@ shardServer = new ReplSetTest(
shardServer.startSet();
shardServer.initiateWithHighElectionTimeout();
-const st = new ShardingTest({shards: 0, mongos: 1});
+const st = new ShardingTest({shards: TestData.catalogShard ? 1 : 0, mongos: 1});
var admin = st.getDB('admin');
jsTestLog("Adding the shard to the cluster should succeed.");
diff --git a/jstests/sharding/remove2.js b/jstests/sharding/remove2.js
index d7a37ec780a..db4d0ae4831 100644
--- a/jstests/sharding/remove2.js
+++ b/jstests/sharding/remove2.js
@@ -164,7 +164,7 @@ assert.eq(1, st.s.getDB('test2').foo.find().itcount());
// Can't shut down with rst2 in the set or ShardingTest will fail trying to cleanup on shutdown.
// Have to take out rst2 and put rst1 back into the set so that it can clean up.
jsTestLog("Resetting the sharding test to its initial state to allow the test to shut down.");
-assert.commandWorked(st.admin.runCommand({movePrimary: 'test2', to: st.rs0.name}));
+assert.commandWorked(st.admin.runCommand({movePrimary: 'test2', to: st.shard0.shardName}));
removeShardAndCleanup(st, coll, rst2);
rst2.stopSet();
diff --git a/jstests/sharding/repl_monitor_refresh.js b/jstests/sharding/repl_monitor_refresh.js
index 90041649f12..c5cbed3c20d 100644
--- a/jstests/sharding/repl_monitor_refresh.js
+++ b/jstests/sharding/repl_monitor_refresh.js
@@ -3,7 +3,7 @@ load("jstests/replsets/rslib.js");
/**
* Test for making sure that the replica seed list in the config server does not
* become invalid when a replica set reconfig happens.
- * @tags: [multiversion_incompatible]
+ * @tags: [multiversion_incompatible, temporary_catalog_shard_incompatible]
*/
(function() {
"use strict";
diff --git a/jstests/sharding/replmonitor_bad_seed.js b/jstests/sharding/replmonitor_bad_seed.js
index 402cc7f9016..e99dcab248f 100644
--- a/jstests/sharding/replmonitor_bad_seed.js
+++ b/jstests/sharding/replmonitor_bad_seed.js
@@ -22,8 +22,15 @@
'use strict';
load("jstests/replsets/rslib.js");
-var st = new ShardingTest({shards: 1, rs: {oplogSize: 10}});
-var replTest = st.rs0;
+var st, replTest;
+if (TestData.catalogShard) {
+ // Use a second shard so we don't shut down the config server.
+ st = new ShardingTest({shards: 2, rs: {oplogSize: 10}});
+ replTest = st.rs1;
+} else {
+ st = new ShardingTest({shards: 1, rs: {oplogSize: 10}});
+ replTest = st.rs0;
+}
assert.commandWorked(st.s0.adminCommand({enableSharding: 'test'}));
assert.commandWorked(st.s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
diff --git a/jstests/sharding/resharding_change_stream_namespace_filtering.js b/jstests/sharding/resharding_change_stream_namespace_filtering.js
index 85b5fd8a33d..9beaec35ffd 100644
--- a/jstests/sharding/resharding_change_stream_namespace_filtering.js
+++ b/jstests/sharding/resharding_change_stream_namespace_filtering.js
@@ -3,7 +3,8 @@
* on collection Y. Exercises the fix for SERVER-64780.
* @tags: [
* uses_change_streams,
- * requires_fcv_50
+ * requires_fcv_50,
+ * temporary_catalog_shard_incompatible,
* ]
*/
diff --git a/jstests/sharding/resharding_disallow_drop.js b/jstests/sharding/resharding_disallow_drop.js
index 144abd55b16..dca009de969 100644
--- a/jstests/sharding/resharding_disallow_drop.js
+++ b/jstests/sharding/resharding_disallow_drop.js
@@ -3,6 +3,7 @@
* @tags: [
* requires_fcv_53,
* featureFlagRecoverableShardsvrReshardCollectionCoordinator,
+ * temporary_catalog_shard_incompatible,
* ]
*/
(function() {
diff --git a/jstests/sharding/resharding_nonblocking_coordinator_rebuild.js b/jstests/sharding/resharding_nonblocking_coordinator_rebuild.js
index 2ee6c76aaf1..c005f1e0e77 100644
--- a/jstests/sharding/resharding_nonblocking_coordinator_rebuild.js
+++ b/jstests/sharding/resharding_nonblocking_coordinator_rebuild.js
@@ -1,6 +1,9 @@
/**
* Tests that resharding participants do not block replication while waiting for the
* ReshardingCoordinatorService to be rebuilt.
+ *
+ * This looks like a test incompatibility, but it should be verified and the test possibly reworked.
+ * @tags: [temporary_catalog_shard_incompatible]
*/
(function() {
"use strict";
diff --git a/jstests/sharding/resharding_retryable_writes.js b/jstests/sharding/resharding_retryable_writes.js
index 5ff2d8fe7af..e2d1d781ea5 100644
--- a/jstests/sharding/resharding_retryable_writes.js
+++ b/jstests/sharding/resharding_retryable_writes.js
@@ -4,7 +4,9 @@
* txnCloners were not started until after waiting for reshardingMinimumOperationDurationMillis to
* elapse.
*
- * @tags: [uses_atclustertime]
+ * The higher minimumOperationDurationMS parameter is not set on the config server, so the test
+ * waits for the default value rather than the expected higher one.
+ * @tags: [uses_atclustertime, temporary_catalog_shard_incompatible]
*/
(function() {
diff --git a/jstests/sharding/resharding_size_estimate.js b/jstests/sharding/resharding_size_estimate.js
index 13b43a3d237..a6c8e3c799b 100644
--- a/jstests/sharding/resharding_size_estimate.js
+++ b/jstests/sharding/resharding_size_estimate.js
@@ -3,6 +3,7 @@
*
* @tags: [
* uses_atclustertime,
+ * temporary_catalog_shard_incompatible,
* ]
*/
diff --git a/jstests/sharding/retryable_writes.js b/jstests/sharding/retryable_writes.js
index bd2ea9044e8..e43b440aa05 100644
--- a/jstests/sharding/retryable_writes.js
+++ b/jstests/sharding/retryable_writes.js
@@ -1,6 +1,8 @@
/**
* Test basic retryable write without errors by checking that the resulting collection after the
* retry is as expected and it does not create additional oplog entries.
+ *
+ * @tags: [temporary_catalog_shard_incompatible]
*/
(function() {
"use strict";
diff --git a/jstests/sharding/return_partial_shards_down.js b/jstests/sharding/return_partial_shards_down.js
index 8c6781aeb1f..ec76e5bfa52 100644
--- a/jstests/sharding/return_partial_shards_down.js
+++ b/jstests/sharding/return_partial_shards_down.js
@@ -85,4 +85,12 @@ checkDocCount(collAllShards, returnPartialFlag, true, 0);
jsTest.log("DONE!");
+if (TestData.catalogShard) {
+ // Sharding test stop requires the config server to be up, so restart the first shard if it's
+ // the config server.
+ st.rs0.startSet({restart: true});
+ st.rs0.initiate();
+ st.rs0.awaitReplication();
+}
+
st.stop();
diff --git a/jstests/sharding/sessions_collection_auto_healing.js b/jstests/sharding/sessions_collection_auto_healing.js
index 109b8fe96d0..38a4a0c3269 100644
--- a/jstests/sharding/sessions_collection_auto_healing.js
+++ b/jstests/sharding/sessions_collection_auto_healing.js
@@ -1,5 +1,7 @@
/**
+ * Requires no shards.
* @tags: [
+ * catalog_shard_incompatible,
* ]
*/
load('jstests/libs/sessions_collection.js');
diff --git a/jstests/sharding/set_cluster_parameter.js b/jstests/sharding/set_cluster_parameter.js
index e2229f224fd..d5552bf002f 100644
--- a/jstests/sharding/set_cluster_parameter.js
+++ b/jstests/sharding/set_cluster_parameter.js
@@ -4,9 +4,12 @@
* We have a restart in the test with some stored values that must be preserved so it cannot run in
* inMemory variants
*
+ * TODO SERVER-74447: Verify cluster parameters added to dedicated config server work correctly
+ * when config is added as a shard.
* @tags: [
* does_not_support_stepdowns,
* requires_persistence,
+ * temporary_catalog_shard_incompatible,
* ]
*/
(function() {
diff --git a/jstests/sharding/set_fcv_logging.js b/jstests/sharding/set_fcv_logging.js
index 99e3aa38849..68b39649406 100644
--- a/jstests/sharding/set_fcv_logging.js
+++ b/jstests/sharding/set_fcv_logging.js
@@ -5,7 +5,11 @@
* FCV is upgrading or downgrading (6744301)
* FCV upgrade or downgrade success (6744302).
*
- * @tags: [multiversion_incompatible, does_not_support_stepdowns]
+ * @tags: [
+ * multiversion_incompatible,
+ * does_not_support_stepdowns,
+ * temporary_catalog_shard_incompatible,
+ * ]
*/
(function() {
diff --git a/jstests/sharding/set_fcv_to_downgrading_fast.js b/jstests/sharding/set_fcv_to_downgrading_fast.js
index 6871af023ed..545489516fa 100644
--- a/jstests/sharding/set_fcv_to_downgrading_fast.js
+++ b/jstests/sharding/set_fcv_to_downgrading_fast.js
@@ -2,7 +2,14 @@
* Tests that FCV downgrade will reach the transitional kDowngrading state quickly (within a few
* seconds).
*
- * @tags: [featureFlagDowngradingToUpgrading, multiversion_incompatible, does_not_support_stepdowns]
+ * Catalog shard incompatible because we do not currently allow downgrading FCV with a catalog
+ * shard. TODO SERVER-73279: Enable in catalog shard mode when it supports FCV downgrade.
+ * @tags: [
+ * featureFlagDowngradingToUpgrading,
+ * multiversion_incompatible,
+ * does_not_support_stepdowns,
+ * catalog_shard_incompatible,
+ * ]
*/
(function() {
"use strict";
diff --git a/jstests/sharding/shard_collection_config_db.js b/jstests/sharding/shard_collection_config_db.js
index e4787dc13a0..12a456ce894 100644
--- a/jstests/sharding/shard_collection_config_db.js
+++ b/jstests/sharding/shard_collection_config_db.js
@@ -1,3 +1,5 @@
+// Requires no shards.
+// @tags: [catalog_shard_incompatible]
(function() {
'use strict';
@@ -32,7 +34,6 @@ jsTest.log('Only system.sessions may be sharded');
st.stop();
-// Cannot shard things in config without shards.
{
var st = new ShardingTest({shards: 0});
var admin = st.s.getDB('admin');
diff --git a/jstests/sharding/shard_identity_config_update.js b/jstests/sharding/shard_identity_config_update.js
index 43c10bbbd22..b0b018b5b29 100644
--- a/jstests/sharding/shard_identity_config_update.js
+++ b/jstests/sharding/shard_identity_config_update.js
@@ -1,7 +1,10 @@
/**
* Tests that the config server connection string in the shard identity document of both the
* primary and secondary will get updated whenever the config server membership changes.
- * @tags: [requires_persistence]
+ *
+ * Shuts down the first shard but expects the config server to still be up. TODO: see whether the
+ * test can be reworked to get coverage in catalog shard mode.
+ * @tags: [requires_persistence, temporary_catalog_shard_incompatible]
*/
// Checking UUID consistency involves talking to a shard node, which in this test is shutdown
diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js
index 4aa2afdc524..12e2c24c8ec 100644
--- a/jstests/sharding/shard_insert_getlasterror_w2.js
+++ b/jstests/sharding/shard_insert_getlasterror_w2.js
@@ -1,5 +1,6 @@
// replica set as solo shard
// TODO: Add assertion code that catches hang
+// @tags: [temporary_catalog_shard_incompatible]
// The UUID and index check must be able to contact the shard primaries, but this test manually
// stops 2/3 nodes of a replica set.
diff --git a/jstests/sharding/shard_removal_triggers_catalog_cache_invalidation.js b/jstests/sharding/shard_removal_triggers_catalog_cache_invalidation.js
index 1a07396ddeb..d2e40b75b1a 100644
--- a/jstests/sharding/shard_removal_triggers_catalog_cache_invalidation.js
+++ b/jstests/sharding/shard_removal_triggers_catalog_cache_invalidation.js
@@ -1,6 +1,8 @@
/**
* Tests that shard removal triggers an update of the catalog cache so that routers don't continue
* to target shards that have been removed.
+ *
+ * @tags: [temporary_catalog_shard_incompatible]
*/
(function() {
'use strict';
diff --git a/jstests/sharding/sharding_non_transaction_snapshot_aggregate.js b/jstests/sharding/sharding_non_transaction_snapshot_aggregate.js
index a3190fecb29..ce97d916494 100644
--- a/jstests/sharding/sharding_non_transaction_snapshot_aggregate.js
+++ b/jstests/sharding/sharding_non_transaction_snapshot_aggregate.js
@@ -5,6 +5,7 @@
* @tags: [
* requires_majority_read_concern,
* requires_persistence,
+ * temporary_catalog_shard_incompatible,
* ]
*/
diff --git a/jstests/sharding/sharding_non_transaction_snapshot_read.js b/jstests/sharding/sharding_non_transaction_snapshot_read.js
index 9cf744c7c45..9c93e9d6082 100644
--- a/jstests/sharding/sharding_non_transaction_snapshot_read.js
+++ b/jstests/sharding/sharding_non_transaction_snapshot_read.js
@@ -4,6 +4,7 @@
* @tags: [
* requires_majority_read_concern,
* requires_persistence,
+ * temporary_catalog_shard_incompatible,
* ]
*/
diff --git a/jstests/sharding/sharding_rs2.js b/jstests/sharding/sharding_rs2.js
index b7df5a40f16..33df001f014 100644
--- a/jstests/sharding/sharding_rs2.js
+++ b/jstests/sharding/sharding_rs2.js
@@ -8,6 +8,7 @@
// storage engines which do not support the command.
// @tags: [
// requires_fsync,
+// temporary_catalog_shard_incompatible,
// ]
(function() {
diff --git a/jstests/sharding/single_shard_transaction_with_arbiter.js b/jstests/sharding/single_shard_transaction_with_arbiter.js
index 7f91157fd43..846767f4458 100644
--- a/jstests/sharding/single_shard_transaction_with_arbiter.js
+++ b/jstests/sharding/single_shard_transaction_with_arbiter.js
@@ -1,8 +1,10 @@
/**
* Tests that single shard transactions succeed against replica sets that contain arbiters.
*
+ * A config server can't have arbiter nodes.
* @tags: [
* uses_transactions,
+ * catalog_shard_incompatible,
* ]
*/
diff --git a/jstests/sharding/snapshot_reads_target_at_point_in_time.js b/jstests/sharding/snapshot_reads_target_at_point_in_time.js
index a5f81fb932e..f4a40150702 100644
--- a/jstests/sharding/snapshot_reads_target_at_point_in_time.js
+++ b/jstests/sharding/snapshot_reads_target_at_point_in_time.js
@@ -28,7 +28,6 @@ const ns = dbName + '.' + collName;
const st = new ShardingTest({
shards: 3,
mongos: 1,
- config: 1,
other: {
rs0: {nodes: 2},
rs1: {nodes: 2},
diff --git a/jstests/sharding/test_resharding_test_fixture_shutdown_retry_needed.js b/jstests/sharding/test_resharding_test_fixture_shutdown_retry_needed.js
index 79e087e2472..c727b227145 100644
--- a/jstests/sharding/test_resharding_test_fixture_shutdown_retry_needed.js
+++ b/jstests/sharding/test_resharding_test_fixture_shutdown_retry_needed.js
@@ -7,6 +7,7 @@
* @tags: [
* requires_persistence,
* uses_atclustertime,
+ * temporary_catalog_shard_incompatible,
* ]
*/
(function() {
diff --git a/jstests/sharding/transient_txn_error_labels.js b/jstests/sharding/transient_txn_error_labels.js
index 0962862e891..036f8136e61 100644
--- a/jstests/sharding/transient_txn_error_labels.js
+++ b/jstests/sharding/transient_txn_error_labels.js
@@ -2,6 +2,7 @@
* Test TransientTransactionErrors error label in transactions.
* @tags: [
* uses_transactions,
+ * temporary_catalog_shard_incompatible,
* ]
*/
diff --git a/jstests/sharding/transient_txn_error_labels_with_write_concern.js b/jstests/sharding/transient_txn_error_labels_with_write_concern.js
index 5838db045fa..db59d5d6c81 100644
--- a/jstests/sharding/transient_txn_error_labels_with_write_concern.js
+++ b/jstests/sharding/transient_txn_error_labels_with_write_concern.js
@@ -2,6 +2,7 @@
* Test TransientTransactionError error label for commands in transactions with write concern.
* @tags: [
* uses_transactions,
+ * temporary_catalog_shard_incompatible,
* ]
*/
(function() {
diff --git a/jstests/sharding/txn_addingParticipantParameter.js b/jstests/sharding/txn_addingParticipantParameter.js
index 1cfff1d166e..e49b39ec67a 100644
--- a/jstests/sharding/txn_addingParticipantParameter.js
+++ b/jstests/sharding/txn_addingParticipantParameter.js
@@ -14,6 +14,11 @@ const dbName = "test";
const collName = "foo";
const ns = dbName + "." + collName;
+const shard0Name = TestData.catalogShard ? "config" : "txn_addingParticipantParameter-rs0";
+const shard1Name = "txn_addingParticipantParameter-rs1";
+const shard2Name = "txn_addingParticipantParameter-rs2";
+const shard3Name = "txn_addingParticipantParameter-rs3";
+
const checkParticipantListMatches = function(
coordinatorConn, lsid, txnNumber, expectedParticipantList) {
let coordDoc = coordinatorConn.getDB("config")
@@ -139,8 +144,7 @@ const testAddingParticipant = function(turnFailPointOn, expectedParticipantList,
jsTestLog("===Additional Participants Fail Point is OFF===");
-let expectedParticipantListNormal =
- ["txn_addingParticipantParameter-rs0", "txn_addingParticipantParameter-rs1"];
+let expectedParticipantListNormal = [shard0Name, shard1Name];
testAddingParticipant(false, expectedParticipantListNormal);
jsTestLog("===Additional Participants Fail Point is ON===");
@@ -151,11 +155,7 @@ const fpDataOne = {
"ns": ns,
"shardId": ["txn_addingParticipantParameter-rs2"]
};
-let expectedParticipantListOne = [
- "txn_addingParticipantParameter-rs0",
- "txn_addingParticipantParameter-rs1",
- "txn_addingParticipantParameter-rs2"
-];
+let expectedParticipantListOne = [shard0Name, shard1Name, shard2Name];
testAddingParticipant(true, expectedParticipantListOne, fpDataOne);
print("Adding multiple additional participants:");
@@ -164,11 +164,6 @@ const fpDataMultiple = {
"ns": ns,
"shardId": ["txn_addingParticipantParameter-rs2", "txn_addingParticipantParameter-rs3"]
};
-let expectedParticipantListMultiple = [
- "txn_addingParticipantParameter-rs0",
- "txn_addingParticipantParameter-rs1",
- "txn_addingParticipantParameter-rs2",
- "txn_addingParticipantParameter-rs3"
-];
+let expectedParticipantListMultiple = [shard0Name, shard1Name, shard2Name, shard3Name];
testAddingParticipant(true, expectedParticipantListMultiple, fpDataMultiple);
-})();
\ No newline at end of file
+})();
diff --git a/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js b/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js
index ebd3f56ffd1..ef85bd39930 100644
--- a/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js
+++ b/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js
@@ -6,7 +6,7 @@
* no failures, a participant having failed over, a participant being unable to satisfy the client's
* writeConcern, and an invalid client writeConcern.
*
- * @tags: [uses_transactions, uses_multi_shard_transaction]
+ * @tags: [uses_transactions, uses_multi_shard_transaction, temporary_catalog_shard_incompatible]
*/
(function() {
diff --git a/jstests/sharding/txn_single_write_shard_failover.js b/jstests/sharding/txn_single_write_shard_failover.js
index fd45bcb70f7..a582622ed33 100644
--- a/jstests/sharding/txn_single_write_shard_failover.js
+++ b/jstests/sharding/txn_single_write_shard_failover.js
@@ -13,6 +13,7 @@
* @tags: [
* uses_multi_shard_transaction,
* uses_transactions,
+ * temporary_catalog_shard_incompatible,
* ]
*/
diff --git a/jstests/sharding/txn_two_phase_commit_server_status.js b/jstests/sharding/txn_two_phase_commit_server_status.js
index fe1d64a57fa..2525e16ee86 100644
--- a/jstests/sharding/txn_two_phase_commit_server_status.js
+++ b/jstests/sharding/txn_two_phase_commit_server_status.js
@@ -1,4 +1,5 @@
// Basic test that the two-phase commit coordinator metrics fields appear in serverStatus output.
+// @tags: [temporary_catalog_shard_incompatible]
(function() {
"use strict";
diff --git a/jstests/sharding/unique_index_on_shardservers.js b/jstests/sharding/unique_index_on_shardservers.js
index 2af5b7a418d..7443f7aa800 100644
--- a/jstests/sharding/unique_index_on_shardservers.js
+++ b/jstests/sharding/unique_index_on_shardservers.js
@@ -1,5 +1,6 @@
// SERVER-34954 This test ensures a node started with --shardsvr and added to a replica set has
// the correct version of unique indexes upon re-initiation.
+// @tags: [temporary_catalog_shard_incompatible]
(function() {
"use strict";
load("jstests/libs/check_unique_indexes.js");
diff --git a/jstests/sharding/updateOne_without_shard_key/cluster_query_without_shard_key_basic.js b/jstests/sharding/updateOne_without_shard_key/cluster_query_without_shard_key_basic.js
index 39c663ba8a3..325e37815df 100644
--- a/jstests/sharding/updateOne_without_shard_key/cluster_query_without_shard_key_basic.js
+++ b/jstests/sharding/updateOne_without_shard_key/cluster_query_without_shard_key_basic.js
@@ -4,7 +4,11 @@
* protocol. The protocol assumes that the collection is sharded and no shard key is given to in the
* initial request.
*
- * @tags: [requires_fcv_63, featureFlagUpdateOneWithoutShardKey]
+ * @tags: [
+ * requires_fcv_63,
+ * featureFlagUpdateOneWithoutShardKey,
+ * temporary_catalog_shard_incompatible,
+ * ]
*/
(function() {
"use strict";
diff --git a/jstests/sharding/use_rsm_data_for_cs.js b/jstests/sharding/use_rsm_data_for_cs.js
index 7ae96385243..79df11a3aad 100644
--- a/jstests/sharding/use_rsm_data_for_cs.js
+++ b/jstests/sharding/use_rsm_data_for_cs.js
@@ -1,3 +1,4 @@
+// @tags: [temporary_catalog_shard_incompatible]
(function() {
'use strict';
diff --git a/jstests/sharding/warm_up_connection_pool.js b/jstests/sharding/warm_up_connection_pool.js
index a1b8e0b862a..4dceb38ef1d 100644
--- a/jstests/sharding/warm_up_connection_pool.js
+++ b/jstests/sharding/warm_up_connection_pool.js
@@ -74,7 +74,11 @@ var warmUpDisabledConnPoolStatsCheck = function(connPoolStats, currentShard) {
return undefined === connPoolStats["hosts"][currentShard];
};
-runTest(warmUpDisabledParams, warmUpDisabledConnPoolStatsCheck);
+if (!TestData.catalogShard) {
+ // In catalog shard mode we have RSM entries for the catalog shard without warming up its conn
+ // pool.
+ runTest(warmUpDisabledParams, warmUpDisabledConnPoolStatsCheck);
+}
jsTest.log("Tests establishes more connections when parameter is set.");
// Increase the amount of time to establish more connections to avoid timing out
@@ -117,5 +121,9 @@ var shutdownNodeExtraOptions = function(test) {
return {connString: nodeList[pId], nodeId: pId};
};
-runTest(shutdownNodeParams, shutdownNodeConnPoolStatsCheck, shutdownNodeExtraOptions);
+if (!TestData.catalogShard) {
+ // In catalog shard mode this shuts down the config server, which prevents mongos from starting
+ // up.
+ runTest(shutdownNodeParams, shutdownNodeConnPoolStatsCheck, shutdownNodeExtraOptions);
+}
})();
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index 52dfd16648e..995a26511cf 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -1016,12 +1016,14 @@ var ShardingTest = function(params) {
* @param {int} shard server number (0, 1, 2, ...) to be restarted
*/
this.restartShardRS = function(n, options, signal, wait) {
+ const prevShardName = this._connections[n].shardName;
for (let i = 0; i < this["rs" + n].nodeList().length; i++) {
this["rs" + n].restart(i);
}
this["rs" + n].awaitSecondaryNodes();
this._connections[n] = new Mongo(this["rs" + n].getURL());
+ this._connections[n].shardName = prevShardName;
this["shard" + n] = this._connections[n];
};
@@ -1181,11 +1183,11 @@ var ShardingTest = function(params) {
var numShards = otherParams.hasOwnProperty('shards') ? otherParams.shards : 2;
var mongosVerboseLevel = otherParams.hasOwnProperty('verbose') ? otherParams.verbose : 1;
var numMongos = otherParams.hasOwnProperty('mongos') ? otherParams.mongos : 1;
+ const usedDefaultNumConfigs = !otherParams.hasOwnProperty('config');
var numConfigs = otherParams.hasOwnProperty('config') ? otherParams.config : 3;
let isCatalogShardMode =
otherParams.hasOwnProperty('catalogShard') ? otherParams.catalogShard : false;
-
isCatalogShardMode = isCatalogShardMode || jsTestOptions().catalogShard;
if ("shardAsReplicaSet" in otherParams) {
@@ -1242,12 +1244,14 @@ var ShardingTest = function(params) {
}
if (Array.isArray(numConfigs)) {
+ assert(!usedDefaultNumConfigs);
for (var i = 0; i < numConfigs.length; i++) {
otherParams["c" + i] = numConfigs[i];
}
numConfigs = numConfigs.length;
} else if (isObject(numConfigs)) {
+ assert(!usedDefaultNumConfigs);
var tempCount = 0;
for (var i in numConfigs) {
otherParams[i] = numConfigs[i];
@@ -1408,7 +1412,9 @@ var ShardingTest = function(params) {
numReplicas = 1;
}
- if (isCatalogShardMode && i == 0) {
+ // Unless a number of config servers was explicitly given, the catalog shard keeps the shard's
+ // own node count to increase the odds of compatibility with test assertions.
+ if (isCatalogShardMode && i == 0 && !usedDefaultNumConfigs) {
numReplicas = numConfigs;
}
@@ -1810,6 +1816,7 @@ var ShardingTest = function(params) {
if (!otherParams.manualAddShard) {
var testName = this._testName;
var admin = this.admin;
+ var keyFile = this.keyFile;
this._connections.forEach(function(z, idx) {
var n = z.name || z.host || z;
@@ -1820,8 +1827,21 @@ var ShardingTest = function(params) {
print("ShardingTest " + testName + " transitioning to catalog shard");
- var result =
- assert.commandWorked(admin.runCommand({transitionToCatalogShard: 1}));
+ function transitionToCatalogShard() {
+ return assert.commandWorked(
+ admin.runCommand({transitionToCatalogShard: 1}));
+ }
+
+ // TODO SERVER-74448: Investigate if transitionToCatalogShard should be added to
+ // the localhost bypass exception like addShard.
+ if (keyFile) {
+ authutil.asCluster(admin.getMongo(), keyFile, transitionToCatalogShard);
+ } else if (mongosOptions[0] && mongosOptions[0].keyFile) {
+ authutil.asCluster(
+ admin.getMongo(), mongosOptions[0].keyFile, transitionToCatalogShard);
+ } else {
+ transitionToCatalogShard();
+ }
z.shardName = name;
} else {