author     mathisbessamdb <mathis.bessa@mongodb.com>    2021-10-29 16:54:00 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2021-10-29 17:17:21 +0000
commit     fde6bbbd32c8385a954ecb5e27b40aba12541ba6 (patch)
tree       39291f4ee12d5275fdec9411392dd5f29483af69
parent     8e4194f6b6704b088eebe5f380026c9ade9ddbcb (diff)
download   mongo-fde6bbbd32c8385a954ecb5e27b40aba12541ba6.tar.gz
SERVER-60870 adding automation test case for read/write access after
-rw-r--r--  jstests/serverless/read_write_post_migration.js                                       |  85
-rw-r--r--  jstests/sharding/tenant_migration_disallowed_on_config_server.js (renamed from jstests/sharding/tenant_migration_disallowed_in_sharded_cluster.js)  |  50
-rw-r--r--  src/mongo/db/commands/tenant_migration_donor_cmds.cpp                                 |  15
-rw-r--r--  src/mongo/db/commands/tenant_migration_recipient_cmds.cpp                             |  10
-rw-r--r--  src/mongo/db/mongod_main.cpp                                                          |   7
5 files changed, 108 insertions, 59 deletions
diff --git a/jstests/serverless/read_write_post_migration.js b/jstests/serverless/read_write_post_migration.js
new file mode 100644
index 00000000000..e436147c6c6
--- /dev/null
+++ b/jstests/serverless/read_write_post_migration.js
@@ -0,0 +1,85 @@
+/**
+ * Tests read and write access after a migration has aborted, and also after a migration has
+ * committed successfully.
+ * @tags: [requires_fcv_52]
+ */
+
+(function() {
+"use strict";
+
+load("jstests/libs/fail_point_util.js");
+
+function donorStartMigrationCmd(tenantID, realConnUrl) {
+ return {
+ donorStartMigration: 1,
+ tenantId: tenantID.str,
+ migrationId: UUID(),
+ recipientConnectionString: realConnUrl,
+ readPreference: {mode: "primary"}
+ };
+}
+
+/*
+ * Tests running a migration, aborting it, and then inserting data after the migration
+ * completes. The insert should succeed since the migration did not happen and ownership
+ * of the data stays the same.
+ */
+function insertAndCountAfterMigrationAborted(st) {
+ const tenantID = ObjectId();
+ var kDbName = tenantID.str + "_test1";
+
+ assert.commandWorked(st.s0.adminCommand({enableSharding: kDbName}));
+ st.ensurePrimaryShard(kDbName, st.shard0.shardName);
+
+ configureFailPoint(st.rs0.getPrimary().getDB('admin'),
+ "abortTenantMigrationBeforeLeavingBlockingState");
+
+ let cmdObj = donorStartMigrationCmd(tenantID, st.rs1.getURL());
+
+ assert.soon(function() {
+ let res = assert.commandWorked(st.rs0.getPrimary().getDB('admin').runCommand(cmdObj));
+ return res['state'] == "aborted";
+ }, "migration not in aborted state", 1 * 10000, 1 * 1000);
+
+ let db = st.s0.getDB(kDbName);
+ assert.commandWorked(db.bar.insert([{n: 1}, {n: 2}, {n: 3}]));
+ assert.eq(1, db.bar.find({n: 1}).count());
+}
+
+/*
+ * Tests running a migration to completion and then reading and inserting data afterwards.
+ * Both should fail with a TenantMigrationCommitted error because the migration finished
+ * and ownership of the data changed.
+ */
+function insertAndCountAfterMigrationCommitted(st) {
+ const tenantID = ObjectId();
+ var kDbName = tenantID.str + "_test2";
+
+ assert.commandWorked(st.s0.adminCommand({enableSharding: kDbName}));
+ st.ensurePrimaryShard(kDbName, st.shard0.shardName);
+
+ let cmdObj = donorStartMigrationCmd(tenantID, st.rs1.getURL());
+
+ assert.soon(function() {
+ let res = assert.commandWorked(st.rs0.getPrimary().getDB('admin').runCommand(cmdObj));
+ return res['state'] == "committed";
+ }, "migration not in committed state", 1 * 10000, 1 * 1000);
+
+ let db = st.s0.getDB(kDbName);
+ assert.commandFailedWithCode(db.runCommand({count: "bar"}),
+ ErrorCodes.TenantMigrationCommitted);
+ assert.commandFailedWithCode(db.bar.insert([{n: 1}, {n: 2}, {n: 3}]),
+ ErrorCodes.TenantMigrationCommitted);
+}
+
+let st = new ShardingTest({
+ shards: 2,
+ mongosOptions: {setParameter: {tenantMigrationDisableX509Auth: true}},
+ shardOptions: {setParameter: {tenantMigrationDisableX509Auth: true}}
+});
+
+insertAndCountAfterMigrationCommitted(st);
+insertAndCountAfterMigrationAborted(st);
+
+st.stop();
+})();
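
Both tests in the new file poll donorStartMigration with assert.soon until the migration reports the expected state. A minimal sketch of how that polling could be factored into a shared helper is shown below; the helper name waitForMigrationState is hypothetical and not part of this commit.

// Sketch only: a hypothetical helper that reissues donorStartMigration until the
// returned state matches the expected one. It assumes a cmdObj of the shape built
// by donorStartMigrationCmd() above; reissuing the command for the same migrationId
// mirrors what the tests above already do.
function waitForMigrationState(donorPrimary, cmdObj, expectedState) {
    assert.soon(function() {
        let res = assert.commandWorked(donorPrimary.getDB('admin').runCommand(cmdObj));
        return res['state'] == expectedState;
    }, "migration never reached state " + expectedState, 10000, 1000);
}

// Usage, mirroring the two tests above:
// waitForMigrationState(st.rs0.getPrimary(), cmdObj, "aborted");
// waitForMigrationState(st.rs0.getPrimary(), cmdObj, "committed");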
diff --git a/jstests/sharding/tenant_migration_disallowed_in_sharded_cluster.js b/jstests/sharding/tenant_migration_disallowed_on_config_server.js
index 86ce604709c..ca0a3e95524 100644
--- a/jstests/sharding/tenant_migration_disallowed_in_sharded_cluster.js
+++ b/jstests/sharding/tenant_migration_disallowed_on_config_server.js
@@ -1,5 +1,5 @@
/**
- * Tests that tenant migration commands cannot be run on sharded clusters.
+ * Tests that tenant migration commands cannot be run on config servers in a sharded cluster.
*
* @tags: [
* incompatible_with_eft,
@@ -24,8 +24,8 @@ recipientRst.initiate();
const tenantMigrationTest =
new TenantMigrationTest({name: jsTestName(), donorRst: donorRstShard, recipientRst});
-// Run tenant migration commands on shards.
-let donorPrimary = donorRstShard.getPrimary();
+// Run tenant migration commands on config servers.
+let donorPrimary = donorRstConfig.getPrimary();
let cmdObj = TenantMigrationUtil.donorStartMigrationWithProtocol({
donorStartMigration: 1,
@@ -68,50 +68,6 @@ cmdObj = {
};
assert.commandFailedWithCode(donorPrimary.adminCommand(cmdObj), ErrorCodes.IllegalOperation);
-// Run tenant migration commands on config servers.
-donorPrimary = donorRstConfig.getPrimary();
-
-cmdObj = TenantMigrationUtil.donorStartMigrationWithProtocol({
- donorStartMigration: 1,
- tenantId: "kTenantTest",
- migrationId: UUID(),
- recipientConnectionString: tenantMigrationTest.getRecipientConnString(),
- readPreference: {mode: "primary"}
-},
- donorPrimary.getDB("admin"));
-assert.commandFailedWithCode(donorPrimary.adminCommand(cmdObj), ErrorCodes.IllegalOperation);
-
-cmdObj = {
- donorForgetMigration: 1,
- migrationId: UUID()
-};
-assert.commandFailedWithCode(donorPrimary.adminCommand(cmdObj), ErrorCodes.IllegalOperation);
-
-cmdObj = {
- donorAbortMigration: 1,
- migrationId: UUID()
-};
-assert.commandFailedWithCode(donorPrimary.adminCommand(cmdObj), ErrorCodes.IllegalOperation);
-
-cmdObj = {
- recipientSyncData: 1,
- migrationId: UUID(),
- donorConnectionString: tenantMigrationTest.getRecipientRst().getURL(),
- tenantId: "kTenantTest",
- readPreference: {mode: "primary"},
- startMigrationDonorTimestamp: Timestamp(1, 1)
-};
-assert.commandFailedWithCode(donorPrimary.adminCommand(cmdObj), ErrorCodes.IllegalOperation);
-
-cmdObj = {
- recipientForgetMigration: 1,
- migrationId: UUID(),
- donorConnectionString: tenantMigrationTest.getRecipientRst().getURL(),
- tenantId: "kTenantTest",
- readPreference: {mode: "primary"},
-};
-assert.commandFailedWithCode(donorPrimary.adminCommand(cmdObj), ErrorCodes.IllegalOperation);
-
tenantMigrationTest.stop();
recipientRst.stopSet();
st.stop();
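
The renamed test issues each tenant migration command against the config server primary and expects IllegalOperation every time. A condensed sketch of that repeated pattern, using a hypothetical helper name, could look like the following; the command objects are the ones visible in the hunk above.

// Sketch only: assert that a list of tenant migration commands is rejected with
// IllegalOperation when run against a config server primary. The helper name is
// hypothetical and not part of this commit.
function assertTenantMigrationCmdsDisallowed(configPrimary, cmdObjs) {
    cmdObjs.forEach(function(cmdObj) {
        assert.commandFailedWithCode(configPrimary.adminCommand(cmdObj),
                                     ErrorCodes.IllegalOperation);
    });
}

// Usage with, for example, the forget and abort commands from the test:
// assertTenantMigrationCmdsDisallowed(donorRstConfig.getPrimary(), [
//     {donorForgetMigration: 1, migrationId: UUID()},
//     {donorAbortMigration: 1, migrationId: UUID()},
// ]);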
diff --git a/src/mongo/db/commands/tenant_migration_donor_cmds.cpp b/src/mongo/db/commands/tenant_migration_donor_cmds.cpp
index 90915435ad1..63c62bcd47d 100644
--- a/src/mongo/db/commands/tenant_migration_donor_cmds.cpp
+++ b/src/mongo/db/commands/tenant_migration_donor_cmds.cpp
@@ -57,8 +57,9 @@ public:
Response typedRun(OperationContext* opCtx) {
uassert(ErrorCodes::IllegalOperation,
- "tenant migrations are not available in sharded clusters",
- serverGlobalParams.clusterRole == ClusterRole::None);
+ "tenant migrations are not available on config servers",
+ serverGlobalParams.clusterRole == ClusterRole::None ||
+ serverGlobalParams.clusterRole == ClusterRole::ShardServer);
// (Generic FCV reference): This FCV reference should exist across LTS binary versions.
uassert(
@@ -160,8 +161,9 @@ public:
void typedRun(OperationContext* opCtx) {
uassert(ErrorCodes::IllegalOperation,
- "tenant migrations are not available in sharded clusters",
- serverGlobalParams.clusterRole == ClusterRole::None);
+ "tenant migrations are not available on config servers",
+ serverGlobalParams.clusterRole == ClusterRole::None ||
+ serverGlobalParams.clusterRole == ClusterRole::ShardServer);
const auto& cmd = request();
@@ -228,8 +230,9 @@ public:
void typedRun(OperationContext* opCtx) {
uassert(ErrorCodes::IllegalOperation,
- "tenant migrations are not available in sharded clusters",
- serverGlobalParams.clusterRole == ClusterRole::None);
+ "tenant migrations are not available on config servers",
+ serverGlobalParams.clusterRole == ClusterRole::None ||
+ serverGlobalParams.clusterRole == ClusterRole::ShardServer);
const RequestType& cmd = request();
diff --git a/src/mongo/db/commands/tenant_migration_recipient_cmds.cpp b/src/mongo/db/commands/tenant_migration_recipient_cmds.cpp
index b18f5b4c013..b854b45924e 100644
--- a/src/mongo/db/commands/tenant_migration_recipient_cmds.cpp
+++ b/src/mongo/db/commands/tenant_migration_recipient_cmds.cpp
@@ -59,8 +59,9 @@ public:
Response typedRun(OperationContext* opCtx) {
uassert(ErrorCodes::IllegalOperation,
- "tenant migrations are not available in sharded clusters",
- serverGlobalParams.clusterRole == ClusterRole::None);
+ "tenant migrations are not available on config servers",
+ serverGlobalParams.clusterRole == ClusterRole::None ||
+ serverGlobalParams.clusterRole == ClusterRole::ShardServer);
// (Generic FCV reference): This FCV reference should exist across LTS binary versions.
uassert(
@@ -175,8 +176,9 @@ public:
void typedRun(OperationContext* opCtx) {
uassert(ErrorCodes::IllegalOperation,
- "tenant migrations are not available in sharded clusters",
- serverGlobalParams.clusterRole == ClusterRole::None);
+ "tenant migrations are not available on config servers",
+ serverGlobalParams.clusterRole == ClusterRole::None ||
+ serverGlobalParams.clusterRole == ClusterRole::ShardServer);
const auto& cmd = request();
diff --git a/src/mongo/db/mongod_main.cpp b/src/mongo/db/mongod_main.cpp
index 6ccd371cf49..b785bd235a0 100644
--- a/src/mongo/db/mongod_main.cpp
+++ b/src/mongo/db/mongod_main.cpp
@@ -323,8 +323,9 @@ void registerPrimaryOnlyServices(ServiceContext* serviceContext) {
services.push_back(std::make_unique<ShardingDDLCoordinatorService>(serviceContext));
services.push_back(std::make_unique<ReshardingDonorService>(serviceContext));
services.push_back(std::make_unique<ReshardingRecipientService>(serviceContext));
+ services.push_back(std::make_unique<TenantMigrationDonorService>(serviceContext));
+ services.push_back(std::make_unique<repl::TenantMigrationRecipientService>(serviceContext));
} else {
- // Tenant migrations are not supported in sharded clusters.
services.push_back(std::make_unique<TenantMigrationDonorService>(serviceContext));
services.push_back(std::make_unique<repl::TenantMigrationRecipientService>(serviceContext));
}
@@ -1071,13 +1072,15 @@ void setUpObservers(ServiceContext* serviceContext) {
opObserverRegistry->addObserver(std::make_unique<OpObserverShardingImpl>());
opObserverRegistry->addObserver(std::make_unique<ShardServerOpObserver>());
opObserverRegistry->addObserver(std::make_unique<ReshardingOpObserver>());
+ opObserverRegistry->addObserver(std::make_unique<repl::TenantMigrationDonorOpObserver>());
+ opObserverRegistry->addObserver(
+ std::make_unique<repl::TenantMigrationRecipientOpObserver>());
} else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
opObserverRegistry->addObserver(std::make_unique<OpObserverImpl>());
opObserverRegistry->addObserver(std::make_unique<ConfigServerOpObserver>());
opObserverRegistry->addObserver(std::make_unique<ReshardingOpObserver>());
} else {
opObserverRegistry->addObserver(std::make_unique<OpObserverImpl>());
- // Tenant migrations are not supported in sharded clusters.
opObserverRegistry->addObserver(std::make_unique<repl::TenantMigrationDonorOpObserver>());
opObserverRegistry->addObserver(
std::make_unique<repl::TenantMigrationRecipientOpObserver>());
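
The mongod_main.cpp hunks above register the TenantMigrationDonorService, the TenantMigrationRecipientService, and their op observers on shard servers as well as on non-sharded nodes, so the tenant migration machinery is now available on a shard primary. A hypothetical smoke check from the shell, reusing st, cmdObj, and tenantID from the new jstest above and assuming the donor service persists its state documents in config.tenantMigrationDonors as it does on plain replica sets, might look like this:

// Sketch only: after a migration is started on the donor shard's primary, a donor
// state document should appear. The collection name config.tenantMigrationDonors is
// an assumption here, not something this patch verifies.
let donorPrimary = st.rs0.getPrimary();
assert.commandWorked(donorPrimary.getDB('admin').runCommand(cmdObj));
assert.soon(function() {
    return donorPrimary.getDB('config').tenantMigrationDonors.count({tenantId: tenantID.str}) > 0;
}, "no donor state document found on the shard primary");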