summaryrefslogtreecommitdiff
path: root/jstests
diff options
context:
space:
mode:
authormathisbessamdb <mathis.bessa@mongodb.com>2022-09-15 19:38:33 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-09-15 21:54:28 +0000
commit55f4d89a5b15ed14d930c7b06cb39e299d886406 (patch)
tree57771e1ea25917e49b9d15bf18e60a0c8b822a35 /jstests
parentba5ad7837a0c11ddd620b5aad9249cd26f48bf89 (diff)
downloadmongo-55f4d89a5b15ed14d930c7b06cb39e299d886406.tar.gz
SERVER-66147 add support of vanilla passthrough test for shard merge protocol
Diffstat (limited to 'jstests')
-rw-r--r--jstests/core/batched_multi_deletes.js4
-rw-r--r--jstests/hooks/run_check_tenant_migration_dbhash.js16
-rw-r--r--jstests/libs/override_methods/inject_tenant_prefix.js39
3 files changed, 49 insertions, 10 deletions
diff --git a/jstests/core/batched_multi_deletes.js b/jstests/core/batched_multi_deletes.js
index 308d518cedd..7b79916b631 100644
--- a/jstests/core/batched_multi_deletes.js
+++ b/jstests/core/batched_multi_deletes.js
@@ -26,7 +26,7 @@ function populateAndMassDelete(queryPredicate) {
coll.drop();
assert.commandWorked(coll.insertMany([...Array(collCount).keys()].map(x => ({_id: x, a: x}))));
- assert.eq(collCount, coll.count());
+ assert.eq(collCount, coll.countDocuments({}));
// Verify the delete will involve the BATCHED_DELETE stage.
const expl = testDB.runCommand({
@@ -46,7 +46,7 @@ function populateAndMassDelete(queryPredicate) {
}
// Execute and verify the deletion.
- assert.eq(collCount, coll.count());
+ assert.eq(collCount, coll.countDocuments({}));
assert.commandWorked(coll.deleteMany(queryPredicate));
assert.eq(null, coll.findOne());
}
diff --git a/jstests/hooks/run_check_tenant_migration_dbhash.js b/jstests/hooks/run_check_tenant_migration_dbhash.js
index b35058b563c..b34e975c015 100644
--- a/jstests/hooks/run_check_tenant_migration_dbhash.js
+++ b/jstests/hooks/run_check_tenant_migration_dbhash.js
@@ -8,12 +8,15 @@ load("jstests/replsets/libs/tenant_migration_util.js");
const excludedDBs = ["testTenantMigration"];
const testDBName = "testTenantMigration";
const dbhashCollName = "dbhashCheck";
+const localDBName = "local";
const tenantId = TestData.tenantId;
const migrationId = UUID(TestData.migrationIdString);
let donorRst;
let recipientRst;
let donorDB;
+// For shard merge we need to use the local DB that is not blocked by tenant access blockers.
+let primaryLocalDB;
while (true) {
try {
donorRst = new ReplSetTest(TestData.donorConnectionString);
@@ -24,6 +27,9 @@ while (true) {
// failovers, but we run in a session to keep the code simple.
donorDB =
new Mongo(donorRst.getURL()).startSession({retryWrites: true}).getDatabase(testDBName);
+ primaryLocalDB =
+ new Mongo(donorRst.getURL()).startSession({retryWrites: true}).getDatabase(localDBName);
+
break;
} catch (e) {
if (!TenantMigrationUtil.checkIfRetryableErrorForTenantDbHashCheck(e)) {
@@ -42,6 +48,12 @@ if (TestData.tenantIds) {
}
// Mark that we have completed the dbhash check.
-assert.commandWorked(donorDB.runCommand(
- {insert: dbhashCollName, documents: [{_id: migrationId}], writeConcern: {w: "majority"}}));
+// useLocalDBForDBCheck is used for Shard Merge since we use the local DB for validation.
+if (TestData.useLocalDBForDBCheck) {
+ assert.commandWorked(primaryLocalDB.runCommand(
+ {insert: dbhashCollName, documents: [{_id: migrationId}], writeConcern: {w: 1}}));
+} else {
+ assert.commandWorked(donorDB.runCommand(
+ {insert: dbhashCollName, documents: [{_id: migrationId}], writeConcern: {w: "majority"}}));
+}
})();
diff --git a/jstests/libs/override_methods/inject_tenant_prefix.js b/jstests/libs/override_methods/inject_tenant_prefix.js
index 2d46f138291..40ad4484c49 100644
--- a/jstests/libs/override_methods/inject_tenant_prefix.js
+++ b/jstests/libs/override_methods/inject_tenant_prefix.js
@@ -18,6 +18,10 @@ const originalCloseMethod = Mongo.prototype.close;
// multiple internal routing connections for the lifetime of the test execution.
const initialConn = db.getMongo();
+const testTenantMigrationDB = "testTenantMigration";
+// For shard merge we need to use the local DB that is not blocked by tenant access blockers.
+const localDB = "local";
+
/**
* Asserts that the provided connection is an internal routing connection, not the top-level proxy
* connection. The proxy connection also has an internal routing connection, so it is excluded from
@@ -169,6 +173,20 @@ function removeTenantIdFromString(string) {
}
/**
+ * @returns Whether we are currently running a shard merge passthrough.
+ */
+function isShardMergePassthrough(conn) {
+ const flagDoc = assert.commandWorked(
+ originalRunCommand.apply(conn, ["admin", {getParameter: 1, featureFlagShardMerge: 1}, 0]));
+ const fcvDoc = assert.commandWorked(assert.commandWorked(originalRunCommand.apply(
+ conn, ["admin", {getParameter: 1, featureCompatibilityVersion: 1}, 0])));
+ return flagDoc.hasOwnProperty("featureFlagShardMerge") && flagDoc.featureFlagShardMerge.value &&
+ MongoRunner.compareBinVersions(fcvDoc.featureCompatibilityVersion.version,
+ flagDoc.featureFlagShardMerge.version) >= 0 &&
+ TestData.useLocalDBForDBCheck;
+}
+
+/**
* Prepends a tenant prefix to all database name and namespace fields in the provided object, where
* applicable.
*/
@@ -436,8 +454,15 @@ function convertServerConnectionStringToURI(input) {
*/
function getOperationStateDocument(conn) {
const collection = isShardSplitPassthrough() ? "shardSplitDonors" : "tenantMigrationDonors";
- const filter =
- isShardSplitPassthrough() ? {tenantIds: TestData.tenantIds} : {tenantId: TestData.tenantId};
+ let filter = {tenantId: TestData.tenantId};
+ if (isShardSplitPassthrough()) {
+ filter = {tenantIds: TestData.tenantIds};
+ } else if (isShardMergePassthrough(conn)) {
+        // TODO (SERVER-68643) No longer required to check for shard merge since shard merge will be
+ // the only protocol left.
+ filter = {};
+ }
+
const findRes = assert.commandWorked(
originalRunCommand.apply(conn, ["config", {find: collection, filter}, 0]));
const docs = findRes.cursor.firstBatch;
@@ -455,15 +480,16 @@ function getOperationStateDocument(conn) {
/**
* Marks the outgoing tenant migration or shard split operation as having caused the shell to
- * reroute commands by inserting a document for it into the testTenantMigration.rerouted collection.
+ * reroute commands by inserting a document for it into the testTenantMigration.rerouted collection
+ * or local.rerouted collection for the shard merge protocol.
*/
function recordRerouteDueToTenantMigration(conn, migrationStateDoc) {
assertRoutingConnection(conn);
-
+ const dbToCheck = TestData.useLocalDBForDBCheck ? localDB : testTenantMigrationDB;
while (true) {
try {
const res = originalRunCommand.apply(conn, [
- "testTenantMigration",
+ dbToCheck,
{
insert: "rerouted",
documents: [{_id: migrationStateDoc._id}],
@@ -639,9 +665,10 @@ function runCommandRetryOnTenantMigrationErrors(
// After getting a TenantMigrationCommitted error, wait for the python test fixture
// to do a dbhash check on the donor and recipient primaries before we retry the
// command on the recipient.
+ const dbToCheck = TestData.useLocalDBForDBCheck ? localDB : testTenantMigrationDB;
assert.soon(() => {
let findRes = assert.commandWorked(originalRunCommand.apply(donorConnection, [
- "testTenantMigration",
+ dbToCheck,
{
find: "dbhashCheck",
filter: {_id: migrationStateDoc._id},