author    Cheahuychou Mao <cheahuychou.mao@mongodb.com>    2020-08-25 21:18:44 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com> 2020-08-28 23:40:27 +0000
commit    7b0f4781f878fc7c231c2a1ed7fc20fd9ca1a1e6 (patch)
tree      bea15f7b4213cb060907b526794acafd0d71ae35
parent    caf0451c59b3a9304743ed82c05d13052600407d (diff)
SERVER-50104 Make the test hook run a background migration on the data used by tests
-rw-r--r--  buildscripts/resmokeconfig/suites/tenant_migration_jscore_passthrough.yml |  46
-rw-r--r--  jstests/libs/override_methods/inject_tenant_prefix.js                     | 213
-rw-r--r--  src/mongo/db/repl/apply_ops.cpp                                            |   2
-rw-r--r--  src/mongo/db/repl/oplog.cpp                                                |  24
-rw-r--r--  src/mongo/shell/bulk_api.js                                                |   2
5 files changed, 281 insertions, 6 deletions
diff --git a/buildscripts/resmokeconfig/suites/tenant_migration_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/tenant_migration_jscore_passthrough.yml
index b9eafe229a1..6a9145d61d0 100644
--- a/buildscripts/resmokeconfig/suites/tenant_migration_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/tenant_migration_jscore_passthrough.yml
@@ -4,12 +4,55 @@ selector:
roots:
- jstests/core/**/*.js
exclude_files:
+ - jstests/core/txns/**/*.js
+ # These tests depend on hardcoded database name equality.
+ - jstests/core/json_schema/misc_validation.js
+ - jstests/core/list_databases.js
+ - jstests/core/profile1.js
+ - jstests/core/profile3.js
+ - jstests/core/views/views_stats.js
+ # These tests expect a hardcoded count of write operations, and this suite retries writes on
+ # migration conflict errors.
+ - jstests/core/operation_latency_histogram.js
+ - jstests/core/top.js
+ # The override cannot deep copy very large or small dates.
+ - jstests/core/index_large_and_small_dates.js
+ # These tests expect the profiler to observe batched write operations but batched writes are
+ # disabled in this suite.
+ - jstests/core/profile_insert.js
+ - jstests/core/profile_delete.js
+ - jstests/core/profile_findandmodify.js
+ - jstests/core/profile_update.js
# These tests are not expected to pass with replica-sets.
- jstests/core/opcounters_write_cmd.js
- jstests/core/read_after_optime.js
# This test expects that the connection (i.e. 'threadName') does not change throughout each test
# case. That is not always true when there is a background tenant migration.
- jstests/core/failcommand_failpoint.js
+ # The set_param1.js test compares the responses from running the {getParameter: "*"} command
+ # multiple times, and those responses may observe the change to the failpoint enabled by the
+ # migration hook.
+ - jstests/core/set_param1.js
+ # This test does not support tojson of command objects so the override cannot deep copy the
+ # command objects correctly.
+ - jstests/core/SERVER-23626.js
+ # These tests write with {w: 0}, which does not wait for the storage transaction that writes the
+ # document and the oplog entry to commit, so the TenantMigrationConflict error will not be caught.
+ - jstests/core/batch_write_command_w0.js
+ - jstests/core/crud_api.js
+ # These tests use benchRun which does not use runCommand.
+ - jstests/core/bench_test1.js
+ - jstests/core/bench_test3.js
+ - jstests/core/benchrun_pipeline_updates.js
+ # This test uses exhaust which does not use runCommand.
+ - jstests/core/exhaust.js
+ # These tests use db._authOrThrow which does not use runCommand.
+ - jstests/core/auth1.js
+ - jstests/core/connection_status.js
+ - jstests/core/user_management_helpers.js
+ # These tests use legacy read mode which does not use runCommand.
+ - jstests/core/comment_field.js
+ - jstests/core/invalidated_legacy_cursors.js
executor:
archive:
@@ -21,9 +64,12 @@ executor:
shell_options:
eval: >-
testingReplication = true;
+ load('jstests/libs/override_methods/inject_tenant_prefix.js');
global_vars:
TestData: &TestData
dbPrefix: "tenantMigrationDbPrefix_"
+ # TODO (SERVER-50494): Implement proxy's retry logic for batch write commands.
+ disableBatchWrites: true
readMode: commands
hooks:
- class: ContinuousTenantMigration
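The suite loads the override into every shell through the eval option and parameterizes it with TestData.dbPrefix. A minimal request-side sketch (not part of the commit; the helper and values below are illustrative) of what that override does to a database name, assuming the default prefix "tenantMigrationDbPrefix_":

    // Hypothetical stand-in for the request-side rewrite performed by
    // inject_tenant_prefix.js: databases other than config/admin/local get the
    // tenant prefix prepended before the command is sent.
    const dbPrefix = "tenantMigrationDbPrefix_";
    const excludedDbs = ["config", "admin", "local"];

    function prefixDbName(dbName) {
        if (dbName.length === 0 || excludedDbs.includes(dbName)) {
            return dbName;
        }
        return dbPrefix + dbName;
    }

    print(prefixDbName("test"));   // tenantMigrationDbPrefix_test
    print(prefixDbName("admin"));  // admin (never prefixed)

The ContinuousTenantMigration hook then migrates the prefixed databases in the background while the jstests/core tests keep running.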
diff --git a/jstests/libs/override_methods/inject_tenant_prefix.js b/jstests/libs/override_methods/inject_tenant_prefix.js
new file mode 100644
index 00000000000..f8915734d05
--- /dev/null
+++ b/jstests/libs/override_methods/inject_tenant_prefix.js
@@ -0,0 +1,213 @@
+/**
+ * Prepends the prefix TestData.dbPrefix to the name of each accessed database ("config", "admin",
+ * and "local" excluded) so that the accessed data will be migrated by the background tenant
+ * migrations run by the ContinuousTenantMigration hook.
+ */
+(function() {
+'use strict';
+
+load("jstests/libs/override_methods/override_helpers.js"); // For 'OverrideHelpers'.
+load("jstests/libs/transactions_util.js");
+
+// Save references to the original methods in the IIFE's scope.
+// This scoping allows the original methods to be called by the overrides below.
+let originalRunCommand = Mongo.prototype.runCommand;
+let originalRunCommandWithMetadata = Mongo.prototype.runCommandWithMetadata;
+
+const blacklistedDbNames = ["config", "admin", "local"];
+
+function isBlacklistedDb(dbName) {
+ return blacklistedDbNames.includes(dbName);
+}
+
+/**
+ * If the database with the given name can be migrated, prepends TestData.dbPrefix to the name if
+ * it does not already start with the prefix.
+ */
+function prependDbPrefixToDbNameIfApplicable(dbName) {
+ if (dbName.length === 0) {
+ // There are input validation tests that use invalid database names, those should be
+ // ignored.
+ return dbName;
+ }
+ return isBlacklistedDb(dbName) ? dbName : TestData.dbPrefix + dbName;
+}
+
+/**
+ * If the database for the given namespace can be migrated, prepends TestData.dbPrefix to the
+ * namespace if it does not already start with the prefix.
+ */
+function prependDbPrefixToNsIfApplicable(ns) {
+ if (ns.length === 0 || !ns.includes(".")) {
+ // There are input validation tests that use invalid namespaces; those should be ignored.
+ return ns;
+ }
+ let splitNs = ns.split(".");
+ splitNs[0] = prependDbPrefixToDbNameIfApplicable(splitNs[0]);
+ return splitNs.join(".");
+}
+
+/**
+ * If the given database name starts with TestData.dbPrefix, removes the prefix.
+ */
+function extractOriginalDbName(dbName) {
+ return dbName.replace(TestData.dbPrefix, "");
+}
+
+/**
+ * If the database name for the given namespace starts with TestData.dbPrefix, removes the prefix.
+ */
+function extractOriginalNs(ns) {
+ let splitNs = ns.split(".");
+ splitNs[0] = extractOriginalDbName(splitNs[0]);
+ return splitNs.join(".");
+}
+
+/**
+ * Removes all occurrences of TestData.dbPrefix from the string.
+ */
+function removeDbPrefixFromString(string) {
+ return string.replace(new RegExp(TestData.dbPrefix, "g"), "");
+}
+
+/**
+ * Prepends TestData.dbPrefix to all the database name and namespace fields inside the given
+ * object.
+ */
+function prependDbPrefix(obj) {
+ for (let k of Object.keys(obj)) {
+ let v = obj[k];
+ if (typeof v === "string") {
+ if (k === "dbName" || k == "db") {
+ obj[k] = prependDbPrefixToDbNameIfApplicable(v);
+ } else if (k === "namespace" || k === "ns") {
+ obj[k] = prependDbPrefixToNsIfApplicable(v);
+ }
+ } else if (Array.isArray(v)) {
+ obj[k] = v.map((item) => {
+ return (typeof item === "object" && item !== null) ? prependDbPrefix(item) : item;
+ });
+ } else if (typeof v === "object" && v !== null && Object.keys(v).length > 0) {
+ obj[k] = prependDbPrefix(v);
+ }
+ }
+ return obj;
+}
+
+/**
+ * Removes TestData.dbPrefix from all the database name and namespace fields inside the given
+ * object.
+ */
+function removeDbPrefix(obj) {
+ for (let k of Object.keys(obj)) {
+ let v = obj[k];
+ let originalK = removeDbPrefixFromString(k);
+ if (typeof v === "string") {
+ if (k === "dbName" || k == "db" || k == "dropped") {
+ obj[originalK] = extractOriginalDbName(v);
+ } else if (k === "namespace" || k === "ns") {
+ obj[originalK] = extractOriginalNs(v);
+ } else if (k === "errmsg" || k == "name") {
+ obj[originalK] = removeDbPrefixFromString(v);
+ }
+ } else if (Array.isArray(v)) {
+ obj[originalK] = v.map((item) => {
+ return (typeof item === "object" && item !== null) ? removeDbPrefix(item) : item;
+ });
+ } else if (typeof v === "object" && v !== null && Object.keys(v).length > 0) {
+ obj[originalK] = removeDbPrefix(v);
+ }
+ }
+ return obj;
+}
+
+const kCmdsWithNsAsFirstField =
+ new Set(["renameCollection", "checkShardingIndex", "dataSize", "datasize", "splitVector"]);
+
+/**
+ * Returns a new cmdObj with TestData.dbPrefix prepended to all database name and namespace fields.
+ */
+function createCmdObjWithDbPrefix(cmdObj) {
+ const cmdName = Object.keys(cmdObj)[0];
+ let cmdObjWithDbPrefix = TransactionsUtil.deepCopyObject({}, cmdObj);
+
+ // Handle commands with special database and namespace field names.
+ if (kCmdsWithNsAsFirstField.has(cmdName)) {
+ cmdObjWithDbPrefix[cmdName] = prependDbPrefixToNsIfApplicable(cmdObjWithDbPrefix[cmdName]);
+ }
+
+ switch (cmdName) {
+ case "renameCollection":
+ cmdObjWithDbPrefix.to = prependDbPrefixToNsIfApplicable(cmdObjWithDbPrefix.to);
+ break;
+ case "internalRenameIfOptionsAndIndexesMatch":
+ cmdObjWithDbPrefix.from = prependDbPrefixToNsIfApplicable(cmdObjWithDbPrefix.from);
+ cmdObjWithDbPrefix.to = prependDbPrefixToNsIfApplicable(cmdObjWithDbPrefix.to);
+ break;
+ case "configureFailPoint":
+ if (cmdObjWithDbPrefix.data) {
+ if (cmdObjWithDbPrefix.data.namespace) {
+ cmdObjWithDbPrefix.data.namespace =
+ prependDbPrefixToNsIfApplicable(cmdObjWithDbPrefix.data.namespace);
+ } else if (cmdObjWithDbPrefix.data.ns) {
+ cmdObjWithDbPrefix.data.ns =
+ prependDbPrefixToNsIfApplicable(cmdObjWithDbPrefix.data.ns);
+ }
+ }
+ break;
+ case "applyOps":
+ for (let op of cmdObjWithDbPrefix.applyOps) {
+ if (typeof op.ns === "string" && op.ns.endsWith("system.views") && op.o._id &&
+ typeof op.o._id === "string") {
+ // For views, op.ns and op.o._id must be equal.
+ op.o._id = prependDbPrefixToNsIfApplicable(op.o._id);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ // Recursively override the database name and namespace fields. Exclude 'configureFailPoint'
+ // since data.errorExtraInfo.namespace or data.errorExtraInfo.ns can sometimes refer to a
+ // collection name instead of a namespace.
+ if (cmdName != "configureFailPoint") {
+ prependDbPrefix(cmdObjWithDbPrefix);
+ }
+
+ return cmdObjWithDbPrefix;
+}
+
+Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
+ // Create another cmdObj from this command with TestData.dbPrefix prepended to all the
+ // applicable database names and namespaces.
+ const cmdObjWithDbPrefix = createCmdObjWithDbPrefix(cmdObj);
+
+ let resObj = originalRunCommand.apply(
+ this, [prependDbPrefixToDbNameIfApplicable(dbName), cmdObjWithDbPrefix, options]);
+
+ // Remove TestData.dbPrefix from all database names and namespaces in the resObj since tests
+ // assume the command was run against the original database.
+ removeDbPrefix(resObj);
+
+ return resObj;
+};
+
+Mongo.prototype.runCommandWithMetadata = function(dbName, metadata, commandArgs) {
+ // Create another cmdObj from this command with TestData.dbPrefix prepended to all the
+ // applicable database names and namespaces.
+ const commandArgsWithDbPrefix = createCmdObjWithDbPrefix(commandArgs);
+
+ let resObj = originalRunCommandWithMetadata.apply(
+ this, [prependDbPrefixToDbNameIfApplicable(dbName), metadata, commandArgsWithDbPrefix]);
+
+ // Remove TestData.dbPrefix from all database names and namespaces in the resObj since tests
+ // assume the command was run against the original database.
+ removeDbPrefix(resObj);
+
+ return resObj;
+};
+
+OverrideHelpers.prependOverrideInParallelShell(
+ "jstests/libs/override_methods/inject_tenant_prefix.js");
+}());
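On the response side the override walks the reply and strips the prefix again, so assertions in the tests keep seeing the original database and namespace names. A small sketch of that cleanup (illustrative only; the reply shape is invented and the real removeDbPrefix() handles a wider set of field names):

    // Strip the tenant prefix from 'ns'/'namespace'/'db' string fields in a
    // reply document, loosely mirroring removeDbPrefix() above.
    const dbPrefix = "tenantMigrationDbPrefix_";

    function stripPrefix(obj) {
        for (let k of Object.keys(obj)) {
            const v = obj[k];
            if (typeof v === "string" && (k === "ns" || k === "namespace" || k === "db")) {
                obj[k] = v.replace(dbPrefix, "");
            } else if (typeof v === "object" && v !== null) {
                stripPrefix(v);
            }
        }
        return obj;
    }

    const reply = {cursor: {id: 0, ns: "tenantMigrationDbPrefix_test.foo", firstBatch: []}, ok: 1};
    stripPrefix(reply);
    print(reply.cursor.ns);  // test.foo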
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index 7e590f14580..076146c191f 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -260,7 +260,7 @@ Status _applyOps(OperationContext* opCtx,
result->append("codeName", ErrorCodes::errorString(ex.code()));
result->append("errmsg", ex.what());
result->append("results", ab.arr());
- return Status(ex.code(), ex.what());
+ return ex.toStatus();
}
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 1ed66938941..58133d54a59 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -229,10 +229,26 @@ void _logOpsInner(OperationContext* opCtx,
uasserted(ErrorCodes::NotMaster, ss);
}
- // The oplogEntry for renameCollection has nss set to the fromCollection's ns. renameCollection
- // can be across databases, but a tenant will never be able to rename into a database with a
- // different prefix, so it is safe to use the fromCollection's db's prefix for this check.
- tenant_migration_donor::onWriteToDatabase(opCtx, nss.db());
+ // TODO (SERVER-50598): Do not allow the tenant migration donor to write "commitIndexBuild" and
+ // "abortIndexBuild" oplog entries while in the blocking state.
+ // Allow it for now: if the donor does not write either a commit or an abort oplog entry, some
+ // resources will not be released on the donor nodes, and this can lead to deadlocks.
+ auto isCommitOrAbortIndexBuild =
+ std::any_of(records->begin(), records->end(), [](Record record) {
+ auto o = record.data.toBson().getObjectField("o");
+ return o.hasField("commitIndexBuild") || o.hasField("abortIndexBuild");
+ });
+
+ if (!isCommitOrAbortIndexBuild) {
+ // Throw TenantMigrationConflict error if the database for 'nss' is being migrated.
+ // The oplog entry for renameCollection has 'nss' set to the fromCollection's ns.
+ // renameCollection can be across databases, but a tenant will never be able to rename into
+ // a database with a different prefix, so it is safe to use the fromCollection's db's prefix
+ // for this check.
+ tenant_migration_donor::onWriteToDatabase(opCtx, nss.db());
+ } else {
+ invariant(records->size() == 1);
+ }
Status result = oplogCollection->insertDocumentsForOplog(opCtx, records, timestamps);
if (!result.isOK()) {
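The new guard reads as: if any record in the batch is a commitIndexBuild or abortIndexBuild oplog entry, skip the tenant-migration conflict check so the index build can still release its resources on the donor. A hedged JavaScript sketch of that predicate (the record shape below is a simplified assumption, not the server's Record type):

    // Simplified stand-ins for oplog records; only the 'o' field matters here.
    const records = [
        {o: {commitIndexBuild: "coll", indexes: [{name: "a_1", key: {a: 1}}]}},
    ];

    const isCommitOrAbortIndexBuild = records.some(
        (r) => r.o.hasOwnProperty("commitIndexBuild") || r.o.hasOwnProperty("abortIndexBuild"));

    // true here, so onWriteToDatabase() and its possible TenantMigrationConflict are skipped.
    print(isCommitOrAbortIndexBuild);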
diff --git a/src/mongo/shell/bulk_api.js b/src/mongo/shell/bulk_api.js
index 0b880175648..61b89819b69 100644
--- a/src/mongo/shell/bulk_api.js
+++ b/src/mongo/shell/bulk_api.js
@@ -536,7 +536,7 @@ var _bulk_api_module = (function() {
// Set max byte size
var maxBatchSizeBytes = 1024 * 1024 * 16;
- var maxNumberOfDocsInBatch = 1000;
+ var maxNumberOfDocsInBatch = (TestData && TestData.disableBatchWrites) ? 1 : 1000;
var writeConcern = null;
var currentOp;
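Because the new suite sets TestData.disableBatchWrites, each bulk batch now holds a single document, so every write can individually surface, and be retried on, a migration conflict (see the SERVER-50494 TODO above). A rough illustration of the one-document cap (the helper below is hypothetical, not the shell's internal batching code):

    // Split a list of write ops into batches of at most maxDocsPerBatch documents.
    function splitIntoBatches(ops, maxDocsPerBatch) {
        const batches = [];
        for (let i = 0; i < ops.length; i += maxDocsPerBatch) {
            batches.push(ops.slice(i, i + maxDocsPerBatch));
        }
        return batches;
    }

    const ops = [{_id: 1}, {_id: 2}, {_id: 3}];
    print(splitIntoBatches(ops, 1000).length);  // 1 batch by default
    print(splitIntoBatches(ops, 1).length);     // 3 batches when batch writes are disabled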