path: root/jstests/serverless/shard_split_test_max_bson_limit.js
author     mathisbessamdb <mathis.bessa@mongodb.com>    2022-05-10 22:59:14 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2022-05-10 23:26:48 +0000
commit     e22aedb467254e680b705f04642e8770264af42d (patch)
tree       52f43addd3c8c36d0e386b5c6de79e0bda39ad95 /jstests/serverless/shard_split_test_max_bson_limit.js
parent     6a16461988b990f6ea2ebe4e129bad1addada035 (diff)
download   mongo-e22aedb467254e680b705f04642e8770264af42d.tar.gz
SERVER-64906 Copy applicable Tenant Migration jstests to shard split
Diffstat (limited to 'jstests/serverless/shard_split_test_max_bson_limit.js')
-rw-r--r--    jstests/serverless/shard_split_test_max_bson_limit.js    100
1 file changed, 100 insertions, 0 deletions
diff --git a/jstests/serverless/shard_split_test_max_bson_limit.js b/jstests/serverless/shard_split_test_max_bson_limit.js
new file mode 100644
index 00000000000..d24c617dbba
--- /dev/null
+++ b/jstests/serverless/shard_split_test_max_bson_limit.js
@@ -0,0 +1,100 @@
+/**
+ * Tests that large write error results from bulk write operations are within the BSON size limit.
+ *
+ * @tags: [
+ * incompatible_with_eft,
+ * incompatible_with_macos,
+ * incompatible_with_windows_tls,
+ * requires_majority_read_concern,
+ * requires_persistence,
+ * serverless,
+ * requires_fcv_52,
+ * featureFlagShardSplit
+ * ]
+ */
+(function() {
+'use strict';
+
+load("jstests/libs/fail_point_util.js");
+load("jstests/libs/parallelTester.js");
+load("jstests/libs/uuid_util.js");
+load("jstests/serverless/libs/basic_serverless_test.js");
+
+const kCollName = "testColl";
+const kTenantDefinedDbName = "0";
+
+function bulkWriteDocsUnordered(primaryHost, dbName, collName, numDocs) {
+    const primary = new Mongo(primaryHost);
+    let primaryDB = primary.getDB(dbName);
+
+    let batch = [];
+    for (let i = 0; i < numDocs; ++i) {
+        batch.push({x: i});
+    }
+
+    let request = {insert: collName, documents: batch, writeConcern: {w: 1}, ordered: false};
+    const res = assert.commandFailedWithCode(primaryDB[collName].runCommand(request),
+                                             ErrorCodes.TenantMigrationCommitted);
+
+    return res;
+}
+
+jsTestLog("Testing that large write errors fit within the BSON size limit.");
+
+const test = new BasicServerlessTest({
+    recipientSetName: "recipientSet",
+    recipientTagName: "recipientTagName",
+    quickGarbageCollection: true
+});
+test.addRecipientNodes();
+
+const tenantId = "bulkUnorderedInserts-committed";
+
+const dbName = test.tenantDB(tenantId, kTenantDefinedDbName);
+const primary = test.donor.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const numWriteOps =
+    assert.commandWorked(primaryDB.hello()).maxWriteBatchSize;  // Number of writes to run in bulk.
+
+assert.commandWorked(primaryDB.runCommand({create: kCollName}));
+
+// Do a large unordered bulk insert that fails all inserts in order to generate a large write
+// result.
+const writeFp = configureFailPoint(primaryDB, "hangDuringBatchInsert");
+const bulkWriteThread =
+    new Thread(bulkWriteDocsUnordered, primary.host, dbName, kCollName, numWriteOps);
+
+bulkWriteThread.start();
+writeFp.wait();
+
+const operation = test.createSplitOperation([tenantId]);
+assert.commandWorked(operation.commit());
+
+writeFp.off();
+bulkWriteThread.join();
+
+const bulkWriteRes = bulkWriteThread.returnData();
+const writeErrors = bulkWriteRes.writeErrors;
+
+assert.gt(writeErrors.length, 0);
+
+writeErrors.forEach((err, arrIndex) => {
+    assert.eq(err.code, ErrorCodes.TenantMigrationCommitted);
+    if (arrIndex == 0) {
+        assert(err.errmsg);
+    } else {
+        assert(!err.errmsg);
+    }
+});
+
+operation.forget();
+test.waitForGarbageCollection(operation.migrationId, [tenantId]);
+
+// This assertion is mostly a sanity check, since a jsThread must convert the data it returns
+// into a BSON object; if we have reached this point, we already know the write result fits
+// within the BSON size limit.
+assert.lte(Object.bsonsize(bulkWriteRes),
+           assert.commandWorked(primaryDB.hello()).maxBsonObjectSize);
+
+test.stop();
+})();
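
For context on the per-error assertions in the test above: with this unordered bulk insert, only the first entry in writeErrors carries an errmsg, while later entries carry only the error code, which is what keeps the overall reply under the BSON size limit. Below is a minimal sketch of the reply shape the test expects; the exampleBulkWriteRes name and the field values are illustrative assumptions, not part of this commit:

// Illustrative sketch only: approximate shape of the bulk write reply asserted above.
// Exact counts and message text depend on the server under test (assumption).
const exampleBulkWriteRes = {
    n: 0,
    writeErrors: [
        {index: 0, code: ErrorCodes.TenantMigrationCommitted, errmsg: "..."},  // first error keeps errmsg
        {index: 1, code: ErrorCodes.TenantMigrationCommitted},                 // later errors omit it
        // ... one entry per failed insert, up to maxWriteBatchSize
    ],
    ok: 1
};
// The whole reply must still fit in one BSON document (16 MB maxBsonObjectSize by default).
assert.lte(Object.bsonsize(exampleBulkWriteRes), 16 * 1024 * 1024);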