path: root/jstests/replsets/tenant_migration_test_max_bson_limit.js
/**
 * Tests that large write error results from bulk write operations are within the BSON size limit.
 *
 * Tenant migrations are not expected to be run on servers with ephemeralForTest.
 *
 * @tags: [requires_fcv_47, requires_majority_read_concern, incompatible_with_eft,
 * incompatible_with_windows_tls, incompatible_with_macos, requires_persistence]
 */
(function() {
'use strict';

load("jstests/libs/fail_point_util.js");
load("jstests/libs/parallelTester.js");
load("jstests/libs/uuid_util.js");
load("jstests/replsets/libs/tenant_migration_test.js");
load("jstests/replsets/libs/tenant_migration_util.js");

const kCollName = "testColl";
const kTenantDefinedDbName = "0";

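// Runs in a worker thread: performs a single unordered bulk insert of 'numDocs' documents
// against the given primary. Every insert is expected to fail with TenantMigrationCommitted,
// and the resulting write result is returned to the main thread.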
function bulkWriteDocsUnordered(primaryHost, dbName, collName, numDocs) {
    const primary = new Mongo(primaryHost);
    let primaryDB = primary.getDB(dbName);

    let batch = [];
    for (let i = 0; i < numDocs; ++i) {
        batch.push({x: i});
    }

    let request = {insert: collName, documents: batch, writeConcern: {w: 1}, ordered: false};
    const res = assert.commandFailedWithCode(primaryDB[collName].runCommand(request),
                                             ErrorCodes.TenantMigrationCommitted);

    return res;
}

jsTestLog("Testing that large write errors fit within the BSON size limit.");

const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()});
if (!tenantMigrationTest.isFeatureFlagEnabled()) {
    jsTestLog("Skipping test because the tenant migrations feature flag is disabled");
    return;
}

const tenantId = "bulkUnorderedInserts-committed";
const migrationOpts = {
    migrationIdString: extractUUIDFromObject(UUID()),
    tenantId,
};

const dbName = tenantMigrationTest.tenantDB(tenantId, kTenantDefinedDbName);
const primary = tenantMigrationTest.getDonorPrimary();
const primaryDB = primary.getDB(dbName);
const numWriteOps =
    assert.commandWorked(primaryDB.hello()).maxWriteBatchSize;  // num of writes to run in bulk.

assert.commandWorked(primaryDB.runCommand({create: kCollName}));

// Do a large unordered bulk insert that fails all inserts in order to generate a large write
// result.
const writeFp = configureFailPoint(primaryDB, "hangDuringBatchInsert");
const bulkWriteThread =
    new Thread(bulkWriteDocsUnordered, primary.host, dbName, kCollName, numWriteOps);

bulkWriteThread.start();
writeFp.wait();

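// With the batch insert hung on the failpoint, commit the tenant migration so that the blocked
// writes fail with TenantMigrationCommitted once the failpoint is released.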
TenantMigrationTest.assertCommitted(tenantMigrationTest.runMigration(migrationOpts));

writeFp.off();
bulkWriteThread.join();

const bulkWriteRes = bulkWriteThread.returnData();
const writeErrors = bulkWriteRes.writeErrors;

assert.gt(writeErrors.length, 0);

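// Only the first write error is expected to carry an errmsg; the message is omitted from the
// remaining errors so that the overall write result stays within the BSON size limit.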
writeErrors.forEach((err, arrIndex) => {
    assert.eq(err.code, ErrorCodes.TenantMigrationCommitted);
    if (arrIndex == 0) {
        assert(err.errmsg);
    } else {
        assert(!err.errmsg);
    }
});

// This assert is more or less a sanity check, since a jsThread must convert the data it
// returns into a BSON object. So if we have reached this assert, we already know that the
// write result is within the BSON size limit.
assert.lte(Object.bsonsize(bulkWriteRes),
           assert.commandWorked(primaryDB.hello()).maxBsonObjectSize);

tenantMigrationTest.stop();
})();