1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
|
/**
 * Tests that the donor
 * - rejects reads with atClusterTime/afterClusterTime >= blockOpTime and linearizable
 *   reads after the split commits.
*
* @tags: [
* incompatible_with_eft,
* incompatible_with_macos,
* incompatible_with_windows_tls,
* requires_majority_read_concern,
* requires_persistence,
* serverless,
* requires_fcv_63
* ]
*/
import {findSplitOperation, ShardSplitTest} from "jstests/serverless/libs/shard_split_test.js";
load("jstests/libs/fail_point_util.js");
load("jstests/serverless/shard_split_concurrent_reads_on_donor_util.js");
// Every test case reads from a collection of this name; isolation between cases comes from
// giving each case its own database (see the driver loop below).
const kCollName = "testColl";

/**
 * Tests that after the split commits, the donor rejects linearizable reads and reads with
 * atClusterTime/afterClusterTime >= blockOpTime.
 */
// Running totals of reads that are expected to fail with TenantMigrationCommitted on the
// primary / on the secondaries. Incremented as each test case runs, then compared against the
// shard split access blocker's counters at the end of the test.
let countTenantMigrationCommittedErrorsPrimary = 0;
let countTenantMigrationCommittedErrorsSecondaries = 0;
/**
 * Runs one read test case against the donor after the split has committed and verifies every
 * read fails with TenantMigrationCommitted.
 *
 * Timestamped cases are issued twice per node — once at the split's blockOpTime and once at its
 * commitOrAbortOpTime — so they contribute two expected errors per node instead of one. The
 * module-level expected-error counters are bumped accordingly so the access blocker statistics
 * can be validated at the end of the test.
 */
function testRejectReadsAfterMigrationCommitted(testCase, primary, dbName, collName, migrationId) {
    const donorDoc = findSplitOperation(primary, migrationId);

    // Number of rejected reads each node will observe for this test case.
    const errorsPerNode = testCase.requiresReadTimestamp ? 2 : 1;

    let nodes = [primary];
    if (testCase.isSupportedOnSecondaries) {
        nodes = donorRst.nodes;
        countTenantMigrationCommittedErrorsSecondaries += errorsPerNode;
    }
    countTenantMigrationCommittedErrorsPrimary += errorsPerNode;

    for (const node of nodes) {
        const db = node.getDB(dbName);
        if (testCase.requiresReadTimestamp) {
            // Both the block timestamp and the commit timestamp are >= blockOpTime, so reads at
            // either must be rejected.
            for (const readTs of [donorDoc.blockOpTime.ts, donorDoc.commitOrAbortOpTime.ts]) {
                runCommandForConcurrentReadTest(db,
                                                testCase.command(collName, readTs),
                                                ErrorCodes.TenantMigrationCommitted,
                                                testCase.isTransaction);
            }
        } else {
            runCommandForConcurrentReadTest(db,
                                            testCase.command(collName),
                                            ErrorCodes.TenantMigrationCommitted,
                                            testCase.isTransaction);
        }
    }
}
// Shared table of read test cases, loaded from shard_split_concurrent_reads_on_donor_util.js.
const testCases = shardSplitConcurrentReadTestCases;

const test = new ShardSplitTest({
    recipientTagName: "recipientTag",
    recipientSetName: "recipientSet",
    quickGarbageCollection: true
});
test.addRecipientNodes();

const tenantId = ObjectId();
let donorRst = test.donor;
const donorPrimary = test.getDonorPrimary();

// Force the donor to preserve all snapshot history to ensure that transactional reads do not
// fail with TransientTransactionError "Read timestamp is older than the oldest available
// timestamp".
donorRst.nodes.forEach(node => {
    configureFailPoint(node, "WTPreserveSnapshotHistoryIndefinitely");
});

// Run the split to completion so every read issued below targets a committed split.
const operation = test.createSplitOperation([tenantId]);
assert.commandWorked(operation.commit());
test.removeRecipientNodesFromDonor();

// Wait for the last oplog entry on the primary to be visible in the committed snapshot view of
// the oplog on all the secondaries. This is to ensure that snapshot reads on secondaries with
// unspecified atClusterTime have read timestamp >= commitTimestamp.
donorRst.awaitLastOpCommitted();

// Each test case runs against its own tenant-prefixed database so the cases stay independent.
for (const [testCaseName, testCase] of Object.entries(testCases)) {
    jsTest.log(`Testing inCommitted with testCase ${testCaseName}`);
    const dbName = `${tenantId.str}_${testCaseName}`;
    testRejectReadsAfterMigrationCommitted(
        testCase, donorPrimary, dbName, kCollName, operation.migrationId);
}

// Verify the primary's access blocker rejected exactly the expected number of reads.
ShardSplitTest.checkShardSplitAccessBlocker(donorPrimary, tenantId, {
    numTenantMigrationCommittedErrors: countTenantMigrationCommittedErrorsPrimary
});

// Verify the same on each secondary.
let secondaries = donorRst.getSecondaries();
secondaries.forEach(node => {
    ShardSplitTest.checkShardSplitAccessBlocker(node, tenantId, {
        numTenantMigrationCommittedErrors: countTenantMigrationCommittedErrorsSecondaries
    });
});

test.stop();
|