diff options
author | Jennifer Peshansky <jennifer.peshansky@mongodb.com> | 2022-11-03 16:13:20 +0000 |
---|---|---|
committer | Jennifer Peshansky <jennifer.peshansky@mongodb.com> | 2022-11-03 16:13:20 +0000 |
commit | e74d2910bbe76790ad131d53fee277829cd95982 (patch) | |
tree | cabe148764529c9623652374fbc36323a550cd44 /jstests/sharding/analyze_shard_key/persist_sampled_retryable_update_queries.js | |
parent | 280145e9940729480bb8a35453d4056afac87641 (diff) | |
parent | ba467f46cc1bc49965e1d72b541eff0cf1d7b22e (diff) | |
download | mongo-e74d2910bbe76790ad131d53fee277829cd95982.tar.gz |
Merge branch 'master' into jenniferpeshansky/SERVER-70854jenniferpeshansky/SERVER-70854
Diffstat (limited to 'jstests/sharding/analyze_shard_key/persist_sampled_retryable_update_queries.js')
-rw-r--r-- | jstests/sharding/analyze_shard_key/persist_sampled_retryable_update_queries.js | 137 |
1 file changed, 137 insertions(+), 0 deletions(-)
/**
 * Tests that retrying a retryable update does not cause it to have multiple sampled query
 * documents, regardless of whether the original attempt actually executed.
 *
 * @tags: [requires_fcv_62, featureFlagAnalyzeShardKey]
 */
(function() {
"use strict";

load("jstests/libs/fail_point_util.js");
load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js");

// Make the periodic job for writing sampled queries have a period of 1 second to speed up the test.
const queryAnalysisWriterIntervalSecs = 1;

// Case 1: the original update executes successfully before being retried.
function testRetryExecutedWrite(rst) {
    const dbName = "testDb";
    const collName = "testCollExecutedWrite";
    const ns = dbName + "." + collName;

    // Fixed session id and transaction number make every runCommand below a retry of the same
    // retryable write.
    const lsid = {id: UUID()};
    const txnNumber = NumberLong(1);

    const primary = rst.getPrimary();
    const db = primary.getDB(dbName);
    const coll = db.getCollection(collName);
    assert.commandWorked(coll.insert({a: 0}));
    const collectionUuid = QuerySamplingUtil.getCollectionUuid(db, collName);

    const updateOp0 =
        {q: {a: 0}, u: {$set: {b: 0}}, multi: false, upsert: false, sampleId: UUID()};
    const updateOp1 =
        {q: {a: {$lt: 1}}, u: {$set: {b: "$x"}}, multi: false, upsert: true, sampleId: UUID()};

    const originalCmdObj = {update: collName, updates: [updateOp0], lsid, txnNumber};
    const expectedSampledQueryDocs = [{
        sampleId: updateOp0.sampleId,
        cmdName: "update",
        cmdObj: QuerySamplingUtil.makeCmdObjIgnoreSessionInfo(originalCmdObj)
    }];

    const originalRes = assert.commandWorked(db.runCommand(originalCmdObj));
    assert.eq(originalRes.nModified, 1, originalRes);

    QuerySamplingUtil.assertSoonSampledQueryDocuments(
        primary, ns, collectionUuid, expectedSampledQueryDocs);

    // Retry updateOp0 with its original sampleId, batched together with the brand-new updateOp1.
    // Only updateOp1 is expected to produce an additional sampled query document.
    const retryCmdObj0 = Object.assign({}, originalCmdObj);
    retryCmdObj0.updates = [updateOp0, updateOp1];
    expectedSampledQueryDocs.push({
        sampleId: updateOp1.sampleId,
        cmdName: "update",
        cmdObj: Object.assign(QuerySamplingUtil.makeCmdObjIgnoreSessionInfo(retryCmdObj0),
                              {updates: [updateOp1]})
    });

    const retryRes0 = assert.commandWorked(db.runCommand(retryCmdObj0));
    assert.eq(retryRes0.nModified, 2, retryRes0);

    QuerySamplingUtil.assertSoonSampledQueryDocuments(
        primary, ns, collectionUuid, expectedSampledQueryDocs);

    // Retry both updateOp0 and updateOp1 with different sampleIds. Both statements have already
    // executed, so neither retry should be sampled again.
    const retryCmdObj1 = Object.assign({}, retryCmdObj0);
    retryCmdObj1.updates = [
        Object.assign({}, updateOp0, {sampleId: UUID()}),
        Object.assign({}, updateOp1, {sampleId: UUID()})
    ];

    const retryRes1 = assert.commandWorked(db.runCommand(retryCmdObj1));
    assert.eq(retryRes1.nModified, 2, retryRes1);

    // Wait for one interval to verify that no writes occurred as a result of the retry.
    sleep(queryAnalysisWriterIntervalSecs * 1000);

    QuerySamplingUtil.assertSoonSampledQueryDocuments(
        primary, ns, collectionUuid, expectedSampledQueryDocs);
}

// Case 2: the original update is sampled but fails before executing, then the retry succeeds.
function testRetryUnExecutedWrite(rst) {
    const dbName = "testDb";
    const collName = "testCollUnExecutedWrite";
    const ns = dbName + "." + collName;

    const lsid = {id: UUID()};
    const txnNumber = NumberLong(1);

    const primary = rst.getPrimary();
    const db = primary.getDB(dbName);
    const coll = db.getCollection(collName);
    assert.commandWorked(coll.insert({a: 0}));
    const collectionUuid = QuerySamplingUtil.getCollectionUuid(db, collName);

    const updateOp0 =
        {q: {a: 0}, u: {$set: {b: 0}}, multi: false, upsert: false, sampleId: UUID()};
    const originalCmdObj = {update: collName, updates: [updateOp0], lsid, txnNumber};
    const expectedSampledQueryDocs = [{
        sampleId: updateOp0.sampleId,
        cmdName: "update",
        cmdObj: QuerySamplingUtil.makeCmdObjIgnoreSessionInfo(originalCmdObj)
    }];

    const fp = configureFailPoint(primary, "failAllUpdates");

    // The update fails after it has been added to the sample buffer.
    assert.commandFailedWithCode(db.runCommand(originalCmdObj), ErrorCodes.InternalError);

    QuerySamplingUtil.assertSoonSampledQueryDocuments(
        primary, ns, collectionUuid, expectedSampledQueryDocs);

    fp.off();

    // Retry with the same sampleId. The update executes this time, but must not be re-sampled.
    const retryCmdObj = originalCmdObj;
    const retryRes = assert.commandWorked(db.runCommand(retryCmdObj));
    assert.eq(retryRes.nModified, 1, retryRes);

    // Wait for one interval to verify that no writes occurred as a result of the retry.
    sleep(queryAnalysisWriterIntervalSecs * 1000);

    QuerySamplingUtil.assertSoonSampledQueryDocuments(
        primary, ns, collectionUuid, expectedSampledQueryDocs);
}

const st = new ShardingTest({
    shards: 1,
    rs: {
        nodes: 2,
        // Make the periodic job for writing sampled queries have a period of 1 second to speed up
        // the test.
        setParameter: {queryAnalysisWriterIntervalSecs}
    }
});

testRetryExecutedWrite(st.rs0);
testRetryUnExecutedWrite(st.rs0);

st.stop();
})();