summaryrefslogtreecommitdiff
path: root/jstests/noPassthrough/sample_pushdown_transaction.js
blob: 16790447b01007acf957a9c267b1894735cc72d1 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
/**
 * Verify that $sample push down works properly in a transaction. This test was designed to
 * reproduce SERVER-57642.
 *
 * Requires WiredTiger for random cursor support.
 * @tags: [requires_wiredtiger, requires_replication]
 */
(function() {
'use strict';

load('jstests/libs/analyze_plan.js');  // For aggPlanHasStage.

// Spin up a single-node replica set and get a handle to the test collection.
const replTest = new ReplSetTest({nodes: 1});
replTest.startSet();
replTest.initiate();
const collName = 'sample_pushdown';
const dbName = 'test';
const primaryDB = replTest.getPrimary().getDB(dbName);
const coll = primaryDB[collName];

// A storage-engine random cursor is only selected when the collection holds more than 100
// documents AND the requested sample is under 5% of them, so seed the collection accordingly.
const numDocs = 1000;
const sampleSize = numDocs * .03;
const seedDocs = Array.from({length: numDocs}, (_, i) => ({a: i}));
assert.commandWorked(coll.insert(seedDocs));
const pipeline = [{$sample: {size: sampleSize}}, {$match: {a: {$gte: 0}}}];

// Confirm the pipeline is eligible for the $sample pushdown optimization.
const explain = coll.explain().aggregate(pipeline);
assert(aggPlanHasStage(explain, "$sampleFromRandomCursor"), tojson(explain));

// Open a session and begin a transaction on it.
const session = primaryDB.getMongo().startSession({causalConsistency: false});
const sessionDB = session.getDatabase(dbName);
session.startTransaction();

// Execute the aggregation inside the transaction.
const randDocs = sessionDB[collName].aggregate(pipeline).toArray();

// The sampled result set must be non-empty.
assert.gt(randDocs.length, 0, tojson(randDocs));

// Abort the transaction and tear down the replica set.
assert.commandWorked(session.abortTransaction_forTesting());
replTest.stopSet();
})();