author | David Bradford <david.bradford@mongodb.com> | 2018-04-25 11:42:28 -0400 |
---|---|---|
committer | David Bradford <david.bradford@mongodb.com> | 2018-04-25 11:42:28 -0400 |
commit | 00f32ac53c595f098ea200ab7b9d7278be4a5193 (patch) | |
tree | 349308af092b597e7e137989271dc65596ec2cde /jstests/core/removeb.js | |
parent | 4ca260618eb84c4ed577cd3dc6995a3b8521d632 (diff) | |
download | mongo-00f32ac53c595f098ea200ab7b9d7278be4a5193.tar.gz |
SERVER-34544: Tests should use bulk inserts for setup with large amounts of data
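The setup change below replaces per-document insert() calls with the shell's unordered bulk write API. As a rough illustration of that pattern (the collection name `bulk_insert_example` and the standalone structure here are illustrative, not part of the commit):

```javascript
// Minimal sketch of bulk-insert setup in the mongo shell (illustrative names).
(function() {
    "use strict";

    const coll = db.bulk_insert_example;  // hypothetical collection for this sketch
    coll.drop();

    const nDocs = 20000;
    const bulk = coll.initializeUnorderedBulkOp();
    for (let i = 0; i < nDocs; ++i) {
        bulk.insert({a: i});
    }
    // execute() sends the queued inserts in batches instead of one round trip per document.
    assert.writeOK(bulk.execute());

    assert.eq(nDocs, coll.count());
})();
```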
Diffstat (limited to 'jstests/core/removeb.js')
-rw-r--r-- | jstests/core/removeb.js | 92 |
1 files changed, 55 insertions, 37 deletions
diff --git a/jstests/core/removeb.js b/jstests/core/removeb.js
index f5b038bf2c8..d73e9394d3b 100644
--- a/jstests/core/removeb.js
+++ b/jstests/core/removeb.js
@@ -2,40 +2,58 @@
 // Test removal of Records that have been reused since the remove operation began. SERVER-5198
-t = db.jstests_removeb;
-t.drop();
-
-t.ensureIndex({a: 1});
-
-// Make the index multikey to trigger cursor dedup checking.
-t.insert({a: [-1, -2]});
-t.remove({});
-
-// Insert some data.
-for (i = 0; i < 20000; ++i) {
-    if (i % 100 == 0) {
-        print(i + " of first set of 20000 documents inserted");
-    }
-    t.insert({a: i});
-}
-
-p = startParallelShell(
-    // Wait until the remove operation (below) begins running.
-    'while( db.jstests_removeb.count() == 20000 );' +
-    // Insert documents with increasing 'a' values. These inserted documents may
-    // reuse Records freed by the remove operation in progress and will be
-    // visited by the remove operation if it has not completed.
-    'for( i = 20000; i < 40000; ++i ) {' +
-    '    db.jstests_removeb.insert( { a:i } );' +
-    '    if (i % 1000 == 0) {' +
-    '        print( i-20000 + " of second set of 20000 documents inserted" );' +
-    '    }' +
-    '}');
-
-// Remove using the a:1 index in ascending direction.
-var res = t.remove({a: {$gte: 0}});
-assert(!res.hasWriteError(), 'The remove operation failed.');
-
-p();
-
-t.drop();
+(function() {
+    "use strict";
+
+    const t = db.jstests_removeb;
+    t.drop();
+
+    t.ensureIndex({a: 1});
+
+    // Make the index multikey to trigger cursor dedup checking.
+    t.insert({a: [-1, -2]});
+    t.remove({});
+
+    const insertDocs = function(collection, nDocs) {
+        print("Bulk inserting " + nDocs + " documents");
+
+        const bulk = collection.initializeUnorderedBulkOp();
+        for (let i = 0; i < nDocs; ++i) {
+            bulk.insert({a: i});
+        }
+
+        assert.writeOK(bulk.execute());
+
+        print("Bulk insert " + nDocs + " documents completed");
+    };
+
+    insertDocs(t, 20000);
+
+    const p = startParallelShell(function() {
+        // Wait until the remove operation (below) begins running.
+        while (db.jstests_removeb.count() === 20000) {
+        }
+
+        // Insert documents with increasing 'a' values. These inserted documents may
+        // reuse Records freed by the remove operation in progress and will be
+        // visited by the remove operation if it has not completed.
+        for (let i = 20000; i < 40000; i += 100) {
+            const bulk = db.jstests_removeb.initializeUnorderedBulkOp();
+            for (let j = 0; j < 100; ++j) {
+                bulk.insert({a: i + j});
+            }
+            assert.writeOK(bulk.execute());
+            if (i % 1000 === 0) {
+                print(i - 20000 + " of second set of 20000 documents inserted");
+            }
+        }
+    });
+
+    // Remove using the a:1 index in ascending direction.
+    var res = t.remove({a: {$gte: 0}});
+    assert(!res.hasWriteError(), 'The remove operation failed.');
+
+    p();
+
+    t.drop();
+})();
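The new test also passes a function rather than a concatenated string to startParallelShell; the shell serializes the function and runs it in a separate process, so it cannot close over local variables. A hedged sketch of that usage (the collection name is illustrative):

```javascript
// Sketch: startParallelShell() with a function argument (illustrative collection name).
// The function is stringified and executed in a new shell process, so it must be
// self-contained and cannot reference variables from the enclosing scope.
const awaitShell = startParallelShell(function() {
    const bulk = db.parallel_insert_example.initializeUnorderedBulkOp();
    for (let i = 0; i < 100; ++i) {
        bulk.insert({a: i});
    }
    assert.writeOK(bulk.execute());
});

// The returned function joins the parallel shell and checks its exit status.
awaitShell();
```

Batching the parallel writer's inserts in groups of 100, as the diff does, presumably keeps those writes interleaving with the concurrent remove so the Record-reuse scenario the test targets can still occur.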