Diffstat (limited to 'jstests')
130 files changed, 119 insertions, 5560 deletions
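The changes below strip the MMAPv1-only branches out of the JavaScript tests, leaving only the wiredTiger and ephemeral code paths. For orientation, a minimal sketch of the storage-engine check these tests lean on follows; the helper names getStorageEngineName() and isWiredTiger() appear in jstests/concurrency/fsm_workload_helpers/server_types.js further down in this diff, but the bodies shown here are reconstructed from the serverStatus() usage visible elsewhere in the diff and are an assumption, not the exact library code.

    // Sketch (assumption): how a jstest inspects the active storage engine.
    function getStorageEngineName(db) {
        // serverStatus().storageEngine.name reports the engine, e.g. "wiredTiger".
        var serverStatus = assert.commandWorked(db.serverStatus());
        return serverStatus.storageEngine.name;
    }

    function isWiredTiger(db) {
        // After this change, tests only branch on wiredTiger/ephemeral, never mmapv1.
        return getStorageEngineName(db) === 'wiredTiger';
    }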
diff --git a/jstests/auth/getMore.js b/jstests/auth/getMore.js index cbb84c4a687..0c728b8fc14 100644 --- a/jstests/auth/getMore.js +++ b/jstests/auth/getMore.js @@ -17,11 +17,6 @@ adminDB.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]})); assert.eq(1, adminDB.auth("admin", "admin")); - let ismmap = false; - if (!isMongos) { - ismmap = assert.commandWorked(adminDB.serverStatus()).storageEngine.name == "mmapv1"; - } - // Set up the test database. const testDBName = "auth_getMore"; let testDB = adminDB.getSiblingDB(testDBName); @@ -117,19 +112,6 @@ testDB.logout(); } - // Test that "Mallory" cannot use a repairCursor cursor created by "Alice". - if (!isMongos && ismmap) { - assert.eq(1, testDB.auth("Alice", "pwd")); - res = assert.commandWorked(testDB.runCommand({repairCursor: "foo"})); - cursorId = res.cursor.id; - testDB.logout(); - assert.eq(1, testDB.auth("Mallory", "pwd")); - assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: "foo"}), - ErrorCodes.Unauthorized, - "read from another user's repairCursor cursor"); - testDB.logout(); - } - // // Test that a user can call getMore on an indexStats cursor they created, even if the // indexStats privilege has been revoked in the meantime. diff --git a/jstests/concurrency/fsm_workload_helpers/server_types.js b/jstests/concurrency/fsm_workload_helpers/server_types.js index 1d9debe8fcf..a90cc9c751f 100644 --- a/jstests/concurrency/fsm_workload_helpers/server_types.js +++ b/jstests/concurrency/fsm_workload_helpers/server_types.js @@ -50,22 +50,6 @@ function getStorageEngineName(db) { } /** - * Returns true if the current storage engine is mmapv1, and false otherwise. - */ -function isMMAPv1(db) { - return getStorageEngineName(db) === 'mmapv1'; -} - -/** - * Returns true if an update can cause the RecordId of a document to change. - */ -function recordIdCanChangeOnUpdate(db) { - // A RecordId on MMAPv1 is just its location on disk, which can change if the document grows and - // needs to be moved. - return isMMAPv1(db); -} - -/** * Returns true if the current storage engine is wiredTiger, and false otherwise. */ function isWiredTiger(db) { diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection.js b/jstests/concurrency/fsm_workloads/create_capped_collection.js index 438915745ee..ed082444d9e 100644 --- a/jstests/concurrency/fsm_workloads/create_capped_collection.js +++ b/jstests/concurrency/fsm_workloads/create_capped_collection.js @@ -8,7 +8,6 @@ * * @tags: [requires_capped] */ -load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1 var $config = (function() { @@ -65,42 +64,6 @@ var $config = (function() { var smallDocSize = Math.floor(options.size / 8) - 1; var largeDocSize = Math.floor(options.size / 2) - 1; - // Truncation in MMAPv1 has well defined behavior. - if (isMongod(db) && isMMAPv1(db)) { - ids.push(this.insert(db, myCollName, largeDocSize)); - - // Insert a large document and verify that a truncation has occurred. - // There should be 1 document in the collection and it should always be - // the most recently inserted document. - - ids.push(this.insert(db, myCollName, largeDocSize)); - - count = db[myCollName].find().itcount(); - assertWhenOwnDB.eq(count, 1, 'expected truncation to occur'); - assertWhenOwnDB.eq(ids.slice(ids.length - count), - this.getObjectIds(db, myCollName), - 'expected truncation to remove the oldest document'); - - // Insert multiple small documents and verify that truncation has occurred. 
There - // should be at most 4 documents in the collection (fewer based on the maximum - // number of documents allowed if specified during collection creation), and they - // should be the most recently inserted documents. - - ids.push(this.insert(db, myCollName, smallDocSize)); - ids.push(this.insert(db, myCollName, smallDocSize)); - ids.push(this.insert(db, myCollName, smallDocSize)); - - var prevCount = count; - count = db[myCollName].find().itcount(); - - var expectedCount = options.max && options.max < 4 ? options.max : 4; - - assertWhenOwnDB.eq(count, expectedCount, 'expected truncation to occur'); - assertWhenOwnDB.eq(ids.slice(ids.length - count), - this.getObjectIds(db, myCollName), - 'expected truncation to remove the oldest documents'); - } - // Truncation of capped collections is generally unreliable. Instead of relying on it // to occur after a certain size document is inserted we test its occurrence. We set a // reasonable threshold of documents to insert before a user might expect truncation to diff --git a/jstests/concurrency/fsm_workloads/create_database.js b/jstests/concurrency/fsm_workloads/create_database.js index 2a08cfb39d2..49992626c64 100644 --- a/jstests/concurrency/fsm_workloads/create_database.js +++ b/jstests/concurrency/fsm_workloads/create_database.js @@ -134,11 +134,7 @@ var $config = (function() { return { data, // We only run a few iterations to reduce the amount of data cumulatively - // written to disk by mmapv1. For example, setting 10 threads and 180 - // iterations (with an expected 6 transitions per create/drop roundtrip) - // causes this workload to write at least 32MB (.ns and .0 files) * 10 threads - // * 30 iterations worth of data to disk, or about 10GB, which can be slow on - // test hosts. + // written to disk. threadCount: 10, iterations: 120, states, transitions, }; diff --git a/jstests/concurrency/fsm_workloads/drop_database.js b/jstests/concurrency/fsm_workloads/drop_database.js index 9d372d354f8..3a87701b086 100644 --- a/jstests/concurrency/fsm_workloads/drop_database.js +++ b/jstests/concurrency/fsm_workloads/drop_database.js @@ -26,16 +26,6 @@ var $config = (function() { var transitions = {init: {createAndDrop: 1}, createAndDrop: {createAndDrop: 1}}; - return { - threadCount: 10, - // We only run a few iterations to reduce the amount of data cumulatively - // written to disk by mmapv1. For example, setting 10 threads and 5 - // iterations causes this workload to write at least 32MB (.ns and .0 files) - // * 10 threads * 5 iterations worth of data to disk, which can be slow on - // test hosts. - iterations: 5, - states: states, - transitions: transitions - }; + return {threadCount: 10, iterations: 20, states: states, transitions: transitions}; })(); diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js index 7c32f6aefec..fd49788830c 100644 --- a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js +++ b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js @@ -5,10 +5,12 @@ * * Each thread inserts a single document into a collection, and then * repeatedly performs the findAndModify command. Attempts to trigger - * a document move by growing the size of the inserted document using - * the $set and $mul update operators. + * the same conditions that with MMAPv1 caused a document move, + * by growing the size of the inserted document using the $set and $mul + * update operators. 
Now checks that document moves don't happen and + * that large changes in document size are handled correctly. */ -load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1 +load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod var $config = (function() { @@ -95,12 +97,11 @@ var $config = (function() { // Get the DiskLoc of the document after its potential move var after = db[collName].find({_id: before._id}).showDiskLoc().next(); - if (isMongod(db) && isMMAPv1(db)) { - // Since the document has at least doubled in size, and the default - // allocation strategy of mmapv1 is to use power of two sizes, the - // document will have always moved - assertWhenOwnColl.neq( - before.$recordId, after.$recordId, 'document should have moved'); + if (isMongod(db)) { + // Even though the document has at least doubled in size, the document + // must never move. + assertWhenOwnColl.eq( + before.$recordId, after.$recordId, 'document should not have moved'); } } diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js index f4525b48e42..257b603d1fb 100644 --- a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js +++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js @@ -63,12 +63,7 @@ var $config = (function() { return { threadCount: 10, - // We only run a few iterations to reduce the amount of data cumulatively - // written to disk by mmapv1. For example, setting 10 threads and 5 - // iterations causes this workload to write at least 32MB (.ns and .0 files) - // * 10 threads * 5 iterations worth of data to disk, which can be slow on - // test hosts. - iterations: 5, + iterations: 20, data: data, states: states, transitions: transitions, diff --git a/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js index 84c880c0fd3..e73f7a20959 100644 --- a/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js +++ b/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js @@ -56,12 +56,7 @@ var $config = (function() { return { threadCount: 10, - // We only run a few iterations to reduce the amount of data cumulatively - // written to disk by mmapv1. For example, setting 10 threads and 5 - // iterations causes this workload to write at least 32MB (.ns and .0 files) - // * 10 threads * 5 iterations worth of data to disk, which can be slow on - // test hosts. - iterations: 5, + iterations: 20, data: data, states: states, transitions: transitions, diff --git a/jstests/concurrency/fsm_workloads/touch_base.js b/jstests/concurrency/fsm_workloads/touch_base.js index d69b7c94b2e..e964ad04603 100644 --- a/jstests/concurrency/fsm_workloads/touch_base.js +++ b/jstests/concurrency/fsm_workloads/touch_base.js @@ -9,7 +9,7 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config -// For isMongod, isMMAPv1, and isEphemeral. +// For isMongod and isEphemeral. 
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); var $config = extendWorkload($config, function($config, $super) { @@ -23,7 +23,7 @@ var $config = extendWorkload($config, function($config, $super) { $config.states.touch = function touch(db, collName) { var res = db.runCommand(this.generateTouchCmdObj(collName)); - if (isMongod(db) && (isMMAPv1(db) || isEphemeral(db))) { + if (isMongod(db) && isEphemeral(db)) { assertAlways.commandWorked(res); } else { // SERVER-16850 and SERVER-16797 diff --git a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js index 2ca02e2f38e..7654b8bac19 100644 --- a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js +++ b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js @@ -9,7 +9,7 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config -// For isMongod and recordIdCanChangeOnUpdate. +// For isMongod load('jstests/concurrency/fsm_workload_helpers/server_types.js'); var $config = extendWorkload($config, function($config, $super) { @@ -20,16 +20,9 @@ var $config = extendWorkload($config, function($config, $super) { assertAlways.eq(0, res.nUpserted, tojson(res)); if (isMongod(db)) { - if (!recordIdCanChangeOnUpdate(db)) { - // If a document's RecordId cannot change, then we should not - // have updated any document more than once, since the update - // stage internally de-duplicates based on RecordId. - assertWhenOwnColl.lte(this.numDocs, res.nMatched, tojson(res)); - } else { - // If RecordIds can change, then there are no guarantees on how - // many documents were updated. - assertAlways.gte(res.nMatched, 0, tojson(res)); - } + // If a document's RecordId cannot change, then we should not have updated any document + // more than once, since the update stage internally de-duplicates based on RecordId. + assertWhenOwnColl.lte(this.numDocs, res.nMatched, tojson(res)); } else { // mongos assertAlways.gte(res.nMatched, 0, tojson(res)); } diff --git a/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js b/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js index 45a079091e0..f339a8e004e 100644 --- a/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js +++ b/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js @@ -11,7 +11,7 @@ * Uses an ordered, bulk operation to perform the updates. */ -// For isMongod, recordIdCanChangeOnUpdate, and supportsDocumentLevelConcurrency. +// For isMongod and supportsDocumentLevelConcurrency. load('jstests/concurrency/fsm_workload_helpers/server_types.js'); var $config = (function() { @@ -41,17 +41,10 @@ var $config = (function() { find: function find(db, collName) { var docs = db[collName].find().toArray(); - if (isMongod(db) && !recordIdCanChangeOnUpdate(db)) { - // If the RecordId cannot change and we aren't updating any fields in any indexes, - // we should always see all matching documents, since they would not be able to move - // ahead or behind our collection scan or index scan. - assertWhenOwnColl.eq(this.docCount, docs.length); - } else { - // On MMAPv1, we may see more than 'this.docCount' documents during our find. This - // can happen if an update causes the document to grow such that it is moved in - // front of an index or collection scan which has already returned it. 
- assertWhenOwnColl.gte(docs.length, this.docCount); - } + // We aren't updating any fields in any indexes, so we should always see all + // matching documents, since they would not be able to move ahead or behind + // our collection scan or index scan. + assertWhenOwnColl.eq(this.docCount, docs.length); if (isMongod(db) && supportsDocumentLevelConcurrency(db)) { // Storage engines which support document-level concurrency will automatically retry diff --git a/jstests/concurrency/fsm_workloads/yield.js b/jstests/concurrency/fsm_workloads/yield.js index adbe1076ee6..5b9015fab0a 100644 --- a/jstests/concurrency/fsm_workloads/yield.js +++ b/jstests/concurrency/fsm_workloads/yield.js @@ -1,7 +1,5 @@ 'use strict'; -load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod - /** * yield.js * @@ -119,15 +117,6 @@ var $config = (function() { * more yielding, and inserts the documents to be used. */ function setup(db, collName, cluster) { - // Enable this failpoint to trigger more yields. In MMAPV1, if a record fetch is about to - // page fault, the query will yield. This failpoint will mock page faulting on such - // fetches every other time. - - cluster.executeOnMongodNodes(function enableFailPoint(db) { - assertAlways.commandWorked( - db.adminCommand({configureFailPoint: 'recordNeedsFetchFail', mode: 'alwaysOn'})); - }); - // Lower the following parameters to force even more yields. cluster.executeOnMongodNodes(function lowerYieldParams(db) { assertAlways.commandWorked( @@ -151,10 +140,6 @@ var $config = (function() { * Reset parameters and disable failpoint. */ function teardown(db, collName, cluster) { - cluster.executeOnMongodNodes(function disableFailPoint(db) { - assertAlways.commandWorked( - db.adminCommand({configureFailPoint: 'recordNeedsFetchFail', mode: 'off'})); - }); cluster.executeOnMongodNodes(function resetYieldParams(db) { assertAlways.commandWorked( db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 128})); diff --git a/jstests/core/apitest_dbcollection.js b/jstests/core/apitest_dbcollection.js index 3e67b680282..e907600d781 100644 --- a/jstests/core/apitest_dbcollection.js +++ b/jstests/core/apitest_dbcollection.js @@ -270,15 +270,5 @@ assert.eq(0, db.getCollection("test_db").getIndexes().length, "24"); t.totalIndexSize(), 'db.collection.totalIndexSize() cannot be undefined on a non-empty collection'); - if (db.isMaster().msg !== 'isdbgrid' && db.serverStatus().storageEngine.name === 'mmapv1') { - // Only in MMAPv1 do we guarantee that storageSize only changes when you write to a - // collection. - assert.eq(stats.storageSize, t.storageSize()); - assert.eq(stats.totalIndexSize, t.totalIndexSize()); - assert.eq(t.storageSize() + t.totalIndexSize(), - t.totalSize(), - 'incorrect db.collection.totalSize() on a non-empty collection'); - } - t.drop(); }()); diff --git a/jstests/core/cannot_create_system_dot_indexes.js b/jstests/core/cannot_create_system_dot_indexes.js index 6488a76702e..50928d5a27d 100644 --- a/jstests/core/cannot_create_system_dot_indexes.js +++ b/jstests/core/cannot_create_system_dot_indexes.js @@ -3,13 +3,6 @@ (function() { "use strict"; - // This test should not be run on mmapv1 because the 'system.indexes' collection exists on that - // storage engine. - const isMMAPv1 = jsTest.options().storageEngine === "mmapv1"; - if (isMMAPv1) { - return; - } - // Cannot create system.indexes using the 'create' command. 
assert.commandFailedWithCode(db.createCollection("system.indexes"), ErrorCodes.InvalidNamespace); diff --git a/jstests/core/commands_namespace_parsing.js b/jstests/core/commands_namespace_parsing.js index ca7f287c6cb..901160de169 100644 --- a/jstests/core/commands_namespace_parsing.js +++ b/jstests/core/commands_namespace_parsing.js @@ -54,8 +54,6 @@ assert.commandWorked(isMaster); const isMongos = (isMaster.msg === "isdbgrid"); - const isMMAPv1 = (jsTest.options().storageEngine === "mmapv1"); - db.commands_namespace_parsing.drop(); assert.writeOK(db.commands_namespace_parsing.insert({a: 1})); @@ -322,12 +320,6 @@ assertFailsWithInvalidNamespacesForField( "reIndex", {reIndex: ""}, isNotFullyQualified, isNotAdminCommand); - if (isMMAPv1 && !isMongos) { - // Test touch fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField( - "touch", {touch: "", data: true, index: true}, isNotFullyQualified, isNotAdminCommand); - } - // Test collStats fails with an invalid collection name. assertFailsWithInvalidNamespacesForField( "collStats", {collStats: ""}, isNotFullyQualified, isNotAdminCommand); diff --git a/jstests/core/covered_multikey.js b/jstests/core/covered_multikey.js index 9607b48841d..ec4ed0d5c0b 100644 --- a/jstests/core/covered_multikey.js +++ b/jstests/core/covered_multikey.js @@ -9,10 +9,6 @@ (function() { "use strict"; - // The MMAP storage engine does not store path-level multikey metadata, so it cannot participate - // in related query planning optimizations. - const isMMAPv1 = jsTest.options().storageEngine === "mmapv1"; - // For making assertions about explain output. load("jstests/libs/analyze_plan.js"); @@ -26,11 +22,7 @@ assert.eq({a: 1}, coll.findOne({a: 1, b: 2}, {_id: 0, a: 1})); let explainRes = coll.explain("queryPlanner").find({a: 1, b: 2}, {_id: 0, a: 1}).finish(); assert(isIxscan(db, explainRes.queryPlanner.winningPlan)); - if (isMMAPv1) { - assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH")); - } else { - assert(!planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH")); - } + assert(!planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH")); coll.drop(); assert.writeOK(coll.insert({a: 1, b: [1, 2, 3], c: 3, d: 5})); @@ -45,11 +37,7 @@ .find({a: 1, b: 1}, {_id: 0, c: 1, d: 1}) .sort({c: -1, d: -1}) .finish(); - if (isMMAPv1) { - assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH")); - } else { - assert(!planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH")); - } + assert(!planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH")); // Verify that a query cannot be covered over a path which is multikey due to an empty array. coll.drop(); diff --git a/jstests/core/dbstats.js b/jstests/core/dbstats.js index 324f5dd7603..941ebb125b1 100644 --- a/jstests/core/dbstats.js +++ b/jstests/core/dbstats.js @@ -18,7 +18,6 @@ } const isMongoS = serverIsMongos(); - const isMMAPv1 = jsTest.options().storageEngine === "mmapv1"; const isUsingPersistentStorage = !isMongoS && serverUsingPersistentStorage(); let testDB = db.getSiblingDB("dbstats_js"); @@ -32,34 +31,16 @@ let dbStats = testDB.runCommand({dbStats: 1}); assert.commandWorked(dbStats); - if (isMMAPv1) { - if (isMongoS) { - // When this test is run against mongoS with the mmapV1 storage engine the 'objects' and - // 'indexes' counts will vary depending on whether 'testColl' is sharded and on the # of - // shards (due to inclusion of system.indexes & system.namespaces counts). 
- assert(dbStats.hasOwnProperty("objects"), tojson(dbStats)); - assert(dbStats.hasOwnProperty("indexes"), tojson(dbStats)); - } else { - assert.eq(7, - dbStats.objects, - tojson(dbStats)); // Includes testColl, system.indexes & system.namespaces - assert.eq(2, dbStats.indexes, tojson(dbStats)); - } - // 'dataSize' and 'avgObjSize' include document padding space under MMAPv1. - assert(dbStats.hasOwnProperty("dataSize"), tojson(dbStats)); - assert(dbStats.hasOwnProperty("avgObjSize"), tojson(dbStats)); + assert.eq(1, dbStats.objects, tojson(dbStats)); // Includes testColl only + const dataSize = Object.bsonsize(doc); + assert.eq(dataSize, dbStats.avgObjSize, tojson(dbStats)); + assert.eq(dataSize, dbStats.dataSize, tojson(dbStats)); + + // Index count will vary on mongoS if an additional index is needed to support sharding. + if (isMongoS) { + assert(dbStats.hasOwnProperty("indexes"), tojson(dbStats)); } else { - assert.eq(1, dbStats.objects, tojson(dbStats)); // Includes testColl only - const dataSize = Object.bsonsize(doc); - assert.eq(dataSize, dbStats.avgObjSize, tojson(dbStats)); - assert.eq(dataSize, dbStats.dataSize, tojson(dbStats)); - - // Index count will vary on mongoS if an additional index is needed to support sharding. - if (isMongoS) { - assert(dbStats.hasOwnProperty("indexes"), tojson(dbStats)); - } else { - assert.eq(2, dbStats.indexes, tojson(dbStats)); - } + assert.eq(2, dbStats.indexes, tojson(dbStats)); } assert(dbStats.hasOwnProperty("storageSize"), tojson(dbStats)); @@ -71,9 +52,8 @@ assert(dbStats.hasOwnProperty("fsTotalSize"), tojson(dbStats)); } - // Confirm extentFreeList field existence. Displayed for mongoD running MMAPv1 and for mongoS - // regardless of storage engine. - if (isMMAPv1 || isMongoS) { + // Confirm extentFreeList field existence. Displayed for mongoS regardless of storage engine. + if (isMongoS) { assert(dbStats.hasOwnProperty("extentFreeList"), tojson(dbStats)); assert(dbStats.extentFreeList.hasOwnProperty("num"), tojson(dbStats)); assert(dbStats.extentFreeList.hasOwnProperty("totalSize"), tojson(dbStats)); @@ -90,15 +70,7 @@ dbStats = testDB.runCommand({dbStats: 1}); assert.commandWorked(dbStats); - if (isMMAPv1) { - assert.eq( - 4, - dbStats.collections, - tojson(dbStats)); // testColl + system.views + system.indexes + system.namespaces - } else { - assert.eq(2, dbStats.collections, tojson(dbStats)); // testColl + system.views - } - + assert.eq(2, dbStats.collections, tojson(dbStats)); // testColl + system.views assert.eq(1, dbStats.views, tojson(dbStats)); } diff --git a/jstests/core/distinct_multikey.js b/jstests/core/distinct_multikey.js index e2250fbbe93..d883908370e 100644 --- a/jstests/core/distinct_multikey.js +++ b/jstests/core/distinct_multikey.js @@ -6,8 +6,6 @@ load("jstests/libs/analyze_plan.js"); - const isMMAPv1 = jsTest.options().storageEngine === "mmapv1"; - let coll = db.jstest_distinct_multikey; coll.drop(); assert.commandWorked(coll.createIndex({a: 1})); @@ -67,15 +65,8 @@ result = coll.distinct("a", {a: {$gte: 2}}); assert.eq([7, 8], result.sort()); explain = coll.explain("queryPlanner").distinct("a", {a: {$gte: 2}}); - if (isMMAPv1) { - // MMAPv1 does not support path-level multikey metadata tracking. It cannot use a distinct - // scan since it does not know that the "a" field is not multikey. 
- assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN")); - } else { - assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); - } + assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION")); + assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); // Test distinct over a trailing multikey field. result = coll.distinct("b", {a: {$gte: 2}}); @@ -94,15 +85,8 @@ result = coll.distinct("b", {a: 3}); assert.eq([1, 7, 8], result.sort()); explain = coll.explain("queryPlanner").distinct("b", {a: 3}); - if (isMMAPv1) { - // MMAPv1 does not support path-level multikey metadata tracking. It cannot use a distinct - // scan since it does not know that the "a" field is not multikey. - assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN")); - } else { - assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); - } + assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION")); + assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); // Test distinct over a trailing non-multikey dotted path where the leading field is multikey. coll.drop(); @@ -114,13 +98,6 @@ result = coll.distinct("b.c", {a: 3}); assert.eq([1, 7, 8], result.sort()); explain = coll.explain("queryPlanner").distinct("b.c", {a: 3}); - if (isMMAPv1) { - // MMAPv1 does not support path-level multikey metadata tracking. It cannot use a distinct - // scan since it does not know that the "a" field is not multikey. - assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN")); - } else { - assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); - } + assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION")); + assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); }()); diff --git a/jstests/core/explain_multikey.js b/jstests/core/explain_multikey.js index 963e6c7b3dc..91763555ffb 100644 --- a/jstests/core/explain_multikey.js +++ b/jstests/core/explain_multikey.js @@ -52,16 +52,7 @@ var stage = createIndexAndRunExplain(testOptions); assert.eq(true, stage.isMultiKey, "expected index to be multikey: " + tojson(stage)); - if (jsTest.options().storageEngine !== "mmapv1") { - assert.eq( - {a: [], "b.c": ["b", "b.c"], "b.d": ["b"]}, stage.multiKeyPaths, tojson(stage)); - } else { - // Path-level multikey tracking is supported for all storage engines that use the - // KVCatalog. MMAPv1 is the only storage engine that does not. - // - // TODO SERVER-22727: Store path-level multikey information in MMAPv1 index catalog. - assert(!stage.hasOwnProperty("multiKeyPaths"), tojson(stage)); - } + assert.eq({a: [], "b.c": ["b", "b.c"], "b.d": ["b"]}, stage.multiKeyPaths, tojson(stage)); // Drop the collection and insert a document that shouldn't cause the index to be multikey. 
testOptions.docToInsert = { @@ -71,15 +62,7 @@ stage = createIndexAndRunExplain(testOptions); assert.eq(false, stage.isMultiKey, "expected index not to be multikey: " + tojson(stage)); - if (jsTest.options().storageEngine !== "mmapv1") { - assert.eq({a: [], "b.c": [], "b.d": []}, stage.multiKeyPaths, tojson(stage)); - } else { - // Path-level multikey tracking is supported for all storage engines that use the - // KVCatalog. MMAPv1 is the only storage engine that does not. - // - // TODO SERVER-22727: Store path-level multikey information in MMAPv1 index catalog. - assert(!stage.hasOwnProperty("multiKeyPaths"), tojson(stage)); - } + assert.eq({a: [], "b.c": [], "b.d": []}, stage.multiKeyPaths, tojson(stage)); } verifyMultikeyInfoInExplainOutput({ diff --git a/jstests/core/expr_index_use.js b/jstests/core/expr_index_use.js index adfce04a72a..d02b16f45cc 100644 --- a/jstests/core/expr_index_use.js +++ b/jstests/core/expr_index_use.js @@ -5,8 +5,6 @@ load("jstests/libs/analyze_plan.js"); - const isMMAPv1 = jsTest.options().storageEngine === "mmapv1"; - const coll = db.expr_index_use; coll.drop(); @@ -179,12 +177,9 @@ confirmExpectedExprExecution({$eq: ["$g.h", [1]]}, {nReturned: 1}); confirmExpectedExprExecution({$eq: ["$g.h", 1]}, {nReturned: 0}); - // Comparison against a non-multikey field of a multikey index can use an index, on storage - // engines other than MMAPv1. + // Comparison against a non-multikey field of a multikey index can use an index const metricsToCheck = {nReturned: 1}; - if (!isMMAPv1) { - metricsToCheck.expectedIndex = {i: 1, j: 1}; - } + metricsToCheck.expectedIndex = {i: 1, j: 1}; confirmExpectedExprExecution({$eq: ["$i", 1]}, metricsToCheck); metricsToCheck.nReturned = 0; confirmExpectedExprExecution({$eq: ["$i", 2]}, metricsToCheck); diff --git a/jstests/core/index_multikey.js b/jstests/core/index_multikey.js index bccec9ea14e..3514cb2e388 100644 --- a/jstests/core/index_multikey.js +++ b/jstests/core/index_multikey.js @@ -8,9 +8,6 @@ // For making assertions about explain output. load("jstests/libs/analyze_plan.js"); - const storageEngine = jsTest.options().storageEngine || "wiredTiger"; - const storageEngineSupportsMultikeyPaths = (storageEngine !== "mmapv1"); - const coll = db.getCollection("index_multikey"); coll.drop(); @@ -26,11 +23,9 @@ assert.eq(ixscan.isMultiKey, false, "empty index should not be marked multikey; plan: " + tojson(ixscan)); - if (storageEngineSupportsMultikeyPaths) { - assert.eq(ixscan.multiKeyPaths, - {a: [], b: []}, - "empty index should have no multiKeyPaths; plan: " + tojson(ixscan)); - } + assert.eq(ixscan.multiKeyPaths, + {a: [], b: []}, + "empty index should have no multiKeyPaths; plan: " + tojson(ixscan)); // After a failed insert, the index should not be marked as multikey. 
assert.commandFailedWithCode(coll.insert({a: [[1], {0: 1}]}), 16746); @@ -38,21 +33,16 @@ assert.eq(ixscan.isMultiKey, false, "index should not be marked multikey after failed insert; plan: " + tojson(ixscan)); - if (storageEngineSupportsMultikeyPaths) { - assert.eq( - ixscan.multiKeyPaths, - {a: [], b: []}, - "index should have empty multiKeyPaths after failed insert; plan: " + tojson(ixscan)); - } + assert.eq(ixscan.multiKeyPaths, + {a: [], b: []}, + "index should have empty multiKeyPaths after failed insert; plan: " + tojson(ixscan)); assert.commandWorked(coll.insert({a: [1, 2, 3]})); ixscan = getIndexScanExplainOutput(); assert.eq(ixscan.isMultiKey, true, "index should have been marked as multikey after insert; plan: " + tojson(ixscan)); - if (storageEngineSupportsMultikeyPaths) { - assert.eq(ixscan.multiKeyPaths, - {a: ["a"], b: []}, - "index has wrong multikey paths after insert; plan: " + ixscan); - } -}()); + assert.eq(ixscan.multiKeyPaths, + {a: ["a"], b: []}, + "index has wrong multikey paths after insert; plan: " + ixscan); +})(); diff --git a/jstests/core/list_collections1.js b/jstests/core/list_collections1.js index ef6f6240d67..f23331b97ce 100644 --- a/jstests/core/list_collections1.js +++ b/jstests/core/list_collections1.js @@ -4,8 +4,8 @@ // Basic functional tests for the listCollections command. // -// Note that storage engines are allowed to advertise internal collections to the user (in -// particular, the MMAPv1 storage engine currently advertises the "system.indexes" collection). +// Note that storage engines used to be allowed to advertise internal collections to the user (in +// particular, the MMAPv1 storage engine used to advertise the "system.indexes" collection). // Hence, this test suite does not test for a particular number of collections returned in // listCollections output, but rather tests for existence or absence of particular collections in // listCollections output. diff --git a/jstests/core/list_indexes_invalidation.js b/jstests/core/list_indexes_invalidation.js index 7dc70e822e7..75a62da531a 100644 --- a/jstests/core/list_indexes_invalidation.js +++ b/jstests/core/list_indexes_invalidation.js @@ -15,8 +15,8 @@ collRenamed.drop(); assert.commandWorked(coll.createIndexes([{a: 1}, {b: 1}, {c: 1}])); - // Get the first two indexes. Use find on 'system.indexes' on MMAPv1, listIndexes otherwise. - let cmd = db.system.indexes.count() ? {find: 'system.indexes'} : {listIndexes: collName}; + // Get the first two indexes. + let cmd = {listIndexes: collName}; Object.extend(cmd, {batchSize: 2}); let res = db.runCommand(cmd); assert.commandWorked(res, 'could not run ' + tojson(cmd)); diff --git a/jstests/core/list_namespaces_invalidation.js b/jstests/core/list_namespaces_invalidation.js index 687f680ed09..da5d392b9f5 100644 --- a/jstests/core/list_namespaces_invalidation.js +++ b/jstests/core/list_namespaces_invalidation.js @@ -17,10 +17,8 @@ assert.commandWorked(dbInvalid.createCollection('coll' + i.toString())); } - // Get the first two namespaces. Use find on 'system.namespaces' on MMAPv1, listCollections - // otherwise. - let cmd = dbInvalid.system.indexes.count() ? {find: 'system.namespaces'} - : {listCollections: dbInvalidName}; + // Get the first two namespaces using listCollections. 
+ let cmd = {listCollections: dbInvalidName}; Object.extend(cmd, {batchSize: batchSize}); let res = dbInvalid.runCommand(cmd); assert.commandWorked(res, 'could not run ' + tojson(cmd)); diff --git a/jstests/core/profile3.js b/jstests/core/profile3.js index 3a810ea142f..b59e0b1f674 100644 --- a/jstests/core/profile3.js +++ b/jstests/core/profile3.js @@ -41,10 +41,6 @@ try { assert.eq(profileCursor({nMatched: {$exists: 1}}).count(), 3); assert.eq(profileCursor({nMatched: 1}).count(), 2); assert.eq(profileCursor({nMatched: 0}).count(), 1); - if (db.serverStatus().storageEngine.name == "mmapv1") { - assert.eq(profileCursor({nmoved: 1}).count(), 1); - } - db.system.profile.drop(); } finally { diff --git a/jstests/core/profile_no_such_db.js b/jstests/core/profile_no_such_db.js index 5567f59f715..905c49ae409 100644 --- a/jstests/core/profile_no_such_db.js +++ b/jstests/core/profile_no_such_db.js @@ -23,11 +23,9 @@ jsTest.log('Testing profiling level ' + level); // Setting the profiling level creates the database. - // Note: in storage engines other than MMAPv1 setting the profiling level to 0 puts the - // database - // in a weird state where it exists internally, but doesn't show up in listDatabases, and - // won't - // exist if you restart the server. + // Note: setting the profiling level to 0 puts the database in a weird state where it + // exists internally, but doesn't show up in listDatabases, and won't exist if you + // restart the server. var res = db.setProfilingLevel(level); assert.eq(res.was, defaultProfilingLevel); assert(dbExists() || level == 0); diff --git a/jstests/core/profile_update.js b/jstests/core/profile_update.js index bf2cfe02714..685594cb45f 100644 --- a/jstests/core/profile_update.js +++ b/jstests/core/profile_update.js @@ -109,24 +109,6 @@ assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); // - // Confirm 'nmoved' for MMAPv1. - // - if (db.serverStatus().storageEngine.name === "mmapv1") { - coll.drop(); - assert.writeOK(coll.insert({_id: 1})); - - assert.writeOK(coll.update({_id: 1}, {$set: {b: new Array(128).toString()}})); - profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.keysInserted, 1, tojson(profileObj)); - assert.eq(profileObj.keysDeleted, 1, tojson(profileObj)); - assert.eq(profileObj.nMatched, 1, tojson(profileObj)); - assert.eq(profileObj.nModified, 1, tojson(profileObj)); - assert.eq(profileObj.nmoved, 1, tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - } - - // // Confirm "fromMultiPlanner" metric. // coll.drop(); diff --git a/jstests/core/projection_dotted_paths.js b/jstests/core/projection_dotted_paths.js index a9316d70467..e76feb7a2ee 100644 --- a/jstests/core/projection_dotted_paths.js +++ b/jstests/core/projection_dotted_paths.js @@ -77,11 +77,8 @@ assert.eq(resultDoc, {b: {c: 1}}); explain = coll.find({a: 2}, {_id: 0, "b.c": 1}).explain("queryPlanner"); assert(isIxscan(db, explain.queryPlanner.winningPlan)); - if (jsTest.options().storageEngine !== "mmapv1") { - // Storage engines other than MMAPv1 track path-level multikey info, and can use this info - // to generate a covered plan. - assert(isIndexOnly(db, explain.queryPlanner.winningPlan)); - } + // Path-level multikey info allows for generating a covered plan. + assert(isIndexOnly(db, explain.queryPlanner.winningPlan)); // Verify that dotted projections work for multiple levels of nesting. 
assert.commandWorked(coll.createIndex({a: 1, "x.y.y": 1, "x.y.z": 1, "x.z": 1})); diff --git a/jstests/core/sort_array.js b/jstests/core/sort_array.js index 1343d60972e..ed24d1be965 100644 --- a/jstests/core/sort_array.js +++ b/jstests/core/sort_array.js @@ -6,10 +6,6 @@ (function() { "use strict"; - // The MMAP storage engine does not store path-level multikey metadata, so it cannot participate - // in related query planning optimizations. - const isMMAPv1 = jsTest.options().storageEngine === "mmapv1"; - load("jstests/libs/analyze_plan.js"); let coll = db.jstests_array_sort; @@ -133,25 +129,22 @@ expected: [{_id: 0}, {_id: 1}] }); - if (!isMMAPv1) { - // Test that, for storage engines which support path-level multikey tracking, a multikey - // index can provide a sort over a non-multikey field. - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1, "b.c": 1})); - assert.writeOK(coll.insert({a: [1, 2, 3], b: {c: 9}})); - explain = coll.find({a: 2}).sort({"b.c": -1}).explain(); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN")); - assert(!planHasStage(db, explain.queryPlanner.winningPlan, "SORT")); - - const pipeline = [ - {$match: {a: 2}}, - {$sort: {"b.c": -1}}, - ]; - explain = coll.explain().aggregate(pipeline); - assert(aggPlanHasStage(explain, "IXSCAN")); - assert(!aggPlanHasStage(explain, "SORT")); - assert(!aggPlanHasStage(explain, "$sort")); - } + // Test that a multikey index can provide a sort over a non-multikey field. + coll.drop(); + assert.commandWorked(coll.createIndex({a: 1, "b.c": 1})); + assert.writeOK(coll.insert({a: [1, 2, 3], b: {c: 9}})); + explain = coll.find({a: 2}).sort({"b.c": -1}).explain(); + assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN")); + assert(!planHasStage(db, explain.queryPlanner.winningPlan, "SORT")); + + const pipeline = [ + {$match: {a: 2}}, + {$sort: {"b.c": -1}}, + ]; + explain = coll.explain().aggregate(pipeline); + assert(aggPlanHasStage(explain, "IXSCAN")); + assert(!aggPlanHasStage(explain, "SORT")); + assert(!aggPlanHasStage(explain, "$sort")); // Test that we can correctly sort by an array field in agg when there are additional fields not // involved in the sort pattern. diff --git a/jstests/core/views/views_creation.js b/jstests/core/views/views_creation.js index 18213a6e352..7fd2686dff5 100644 --- a/jstests/core/views/views_creation.js +++ b/jstests/core/views/views_creation.js @@ -20,14 +20,10 @@ ErrorCodes.InvalidNamespace, "Created an illegal view named 'system.views'"); - // We don't run this check on MMAPv1 as it automatically creates a system.indexes collection - // when creating a database, which causes this command to fail with NamespaceAlreadyExists. - if (jsTest.options().storageEngine !== "mmapv1") { - assert.commandFailedWithCode( - viewsDB.runCommand({create: "system.indexes", viewOn: "collection"}), - ErrorCodes.InvalidNamespace, - "Created an illegal view named 'system.indexes'"); - } + assert.commandFailedWithCode( + viewsDB.runCommand({create: "system.indexes", viewOn: "collection"}), + ErrorCodes.InvalidNamespace, + "Created an illegal view named 'system.indexes'"); // Collections that start with 'system.' that are not special to MongoDB fail with a different // error code. diff --git a/jstests/disk/datafile_options.js b/jstests/disk/datafile_options.js deleted file mode 100644 index 13d8a359693..00000000000 --- a/jstests/disk/datafile_options.js +++ /dev/null @@ -1,37 +0,0 @@ -// This test fiddles with preallocation, an mmap only behavior. 
-// @tags: [requires_mmapv1] - -var baseName = "jstests_disk_datafile_options"; - -load('jstests/libs/command_line/test_parsed_options.js'); - -jsTest.log("Testing \"noprealloc\" command line option"); -var expectedResult = {"parsed": {"storage": {"mmapv1": {"preallocDataFiles": false}}}}; -testGetCmdLineOptsMongod({noprealloc: ""}, expectedResult); - -jsTest.log("Testing \"storage.mmapv1.preallocDataFiles\" config file option"); -expectedResult = { - "parsed": { - "config": "jstests/libs/config_files/enable_prealloc.json", - "storage": {"mmapv1": {"preallocDataFiles": true}} - } -}; -testGetCmdLineOptsMongod({config: "jstests/libs/config_files/enable_prealloc.json"}, - expectedResult); - -jsTest.log("Testing with no explicit data file option setting"); -expectedResult = { - "parsed": {"storage": {}} -}; -testGetCmdLineOptsMongod({}, expectedResult); - -// Test that we preserve switches explicitly set to false in config files. See SERVER-13439. -jsTest.log("Testing explicitly disabled \"noprealloc\" config file option"); -expectedResult = { - "parsed": { - "config": "jstests/libs/config_files/disable_noprealloc.ini", - "storage": {"mmapv1": {"preallocDataFiles": true}} - } -}; -testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_noprealloc.ini"}, - expectedResult); diff --git a/jstests/disk/directoryperdb.js b/jstests/disk/directoryperdb.js index 25fc2292521..285b5588115 100644 --- a/jstests/disk/directoryperdb.js +++ b/jstests/disk/directoryperdb.js @@ -5,14 +5,11 @@ var storageEngine = db.serverStatus().storageEngine.name; // The pattern which matches the names of database files var dbFileMatcher; -if (storageEngine == 'mmapv1') { - // Matches mmapv1 *.ns and *.0, *.1, etc files. - dbFileMatcher = /\.(ns|\d+)$/; -} else if (storageEngine == 'wiredTiger') { +if (storageEngine == 'wiredTiger') { // Matches wiredTiger collection-*.wt and index-*.wt files dbFileMatcher = /(collection|index)-.+\.wt$/; } else { - assert(false, 'This test must be run against mmapv1 or wiredTiger'); + assert(false, 'This test must be run against wiredTiger'); } // Set up helper functions. @@ -27,9 +24,7 @@ assertDocumentCount = function(db, count) { * Returns the current connection which gets restarted with wiredtiger. */ checkDBFilesInDBDirectory = function(conn, dbToCheck) { - if (storageEngine == 'mmapv1') { - conn.adminCommand({fsync: 1}); - } else if (storageEngine == 'wiredTiger') { + if (storageEngine == 'wiredTiger') { MongoRunner.stopMongod(conn); conn = MongoRunner.runMongod({dbpath: dbpath, directoryperdb: '', restart: true}); } @@ -64,9 +59,7 @@ checkDBFilesInDBDirectory = function(conn, dbToCheck) { * Returns the restarted connection with wiredtiger. */ checkDBDirectoryNonexistent = function(conn, dbToCheck) { - if (storageEngine == 'mmapv1') { - conn.adminCommand({fsync: 1}); - } else if (storageEngine == 'wiredTiger') { + if (storageEngine == 'wiredTiger') { MongoRunner.stopMongod(conn); conn = MongoRunner.runMongod({dbpath: dbpath, directoryperdb: '', restart: true}); } @@ -82,14 +75,7 @@ checkDBDirectoryNonexistent = function(conn, dbToCheck) { } // Check db directories to ensure db files in them have been destroyed. - // mmapv1 removes the database directory, pending SERVER-1379. 
- if (storageEngine == 'mmapv1') { - var files = listFiles(dbpath); - var fileNotFound = true; - for (f in files) { - assert(files[f].name != dbToCheck, 'Directory ' + dbToCheck + ' still exists'); - } - } else if (storageEngine == 'wiredTiger') { + if (storageEngine == 'wiredTiger') { var dir = dbpath + dbToCheck; // The KV catalog escapes non alpha-numeric characters with its UTF-8 byte sequence in // decimal when creating the directory on disk. @@ -169,4 +155,4 @@ if (!_isWindows()) { assertDocumentCount(dbUU, 1); m = checkDBFilesInDBDirectory(m, dbUU); } -MongoRunner.stopMongod(m);
\ No newline at end of file +MongoRunner.stopMongod(m); diff --git a/jstests/disk/diskfull.js b/jstests/disk/diskfull.js deleted file mode 100644 index 78a31f609ba..00000000000 --- a/jstests/disk/diskfull.js +++ /dev/null @@ -1,15 +0,0 @@ -// Enable failpoint - -// The `allocateDiskFull` failpoint is mmap only. -// @tags: [requires_mmapv1] -assert.commandWorked(db.adminCommand({configureFailPoint: "allocateDiskFull", mode: "alwaysOn"})); - -var d = db.getSisterDB("DiskFullTestDB"); -var c = d.getCollection("DiskFullTestCollection"); - -var writeError1 = c.insert({a: 6}).getWriteError(); -assert.eq(12520, writeError1.code); - -// All subsequent requests should fail -var writeError2 = c.insert({a: 6}).getWriteError(); -assert.eq(12520, writeError2.code); diff --git a/jstests/disk/filesize.js b/jstests/disk/filesize.js deleted file mode 100644 index 709c82612ed..00000000000 --- a/jstests/disk/filesize.js +++ /dev/null @@ -1,44 +0,0 @@ -// Test for SERVER-7430: Warning about smallfiles should include filename - -// `--smallfiles` is mmap only. -// @tags: [requires_mmapv1] -var baseName = "filesize"; - -// Start mongod with --smallfiles -var m = MongoRunner.runMongod({nojournal: "", smallfiles: ""}); - -var db = m.getDB(baseName); - -// Skip on 32 bits, since 32-bit servers don't warn about small files -if (db.serverBuildInfo().bits == 32) { - print("Skip on 32-bit"); - MongoRunner.stopMongod(m); -} else { - // Restart mongod without --smallFiles - MongoRunner.stopMongod(m); - m = MongoRunner.runMongod({ - restart: true, - cleanData: false, - dbpath: m.dbpath, - port: m.port, - nojournal: "", - }); - - db = m.getDB(baseName); - var log = db.adminCommand({getLog: "global"}).log; - - // Find log message like: - // "openExisting file size 16777216 but - // mmapv1GlobalOptions.smallfiles=false: /data/db/filesize/local.0" - var found = false, logline = ''; - for (i = log.length - 1; i >= 0; i--) { - logline = log[i]; - if (logline.indexOf("openExisting file") >= 0 && logline.indexOf("local.0") >= 0) { - found = true; - break; - } - } - - assert(found); - MongoRunner.stopMongod(m); -} diff --git a/jstests/disk/newcollection.js b/jstests/disk/newcollection.js deleted file mode 100644 index fda2a59e498..00000000000 --- a/jstests/disk/newcollection.js +++ /dev/null @@ -1,39 +0,0 @@ -// SERVER-594 test - -// When `capped: false`, the `size` option on `createCollection` is only considered by mmapv1. -// @tags: [requires_mmapv1] - -var baseName = "jstests_disk_newcollection"; -var m = MongoRunner.runMongod({noprealloc: "", smallfiles: ""}); -db = m.getDB("test"); - -var t = db[baseName]; -var getTotalNonLocalNonAdminSize = function() { - var totalNonLocalNonAdminDBSize = 0; - m.getDBs().databases.forEach(function(dbStats) { - // We accept the local database's and admin database's space overhead. - if (dbStats.name == "local" || dbStats.name == "admin") - return; - - // Databases with "sizeOnDisk=1" and "empty=true" dont' actually take up space o disk. - // See SERVER-11051. - if (dbStats.sizeOnDisk == 1 && dbStats.empty) - return; - totalNonLocalNonAdminDBSize += dbStats.sizeOnDisk; - }); - return totalNonLocalNonAdminDBSize; -}; - -for (var pass = 0; pass <= 1; pass++) { - db.createCollection(baseName, {size: 15.8 * 1024 * 1024}); - if (pass == 0) - t.drop(); - - size = getTotalNonLocalNonAdminSize(); - t.save({}); - assert.eq(size, getTotalNonLocalNonAdminSize()); - assert(size <= 32 * 1024 * 1024); - - t.drop(); -} -MongoRunner.stopMongod(m);
\ No newline at end of file diff --git a/jstests/disk/parallel_collection_scan_on_capped_collection.js b/jstests/disk/parallel_collection_scan_on_capped_collection.js deleted file mode 100644 index cc95c32a2b5..00000000000 --- a/jstests/disk/parallel_collection_scan_on_capped_collection.js +++ /dev/null @@ -1,71 +0,0 @@ -/** -* Tests that calling the 'parallelCollectionScan' command on a capped collection -* always only returns one cursor and that the document insertion order is maintained -* when iterating over that cursor. -* -* This test requires the use of mmapv1 as the storage engine. The 'parallelCollectionScan' -* command is not yet fully supported for wiredTiger and currently will always return only -* one cursor regardless of the type of collection the command is run on. -* @tags: [requires_mmapv1] -*/ - -(function() { - 'use strict'; - let nonCappedCollName = 'noncapped_coll'; - let cappedCollName = 'capped_coll'; - - // Create a non-capped collection. - assert.commandWorked(db.runCommand({create: nonCappedCollName})); - // Create a capped collection with the size of 4096 bytes. - assert.commandWorked(db.runCommand({create: cappedCollName, capped: true, size: 4096})); - - let nonCappedBulk = db[nonCappedCollName].initializeUnorderedBulkOp(); - let cappedBulk = db[cappedCollName].initializeUnorderedBulkOp(); - - // Add enough documents to each collection to ensure that more than one extent - // on disk is populated. The 'parallelCollectionScan' command on non-capped - // collections returns up to one cursor per extent. - for (let i = 0; i < 500; i++) { - nonCappedBulk.insert({key: i}); - cappedBulk.insert({key: i}); - } - assert.writeOK(nonCappedBulk.execute()); - assert.writeOK(cappedBulk.execute()); - - // Tests that calling 'parallelCollectionScan' with 'numCursors'>=1 on a - // non-capped collection will return multiple cursors. - let cmd = {parallelCollectionScan: nonCappedCollName, numCursors: 2}; - let res = assert.commandWorked(db.runCommand(cmd), 'Command failed: ' + tojson(cmd)); - assert.eq(res.cursors.length, 2); - - // Tests that calling 'parallelCollectionScan' on a capped collection will return only - // one cursor for the case where 'numCursors'>=1. - let maxCursors = 3; - for (let numCursors = 1; numCursors < maxCursors; numCursors++) { - cmd = {parallelCollectionScan: cappedCollName, numCursors: numCursors}; - res = assert.commandWorked(db.runCommand(cmd), 'Command failed: ' + tojson(cmd)); - assert.eq(res.cursors.length, 1); - } - - // Tests that the document return order of 'parallelCollectionScan' on a capped collection - // is consistent with the document insertion order. - cmd = {parallelCollectionScan: cappedCollName, numCursors: 1}; - let pcsResult = assert.commandWorked(db.runCommand(cmd), 'Command failed: ' + tojson(cmd)); - assert.eq(pcsResult.cursors.length, 1); - let pcsCursor = pcsResult.cursors[0].cursor; - let pcsGetMore = { - getMore: pcsResult.cursors[0].cursor.id, - collection: cappedCollName, - batchSize: 1 - }; - let pcsGetMoreResult = - assert.commandWorked(db.runCommand(pcsGetMore), 'Command failed: ' + tojson(pcsGetMore)); - // The sequence of values being returned should be monotonically increasing by one until the - // last batch. 
- let initKey = pcsGetMoreResult.cursor.nextBatch[0].key; - for (let i = initKey; i < (initKey + db[cappedCollName].count()); i++) { - assert.eq(pcsGetMoreResult.cursor.nextBatch[0].key, i); - pcsGetMoreResult = assert.commandWorked(db.runCommand(pcsGetMore), - 'Command Failed: ' + tojson(pcsGetMore)); - } -}()); diff --git a/jstests/disk/preallocate.js b/jstests/disk/preallocate.js deleted file mode 100644 index e8ff9961a14..00000000000 --- a/jstests/disk/preallocate.js +++ /dev/null @@ -1,51 +0,0 @@ -// check that there is preallocation, and there are 2 files - -// Preallocation is an mmap only behavior. -// @tags: [requires_mmapv1] - -var baseName = "jstests_preallocate"; - -var m = MongoRunner.runMongod({}); - -var getTotalNonLocalNonAdminSize = function() { - var totalNonLocalNonAdminDBSize = 0; - m.getDBs().databases.forEach(function(dbStats) { - // We accept the local database's and admin database's space overhead. - if (dbStats.name == "local" || dbStats.name == "admin") - return; - - // Databases with "sizeOnDisk=1" and "empty=true" dont' actually take up space o disk. - // See SERVER-11051. - if (dbStats.sizeOnDisk == 1 && dbStats.empty) - return; - totalNonLocalNonAdminDBSize += dbStats.sizeOnDisk; - }); - return totalNonLocalNonAdminDBSize; -}; - -assert.eq(0, getTotalNonLocalNonAdminSize()); - -m.getDB(baseName).createCollection(baseName + "1"); - -// Windows does not currently use preallocation -expectedMB = 64 + 16; -if (m.getDB(baseName).serverBits() < 64) - expectedMB /= 4; - -assert.soon(function() { - return getTotalNonLocalNonAdminSize() >= expectedMB * 1024 * 1024; -}, "\n\n\nFAIL preallocate.js expected second file to bring total size over " + expectedMB + "MB"); - -MongoRunner.stopMongod(m); - -m = MongoRunner.runMongod({restart: true, cleanData: false, dbpath: m.dbpath}); - -size = getTotalNonLocalNonAdminSize(); - -m.getDB(baseName).createCollection(baseName + "2"); - -sleep(2000); // give prealloc a chance - -assert.eq(size, getTotalNonLocalNonAdminSize()); - -MongoRunner.stopMongod(m); diff --git a/jstests/disk/preallocate2.js b/jstests/disk/preallocate2.js deleted file mode 100644 index ee12a610fa2..00000000000 --- a/jstests/disk/preallocate2.js +++ /dev/null @@ -1,21 +0,0 @@ -// check that there is preallocation on insert - -// Preallocation is an mmap only behavior. -// @tags: [requires_mmapv1] - -var baseName = "jstests_preallocate2"; - -var m = MongoRunner.runMongod({}); - -m.getDB(baseName)[baseName].save({i: 1}); - -// Windows does not currently use preallocation -expectedMB = (_isWindows() ? 70 : 100); -if (m.getDB(baseName).serverBits() < 64) - expectedMB /= 4; - -assert.soon(function() { - return m.getDBs().totalSize > expectedMB * 1000000; -}, "\n\n\nFAIL preallocate.js expected second file to bring total size over " + expectedMB + "MB"); - -MongoRunner.stopMongod(m);
\ No newline at end of file diff --git a/jstests/disk/preallocate_directoryperdb.js b/jstests/disk/preallocate_directoryperdb.js deleted file mode 100644 index 5121a709050..00000000000 --- a/jstests/disk/preallocate_directoryperdb.js +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Test for SERVER-2417 - should not preallocate a database file while we are - * dropping its directory in directoryperdb mode. - */ - -// Preallocation is an mmap only behavior. -// @tags: [requires_mmapv1] - -var baseDir = "jstests_disk_preallocate_directoryperdb"; -var baseName = "preallocate_directoryperdb"; -var baseName2 = "preallocate_directoryperdb2"; -var baseName3 = "preallocate_directoryperdb3"; -dbpath = MongoRunner.dataPath + baseDir + "/"; - -function checkDb2DirAbsent() { - files = listFiles(dbpath); - // printjson( files ); - for (var f in files) { - var name = files[f].name; - assert.eq(-1, name.indexOf(dbpath + baseName2), "baseName2 dir still present"); - } -} - -var m = MongoRunner.runMongod( - {smallfiles: "", directoryperdb: "", dbpath: dbpath, bind_ip: "127.0.0.1"}); -db = m.getDB(baseName); -db2 = m.getDB(baseName2); -var bulk = db[baseName].initializeUnorderedBulkOp(); -var bulk2 = db2[baseName2].initializeUnorderedBulkOp(); -var big = new Array(5000).toString(); -for (var i = 0; i < 3000; ++i) { - bulk.insert({b: big}); - bulk2.insert({b: big}); -} -assert.writeOK(bulk.execute()); -assert.writeOK(bulk2.execute()); - -// Due to our write pattern, we expect db2's .3 file to be queued up in the file -// allocator behind db's .3 file at the time db2 is dropped. This will -// (incorrectly) cause db2's dir to be recreated until SERVER-2417 is fixed. -db2.dropDatabase(); - -checkDb2DirAbsent(); - -db.dropDatabase(); - -// Try writing a new database, to ensure file allocator is still working. -db3 = m.getDB(baseName3); -c3 = db[baseName3]; -assert.writeOK(c3.insert({})); -assert.eq(1, c3.count()); - -checkDb2DirAbsent(); - -MongoRunner.stopMongod(m);
\ No newline at end of file diff --git a/jstests/disk/quota.js b/jstests/disk/quota.js deleted file mode 100644 index 2305ed7bd75..00000000000 --- a/jstests/disk/quota.js +++ /dev/null @@ -1,51 +0,0 @@ -// Check functioning of --quotaFiles parameter, including with respect to SERVER-3293 ('local' -// database). - -// `--quotaFiles` is mmap only. -// @tags: [requires_mmapv1] - -baseName = "jstests_disk_quota"; - -var m = MongoRunner.runMongod({quotaFiles: 2, smallfiles: ""}); -db = m.getDB(baseName); - -big = new Array(10000).toString(); - -// Insert documents until quota is exhausted. -var coll = db[baseName]; -var res = coll.insert({b: big}); -while (!res.hasWriteError()) { - res = coll.insert({b: big}); -} - -dotTwoDataFile = baseName + ".2"; -files = listFiles(m.dbpath); -for (i in files) { - // Since only one data file is allowed, a .0 file is expected and a .1 file may be preallocated - // (SERVER-3410) but no .2 file is expected. - assert.neq(dotTwoDataFile, files[i].baseName); -} - -dotTwoDataFile = "local" + ".2"; -// Check that quota does not apply to local db, and a .2 file can be created. -l = m.getDB("local")[baseName]; -for (i = 0; i < 10000; ++i) { - assert.writeOK(l.insert({b: big})); - dotTwoFound = false; - if (i % 100 != 0) { - continue; - } - files = listFiles(m.dbpath); - for (f in files) { - if (files[f].baseName == dotTwoDataFile) { - dotTwoFound = true; - } - } - if (dotTwoFound) { - break; - } -} - -assert(dotTwoFound); - -MongoRunner.stopMongod(m); diff --git a/jstests/disk/quota2.js b/jstests/disk/quota2.js deleted file mode 100644 index 07215567b02..00000000000 --- a/jstests/disk/quota2.js +++ /dev/null @@ -1,36 +0,0 @@ -// Test for quotaFiles off by one file limit issue - SERVER-3420. - -// `--quotaFiles` is mmap only. -// @tags: [requires_mmapv1] - -if (0) { // SERVER-3420 - - baseName = "jstests_disk_quota2"; - - var m = MongoRunner.runMongod({quotaFiles: 2, smallfiles: ""}); - db = m.getDB(baseName); - - big = new Array(10000).toString(); - - // Insert documents until quota is exhausted. - var coll = db[baseName]; - var res = coll.insert({b: big}); - while (!res.hasWriteError()) { - res = coll.insert({b: big}); - } - - // Trigger allocation of an additional file for a 'special' namespace. - for (n = 0; !db.getLastError(); ++n) { - db.createCollection('' + n); - } - - // Check that new docs are saved in the .0 file. - for (i = 0; i < n; ++i) { - c = db['' + i]; - res = c.insert({b: big}); - if (!res.hasWriteError()) { - var recordId = c.find().showRecord()[0].$recordId; - assert.eq(0, recordId >> 32); - } - } -} diff --git a/jstests/disk/quota3.js b/jstests/disk/quota3.js deleted file mode 100644 index 27117e7e741..00000000000 --- a/jstests/disk/quota3.js +++ /dev/null @@ -1,23 +0,0 @@ -// Test for quotaFiles being ignored allocating a large collection - SERVER-3511. - -// `--quotaFiles` is mmap only. 
-// @tags: [requires_mmapv1] - -if (0) { // SERVER-3511 - - baseName = "jstests_disk_quota3"; - dbpath = MongoRunner.dataPath + baseName; - - var m = MongoRunner.runMongod({dbpath: dbpath, quotaFiles: 3, smallfiles: ""}); - db = m.getDB(baseName); - - db.createCollection(baseName, {size: 128 * 1024 * 1024}); - assert(db.getLastError()); - - dotFourDataFile = dbpath + "/" + baseName + ".4"; - files = listFiles(dbpath); - for (i in files) { - // .3 file may be preallocated but not .4 - assert.neq(dotFourDataFile, files[i].name); - } -} diff --git a/jstests/disk/repair.js b/jstests/disk/repair.js deleted file mode 100644 index 49b9725c56f..00000000000 --- a/jstests/disk/repair.js +++ /dev/null @@ -1,66 +0,0 @@ -// check --repairpath and --repair - -// `--repairpath` is mmap only. -// @tags: [requires_mmapv1] - -var baseName = "jstests_disk_repair"; -var dbpath = MongoRunner.dataPath + baseName + "/"; -var repairpath = dbpath + "repairDir/"; - -resetDbpath(dbpath); -resetDbpath(repairpath); - -var m = MongoRunner.runMongod({ - dbpath: dbpath, - repairpath: repairpath, - noCleanData: true, -}); -db = m.getDB(baseName); -db[baseName].save({}); -assert.commandWorked(db.runCommand({repairDatabase: 1, backupOriginalFiles: true})); -function check() { - files = listFiles(dbpath); - for (f in files) { - assert(!new RegExp("^" + dbpath + "backup_").test(files[f].name), "backup dir in dbpath"); - } - - assert.eq.automsg("1", "db[ baseName ].count()"); -} -check(); -MongoRunner.stopMongod(m); - -resetDbpath(repairpath); -m = MongoRunner.runMongod({ - port: m.port, - dbpath: dbpath, - noCleanData: true, -}); -db = m.getDB(baseName); -assert.commandWorked(db.runCommand({repairDatabase: 1})); -check(); -MongoRunner.stopMongod(m); - -resetDbpath(repairpath); -rc = runMongoProgram( - "mongod", "--repair", "--port", m.port, "--dbpath", dbpath, "--repairpath", repairpath); -assert.eq.automsg("0", "rc"); -m = MongoRunner.runMongod({ - port: m.port, - dbpath: dbpath, - noCleanData: true, -}); -db = m.getDB(baseName); -check(); -MongoRunner.stopMongod(m); - -resetDbpath(repairpath); -rc = runMongoProgram("mongod", "--repair", "--port", m.port, "--dbpath", dbpath); -assert.eq.automsg("0", "rc"); -m = MongoRunner.runMongod({ - port: m.port, - dbpath: dbpath, - noCleanData: true, -}); -db = m.getDB(baseName); -check(); -MongoRunner.stopMongod(m); diff --git a/jstests/disk/repair2.js b/jstests/disk/repair2.js deleted file mode 100644 index 11be561ef71..00000000000 --- a/jstests/disk/repair2.js +++ /dev/null @@ -1,147 +0,0 @@ -// repair with --directoryperdb - -// `--repairpath` is mmap only. 
-// @tags: [requires_mmapv1] - -var baseName = "jstests_disk_repair2"; - -function check() { - files = listFiles(dbpath); - for (f in files) { - assert(!new RegExp("^" + dbpath + "backup_").test(files[f].name), - "backup dir " + files[f].name + " in dbpath"); - } - - assert.eq.automsg("1", "db[ baseName ].count()"); -} - -var dbpath = MongoRunner.dataPath + baseName + "/"; -var repairpath = dbpath + "repairDir/"; -var longDBName = Array(61).join('a'); -var longRepairPath = dbpath + Array(61).join('b') + '/'; - -resetDbpath(dbpath); -resetDbpath(repairpath); - -var m = MongoRunner.runMongod({ - directoryperdb: "", - dbpath: dbpath, - repairpath: repairpath, - noCleanData: true, -}); -db = m.getDB(baseName); -db[baseName].save({}); -assert.commandWorked(db.runCommand({repairDatabase: 1, backupOriginalFiles: true})); - -// Check that repair files exist in the repair directory, and nothing else -db.adminCommand({fsync: 1}); -files = listFiles(repairpath + "/backup_repairDatabase_0/" + baseName); -var fileCount = 0; -for (f in files) { - print(files[f].name); - if (files[f].isDirectory) - continue; - fileCount += 1; - assert(/\.bak$/.test(files[f].name), - "In database repair directory, found unexpected file: " + files[f].name); -} -assert(fileCount > 0, "Expected more than zero nondirectory files in the database directory"); - -check(); -MongoRunner.stopMongod(m); - -resetDbpath(repairpath); -m = MongoRunner.runMongod({ - port: m.port, - directoryperdb: "", - dbpath: dbpath, - noCleanData: true, -}); -db = m.getDB(baseName); -assert.commandWorked(db.runCommand({repairDatabase: 1})); -check(); -MongoRunner.stopMongod(m); - -// Test long database names -resetDbpath(repairpath); -m = MongoRunner.runMongod({ - port: m.port, - directoryperdb: "", - dbpath: dbpath, - noCleanData: true, -}); -db = m.getDB(longDBName); -assert.writeOK(db[baseName].save({})); -assert.commandWorked(db.runCommand({repairDatabase: 1})); -MongoRunner.stopMongod(m); - -// Test long repairPath -resetDbpath(longRepairPath); -m = MongoRunner.runMongod({ - port: m.port, - directoryperdb: "", - dbpath: dbpath, - repairpath: longRepairPath, - noCleanData: true, -}); -db = m.getDB(longDBName); -assert.commandWorked(db.runCommand({repairDatabase: 1, backupOriginalFiles: true})); -check(); -MongoRunner.stopMongod(m); - -// Test database name and repairPath with --repair -resetDbpath(longRepairPath); -var returnCode = runMongoProgram("mongod", - "--port", - m.port, - "--repair", - "--directoryperdb", - "--dbpath", - dbpath, - "--repairpath", - longRepairPath); -assert.eq(returnCode, 0); -m = MongoRunner.runMongod({ - port: m.port, - directoryperdb: "", - dbpath: dbpath, - noCleanData: true, -}); -db = m.getDB(longDBName); -check(); -MongoRunner.stopMongod(m); - -resetDbpath(repairpath); -returnCode = runMongoProgram("mongod", - "--port", - m.port, - "--repair", - "--directoryperdb", - "--dbpath", - dbpath, - "--repairpath", - repairpath); -assert.eq(returnCode, 0); -m = MongoRunner.runMongod({ - port: m.port, - directoryperdb: "", - dbpath: dbpath, - repairpath: repairpath, - noCleanData: true, -}); -db = m.getDB(baseName); -check(); -MongoRunner.stopMongod(m); - -resetDbpath(repairpath); -returnCode = - runMongoProgram("mongod", "--port", m.port, "--repair", "--directoryperdb", "--dbpath", dbpath); -assert.eq(returnCode, 0); -m = MongoRunner.runMongod({ - port: m.port, - directoryperdb: "", - dbpath: dbpath, - noCleanData: true, -}); -db = m.getDB(baseName); -check(); diff --git a/jstests/disk/repair3.js b/jstests/disk/repair3.js 
deleted file mode 100644 index f339a666a70..00000000000 --- a/jstests/disk/repair3.js +++ /dev/null @@ -1,77 +0,0 @@ -// test --repairpath on another partition - -// `--repairpath` is mmap only. -// @tags: [requires_mmapv1] - -var baseName = "jstests_disk_repair3"; -var repairbase = MongoRunner.dataDir + "/repairpartitiontest"; -var repairpath = repairbase + "/dir"; - -doIt = false; -files = listFiles(MongoRunner.dataDir); -for (i in files) { - if (files[i].name == repairbase) { - doIt = true; - } -} - -if (!doIt) { - print("path " + repairpath + " missing, skipping repair3 test"); - doIt = false; -} - -if (doIt) { - var dbpath = MongoRunner.dataPath + baseName + "/"; - - resetDbpath(dbpath); - resetDbpath(repairpath); - - var m = MongoRunner.runMongod({ - nssize: 8, - noprealloc: "", - smallfiles: "", - dbpath: dbpath, - repairpath: repairpath, - }); - db = m.getDB(baseName); - db[baseName].save({}); - assert.commandWorked(db.runCommand({repairDatabase: 1, backupOriginalFiles: false})); - function check() { - files = listFiles(dbpath); - for (f in files) { - assert(!new RegExp("^" + dbpath + "backup_").test(files[f].name), - "backup dir in dbpath"); - } - - assert.eq.automsg("1", "db[ baseName ].count()"); - } - - check(); - MongoRunner.stopMongod(m); - - resetDbpath(repairpath); - var rc = runMongoProgram("mongod", - "--nssize", - "8", - "--noprealloc", - "--smallfiles", - "--repair", - "--port", - m.port, - "--dbpath", - dbpath, - "--repairpath", - repairpath); - assert.eq.automsg("0", "rc"); - m = MongoRunner.runMongod({ - nssize: 8, - noprealloc: "", - smallfiles: "", - port: m.port, - dbpath: dbpath, - repairpath: repairpath, - }); - db = m.getDB(baseName); - check(); - MongoRunner.stopMongod(m); -} diff --git a/jstests/disk/repair4.js b/jstests/disk/repair4.js deleted file mode 100644 index 15033dc5986..00000000000 --- a/jstests/disk/repair4.js +++ /dev/null @@ -1,53 +0,0 @@ -// test that disk space check happens on --repairpath partition - -// `--repairpath` is mmap only. -// @tags: [requires_mmapv1] - -var baseName = "jstests_disk_repair4"; -var smallbase = MongoRunner.dataDir + "/repairpartitiontest"; -var smallpath = smallbase + "/dir"; - -doIt = false; -files = listFiles(MongoRunner.dataDir); -for (i in files) { - if (files[i].name == smallbase) { - doIt = true; - } -} - -if (!doIt) { - print("path " + smallpath + " missing, skipping repair4 test"); - doIt = false; -} - -if (doIt) { - var repairpath = MongoRunner.dataPath + baseName + "/"; - - resetDbpath(smallpath); - resetDbpath(repairpath); - - var m = MongoRunner.runMongod({ - nssize: "8", - noprealloc: "", - smallfiles: "", - dbpath: smallpath, - repairpath: repairpath, - bind_ip: "127.0.0.1", - }); - - db = m.getDB(baseName); - db[baseName].save({}); - assert.commandWorked(db.runCommand({repairDatabase: 1, backupOriginalFiles: true})); - function check() { - files = listFiles(smallpath); - for (f in files) { - assert(!new RegExp("^" + smallpath + "backup_").test(files[f].name), - "backup dir in dbpath"); - } - - assert.eq.automsg("1", "db[ baseName ].count()"); - } - - check(); - MongoRunner.stopMongod(m); -} diff --git a/jstests/disk/repair5.js b/jstests/disk/repair5.js deleted file mode 100644 index f9b43300070..00000000000 --- a/jstests/disk/repair5.js +++ /dev/null @@ -1,57 +0,0 @@ -// SERVER-2351 Test killop with repair command. - -// `repairDatabase` on WiredTiger does not respond to `killop`. 
-// @tags: [requires_mmapv1] -(function() { - 'use strict'; - var baseName = "jstests_disk_repair5"; - - var dbpath = MongoRunner.dataPath + baseName + "/"; - - resetDbpath(dbpath); - - var m = MongoRunner.runMongod({ - dbpath: dbpath, - restart: true, - cleanData: false - }); // So that the repair dir won't get removed - - var dbTest = m.getDB(baseName); - - // Insert a lot of data so repair runs a long time - var bulk = dbTest[baseName].initializeUnorderedBulkOp(); - var big = new Array(5000).toString(); - for (var i = 0; i < 20000; ++i) { - bulk.insert({i: i, b: big}); - } - assert.writeOK(bulk.execute()); - - function killRepair() { - while (1) { - var p = db.currentOp().inprog; - for (var i in p) { - var o = p[i]; - printjson(o); - - // Find the active 'repairDatabase' op and kill it. - if (o.active && o.command && o.command.repairDatabase) { - db.killOp(o.opid); - return; - } - } - } - } - - var s = startParallelShell(killRepair.toString() + "; killRepair();", m.port); - sleep(100); // make sure shell is actually running, lame - - // Repair should fail due to killOp. - assert.commandFailed(dbTest.runCommand({repairDatabase: 1})); - - s(); - - assert.eq(20000, dbTest[baseName].find().itcount()); - assert(dbTest[baseName].validate().valid); - - MongoRunner.stopMongod(m); -})(); diff --git a/jstests/dur/checksum.js b/jstests/dur/checksum.js deleted file mode 100644 index 789bae418ec..00000000000 --- a/jstests/dur/checksum.js +++ /dev/null @@ -1,72 +0,0 @@ -// Test checksum validation of journal files. - -var testname = "dur_checksum"; -var path = MongoRunner.dataPath + testname; - -function startMongodWithJournal() { - return MongoRunner.runMongod({ - restart: true, - cleanData: false, - dbpath: path, - journal: "", - smallfiles: "", - journalOptions: 1 /*DurDumpJournal*/ - }); -} - -jsTest.log("Starting with good.journal to make sure everything works"); -resetDbpath(path); -mkdir(path + '/journal'); -copyFile("jstests/libs/dur_checksum_good.journal", path + "/journal/j._0"); -var conn = startMongodWithJournal(); -var db = conn.getDB('test'); -assert.eq(db.foo.count(), 2); -MongoRunner.stopMongod(conn); - -// dur_checksum_bad_last.journal is good.journal with the bad checksum on the last section. -jsTest.log("Starting with bad_last.journal"); -resetDbpath(path); -mkdir(path + '/journal'); -copyFile("jstests/libs/dur_checksum_bad_last.journal", path + "/journal/j._0"); -conn = startMongodWithJournal(); -var db = conn.getDB('test'); -assert.eq(db.foo.count(), 1); // 2nd insert "never happened" -MongoRunner.stopMongod(conn); - -// dur_checksum_bad_first.journal is good.journal with the bad checksum on the prior section. -// This means there is a good commit after the bad one. We currently ignore this, but a future -// version of the server may be able to detect this case. -jsTest.log("Starting with bad_first.journal"); -resetDbpath(path); -mkdir(path + '/journal'); -copyFile("jstests/libs/dur_checksum_bad_first.journal", path + "/journal/j._0"); -conn = startMongodWithJournal(); -var db = conn.getDB('test'); -assert.eq(db.foo.count(), 0); // Neither insert happened. -MongoRunner.stopMongod(conn); - -// If we detect an error in a non-final journal file, that is considered an error. 
-jsTest.log("Starting with bad_last.journal followed by good.journal"); -resetDbpath(path); -mkdir(path + '/journal'); -copyFile("jstests/libs/dur_checksum_bad_first.journal", path + "/journal/j._0"); -copyFile("jstests/libs/dur_checksum_good.journal", path + "/journal/j._1"); - -exitCode = runMongoProgram("mongod", - "--port", - allocatePort(), - "--dbpath", - path, - "--journal", - "--smallfiles", - "--journalOptions", - 1 /*DurDumpJournal*/ - + - 2 /*DurScanOnly*/); - -assert.eq(exitCode, 100 /*EXIT_UNCAUGHT*/); - -// TODO Possibly we could check the mongod log to verify that the correct type of exception was -// thrown. But that would introduce a dependency on the mongod log format, which we may not want. - -jsTest.log("SUCCESS checksum.js"); diff --git a/jstests/dur/closeall.js b/jstests/dur/closeall.js deleted file mode 100644 index 3eb04c706c9..00000000000 --- a/jstests/dur/closeall.js +++ /dev/null @@ -1,97 +0,0 @@ -// testing dropDatabase concurrency -// this is also a test of saveState() as that will get exercised by the update - -function f(variant, quickCommits, paranoid) { - var ourdb = "closealltest"; - - print("closeall.js start mongod variant:" + variant + "." + quickCommits + "." + paranoid); - var options = (paranoid == 1 ? 8 : 0); // 8 is DurParanoid - print("closeall.js --journalOptions " + options); - var N = 1000; - if (options) - N = 300; - - // use replication to exercise that code too with a close, and also to test local.sources with a - // close - var conn = MongoRunner.runMongod({journal: "", journalOptions: options + ""}); - - // we'll use two connections to make a little parallelism - var db1 = conn.getDB(ourdb); - var db2 = new Mongo(db1.getMongo().host).getDB(ourdb); - if (quickCommits) { - print("closeall.js QuickCommits variant (using a small syncdelay)"); - assert(db2.adminCommand({setParameter: 1, syncdelay: 5}).ok); - } - - print("closeall.js run test"); - - print("wait for initial sync to finish"); // SERVER-4852 - assert.writeOK(db1.foo.insert({}, {writeConcern: {w: 2}})); - assert.writeOK(db1.foo.remove({}, {writeConcern: {w: 2}})); - print("initial sync done"); - - var writeOps = startParallelShell('var coll = db.getSiblingDB("' + ourdb + '").foo; \ - for( var i = 0; i < ' + - N + '; i++ ) { \ - var bulk = coll.initializeUnorderedBulkOp(); \ - bulk.insert({ x: 1 }); \ - if ( i % 7 == 0 ) \ - bulk.insert({ x: 99, y: 2 }); \ - if ( i % 49 == 0 ) \ - bulk.find({ x: 99 }).update( \ - { $set: { a: 1, b: 2, c: 3, d: 4 }}); \ - if( i == 800 ) \ - coll.ensureIndex({ x: 1 }); \ - assert.writeOK(bulk.execute()); \ - }', - conn.port); - - for (var i = 0; i < N; i++) { - var res = null; - try { - if (variant == 1) - sleep(0); - else if (variant == 2) - sleep(1); - else if (variant == 3 && i % 10 == 0) - print(i); - res = db2.dropDatabase(); - } catch (e) { - print("\n\n\nFAIL closeall.js dropDatabase command invocation threw an exception. 
i:" + - i); - try { - print("getlasterror:"); - printjson(db2.getLastErrorObj()); - print("trying one more dropDatabase:"); - res = db2.dropDatabase(); - printjson(res); - } catch (e) { - print("got another exception : " + e); - } - print("\n\n\n"); - throw e; - } - assert(res.ok, "dropDatabase res.ok=false"); - } - - writeOps(); - - print("closeall.js shutting down servers"); - MongoRunner.stopMongod(conn); -} - -// Skip this test on 32-bit Windows (unfixable failures in MapViewOfFileEx) -// -if (_isWindows() && getBuildInfo().bits == 32) { - print("Skipping closeall.js on 32-bit Windows"); -} else { - for (var variant = 0; variant < 4; variant++) { - for (var quickCommits = 0; quickCommits <= 1; quickCommits++) { // false then true - for (var paranoid = 0; paranoid <= 1; paranoid++) { // false then true - f(variant, quickCommits, paranoid); - sleep(500); - } - } - } - print("SUCCESS closeall.js"); -} diff --git a/jstests/dur/data/empty.bson b/jstests/dur/data/empty.bson deleted file mode 100644 index e69de29bb2d..00000000000 --- a/jstests/dur/data/empty.bson +++ /dev/null diff --git a/jstests/dur/diskfull.js b/jstests/dur/diskfull.js deleted file mode 100644 index 628db20bd92..00000000000 --- a/jstests/dur/diskfull.js +++ /dev/null @@ -1,165 +0,0 @@ -/** Test running out of disk space with durability enabled. -To set up the test, it's required to set up a small partition something like the following: -sudo umount /data/db/diskfulltest/ -rm -rf /data/db/diskfulltest -mkdir -p /data/images -dd bs=512 count=83968 if=/dev/zero of=/data/images/diskfulltest.img -/sbin/mkfs.ext2 -m 0 -F /data/images/diskfulltest.img -mkdir -p /data/db/diskfulltest -mount -o loop /data/images/diskfulltest.img /data/db/diskfulltest -*/ - -startPath = MongoRunner.dataDir + "/diskfulltest"; -recoverPath = MongoRunner.dataDir + "/dur_diskfull"; - -doIt = false; -files = listFiles(MongoRunner.dataDir); -for (i in files) { - if (files[i].name == startPath) { - doIt = true; - } -} - -if (!doIt) { - print("path " + startPath + " missing, skipping diskfull test"); - doIt = false; -} - -function checkNoJournalFiles(path, pass) { - var files = listFiles(path); - if (files.some(function(f) { - return f.name.indexOf("prealloc") < 0; - })) { - if (pass == null) { - // wait a bit longer for mongod to potentially finish if it is still running. 
- sleep(10000); - return checkNoJournalFiles(path, 1); - } - print("\n\n\n"); - print("FAIL path:" + path); - print("unexpected files:"); - printjson(files); - assert(false, "FAIL a journal/lsn file is present which is unexpected"); - } -} - -/** Clear dbpath without removing and recreating diskfulltest directory, as resetDbpath does */ -function clear() { - files = listFiles(startPath); - files.forEach(function(x) { - removeFile(x.name); - }); -} - -function log(str) { - print(); - if (str) - print(testname + " step " + step++ + " " + str); - else - print(testname + " step " + step++); -} - -function work() { - log("work"); - try { - var d = conn.getDB("test"); - var big = new Array(5000).toString(); - var bulk = d.foo.initializeUnorderedBulkOp(); - // This part of the test depends on the partition size used in the build env - // Currently, unused, but with larger partitions insert enough documents here - // to create a second db file - for (i = 0; i < 1; ++i) { - bulk.insert({_id: i, b: big}); - } - assert.writeOK(bulk.execute()); - } catch (e) { - print(e); - raise(e); - } finally { - log("endwork"); - } -} - -function verify() { - log("verify"); - var d = conn.getDB("test"); - c = d.foo.count(); - v = d.foo.validate(); - // not much we can guarantee about the writes, just validate when possible - if (c != 0 && !v.valid) { - printjson(v); - print(c); - assert(v.valid); - assert.gt(c, 0); - } -} - -function runFirstMongodAndFillDisk() { - log(); - - clear(); - conn = MongoRunner.runMongod({ - restart: true, - cleanData: false, - dbpath: startPath, - journal: "", - smallfiles: "", - journalOptions: 8 + 64, - noprealloc: "" - }); - - assert.throws(work, null, "no exception thrown when exceeding disk capacity"); - MongoRunner.stopMongod(conn); - - sleep(5000); -} - -function runSecondMongdAndRecover() { - // restart and recover - log(); - conn = MongoRunner.runMongod({ - restart: true, - cleanData: false, - dbpath: startPath, - journal: "", - smallfiles: "", - journalOptions: 8 + 64, - noprealloc: "" - }); - verify(); - - log("stop"); - MongoRunner.stopMongod(conn); - - // stopMongod seems to be asynchronous (hmmm) so we sleep here. - sleep(5000); - - // at this point, after clean shutdown, there should be no journal files - log("check no journal files"); - checkNoJournalFiles(startPath + "/journal/"); - - log(); -} - -function someWritesInJournal() { - runFirstMongodAndFillDisk(); - runSecondMongdAndRecover(); -} - -function noWritesInJournal() { - // It is too difficult to consistently trigger cases where there are no existing journal files - // due to lack of disk space, but - // if we were to test this case we would need to manualy remove the lock file. 
- // removeFile( startPath + "/mongod.lock" ); -} - -if (doIt) { - var testname = "dur_diskfull"; - var step = 1; - var conn = null; - - someWritesInJournal(); - noWritesInJournal(); - - print(testname + " SUCCESS"); -} diff --git a/jstests/dur/dur2.js b/jstests/dur/dur2.js deleted file mode 100644 index 3cba67aee5c..00000000000 --- a/jstests/dur/dur2.js +++ /dev/null @@ -1,104 +0,0 @@ -/* test durability - runs mongod, kill -9's, recovers -*/ - -var debugging = false; -var testname = "dur2"; -var step = 1; -var conn = null; - -var start = new Date(); -function howLongSecs() { - return (new Date() - start) / 1000; -} - -function log(str) { - if (str) - print("\n" + testname + " step " + step++ + " " + str); - else - print(testname + " step " + step++); -} - -function verify() { - log("verify"); - var d = conn.getDB("test"); - var mycount = d.foo.count(); - // print("count:" + mycount); - assert(mycount > 2, "count wrong"); -} - -function work() { - log("work"); - x = 'x'; - while (x.length < 1024) - x += x; - var d = conn.getDB("test"); - d.foo.drop(); - d.foo.insert({}); - - // go long enough we will have time to kill it later during recovery - var j = 2; - var MaxTime = 90; - while (1) { - d.foo.insert({_id: j, z: x}); - d.foo.update({_id: j}, {$inc: {a: 1}}); - if (j % 25 == 0) - d.foo.remove({_id: j}); - j++; - if (j % 3 == 0) - d.foo.update({_id: j}, {$inc: {a: 1}}, true); - if (j % 10000 == 0) - print(j); - if (howLongSecs() > MaxTime) - break; - } - - verify(); - d.runCommand({getLastError: 1, fsync: 1}); -} - -if (debugging) { - // mongod already running in debugger - print( - "DOING DEBUG MODE BEHAVIOR AS 'db' IS DEFINED -- RUN mongo --nodb FOR REGULAR TEST BEHAVIOR"); - conn = db.getMongo(); - work(); - sleep(30000); - quit(); -} - -// directories -var path = MongoRunner.dataPath + testname + "dur"; - -log("run mongod with --dur"); -conn = MongoRunner.runMongod({ - dbpath: path, - journal: "", - smallfiles: "", - journalOptions: 8 /*DurParanoid*/ -}); -work(); - -log("kill -9"); -MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL}); - -// journal file should be present, and non-empty as we killed hard -assert(listFiles(path + "/journal/").length > 0, - "journal directory is unexpectantly empty after kill"); - -// restart and recover -log("restart mongod and recover"); -conn = MongoRunner.runMongod({ - restart: true, - cleanData: false, - dbpath: path, - journal: "", - smallfiles: "", - journalOptions: 8 -}); -verify(); - -log("stopping mongod " + conn.port); -MongoRunner.stopMongod(conn); - -print(testname + " SUCCESS"); diff --git a/jstests/dur/indexbg.js b/jstests/dur/indexbg.js deleted file mode 100644 index e37b9157076..00000000000 --- a/jstests/dur/indexbg.js +++ /dev/null @@ -1,8 +0,0 @@ -path = MongoRunner.dataDir + '/indexbg_dur'; - -var m = MongoRunner.runMongod({journal: "", smallfiles: "", journalOptions: 24}); -t = m.getDB('test').test; -t.save({x: 1}); -t.createIndex({x: 1}, {background: true}); -t.count(); -MongoRunner.stopMongod(m);
\ No newline at end of file diff --git a/jstests/dur/indexbg2.js b/jstests/dur/indexbg2.js deleted file mode 100644 index b385a29f70e..00000000000 --- a/jstests/dur/indexbg2.js +++ /dev/null @@ -1,19 +0,0 @@ -path = MongoRunner.dataDir + '/indexbg2_dur'; - -var m = MongoRunner.runMongod({journal: "", smallfiles: ""}); - -t = m.getDB('test').test; -t.createIndex({a: 1}); -t.createIndex({b: 1}); -t.createIndex({x: 1}, {background: true}); -for (var i = 0; i < 1000; ++i) { - t.insert({_id: i, a: 'abcd', b: 'bcde', x: 'four score and seven years ago'}); - t.remove({_id: i}); -} -sleep(1000); -for (var i = 1000; i < 2000; ++i) { - t.insert({_id: i, a: 'abcd', b: 'bcde', x: 'four score and seven years ago'}); - t.remove({_id: i}); -} -assert.writeOK(t.insert({_id: 2000, a: 'abcd', b: 'bcde', x: 'four score and seven years ago'})); -MongoRunner.stopMongod(m);
\ No newline at end of file diff --git a/jstests/dur/journaling_options.js b/jstests/dur/journaling_options.js deleted file mode 100644 index 28402e9536c..00000000000 --- a/jstests/dur/journaling_options.js +++ /dev/null @@ -1,119 +0,0 @@ - -function doTest() { - var baseName = "jstests_dur_journaling_options"; - - load('jstests/libs/command_line/test_parsed_options.js'); - - jsTest.log("Testing \"dur\" command line option"); - var expectedResult = {"parsed": {"storage": {"journal": {"enabled": true}}}}; - testGetCmdLineOptsMongod({dur: ""}, expectedResult); - - jsTest.log("Testing \"nodur\" command line option"); - expectedResult = {"parsed": {"storage": {"journal": {"enabled": false}}}}; - testGetCmdLineOptsMongod({nodur: ""}, expectedResult); - - jsTest.log("Testing \"journal\" command line option"); - expectedResult = {"parsed": {"storage": {"journal": {"enabled": true}}}}; - testGetCmdLineOptsMongod({journal: ""}, expectedResult); - - jsTest.log("Testing \"nojournal\" command line option"); - expectedResult = {"parsed": {"storage": {"journal": {"enabled": false}}}}; - testGetCmdLineOptsMongod({nojournal: ""}, expectedResult); - - jsTest.log("Testing \"storage.journal.enabled\" config file option"); - expectedResult = { - "parsed": { - "config": "jstests/libs/config_files/enable_journal.json", - "storage": {"journal": {"enabled": false}} - } - }; - testGetCmdLineOptsMongod({config: "jstests/libs/config_files/enable_journal.json"}, - expectedResult); - - // Test that we preserve switches explicitly set to false in config files. See SERVER-13439. - jsTest.log("Testing explicitly disabled \"journal\" config file option"); - expectedResult = { - "parsed": { - "config": "jstests/libs/config_files/disable_journal.ini", - "storage": {"journal": {"enabled": false}} - } - }; - testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_journal.ini"}, - expectedResult); - - jsTest.log("Testing explicitly disabled \"nojournal\" config file option"); - expectedResult = { - "parsed": { - "config": "jstests/libs/config_files/disable_nojournal.ini", - "storage": {"journal": {"enabled": true}} - } - }; - testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_nojournal.ini"}, - expectedResult); - - jsTest.log("Testing explicitly disabled \"dur\" config file option"); - expectedResult = { - "parsed": { - "config": "jstests/libs/config_files/disable_dur.ini", - "storage": {"journal": {"enabled": false}} - } - }; - testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_dur.ini"}, expectedResult); - - jsTest.log("Testing explicitly disabled \"nodur\" config file option"); - expectedResult = { - "parsed": { - "config": "jstests/libs/config_files/disable_nodur.ini", - "storage": {"journal": {"enabled": true}} - } - }; - testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_nodur.ini"}, - expectedResult); - - // Test that switches in old config files with no value have an implicit value of true - jsTest.log("Testing implicitly enabled \"journal\" config file option"); - expectedResult = { - "parsed": { - "config": "jstests/libs/config_files/implicitly_enable_journal.ini", - "storage": {"journal": {"enabled": true}} - } - }; - testGetCmdLineOptsMongod({config: "jstests/libs/config_files/implicitly_enable_journal.ini"}, - expectedResult); - - jsTest.log("Testing implicitly enabled \"nojournal\" config file option"); - expectedResult = { - "parsed": { - "config": "jstests/libs/config_files/implicitly_enable_nojournal.ini", - "storage": {"journal": {"enabled": 
false}} - } - }; - testGetCmdLineOptsMongod({config: "jstests/libs/config_files/implicitly_enable_nojournal.ini"}, - expectedResult); - - jsTest.log("Testing implicitly enabled \"dur\" config file option"); - expectedResult = { - "parsed": { - "config": "jstests/libs/config_files/implicitly_enable_dur.ini", - "storage": {"journal": {"enabled": true}} - } - }; - testGetCmdLineOptsMongod({config: "jstests/libs/config_files/implicitly_enable_dur.ini"}, - expectedResult); - - jsTest.log("Testing implicitly enabled \"nodur\" config file option"); - expectedResult = { - "parsed": { - "config": "jstests/libs/config_files/implicitly_enable_nodur.ini", - "storage": {"journal": {"enabled": false}} - } - }; - testGetCmdLineOptsMongod({config: "jstests/libs/config_files/implicitly_enable_nodur.ini"}, - expectedResult); - - print(baseName + " succeeded."); -} - -if (!jsTestOptions().noJournal) { - doTest(); -} diff --git a/jstests/dur/lsn.js b/jstests/dur/lsn.js deleted file mode 100644 index 9ec638c8d30..00000000000 --- a/jstests/dur/lsn.js +++ /dev/null @@ -1,145 +0,0 @@ -/* test durability, specifically last sequence number function - runs mongod, kill -9's, recovers - then writes more data and verifies with DurParanoid that it matches -*/ - -var debugging = false; -var testname = "lsn"; -var step = 1; -var conn = null; - -var start = new Date(); -function howLongSecs() { - return (new Date() - start) / 1000; -} - -function log(str) { - if (str) - print("\n" + testname + " step " + step++ + " " + str); - else - print(testname + " step " + step++); -} - -function verify() { - log("verify"); - var d = conn.getDB("test"); - var mycount = d.foo.count(); - print("count:" + mycount); - assert(mycount > 2, "count wrong"); -} - -// if you do inserts here, you will want to set _id. otherwise they won't match on different -// runs so we can't do a binary diff of the resulting files to check they are consistent. 
-function work() { - log("work"); - x = 'x'; - while (x.length < 1024) - x += x; - var d = conn.getDB("test"); - d.foo.drop(); - d.foo.insert({}); - - // go long enough we will have time to kill it later during recovery - var j = 2; - var MaxTime = 15; - if (Math.random() < 0.05) { - print("doing a longer pass"); - MaxTime = 90; - } - while (1) { - d.foo.insert({_id: j, z: x}); - d.foo.update({_id: j}, {$inc: {a: 1}}); - if (j % 25 == 0) - d.foo.remove({_id: j}); - j++; - if (j % 3 == 0) - d.foo.update({_id: j}, {$inc: {a: 1}}, true); - if (j % 10000 == 0) - print(j); - if (howLongSecs() > MaxTime) - break; - } - - verify(); - d.runCommand({getLastError: 1, fsync: 1}); -} - -if (debugging) { - // mongod already running in debugger - print( - "DOING DEBUG MODE BEHAVIOR AS 'db' IS DEFINED -- RUN mongo --nodb FOR REGULAR TEST BEHAVIOR"); - conn = db.getMongo(); - work(); - sleep(30000); - quit(); -} - -// directories -var path2 = MongoRunner.dataPath + testname + "dur"; - -// run mongod with a short --syncdelay to make LSN writing sooner -log("run mongod --journal and a short --syncdelay"); -conn = MongoRunner.runMongod({ - dbpath: path2, - syncdelay: 2, - journal: "", - smallfiles: "", - journalOptions: 8 /*DurParanoid*/ -}); -work(); - -log("wait a while for a sync and an lsn write"); -sleep(14); // wait for lsn write - -log("kill mongod -9"); -MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL}); - -// journal file should be present, and non-empty as we killed hard - -// check that there is an lsn file -{ - var files = listFiles(path2 + "/journal/"); - assert(files.some(function(f) { - return f.name.indexOf("lsn") >= 0; - }), - "lsn.js FAIL no lsn file found after kill, yet one is expected"); -} -/*assert.soon( - function () { - var files = listFiles(path2 + "/journal/"); - return files.some(function (f) { return f.name.indexOf("lsn") >= 0; }); - }, - "lsn.js FAIL no lsn file found after kill, yet one is expected" -);*/ - -// restart and recover -log("restart mongod, recover, verify"); -conn = MongoRunner.runMongod({ - restart: true, - cleanData: false, - dbpath: path2, - journal: "", - smallfiles: "", - journalOptions: 24 -}); -verify(); - -// idea here is to verify (in a simplistic way) that we are in a good state to do further ops after -// recovery -log("add data after recovery"); -{ - var d = conn.getDB("test"); - d.xyz.insert({x: 1}); - d.xyz.insert({x: 1}); - d.xyz.insert({x: 1}); - d.xyz.update({}, {$set: {x: "aaaaaaaaaaaa"}}); - d.xyz.reIndex(); - d.xyz.drop(); - sleep(1); - d.xyz.insert({x: 1}); -} - -log("stop mongod " + conn.port); -MongoRunner.stopMongod(conn); - -print(testname + " SUCCESS"); diff --git a/jstests/libs/config_files/enable_prealloc.json b/jstests/libs/config_files/enable_prealloc.json deleted file mode 100644 index ef110e2e371..00000000000 --- a/jstests/libs/config_files/enable_prealloc.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "storage" : { - "mmapv1" : { - "preallocDataFiles" : true - } - } -} diff --git a/jstests/libs/dur_checksum_bad_first.journal b/jstests/libs/dur_checksum_bad_first.journal Binary files differdeleted file mode 100644 index 07ed5924713..00000000000 --- a/jstests/libs/dur_checksum_bad_first.journal +++ /dev/null diff --git a/jstests/libs/dur_checksum_bad_last.journal b/jstests/libs/dur_checksum_bad_last.journal Binary files differdeleted file mode 100644 index bd2f2c55b27..00000000000 --- a/jstests/libs/dur_checksum_bad_last.journal +++ /dev/null diff --git a/jstests/libs/dur_checksum_good.journal 
b/jstests/libs/dur_checksum_good.journal Binary files differdeleted file mode 100644 index 9704fbbd5ac..00000000000 --- a/jstests/libs/dur_checksum_good.journal +++ /dev/null diff --git a/jstests/libs/retryable_writes_util.js b/jstests/libs/retryable_writes_util.js index 9d9866e6c84..f74430fb09c 100644 --- a/jstests/libs/retryable_writes_util.js +++ b/jstests/libs/retryable_writes_util.js @@ -24,7 +24,7 @@ var RetryableWritesUtil = (function() { return kRetryableWriteCommands.has(cmdName); } - const kStorageEnginesWithoutDocumentLocking = new Set(["ephemeralForTest", "mmapv1"]); + const kStorageEnginesWithoutDocumentLocking = new Set(["ephemeralForTest"]); /** * Returns true if the given storage engine supports retryable writes (i.e. supports diff --git a/jstests/mmap_v1/capped2.js b/jstests/mmap_v1/capped2.js deleted file mode 100644 index b439fb20880..00000000000 --- a/jstests/mmap_v1/capped2.js +++ /dev/null @@ -1,70 +0,0 @@ -db.capped2.drop(); -db._dbCommand({create: "capped2", capped: true, size: 1000, $nExtents: 11}); -tzz = db.capped2; - -function debug(x) { - // print( x ); -} - -var val = new Array(2000); -var c = ""; -for (i = 0; i < 2000; ++i, c += "---") { // bigger and bigger objects through the array... - val[i] = {a: c}; -} - -function checkIncreasing(i) { - res = tzz.find().sort({$natural: -1}); - assert(res.hasNext(), "A"); - var j = i; - while (res.hasNext()) { - try { - assert.eq(val[j--].a, res.next().a, "B"); - } catch (e) { - debug("capped2 err " + j); - throw e; - } - } - res = tzz.find().sort({$natural: 1}); - assert(res.hasNext(), "C"); - while (res.hasNext()) - assert.eq(val[++j].a, res.next().a, "D"); - assert.eq(j, i, "E"); -} - -function checkDecreasing(i) { - res = tzz.find().sort({$natural: -1}); - assert(res.hasNext(), "F"); - var j = i; - while (res.hasNext()) { - assert.eq(val[j++].a, res.next().a, "G"); - } - res = tzz.find().sort({$natural: 1}); - assert(res.hasNext(), "H"); - while (res.hasNext()) - assert.eq(val[--j].a, res.next().a, "I"); - assert.eq(j, i, "J"); -} - -for (i = 0;; ++i) { - debug("capped 2: " + i); - tzz.insert(val[i]); - var err = db.getLastError(); - if (err) { - debug(err); - debug(tzz.count()); - assert(i > 100, "K"); - break; - } - checkIncreasing(i); -} - -// drop and recreate. Test used to rely on the last insert emptying the collection, which it no -// longer does now that we rollback on failed inserts. 
-tzz.drop(); -db._dbCommand({create: "capped2", capped: true, size: 1000, $nExtents: 11}); - -for (i = 600; i >= 0; --i) { - debug("capped 2: " + i); - tzz.insert(val[i]); - checkDecreasing(i); -} diff --git a/jstests/mmap_v1/capped3.js b/jstests/mmap_v1/capped3.js deleted file mode 100644 index b01bc843c2c..00000000000 --- a/jstests/mmap_v1/capped3.js +++ /dev/null @@ -1,55 +0,0 @@ -t = db.jstests_capped3; -t2 = db.jstests_capped3_clone; -t.drop(); -t2.drop(); -for (i = 0; i < 1000; ++i) { - t.save({i: i}); -} -assert.commandWorked(db.runCommand({ - cloneCollectionAsCapped: "jstests_capped3", - toCollection: "jstests_capped3_clone", - size: 100000 -}), - "A"); -c = t2.find(); -for (i = 0; i < 1000; ++i) { - assert.eq(i, c.next().i, "B"); -} -assert(!c.hasNext(), "C"); - -t.drop(); -t2.drop(); - -for (i = 0; i < 1000; ++i) { - t.save({i: i}); -} -assert.commandWorked(db.runCommand({ - cloneCollectionAsCapped: "jstests_capped3", - toCollection: "jstests_capped3_clone", - size: 1000 -}), - "D"); -c = t2.find().sort({$natural: -1}); -i = 999; -while (c.hasNext()) { - assert.eq(i--, c.next().i, "E"); -} -// print( "i: " + i ); -var str = tojson(t2.stats()); -// print( "stats: " + tojson( t2.stats() ) ); -assert(i < 990, "F"); - -t.drop(); -t2.drop(); - -for (i = 0; i < 1000; ++i) { - t.save({i: i}); -} -assert.commandWorked(t.convertToCapped(1000), "G"); -c = t.find().sort({$natural: -1}); -i = 999; -while (c.hasNext()) { - assert.eq(i--, c.next().i, "H"); -} -assert(i < 990, "I"); -assert(i > 900, "J"); diff --git a/jstests/mmap_v1/capped7.js b/jstests/mmap_v1/capped7.js deleted file mode 100644 index a5887978dfb..00000000000 --- a/jstests/mmap_v1/capped7.js +++ /dev/null @@ -1,89 +0,0 @@ -// Test capped collection truncate via 'emptycapped' command - -Random.setRandomSeed(); - -db.capped7.drop(); -db._dbCommand({create: "capped7", capped: true, size: 1000, $nExtents: 11}); -tzz = db.capped7; - -var ten = new Array(11).toString().replace(/,/g, "-"); - -count = 0; - -/** - * Insert new documents until the capped collection loops and the document - * count doesn't increase on insert. - */ -function insertUntilFull() { - count = tzz.count(); - var j = 0; - while (1) { - tzz.save({i: ten, j: j++}); - var newCount = tzz.count(); - if (count == newCount) { - break; - } - count = newCount; - } -} - -insertUntilFull(); - -// oldCount == count before empty -oldCount = count; - -assert.eq.automsg("11", "tzz.stats().numExtents"); - -// oldSize == size before empty -var oldSize = tzz.stats().storageSize; - -assert.commandWorked(db._dbCommand({emptycapped: "capped7"})); - -// check that collection storage parameters are the same after empty -assert.eq.automsg("11", "tzz.stats().numExtents"); -assert.eq.automsg("oldSize", "tzz.stats().storageSize"); - -// check that the collection is empty after empty -assert.eq.automsg("0", "tzz.find().itcount()"); -assert.eq.automsg("0", "tzz.count()"); - -// check that we can reuse the empty collection, inserting as many documents -// as we were able to the first time through. 
-insertUntilFull(); -assert.eq.automsg("oldCount", "count"); -assert.eq.automsg("oldCount", "tzz.find().itcount()"); -assert.eq.automsg("oldCount", "tzz.count()"); - -assert.eq.automsg("11", "tzz.stats().numExtents"); -var oldSize = tzz.stats().storageSize; - -assert.commandWorked(db._dbCommand({emptycapped: "capped7"})); - -// check that the collection storage parameters are unchanged after another empty -assert.eq.automsg("11", "tzz.stats().numExtents"); -assert.eq.automsg("oldSize", "tzz.stats().storageSize"); - -// insert an arbitrary number of documents -var total = Random.randInt(2000); -for (var j = 1; j <= total; ++j) { - tzz.save({i: ten, j: j}); - // occasionally check that only the oldest documents are removed to make room - // for the newest documents - if (Random.rand() > 0.95) { - assert.automsg("j >= tzz.count()"); - assert.eq.automsg("tzz.count()", "tzz.find().itcount()"); - var c = tzz.find().sort({$natural: -1}); - var k = j; - assert.automsg("c.hasNext()"); - while (c.hasNext()) { - assert.eq.automsg("c.next().j", "k--"); - } - // check the same thing with a reverse iterator as well - var c = tzz.find().sort({$natural: 1}); - assert.automsg("c.hasNext()"); - while (c.hasNext()) { - assert.eq.automsg("c.next().j", "++k"); - } - assert.eq.automsg("j", "k"); - } -} diff --git a/jstests/mmap_v1/capped8.js b/jstests/mmap_v1/capped8.js deleted file mode 100644 index 78b9d1b2017..00000000000 --- a/jstests/mmap_v1/capped8.js +++ /dev/null @@ -1,109 +0,0 @@ -// Test NamespaceDetails::cappedTruncateAfter with empty extents - -Random.setRandomSeed(); - -t = db.jstests_capped8; - -function debug(x) { - // printjson( x ); -} - -/** Generate an object with a string field of specified length */ -function obj(size, x) { - return {X: x, a: new Array(size + 1).toString()}; -} - -function withinTwo(a, b) { - assert(Math.abs(a - b) <= 2, "not within one: " + a + ", " + b); -} - -var X = 0; - -/** - * Insert enough documents of the given size spec that the collection will - * contain only documents having this size spec. - */ -function insertManyRollingOver(objsize) { - // Add some variability, as the precise number can trigger different cases. - X++; - n = 250 + Random.randInt(10); - - assert(t.count() == 0 || t.findOne().X != X); - - for (i = 0; i < n; ++i) { - t.save(obj(objsize, X)); - debug(t.count()); - } - - if (t.findOne().X != X) { - printjson(t.findOne()); - print("\n\nERROR didn't roll over in insertManyRollingOver " + objsize); - print("approx amountwritten: " + (objsize * n)); - printjson(t.stats()); - assert(false); - } -} - -/** - * Insert some documents in such a way that there may be an empty extent, then - * truncate the capped collection. - */ -function insertAndTruncate(first) { - myInitialCount = t.count(); - // Insert enough documents to make the capped allocation loop over. - insertManyRollingOver(150); - myFiftyCount = t.count(); - // Insert documents that are too big to fit in the smaller extents. - insertManyRollingOver(3000); - myTwokCount = t.count(); - if (first) { - initialCount = myInitialCount; - fiftyCount = myFiftyCount; - twokCount = myTwokCount; - // Sanity checks for collection count - assert(fiftyCount > initialCount); - assert(fiftyCount > twokCount); - } else { - // Check that we are able to insert roughly the same number of documents - // after truncating. The exact values are slightly variable as a result - // of the capped allocation algorithm and where the remaining entry is. 
- withinTwo(initialCount, myInitialCount); - withinTwo(fiftyCount, myFiftyCount); - withinTwo(twokCount, myTwokCount); - } - count = t.count(); - // Check that we can truncate the collection successfully. - assert.commandWorked(db.runCommand({captrunc: "jstests_capped8", n: count - 1, inc: false})); - assert.eq(1, t.count()); -} - -/** Test truncating and subsequent inserts */ -function testTruncate() { - insertAndTruncate(true); - insertAndTruncate(false); - insertAndTruncate(false); -} - -var pass = 1; - -print("pass " + pass++); -t.drop(); -db._dbCommand({create: "jstests_capped8", capped: true, $nExtents: [10000, 10000, 4000]}); -testTruncate(); - -print("pass " + pass++); -t.drop(); -db._dbCommand({create: "jstests_capped8", capped: true, $nExtents: [10000, 1000, 4000]}); -testTruncate(); - -print("pass " + pass++); -t.drop(); -db._dbCommand({create: "jstests_capped8", capped: true, $nExtents: [10000, 4000]}); -testTruncate(); - -print("pass " + pass++); -t.drop(); -db._dbCommand({create: "jstests_capped8", capped: true, $nExtents: [10000]}); -testTruncate(); - -t.drop(); diff --git a/jstests/mmap_v1/capped_max.js b/jstests/mmap_v1/capped_max.js deleted file mode 100644 index a30e8c2a1d3..00000000000 --- a/jstests/mmap_v1/capped_max.js +++ /dev/null @@ -1,28 +0,0 @@ - -t = db.capped_max; -sz = 1024 * 16; - -t.drop(); -db.createCollection(t.getName(), {capped: true, size: sz}); -assert.lt(Math.pow(2, 62), t.stats().max.floatApprox); - -t.drop(); -db.createCollection(t.getName(), {capped: true, size: sz, max: 123456}); -assert.eq(123456, t.stats().max); - -// create a collection with the max possible doc cap (2^31-2 docs) -t.drop(); -mm = Math.pow(2, 31) - 2; -db.createCollection(t.getName(), {capped: true, size: sz, max: mm}); -assert.eq(mm, t.stats().max); - -// create a collection with the 'no max' value (2^31-1 docs) -t.drop(); -mm = Math.pow(2, 31) - 1; -db.createCollection(t.getName(), {capped: true, size: sz, max: mm}); -assert.eq(NumberLong("9223372036854775807"), t.stats().max); - -t.drop(); -res = db.createCollection(t.getName(), {capped: true, size: sz, max: Math.pow(2, 31)}); -assert.eq(0, res.ok, tojson(res)); -assert.eq(0, t.stats().ok); diff --git a/jstests/mmap_v1/capped_server13912.js b/jstests/mmap_v1/capped_server13912.js deleted file mode 100644 index 438c2b17b34..00000000000 --- a/jstests/mmap_v1/capped_server13912.js +++ /dev/null @@ -1,10 +0,0 @@ -// SERVER-13912 Capped collections with size=0 are promoted to the minimum Extent size -var name = "capped_server13912"; -var minExtentSize = 0x1000; // from ExtentManager::minSize() - -var t = db.getCollection(name); -t.drop(); - -db.createCollection(name, {capped: true, size: 0}); - -assert.eq(t.stats().storageSize, minExtentSize); diff --git a/jstests/mmap_v1/capped_server2639.js b/jstests/mmap_v1/capped_server2639.js deleted file mode 100644 index 751699b58b7..00000000000 --- a/jstests/mmap_v1/capped_server2639.js +++ /dev/null @@ -1,25 +0,0 @@ - -name = "server2639"; - -t = db.getCollection(name); -t.drop(); - -db.createCollection(name, {capped: true, size: 1}); - -size = t.stats().storageSize; - -bigString = ""; -while (bigString.length < size) - bigString += "."; - -t.insert({x: 1}); - -var res = t.insert({x: 2, bigString: bigString}); -assert.writeError(res); - -assert.eq(1, t.count()); // make sure small doc didn't get deleted -assert.eq(1, t.findOne().x); - -// make sure can still insert -t.insert({x: 2}); -assert.eq(2, t.count()); diff --git a/jstests/mmap_v1/capped_server7543.js 
b/jstests/mmap_v1/capped_server7543.js deleted file mode 100644 index 625f62cd4a7..00000000000 --- a/jstests/mmap_v1/capped_server7543.js +++ /dev/null @@ -1,10 +0,0 @@ - -mydb = db.getSisterDB("capped_server7543"); -mydb.dropDatabase(); - -mydb.createCollection("foo", {capped: true, size: 12288}); - -assert.eq(12288, mydb.foo.stats().storageSize); -assert.eq(1, mydb.foo.validate(true).extentCount); - -mydb.dropDatabase(); diff --git a/jstests/mmap_v1/collmod.js b/jstests/mmap_v1/collmod.js deleted file mode 100644 index 0ac6e98df60..00000000000 --- a/jstests/mmap_v1/collmod.js +++ /dev/null @@ -1,81 +0,0 @@ -// Basic js tests for the collMod command. -// Test setting the usePowerOf2Sizes flag, and modifying TTL indexes. - -function debug(x) { - // printjson( x ); -} - -var coll = "collModTest"; -var t = db.getCollection(coll); -t.drop(); - -db.createCollection(coll); - -// Verify the new collection has userFlags set to 1 -printjson(t.stats()); -assert.eq(t.stats().userFlags, 1, "fresh collection doesn't have userFlags = 1 "); - -// Modify the collection with the usePowerOf2Sizes flag. Verify userFlags now = 0. -var res = db.runCommand({"collMod": coll, "usePowerOf2Sizes": false}); -debug(res); -assert.eq(res.ok, 1, "collMod failed"); -assert.eq(t.stats().userFlags, 0, "modified collection should have userFlags = 0 "); -var nso = db.system.namespaces.findOne({name: t.getFullName()}); -debug(nso); -assert.eq(0, nso.options.flags, "options didn't sync to system.namespaces: " + tojson(nso)); - -// Try to modify it with some unrecognized value -var res = db.runCommand({"collMod": coll, "unrecognized": true}); -debug(res); -assert.eq(res.ok, 0, "collMod shouldn't return ok with unrecognized value"); - -// add a TTL index -t.ensureIndex({a: 1}, {"expireAfterSeconds": 50}); -assert.eq(1, db.system.indexes.count({key: {a: 1}, expireAfterSeconds: 50}), "TTL index not added"); - -// try to modify it with a bad key pattern -var res = - db.runCommand({"collMod": coll, "index": {"keyPattern": "bad", "expireAfterSeconds": 100}}); -debug(res); -assert.eq(0, res.ok, "mod shouldn't work with bad keypattern"); - -// try to modify it without expireAfterSeconds field -var res = db.runCommand({"collMod": coll, "index": {"keyPattern": {a: 1}}}); -debug(res); -assert.eq(0, res.ok, "TTL mod shouldn't work without expireAfterSeconds"); - -// try to modify it with a non-numeric expireAfterSeconds field -var res = - db.runCommand({"collMod": coll, "index": {"keyPattern": {a: 1}, "expireAfterSeconds": "100"}}); -debug(res); -assert.eq(0, res.ok, "TTL mod shouldn't work with non-numeric expireAfterSeconds"); - -// this time modifying should finally work -var res = - db.runCommand({"collMod": coll, "index": {"keyPattern": {a: 1}, "expireAfterSeconds": 100}}); -debug(res); -assert.eq( - 1, db.system.indexes.count({key: {a: 1}, expireAfterSeconds: 100}), "TTL index not modified"); - -// try to modify a faulty TTL index with a non-numeric expireAfterSeconds field -t.dropIndex({a: 1}); -t.ensureIndex({a: 1}, {"expireAfterSeconds": "50"}); -var res = - db.runCommand({"collMod": coll, "index": {"keyPattern": {a: 1}, "expireAfterSeconds": 100}}); -debug(res); -assert.eq(0, res.ok, "shouldn't be able to modify faulty index spec"); - -// try with new index, this time set both expireAfterSeconds and the usePowerOf2Sizes flag -t.dropIndex({a: 1}); -t.ensureIndex({a: 1}, {"expireAfterSeconds": 50}); -var res = db.runCommand({ - "collMod": coll, - "usePowerOf2Sizes": true, - "index": {"keyPattern": {a: 1}, "expireAfterSeconds": 
100} -}); -debug(res); -assert.eq(1, res.ok, "should be able to modify both userFlags and expireAfterSeconds"); -assert.eq(t.stats().userFlags, 1, "userflags should be 1 now"); -assert.eq(1, - db.system.indexes.count({key: {a: 1}, expireAfterSeconds: 100}), - "TTL index should be 100 now"); diff --git a/jstests/mmap_v1/compact.js b/jstests/mmap_v1/compact.js deleted file mode 100644 index 91ce1ba5dbc..00000000000 --- a/jstests/mmap_v1/compact.js +++ /dev/null @@ -1,83 +0,0 @@ -// compact.js - -var mydb = db.getSiblingDB('compact'); -t = mydb.compacttest; -t.drop(); - -// Assert that you can't compact a capped collection in MMAP. -assert.commandWorked(mydb.createCollection(t.getName(), {size: 4096, capped: true})); -assert.commandFailedWithCode(t.runCommand('compact'), ErrorCodes.CommandNotSupported); -t.drop(); // uncap the collection. - -t.insert({x: 3}); -t.insert({x: 3}); -t.insert({x: 5}); -t.insert({x: 4, z: 2, k: 'aaa'}); -t.insert({x: 4, z: 2, k: 'aaa'}); -t.insert({x: 4, z: 2, k: 'aaa'}); -t.insert({x: 4, z: 2, k: 'aaa'}); -t.insert({x: 4, z: 2, k: 'aaa'}); -t.insert({x: 4, z: 2, k: 'aaa'}); -t.ensureIndex({x: 1}); - -print("1"); - -var res = mydb.runCommand({compact: 'compacttest', dev: true, force: true}); -printjson(res); -assert(res.ok); -assert(t.count() == 9); -var v = t.validate(true); -assert(v.ok); -assert(v.extentCount == 1); -assert(v.deletedCount == 1); -assert(t.getIndexes().length == 2); -var ssize = t.stats().storageSize; - -print("2"); -res = mydb.runCommand({compact: 'compacttest', dev: true, paddingBytes: 1000, force: true}); -assert(res.ok); -assert(t.count() == 9); -var v = t.validate(true); -assert(v.ok); -assert( - t.stats().storageSize > ssize, - "expected more storage given padding is higher. however it rounds off so if something changed this could be"); -// printjson(t.stats()); - -print("z"); - -t.insert({x: 4, z: 2, k: {a: "", b: ""}}); -t.insert({x: 4, z: 2, k: {a: "", b: ""}}); -t.insert({x: 4, z: 2, k: {a: "", b: ""}}); -t.insert({x: 4, z: 2, k: {a: "", b: ""}}); -t.insert({x: 4, z: null, k: {f: "", b: ""}}); -t.insert({x: 4, z: null, k: {c: ""}}); -t.insert({x: 4, z: null, k: {h: ""}}); -t.insert({x: 4, z: null}); -t.insert({x: 4, z: 3}); -t.insert({x: 4, z: 2, k: {a: "", b: ""}}); -t.insert({x: 4, z: null, k: {c: ""}}); -t.insert({x: 4, z: null, k: {c: ""}}); -t.insert({x: 4, z: 3, k: {c: ""}}); - -t.ensureIndex({z: 1, k: 1}); -// t.ensureIndex({ z: 1, k: 1 }, { unique: true }); -// t.ensureIndex({ z: 1, k: 1 }, { dropDups: true, unique:true }); - -res = mydb.runCommand({compact: 'compacttest', dev: true, paddingFactor: 1.2, force: true}); -printjson(res); -assert(res.ok); -assert(t.count() > 13); -var v = t.validate(true); -assert(v.ok); - -print("3"); - -// works on an empty collection? 
-t.remove({}); -assert(mydb.runCommand({compact: 'compacttest', dev: true, force: true}).ok); -assert(t.count() == 0); -v = t.validate(true); -assert(v.ok); -assert(v.extentCount == 1); -assert(t.getIndexes().length == 3); diff --git a/jstests/mmap_v1/compactPreservePadding.js b/jstests/mmap_v1/compactPreservePadding.js deleted file mode 100644 index 211ecd5a087..00000000000 --- a/jstests/mmap_v1/compactPreservePadding.js +++ /dev/null @@ -1,26 +0,0 @@ -// test preservePadding - -var mydb = db.getSiblingDB('compactPreservePadding'); -var collName = "compactPreservePadding"; -var t = mydb.getCollection(collName); -t.drop(); - -// use larger keyname to avoid hitting an edge case with extents -for (i = 0; i < 10000; i++) { - t.insert({useLargerKeyName: i}); -} - -// remove half the entries -t.remove({useLargerKeyName: {$mod: [2, 0]}}); -printjson(t.stats()); -originalSize = t.stats().size; -originalStorage = t.stats().storageSize; - -// compact! -mydb.runCommand({compact: collName, preservePadding: true}); -printjson(t.stats()); - -// object sizes ('size') should be the same (unless we hit an edge case involving extents, which -// this test doesn't) and storage size should shrink -assert(originalSize == t.stats().size); -assert(originalStorage > t.stats().storageSize); diff --git a/jstests/mmap_v1/datasize.js b/jstests/mmap_v1/datasize.js deleted file mode 100644 index d12527a8922..00000000000 --- a/jstests/mmap_v1/datasize.js +++ /dev/null @@ -1,50 +0,0 @@ -// test end-to-end data allocation without powerOf2Sizes enabled -f = db.jstests_datasize; -f.drop(); - -assert.eq(0, db.runCommand({datasize: "test.jstests_datasize"}).size); -f.save({qq: 'c'}); -printjson(f.stats()); -assert.eq(48, db.runCommand({datasize: "test.jstests_datasize"}).size); -f.save({qq: 'fg'}); -printjson(f.stats()); -assert.eq(96, db.runCommand({datasize: "test.jstests_datasize"}).size); - -f.drop(); - -f.ensureIndex({qq: 1}); -assert.eq(0, db.runCommand({datasize: "test.jstests_datasize"}).size); -f.save({qq: 'c'}); -printjson(f.stats()); -assert.eq(48, db.runCommand({datasize: "test.jstests_datasize"}).size); -f.save({qq: 'fg'}); -printjson(f.stats()); -assert.eq(96, db.runCommand({datasize: "test.jstests_datasize"}).size); - -assert.eq(0, db.runCommand({datasize: "test.jstests_datasize", min: {qq: 'a'}}).ok); - -assert.eq(96, - db.runCommand({datasize: "test.jstests_datasize", min: {qq: 'a'}, max: {qq: 'z'}}).size); -assert.eq(48, - db.runCommand({datasize: "test.jstests_datasize", min: {qq: 'a'}, max: {qq: 'd'}}).size); -assert.eq( - 48, - db.runCommand( - {datasize: "test.jstests_datasize", min: {qq: 'a'}, max: {qq: 'd'}, keyPattern: {qq: 1}}) - .size); -assert.eq( - 48, - db.runCommand( - {datasize: "test.jstests_datasize", min: {qq: 'd'}, max: {qq: 'z'}, keyPattern: {qq: 1}}) - .size); - -assert.eq(0, - db.runCommand({datasize: "test.jstests_datasize", min: {qq: 'c'}, max: {qq: 'c'}}).size); -assert.eq(48, - db.runCommand({datasize: "test.jstests_datasize", min: {qq: 'c'}, max: {qq: 'd'}}).size); - -assert.eq( - 0, - db.runCommand( - {datasize: "test.jstests_datasize", min: {qq: 'a'}, max: {qq: 'd'}, keyPattern: {a: 1}}) - .ok); diff --git a/jstests/mmap_v1/datasize3.js b/jstests/mmap_v1/datasize3.js deleted file mode 100644 index da5d89384b2..00000000000 --- a/jstests/mmap_v1/datasize3.js +++ /dev/null @@ -1,29 +0,0 @@ - -t = db.datasize3; -t.drop(); - -function run(options) { - var c = {dataSize: "test.datasize3"}; - if (options) - Object.extend(c, options); - return db.runCommand(c); -} - -t.insert({x: 1}); - 
-a = run(); -b = run({estimate: true}); -printjson(t.stats()); -assert.eq(a.size, b.size); - -t.ensureIndex({x: 1}); - -for (i = 2; i < 100; i++) - t.insert({x: i}); - -a = run({min: {x: 20}, max: {x: 50}}).size; -b = run({min: {x: 20}, max: {x: 50}, estimate: true}).size; - -ratio = Math.min(a, b) / Math.max(a, b); - -assert.lt(0.97, ratio, "sizes not equal a: " + a + " b: " + b); diff --git a/jstests/mmap_v1/disk_reuse1.js b/jstests/mmap_v1/disk_reuse1.js deleted file mode 100644 index 6dc1a1debe3..00000000000 --- a/jstests/mmap_v1/disk_reuse1.js +++ /dev/null @@ -1,52 +0,0 @@ -(function() { - "use strict"; - const conn = MongoRunner.runMongod({smallfiles: "", nojournal: ""}); - assert.neq(null, conn, "mongod failed to start."); - db = conn.getDB("test"); - const t = db.disk_reuse1; - t.drop(); - - const N = 10000; - - function k() { - return Math.floor(Math.random() * N); - } - - let s = ""; - while (s.length < 1024) - s += "abc"; - - var bulk = t.initializeUnorderedBulkOp(); - for (var i = 0; i < N; i++) { - bulk.insert({_id: i, s: s}); - } - assert.writeOK(bulk.execute()); - - const orig = t.stats(); - - t.remove({}); - - bulk = t.initializeUnorderedBulkOp(); - for (let i = 0; i < N; i++) { - bulk.insert({_id: i, s: s}); - } - assert.writeOK(bulk.execute()); - - assert.eq(orig.storageSize, t.stats().storageSize, "A"); - - for (let j = 0; j < 100; j++) { - for (let i = 0; i < N; i++) { - bulk = t.initializeUnorderedBulkOp(); - var r = Math.random(); - if (r > .5) - bulk.find({_id: i}).remove(); - else - bulk.find({_id: i}).upsert().updateOne({_id: i, s: s}); - } - - assert.writeOK(bulk.execute()); - assert.eq(orig.storageSize, t.stats().storageSize, "B" + j); - } - - MongoRunner.stopMongod(conn); -})(); diff --git a/jstests/mmap_v1/drop.js b/jstests/mmap_v1/drop.js deleted file mode 100644 index efb50a3bd08..00000000000 --- a/jstests/mmap_v1/drop.js +++ /dev/null @@ -1,24 +0,0 @@ -var coll = db.jstests_drop; - -coll.drop(); - -res = coll.runCommand("drop"); -assert(!res.ok, tojson(res)); - -assert.eq(0, db.system.indexes.find({ns: coll + ""}).count(), "A"); -coll.save({}); -assert.eq(1, db.system.indexes.find({ns: coll + ""}).count(), "B"); -coll.ensureIndex({a: 1}); -assert.eq(2, db.system.indexes.find({ns: coll + ""}).count(), "C"); -assert.commandWorked(db.runCommand({drop: coll.getName()})); -assert.eq(0, db.system.indexes.find({ns: coll + ""}).count(), "D"); - -coll.ensureIndex({a: 1}); -assert.eq(2, db.system.indexes.find({ns: coll + ""}).count(), "E"); -assert.commandWorked(db.runCommand({deleteIndexes: coll.getName(), index: "*"}), - "delete indexes A"); -assert.eq(1, db.system.indexes.find({ns: coll + ""}).count(), "G"); - -// make sure we can still use it -coll.save({}); -assert.eq(1, coll.find().hint("_id_").toArray().length, "H"); diff --git a/jstests/mmap_v1/dur_remove_old_journals.js b/jstests/mmap_v1/dur_remove_old_journals.js deleted file mode 100644 index 0d1b930815b..00000000000 --- a/jstests/mmap_v1/dur_remove_old_journals.js +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Test that old journal files are eventually deleted. - */ - -if (db.serverBuildInfo().bits == 32) { - print("skip on 32 bit systems"); -} else { - var conn = MongoRunner.runMongod({ - journal: "", - smallfiles: "", - syncdelay: 5, // seconds between fsyncs. - }); - db = conn.getDB("test"); - - // listFiles can return Access Denied on Windows if the file - // is deleted at the same time as listFiles is run, in this - // case we sleep and retry. 
- function listFilesRetry(path) { - for (var i = 0; i < 5; ++i) { - try { - return listFiles(path); - } catch (e) { - print("Exception during listFiles: " + e); - // Sleep for 10 milliseconds - sleep(10); - } - } - - throw new Error("listFilesRetry failed"); - } - - // Returns true if j._0 exists. - function firstJournalFileExists() { - var files = listFilesRetry(conn.dbpath + "/journal"); - for (var i = 0; i < files.length; i++) { - if (files[i].baseName === "j._0") { - return true; - } - } - return false; - } - - // Represents the cummulative total of the number of journal files created. - function getLatestJournalFileNum() { - var files = listFilesRetry(conn.dbpath + "/journal"); - var latest = 0; - files.forEach(function(file) { - if (file.baseName !== "lsn") { - var fileNum = NumberInt(file.baseName[file.baseName.length - 1]); - latest = Math.max(latest, fileNum); - } - }); - return latest; - } - - var stringSize = 1024 * 1024; - var longString = new Array(stringSize).join("x"); - - // Insert some data to create the first journal file. - var numInserted = 0; - while (numInserted < 100) { - db.foo.insert({_id: numInserted++, s: longString}); - } - assert.soon(firstJournalFileExists, "Should have created a journal file"); - - // Do writes until the first journal file is deleted, or we give up waiting. - var maxJournalFiles = 10; - while (firstJournalFileExists() && getLatestJournalFileNum() < maxJournalFiles) { - db.foo.insert({_id: numInserted++, s: longString}); - - if (numInserted % 100 == 0) { - jsTestLog("numInserted: " + numInserted); - db.adminCommand({fsync: 1}); - db.foo.remove({}); - db.adminCommand({fsync: 1}); - gc(); - } - } - - assert(!firstJournalFileExists(), "Expected to have deleted the first journal file by now"); - MongoRunner.stopMongod(conn); -} diff --git a/jstests/mmap_v1/extent.js b/jstests/mmap_v1/extent.js deleted file mode 100644 index 35b9213dca4..00000000000 --- a/jstests/mmap_v1/extent.js +++ /dev/null @@ -1,10 +0,0 @@ -t = db.reclaimExtentsTest; -t.drop(); - -for (var i = 0; i < 50; i++) { // enough iterations to break 32 bit. 
- db.createCollection('reclaimExtentsTest', {size: 100000000}); - t.insert({x: 1}); - assert(t.count() == 1); - t.drop(); -} -t.drop(); diff --git a/jstests/mmap_v1/extent2.js b/jstests/mmap_v1/extent2.js deleted file mode 100644 index 269ac645986..00000000000 --- a/jstests/mmap_v1/extent2.js +++ /dev/null @@ -1,34 +0,0 @@ - - -mydb = db.getSisterDB("test_extent2"); -mydb.dropDatabase(); - -t = mydb.foo; - -function insert() { - t.insert({_id: 1, x: 1}); - t.insert({_id: 2, x: 1}); - t.insert({_id: 3, x: 1}); - t.ensureIndex({x: 1}); -} - -insert(); -t.drop(); - -start = mydb.stats(); - -for (i = 0; i < 100; i++) { - insert(); - t.drop(); -} - -end = mydb.stats(); - -printjson(start); -printjson(end); -assert.eq(start.extentFreeList.num, end.extentFreeList.num); - -// 3: 1 data, 1 _id idx, 1 x idx -// used to be 4, but we no longer waste an extent for the freelist -assert.eq(3, start.extentFreeList.num); -assert.eq(3, end.extentFreeList.num); diff --git a/jstests/mmap_v1/index_check1.js b/jstests/mmap_v1/index_check1.js deleted file mode 100644 index 609eb2e8fab..00000000000 --- a/jstests/mmap_v1/index_check1.js +++ /dev/null @@ -1,31 +0,0 @@ - -db.somecollection.drop(); - -assert.eq(0, db.system.namespaces.find({name: /somecollection/}).length(), "1"); - -db.somecollection.save({a: 1}); - -assert.eq(2, db.system.namespaces.find({name: /somecollection/}).length(), "2"); - -db.somecollection.ensureIndex({a: 1}); - -var z = db.system.namespaces.find({name: /somecollection/}).length(); -assert.gte(z, 1, "3"); - -if (z == 1) - print("warning: z==1, should only happen with alternate storage engines"); - -db.somecollection.drop(); - -assert.eq(0, db.system.namespaces.find({name: /somecollection/}).length(), "4"); - -db.somecollection.save({a: 1}); - -assert.eq(2, db.system.namespaces.find({name: /somecollection/}).length(), "5"); - -db.somecollection.ensureIndex({a: 1}); - -var x = db.system.namespaces.find({name: /somecollection/}).length(); -assert(x == 2 || x == z, "6"); - -assert(db.somecollection.validate().valid, "7"); diff --git a/jstests/mmap_v1/indexh.js b/jstests/mmap_v1/indexh.js deleted file mode 100644 index 50d3e40e11e..00000000000 --- a/jstests/mmap_v1/indexh.js +++ /dev/null @@ -1,41 +0,0 @@ -// This should get skipped when testing replication - -t = db.jstests_indexh; - -function debug(t) { - print(t); -} - -function extraDebug() { - // printjson( db.stats() ); - // db.printCollectionStats(); -} - -// index extent freeing -t.drop(); -t.save({}); -var s1 = db.stats().dataSize; -debug("s1: " + s1); -extraDebug(); -t.ensureIndex({a: 1}); -var s2 = db.stats().dataSize; -debug("s2: " + s2); -assert.automsg("s1 < s2"); -t.dropIndex({a: 1}); -var s3 = db.stats().dataSize; -debug("s3: " + s3); -extraDebug(); -assert.eq.automsg("s1", "s3"); - -// index node freeing -t.drop(); -t.ensureIndex({a: 1}); -for (i = 'a'; i.length < 500; i += 'a') { - t.save({a: i}); -} -var s4 = db.stats().indexSize; -debug("s4: " + s4); -t.remove({}); -var s5 = db.stats().indexSize; -debug("s5: " + s5); -assert.automsg("s5 < s4");
\ No newline at end of file diff --git a/jstests/mmap_v1/indexi.js b/jstests/mmap_v1/indexi.js deleted file mode 100644 index 2d6c501a31e..00000000000 --- a/jstests/mmap_v1/indexi.js +++ /dev/null @@ -1,22 +0,0 @@ -// Test that client cannot access index namespaces. - -t = db.jstests_indexi; -t.drop(); - -idx = db.jstests_indexi.$_id_; - -// Test that accessing the index namespace fails. -function checkFailingOperations() { - assert.writeError(idx.insert({x: 1})); - assert.writeError(idx.update({x: 1}, {x: 2})); - assert.writeError(idx.remove({x: 1})); - assert.commandFailed(idx.runCommand('compact')); - assert.commandFailed(idx.ensureIndex({x: 1})); -} - -// Check with base collection not present. -checkFailingOperations(); -t.save({}); - -// Check with base collection present. -checkFailingOperations(); diff --git a/jstests/mmap_v1/list_collections2.js b/jstests/mmap_v1/list_collections2.js deleted file mode 100644 index 928d34ad84b..00000000000 --- a/jstests/mmap_v1/list_collections2.js +++ /dev/null @@ -1,43 +0,0 @@ -// Test the listCollections command and system.namespaces - -mydb = db.getSisterDB("list_collections1"); -mydb.dropDatabase(); - -mydb.foo.insert({x: 5}); - -mydb.runCommand({create: "bar", temp: true}); - -res = mydb.runCommand("listCollections"); -collections = new DBCommandCursor(db, res).toArray(); - -bar = collections.filter(function(x) { - return x.name == "bar"; -})[0]; -foo = collections.filter(function(x) { - return x.name == "foo"; -})[0]; - -assert(bar); -assert(foo); - -assert.eq(bar.name, mydb.bar.getName()); -assert.eq(foo.name, mydb.foo.getName()); - -assert(mydb.bar.temp, tojson(bar)); - -getCollectionName = function(infoObj) { - return infoObj.name; -}; - -assert.eq(mydb._getCollectionInfosSystemNamespaces().map(getCollectionName), - mydb._getCollectionInfosCommand().map(getCollectionName)); - -assert.eq(mydb.getCollectionInfos().map(getCollectionName), - mydb._getCollectionInfosCommand().map(getCollectionName)); - -// Test the listCollections command and querying system.namespaces when a filter is specified. 
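For reference, the filtered lookup this deleted test exercised remains available through the listCollections command alone once system.namespaces goes away. A minimal sketch, assuming the test's list_collections1 database still holds a collection named "foo":

// Minimal sketch: filter listCollections by name and read the result from a command cursor.
var sketchDB = db.getSiblingDB("list_collections1");
var res = assert.commandWorked(sketchDB.runCommand({listCollections: 1, filter: {name: "foo"}}));
var names = new DBCommandCursor(sketchDB, res).toArray().map(function(info) {
    return info.name;
});
assert.eq(["foo"], names);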
-assert.eq(mydb._getCollectionInfosSystemNamespaces({name: "foo"}).map(getCollectionName), - mydb._getCollectionInfosCommand({name: "foo"}).map(getCollectionName), - "listCollections command and querying system.namespaces returned different results"); - -mydb.dropDatabase(); diff --git a/jstests/mmap_v1/list_indexes2.js b/jstests/mmap_v1/list_indexes2.js deleted file mode 100644 index 71d15b3e900..00000000000 --- a/jstests/mmap_v1/list_indexes2.js +++ /dev/null @@ -1,14 +0,0 @@ -// test listIndexes compatability command and system.indexes - -t = db.list_indexes2; -t.drop(); - -t.insert({x: 1}); - -assert.eq(t._getIndexesSystemIndexes(), t._getIndexesCommand()); - -t.ensureIndex({x: 1}); - -assert.eq(t._getIndexesSystemIndexes(), t._getIndexesCommand()); - -assert.eq(t.getIndexes(), t._getIndexesCommand()); diff --git a/jstests/mmap_v1/repair_cursor1.js b/jstests/mmap_v1/repair_cursor1.js deleted file mode 100644 index 0a01bed20f3..00000000000 --- a/jstests/mmap_v1/repair_cursor1.js +++ /dev/null @@ -1,19 +0,0 @@ - -t = db.repair_cursor1; -t.drop(); - -t.insert({x: 1}); -t.insert({x: 2}); - -res = t.runCommand("repairCursor"); -assert(res.ok, tojson(res)); - -t2 = db.repair_cursor1a; -t2.drop(); - -cursor = new DBCommandCursor(db, res); -cursor.forEach(function(z) { - t2.insert(z); -}); -assert.eq(t.find().itcount(), t2.find().itcount()); -assert.eq(t.hashAllDocs(), t2.hashAllDocs()); diff --git a/jstests/mmap_v1/reverse_empty_extent.js b/jstests/mmap_v1/reverse_empty_extent.js deleted file mode 100644 index 9b2cb4a1002..00000000000 --- a/jstests/mmap_v1/reverse_empty_extent.js +++ /dev/null @@ -1,33 +0,0 @@ -// Test to make sure that a reverse cursor can correctly handle empty extents (SERVER-6980) - -// Create a collection with three small extents -db.jstests_reversecursor.drop(); -db.runCommand({"create": "jstests_reversecursor", $nExtents: [4096, 4096, 4096]}); - -// Function to check whether all three extents are non empty -function extentsSpanned() { - var extents = db.jstests_reversecursor.validate(true).extents; - return (extents[0].firstRecord != "null" && extents[1].firstRecord != "null" && - extents[2].firstRecord != "null"); -} - -// Insert enough documents to span all three extents -a = 0; -while (!extentsSpanned()) { - db.jstests_reversecursor.insert({a: a++}); -} - -// Delete all the elements in the middle -db.jstests_reversecursor.remove({a: {$gt: 0, $lt: a - 1}}); - -// Make sure the middle extent is empty and that both end extents are not empty -assert.eq(db.jstests_reversecursor.validate(true).extents[1].firstRecord, "null"); -assert.eq(db.jstests_reversecursor.validate(true).extents[1].lastRecord, "null"); -assert.neq(db.jstests_reversecursor.validate(true).extents[0].firstRecord, "null"); -assert.neq(db.jstests_reversecursor.validate(true).extents[0].lastRecord, "null"); -assert.neq(db.jstests_reversecursor.validate(true).extents[2].firstRecord, "null"); -assert.neq(db.jstests_reversecursor.validate(true).extents[2].lastRecord, "null"); - -// Make sure that we get the same number of elements for both the forward and reverse cursors -assert.eq(db.jstests_reversecursor.find().sort({$natural: 1}).toArray().length, 2); -assert.eq(db.jstests_reversecursor.find().sort({$natural: -1}).toArray().length, 2); diff --git a/jstests/mmap_v1/stats.js b/jstests/mmap_v1/stats.js deleted file mode 100644 index 9d1e95773b7..00000000000 --- a/jstests/mmap_v1/stats.js +++ /dev/null @@ -1,23 +0,0 @@ - -var statsDB = db.getSiblingDB("stats"); -statsDB.dropDatabase(); -var t = statsDB.stats1; 
- -t.save({a: 1}); - -assert.lt(0, t.dataSize(), "A"); -assert.lt(t.dataSize(), t.storageSize(), "B"); -assert.lt(0, t.totalIndexSize(), "C"); - -var stats = statsDB.stats(); -assert.gt(stats.fileSize, 0); -assert.eq(stats.dataFileVersion.major, 4); -assert.eq(stats.dataFileVersion.minor, 22); - -// test empty database; should be no dataFileVersion -statsDB.dropDatabase(); -var statsEmptyDB = statsDB.stats(); -assert.eq(statsEmptyDB.fileSize, 0); -assert.isnull(statsEmptyDB.dataFileVersion); - -statsDB.dropDatabase(); diff --git a/jstests/mmap_v1/syncdelay_overflow.js b/jstests/mmap_v1/syncdelay_overflow.js deleted file mode 100644 index 664e473d61b..00000000000 --- a/jstests/mmap_v1/syncdelay_overflow.js +++ /dev/null @@ -1,16 +0,0 @@ -/** - * A large `syncdelay` set via the command line or `setParameter` can cause a precision loss - * exception when being converted to a duration. This test exercises the protection of the command - * line `--syncdelay` parameter and calling `setParameter`. - */ -(function() { - var conn = MongoRunner.runMongod({storageEngine: 'mmapv1', syncdelay: 18446744073709552000}); - assert.eq(conn, null); - - conn = MongoRunner.runMongod({storageEngine: 'mmapv1'}); - assert.neq(conn, null); - var res = conn.adminCommand({setParameter: 1, 'syncdelay': 18446744073709552000}); - assert.commandFailedWithCode(res, 2); - assert.gt(res["errmsg"].indexOf("syncdelay must be between"), -1); - MongoRunner.stopMongod(conn); -})(); diff --git a/jstests/mmap_v1/touch1.js b/jstests/mmap_v1/touch1.js deleted file mode 100644 index 91f7d1378a5..00000000000 --- a/jstests/mmap_v1/touch1.js +++ /dev/null @@ -1,13 +0,0 @@ - -t = db.touch1; -t.drop(); - -t.insert({x: 1}); -t.ensureIndex({x: 1}); - -res = t.runCommand("touch"); -assert(!res.ok, tojson(res)); - -res = t.runCommand("touch", {data: true, index: true}); -assert.eq(1, res.data.numRanges, tojson(res)); -assert.eq(1, res.ok, tojson(res)); diff --git a/jstests/mmap_v1/update.js b/jstests/mmap_v1/update.js deleted file mode 100644 index 3e132ca666a..00000000000 --- a/jstests/mmap_v1/update.js +++ /dev/null @@ -1,41 +0,0 @@ - -asdf = db.getCollection("asdf"); -asdf.drop(); - -var txt = "asdf"; -for (var i = 0; i < 10; i++) { - txt = txt + txt; -} - -var iterations = _isWindows() ? 2500 : 5000; - -// fill db -for (var i = 1; i <= iterations; i++) { - var obj = {txt: txt}; - asdf.save(obj); - - var obj2 = { - txt: txt, - comments: [{num: i, txt: txt}, {num: [], txt: txt}, {num: true, txt: txt}] - }; - asdf.update(obj, obj2); - - if (i % 100 == 0) { - var c = asdf.count(); - assert.eq(c, i); - } -} - -assert(asdf.validate().valid); - -var stats = db.runCommand({collstats: "asdf"}); - -// some checks. want to check that padding factor is working; in addition this lets us do a little -// basic -// testing of the collstats command at the same time -assert(stats.count == iterations); -assert(stats.size < 140433012 * 5 && stats.size > 1000000); -assert(stats.numExtents < 20); -assert(stats.nindexes == 1); - -asdf.drop(); diff --git a/jstests/mmap_v1/use_power_of_2.js b/jstests/mmap_v1/use_power_of_2.js deleted file mode 100644 index b3db7077e1d..00000000000 --- a/jstests/mmap_v1/use_power_of_2.js +++ /dev/null @@ -1,54 +0,0 @@ -/* - * This test ensures that the usePowerOf2 user flag effectively reuses space. - * - * As of SERVER-15273 usePowerOf2 is silently ignored so the behavior is the same regardless. 
- */ - -// prepare a doc of 14K -var doc = {_id: new Object(), data: "a"}; -var bigDoc = {_id: new Object(), data: "a"}; - -while (doc.data.length < 14 * 1024) - doc.data += "a"; -while (bigDoc.data.length < 15 * 1024) - bigDoc.data += "a"; - -var collName = "usepower1"; -var t = db.getCollection(collName); - -function checkStorageSize(expectedSize, sameLoc) { - t.insert(doc); - assert.eq(t.stats().size, expectedSize, "size should be expected"); - - var oldLoc = t.find().showRecordId().toArray()[0].$recordId; - - // Remvoe smaller doc, insert a bigger one. - t.remove(doc); - t.insert(bigDoc); - - var newLoc = t.find().showRecordId().toArray()[0].$recordId; - - // Check the diskloc of two docs. - assert.eq(friendlyEqual(oldLoc, newLoc), sameLoc); -} - -t.drop(); -db.createCollection(collName); -var res = db.runCommand({"collMod": collName, "usePowerOf2Sizes": false}); -assert(res.ok, "collMod failed"); -checkStorageSize(16 * 1023, true); // 15344 = 14369 (bsonsize) + overhead - -t.drop(); -db.createCollection(collName); -var res = db.runCommand({"collMod": collName, "usePowerOf2Sizes": true}); -assert(res.ok, "collMod failed"); -checkStorageSize(16 * 1023, true); // power of 2 - -// Create collection with flag -t.drop(); -db.runCommand({"create": collName, "flags": 0}); -checkStorageSize(16 * 1023, true); - -t.drop(); -db.runCommand({"create": collName, "flags": 1}); -checkStorageSize(16 * 1023, true); // power of 2 diff --git a/jstests/mmap_v1/use_power_of_2_a.js b/jstests/mmap_v1/use_power_of_2_a.js deleted file mode 100644 index 3b8642f09f9..00000000000 --- a/jstests/mmap_v1/use_power_of_2_a.js +++ /dev/null @@ -1,36 +0,0 @@ -// test the newCollectionsUsePowerOf2Sizes param -function test(defaultMode) { - // default should be server default - db.a.drop(); - db.createCollection('a'); - assert.eq(db.a.stats().userFlags & 1, defaultMode); - - // explicitly turned off should be 0 - db.b.drop(); - db.createCollection('b', {usePowerOf2Sizes: false}); - assert.eq(db.b.stats().userFlags & 1, 0); - - // Capped collections now behave like regular collections in terms of userFlags. Previously they - // were always 0, unless collmod was used. - - // capped should obey default (even though it is ignored) - db.c.drop(); - db.createCollection('c', {capped: true, size: 10}); - assert.eq(db.c.stats().userFlags & 1, defaultMode); - - // capped explicitly off should be 0 - db.d.drop(); - db.createCollection('d', {capped: true, size: 10, usePowerOf2Sizes: false}); - assert.eq(db.d.stats().userFlags & 1, 0); - - // capped and ask explicitly for powerOf2 should be 1 - db.e.drop(); - db.createCollection('e', {capped: true, size: 10, usePowerOf2Sizes: true}); - assert.eq(db.e.stats().userFlags & 1, 1); -} - -assert.eq(db.adminCommand({getParameter: 1, newCollectionsUsePowerOf2Sizes: true}) - .newCollectionsUsePowerOf2Sizes, - true); - -test(1); diff --git a/jstests/multiVersion/do_upgrade_downgrade.js b/jstests/multiVersion/do_upgrade_downgrade.js index 02e6162495b..3a1fb33091b 100644 --- a/jstests/multiVersion/do_upgrade_downgrade.js +++ b/jstests/multiVersion/do_upgrade_downgrade.js @@ -320,9 +320,6 @@ replicaSetTest({shardsvr: ""}); // Do tests for standalones and replica sets started with --configsvr. - if (jsTest.options().storageEngine !== "mmapv1") { - // We don't allow starting config servers with the MMAP storage engine. 
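The guard being dropped here keyed off the storage engine reported by a running mongod. A minimal sketch of that style of check, using only serverStatus (the engine name is whatever the server was started with):

// Minimal sketch: read the storage engine name from serverStatus and branch on it.
var sketchConn = MongoRunner.runMongod({});
var engineName =
    assert.commandWorked(sketchConn.adminCommand({serverStatus: 1})).storageEngine.name;
if (engineName === "mmapv1") {
    jsTestLog("Skipping config server cases on mmapv1");
} else {
    jsTestLog("Running config server cases on " + engineName);
}
MongoRunner.stopMongod(sketchConn);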
- standaloneTest({configsvr: ""}); - replicaSetTest({configsvr: ""}); - } + standaloneTest({configsvr: ""}); + replicaSetTest({configsvr: ""}); })(); diff --git a/jstests/multiVersion/mixed_storage_version_replication.js b/jstests/multiVersion/mixed_storage_version_replication.js deleted file mode 100644 index d9f2e880e0c..00000000000 --- a/jstests/multiVersion/mixed_storage_version_replication.js +++ /dev/null @@ -1,730 +0,0 @@ -/* - * Generally test that replica sets still function normally with mixed versions and mixed storage - * engines. This test will set up a replica set containing members of various versions and - * storage engines, do a bunch of random work, and assert that it replicates the same way on all - * nodes. - */ - -// This test randomly generates operations, which may include direct writes against -// config.transactions, which are not allowed to run under a session. -TestData.disableImplicitSessions = true; - -load('jstests/libs/parallelTester.js'); -load("jstests/replsets/rslib.js"); - -// Seed random numbers and print the seed. To reproduce a failed test, look for the seed towards -// the beginning of the output, and give it as an argument to randomize. -Random.setRandomSeed(); - -// Version constants. -const lastStableFCV = "3.6"; - -/* - * Namespace for all random operation helpers. Actual tests start below - */ -var RandomOps = { - // Change this to print all operations run. - verbose: false, - // 'Random' documents will have various combinations of these names mapping to these values - fieldNames: ["a", "b", "c", "longerName", "numbered10", "dashed-name"], - fieldValues: [ - true, - false, - 0, - 44, - -123, - "", - "String", - [], - [false, "x"], - ["array", 1, {doc: true}, new Date().getTime()], - {}, - {embedded: "document", weird: ["values", 0, false]}, - new Date().getTime() - ], - - /* - * Return a random element from Array a. - */ - randomChoice: function(a) { - if (a.length === 0) { - print("randomChoice called on empty input!"); - return null; - } - var x = Random.rand(); - while (x === 1.0) { // Would be out of bounds - x = Random.rand(); - } - var i = Math.floor(x * a.length); - return a[i]; - }, - - /* - * Uses above arrays to create a new doc with a random amount of fields mapping to random - * values. - */ - randomNewDoc: function() { - var doc = {}; - for (var i = 0; i < Random.randInt(0, this.fieldNames.length); i++) { - doc[this.randomChoice(this.fieldNames)] = this.randomChoice(this.fieldValues); - } - return doc; - }, - - /* - * Returns the names of all 'user created' (non admin/local) databases which have some data in - * them, or an empty list if none exist. - */ - getCreatedDatabases: function(conn) { - var created = []; - var dbs = conn.getDBs().databases; - for (var i in dbs) { - var db = dbs[i]; - if (db.name !== 'local' && db.name !== 'admin' && db.empty === false) { - created.push(db.name); - } - } - return created; - }, - - getRandomDoc: function(collection) { - try { - var randIndex = Random.randInt(0, collection.find().count()); - return collection.find().sort({$natural: 1}).skip(randIndex).limit(1)[0]; - } catch (e) { - return undefined; - } - }, - - /* - * Returns a random user defined collection. - * - * The second parameter is a function that should return false if it wants to filter out - * a collection from the list. - * - * If no collections exist, this returns null. 
- */ - getRandomExistingCollection: function(conn, filterFn) { - var matched = []; - var dbs = this.getCreatedDatabases(conn); - for (var i in dbs) { - var dbName = dbs[i]; - var colls = conn.getDB(dbName) - .getCollectionNames() - .filter(function(collName) { - if (collName == "system.indexes") { - return false; - } else if (filterFn && !filterFn(dbName, collName)) { - return false; - } else { - return true; - } - }) - .map(function(collName) { - return conn.getDB(dbName).getCollection(collName); - }); - Array.prototype.push.apply(matched, colls); - } - if (matched.length === 0) { - return null; - } - return this.randomChoice(matched); - }, - - ////////////////////////////////////////////////////////////////////////////////// - // RANDOMIZED CRUD OPERATIONS - ////////////////////////////////////////////////////////////////////////////////// - - /* - * Insert a random document into a random collection, with a random writeconcern - */ - insert: function(conn) { - var databases = ["tic", "tac", "toe"]; - var collections = ["eeny", "meeny", "miny", "moe"]; - var writeConcerns = [-1, 0, 1, 2, 3, 4, 5, 6, "majority"]; - - var db = this.randomChoice(databases); - var coll = this.randomChoice(collections); - var doc = this.randomNewDoc(); - if (Random.rand() < 0.5) { - doc._id = new ObjectId().str; // Vary whether or not we include the _id - } - var writeConcern = this.randomChoice(writeConcerns); - var journal = this.randomChoice([true, false]); - if (this.verbose) { - print("Inserting: "); - printjson(doc); - print("With write concern: " + writeConcern + " and journal: " + journal); - } - var result = - conn.getDB(db)[coll].insert(doc, {writeConcern: {w: writeConcern}, journal: journal}); - assert.writeOK(result); - if (this.verbose) { - print("done."); - } - }, - - /* - * remove a random document from a random collection - */ - remove: function(conn) { - var coll = this.getRandomExistingCollection(conn); - if (coll === null || coll.find().count() === 0) { - return null; // No data, can't delete anything. - } - var doc = this.getRandomDoc(coll); - if (doc === undefined) { - // If multithreaded, there could have been issues finding a random doc. - // If so, just skip this operation. - return; - } - - if (this.verbose) { - print("Deleting:"); - printjson(doc); - } - try { - coll.remove(doc); - } catch (e) { - if (this.verbose) { - print("Caught exception in remove: " + e); - } - } - if (this.verbose) { - print("done."); - } - }, - - /* - * Update a random document from a random collection. Set a random field to a (possibly) new - * value. - */ - update: function(conn) { - var coll = this.getRandomExistingCollection(conn); - if (coll === null || coll.find().count() === 0) { - return null; // No data, can't update anything. - } - var doc = this.getRandomDoc(coll); - if (doc === undefined) { - // If multithreaded, there could have been issues finding a random doc. - // If so, just skip this operation. - return; - } - - var field = this.randomChoice(this.fieldNames); - var updateDoc = {$set: {}}; - updateDoc.$set[field] = this.randomChoice(this.fieldValues); - if (this.verbose) { - print("Updating:"); - printjson(doc); - print("with:"); - printjson(updateDoc); - } - // If multithreaded, doc might not exist anymore. 
- try { - coll.update(doc, updateDoc); - } catch (e) { - if (this.verbose) { - print("Caught exception in update: " + e); - } - } - if (this.verbose) { - print("done."); - } - }, - - ////////////////////////////////////////////////////////////////////////////////// - // RANDOMIZED COMMANDS - ////////////////////////////////////////////////////////////////////////////////// - - /* - * Randomly rename a collection to a new name. New name will be an ObjectId string. - */ - renameCollection: function(conn) { - var coll = this.getRandomExistingCollection(conn); - if (coll === null) { - return null; - } - var newName = coll.getDB().getName() + "." + new ObjectId().str; - if (this.verbose) { - print("renaming collection " + coll.getFullName() + " to " + newName); - } - assert.commandWorked( - conn.getDB("admin").runCommand({renameCollection: coll.getFullName(), to: newName})); - if (this.verbose) { - print("done."); - } - }, - - /* - * Randomly drop a user created collection - */ - dropCollection: function(conn) { - var coll = this.getRandomExistingCollection(conn); - if (coll === null) { - return null; - } - if (this.verbose) { - print("Dropping collection " + coll.getFullName()); - } - assert.commandWorked(coll.runCommand({drop: coll.getName()})); - if (this.verbose) { - print("done."); - } - }, - - /* - * Randomly create an index on a random field in a random user defined collection. - */ - createIndex: function(conn) { - var coll = this.getRandomExistingCollection(conn); - if (coll === null) { - return null; - } - var index = {}; - index[this.randomChoice(this.fieldNames)] = this.randomChoice([-1, 1]); - if (this.verbose) { - print("Adding index " + tojsononeline(index) + " to " + coll.getFullName()); - } - coll.ensureIndex(index); - if (this.verbose) { - print("done."); - } - }, - - /* - * Randomly drop one existing index on a random user defined collection - */ - dropIndex: function(conn) { - var coll = this.getRandomExistingCollection(conn); - if (coll === null) { - return null; - } - var index = this.randomChoice(coll.getIndices()); - if (index.name === "_id_") { - return null; // Don't drop that one. - } - if (this.verbose) { - print("Dropping index " + tojsononeline(index.key) + " from " + coll.getFullName()); - } - assert.commandWorked(coll.dropIndex(index.name)); - if (this.verbose) { - print("done."); - } - }, - - /* - * Select a random collection and flip the user flag for usePowerOf2Sizes - */ - collMod: function(conn) { - var coll = this.getRandomExistingCollection(conn); - if (coll === null) { - return null; - } - var toggle = !coll.stats().userFlags; - if (this.verbose) { - print("Modifying usePowerOf2Sizes to " + toggle + " on collection " + - coll.getFullName()); - } - coll.runCommand({collMod: coll.getName(), usePowerOf2Sizes: toggle}); - if (this.verbose) { - print("done."); - } - }, - - /* - * Select a random user-defined collection and empty it - */ - emptyCapped: function(conn) { - var isCapped = function(dbName, coll) { - return conn.getDB(dbName)[coll].isCapped(); - }; - var coll = this.getRandomExistingCollection(conn, isCapped); - if (coll === null) { - return null; - } - if (this.verbose) { - print("Emptying capped collection: " + coll.getFullName()); - } - assert.commandWorked(coll.runCommand({emptycapped: coll.getName()})); - if (this.verbose) { - print("done."); - } - }, - - /* - * Apply some ops to a random collection. For now we'll just have insert ops. - */ - applyOps: function(conn) { - // Check if there are any valid collections to choose from. 
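The ops assembled below follow the raw oplog-entry shape ({op, ns, o}). A minimal standalone sketch of a single insert op applied through applyOps, with a placeholder collection name:

// Minimal sketch: apply one insert op through the applyOps command.
var sketchColl = db.getSiblingDB("test").applyops_sketch;
sketchColl.drop();
assert.commandWorked(sketchColl.getDB().createCollection(sketchColl.getName()));
var insertOp = {op: "i", ns: sketchColl.getFullName(), o: {_id: new ObjectId(), a: 1}};
assert.commandWorked(db.getSiblingDB("admin").runCommand({applyOps: [insertOp]}));
assert.eq(1, sketchColl.count({a: 1}));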
- if (this.getRandomExistingCollection(conn) === null) { - return null; - } - var ops = []; - // Insert between 1 and 10 things. - for (var i = 0; i < Random.randInt(1, 10); i++) { - var coll = this.getRandomExistingCollection(conn); - var doc = this.randomNewDoc(); - doc._id = new ObjectId(); - if (coll !== null) { - ops.push({op: "i", ns: coll.getFullName(), o: doc}); - } - } - if (this.verbose) { - print("Applying the following ops: "); - printjson(ops); - } - assert.commandWorked(conn.getDB("admin").runCommand({applyOps: ops})); - if (this.verbose) { - print("done."); - } - }, - - /* - * Create a random collection. Use an ObjectId for the name - */ - createCollection: function(conn) { - var dbs = this.getCreatedDatabases(conn); - if (dbs.length === 0) { - return null; - } - var dbName = this.randomChoice(dbs); - var newName = new ObjectId().str; - if (this.verbose) { - print("Creating new collection: " + "dbName" + "." + newName); - } - assert.commandWorked(conn.getDB(dbName).runCommand({create: newName})); - if (this.verbose) { - print("done."); - } - }, - - /* - * Convert a random non-capped collection to a capped one with size 1MB. - */ - convertToCapped: function(conn) { - var isNotCapped = function(dbName, coll) { - return !conn.getDB(dbName)[coll].isCapped(); - }; - var coll = this.getRandomExistingCollection(conn, isNotCapped); - if (coll === null) { - return null; - } - if (this.verbose) { - print("Converting " + coll.getFullName() + " to a capped collection."); - } - assert.commandWorked(coll.runCommand({convertToCapped: coll.getName(), size: 1024 * 1024})); - if (this.verbose) { - print("done."); - } - }, - - appendOplogNote: function(conn) { - var note = "Test note " + new ObjectId().str; - if (this.verbose) { - print("Appending oplog note: " + note); - } - assert.commandWorked( - conn.getDB("admin").runCommand({appendOplogNote: note, data: {some: 'doc'}})); - if (this.verbose) { - print("done."); - } - }, - - /* - * Repeatedly call methods numOps times, choosing randomly from possibleOps, which should be - * a list of strings representing method names of the RandomOps object. - */ - doRandomWork: function(conn, numOps, possibleOps) { - for (var i = 0; i < numOps; i++) { - op = this.randomChoice(possibleOps); - try { - this[op](conn); - } catch (ex) { - print('doRandomWork - ' + op + ': failed: ' + ex); - throw ex; - } - } - } - -}; // End of RandomOps - -////////////////////////////////////////////////////////////////////////////////// -// OTHER HELPERS -////////////////////////////////////////////////////////////////////////////////// - -function isArbiter(conn) { - return conn.adminCommand({isMaster: 1}).arbiterOnly === true; -} - -function removeFromArray(elem, a) { - a.splice(a.indexOf(elem), 1); -} - -/* - * builds a function to be passed to assert.soon. Needs to know which node to expect as the new - * primary - */ -var primaryChanged = function(conns, replTest, primaryIndex) { - return function() { - return conns[primaryIndex] == replTest.getPrimary(); - }; -}; - -/* - * If we have determined a collection doesn't match on two hosts, use this to get a string of the - * differences. 
- */ -function getCollectionDiff(db1, db2, collName) { - var coll1 = db1[collName]; - var coll2 = db2[collName]; - var cur1 = coll1.find().sort({$natural: 1}); - var cur2 = coll2.find().sort({$natural: 1}); - var diffText = ""; - while (cur1.hasNext() && cur2.hasNext()) { - var doc1 = cur1.next(); - var doc2 = cur2.next(); - if (doc1 != doc2) { - diffText += "mismatching doc:" + tojson(doc1) + tojson(doc2); - } - } - if (cur1.hasNext()) { - diffText += db1.getMongo().host + " has extra documents:"; - while (cur1.hasNext()) { - diffText += "\n" + tojson(cur1.next()); - } - } - if (cur2.hasNext()) { - diffText += db2.getMongo().host + " has extra documents:"; - while (cur2.hasNext()) { - diffText += "\n" + tojson(cur2.next()); - } - } - return diffText; -} - -/* - * Check if two databases are equal. If not, print out what the differences are to aid with - * debugging. - */ -function assertDBsEq(db1, db2) { - assert.eq(db1.getName(), db2.getName()); - var hash1 = db1.runCommand({dbHash: 1}); - var hash2 = db2.runCommand({dbHash: 1}); - var host1 = db1.getMongo().host; - var host2 = db2.getMongo().host; - var success = true; - var collNames1 = db1.getCollectionNames(); - var collNames2 = db2.getCollectionNames(); - var diffText = ""; - if (db1.getName() === 'local') { - // We don't expect the entire local collection to be the same, not even the oplog, since - // it's a capped collection. - return; - } else if (hash1.md5 != hash2.md5) { - for (var i = 0; i < Math.min(collNames1.length, collNames2.length); i++) { - var collName = collNames1[i]; - if (collName.startsWith('system.')) { - // Skip system collections. These are not included in the dbhash before 3.3.10. - continue; - } - if (hash1.collections[collName] !== hash2.collections[collName]) { - if (db1[collName].stats().capped) { - if (!db2[collName].stats().capped) { - success = false; - diffText += - "\n" + collName + " is capped on " + host1 + " but not on " + host2; - } else { - // Skip capped collections. They are not expected to be the same from host - // to host. - continue; - } - } else { - success = false; - diffText += - "\n" + collName + " differs: " + getCollectionDiff(db1, db2, collName); - } - } - } - } - assert.eq(success, - true, - "Database " + db1.getName() + " differs on " + host1 + " and " + host2 + - "\nCollections: " + collNames1 + " vs. " + collNames2 + "\n" + diffText); -} - -/* - * Check the database hashes of all databases to ensure each node of the replica set has the same - * data. - */ -function assertSameData(primary, conns) { - var dbs = primary.getDBs().databases; - for (var i in dbs) { - var db1 = primary.getDB(dbs[i].name); - for (var j in conns) { - var conn = conns[j]; - if (!isArbiter(conn)) { - var db2 = conn.getDB(dbs[i].name); - assertDBsEq(db1, db2); - } - } - } -} - -/* - * function to pass to a thread to make it start doing random commands/CRUD operations. - */ -function startCmds(randomOps, host) { - // This test randomly generates operations, which may include direct writes against - // config.transactions, which are not allowed to run under a session. 
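The consistency checks above ultimately reduce to comparing dbHash output across members. A minimal sketch, assuming conn1 and conn2 are connections to two data-bearing members of the same set:

// Minimal sketch: compare the dbHash md5 of one database on two members.
function assertSameDbHash(conn1, conn2, dbName) {
    var h1 = assert.commandWorked(conn1.getDB(dbName).runCommand({dbHash: 1}));
    var h2 = assert.commandWorked(conn2.getDB(dbName).runCommand({dbHash: 1}));
    assert.eq(h1.md5, h2.md5, dbName + " differs between " + conn1.host + " and " + conn2.host);
}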
- TestData = {disableImplicitSessions: true}; - - var ops = [ - "insert", - "remove", - "update", - "renameCollection", - "dropCollection", - "createIndex", - "dropIndex", - "collMod", - "emptyCapped", - "applyOps", - "createCollection", - "convertToCapped", - "appendOplogNote" - ]; - var m = new Mongo(host); - var numOps = 200; - Random.setRandomSeed(); - randomOps.doRandomWork(m, numOps, ops); - return true; -} - -/* - * function to pass to a thread to make it start doing random CRUD operations. - */ -function startCRUD(randomOps, host) { - // This test randomly generates operations, which may include direct writes against - // config.transactions, which are not allowed to run under a session. - TestData = {disableImplicitSessions: true}; - - var m = new Mongo(host); - var numOps = 500; - Random.setRandomSeed(); - randomOps.doRandomWork(m, numOps, ["insert", "update", "remove"]); - return true; -} - -/* - * To avoid race conditions on things like trying to drop a collection while another thread is - * trying to rename it, just have one thread that might issue commands, and the others do random - * CRUD operations. To be clear, this is something that the Mongod should be able to handle, but - * this test code does not have atomic random operations. E.g. it has to first randomly select - * a collection to drop an index from, which may not be there by the time it tries to get a list - * of indices on the collection. - */ -function doMultiThreadedWork(primary, numThreads) { - var threads = []; - // The command thread - // Note we pass the hostname, as we have to re-establish the connection in the new thread. - var cmdThread = new ScopedThread(startCmds, RandomOps, primary.host); - threads.push(cmdThread); - cmdThread.start(); - // Other CRUD threads - for (var i = 1; i < numThreads; i++) { - var crudThread = new ScopedThread(startCRUD, RandomOps, primary.host); - threads.push(crudThread); - crudThread.start(); - } - for (var j = 0; j < numThreads; j++) { - assert.eq(threads[j].returnData(), true); - } -} - -////////////////////////////////////////////////////////////////////////////////// -// START ACTUAL TESTING -////////////////////////////////////////////////////////////////////////////////// - -(function() { - "use strict"; - var name = "mixed_storage_and_version"; - // Create a replica set with 2 nodes of each of the types below, plus one arbiter. - var oldVersion = "last-stable"; - var newVersion = "latest"; - - var setups = [ - {binVersion: newVersion, storageEngine: 'mmapv1'}, - {binVersion: newVersion, storageEngine: 'mmapv1'}, - {binVersion: newVersion, storageEngine: 'wiredTiger'}, - {binVersion: newVersion, storageEngine: 'wiredTiger'}, - {binVersion: oldVersion}, - {binVersion: oldVersion}, - {arbiter: true}, - ]; - var replTest = new ReplSetTest({nodes: {n0: setups[0]}, name: name}); - replTest.startSet(); - var config = replTest.getReplSetConfig(); - // Override the default value -1 in 3.5. - config.settings = {catchUpTimeoutMillis: 2000}; - replTest.initiate(config); - - // We set the featureCompatibilityVersion to lastStableFCV so that last-stable binary version - // secondaries can successfully initial sync from a latest binary version primary. We do this - // prior to adding any other members to the replica set. This effectively allows us to emulate - // upgrading some of our nodes to the latest version while different last-stable version and - // latest version mongod processes are being elected primary. 
- assert.commandWorked( - replTest.getPrimary().adminCommand({setFeatureCompatibilityVersion: lastStableFCV})); - - for (let i = 1; i < setups.length; ++i) { - replTest.add(setups[i]); - } - - var newConfig = replTest.getReplSetConfig(); - config = replTest.getReplSetConfigFromNode(); - // Make sure everyone is syncing from the primary, to ensure we have all combinations of - // primary/secondary syncing. - config.members = newConfig.members; - config.settings.chainingAllowed = false; - config.version += 1; - reconfig(replTest, config); - - // Ensure all are synced. - replTest.awaitSecondaryNodes(120000); - var primary = replTest.getPrimary(); - - Random.setRandomSeed(); - - // Keep track of the indices of different types of primaries. - // We'll rotate to get a primary of each type. - var possiblePrimaries = [0, 2, 4]; - var highestPriority = 2; - while (possiblePrimaries.length > 0) { - config = primary.getDB("local").system.replset.findOne(); - var primaryIndex = RandomOps.randomChoice(possiblePrimaries); - print("TRANSITIONING to " + tojsononeline(setups[primaryIndex / 2]) + " as primary"); - // Remove chosen type from future choices. - removeFromArray(primaryIndex, possiblePrimaries); - config.members[primaryIndex].priority = highestPriority; - if (config.version === undefined) { - config.version = 2; - } else { - config.version++; - } - highestPriority++; - printjson(config); - reconfig(replTest, config); - replTest.awaitReplication(); - assert.soon(primaryChanged(replTest.nodes, replTest, primaryIndex), - "waiting for higher priority primary to be elected", - 100000); - print("New primary elected, doing a bunch of work"); - primary = replTest.getPrimary(); - doMultiThreadedWork(primary, 10); - replTest.awaitReplication(); - print("Work done, checking to see all nodes match"); - assertSameData(primary, replTest.nodes); - } - replTest.stopSet(); -})(); diff --git a/jstests/multiVersion/repair_feature_compatibility_version.js b/jstests/multiVersion/repair_feature_compatibility_version.js index a8df094f18d..8eeea0ed427 100644 --- a/jstests/multiVersion/repair_feature_compatibility_version.js +++ b/jstests/multiVersion/repair_feature_compatibility_version.js @@ -16,23 +16,6 @@ const latest = "latest"; /** - * If we're using mmapv1, we must recover the journal files from an unclean shutdown before - * attempting to run with --repair. - */ - let recoverMMapJournal = function(isMMAPv1, conn, dbpath) { - if (isMMAPv1) { - let returnCode = runMongoProgram("mongod", - "--port", - conn.port, - "--journalOptions", - /*MMAPV1Options::JournalRecoverOnly*/ 4, - "--dbpath", - dbpath); - assert.eq(returnCode, /*EXIT_NET_ERROR*/ 48); - } - }; - - /** * Ensure that a mongod (without using --repair) fails to start up if there are non-local * collections and the FCV document in the admin database has been removed. * @@ -71,13 +54,10 @@ // deleted. doStartupFailTests(latest, dbpath); - const isMMAPv1 = jsTest.options().storageEngine === "mmapv1"; - // --repair can be used to restore a missing featureCompatibilityVersion document to an existing // admin database, as long as all collections have UUIDs. The FCV should be initialized to // lastStableFCV / downgraded FCV. 
connection = setupMissingFCVDoc(latest, dbpath); - recoverMMapJournal(isMMAPv1, connection, dbpath); let returnCode = runMongoProgram("mongod", "--port", connection.port, "--repair", "--dbpath", dbpath); assert.eq( diff --git a/jstests/multiVersion/transitioning_to_and_from_WT.js b/jstests/multiVersion/transitioning_to_and_from_WT.js deleted file mode 100644 index f1c5717226f..00000000000 --- a/jstests/multiVersion/transitioning_to_and_from_WT.js +++ /dev/null @@ -1,178 +0,0 @@ -/** - * Test the upgrade/downgrade process for the last stable release <~~> the latest release with both - * the mmapv1 and wiredTiger storage engines. Repeat the process with --directoryperdb set. - */ -(function() { - "use strict"; - - jsTestLog("Setting up initial data set with the last stable version of mongod"); - - var toolTest = new ToolTest('transitioning_to_and_from_WT', { - binVersion: MongoRunner.getBinVersionFor("last-stable"), - storageEngine: "mmapv1", - }); - - toolTest.dbpath = toolTest.root + "/original/"; - resetDbpath(toolTest.dbpath); - assert(mkdir(toolTest.dbpath)); - toolTest.startDB('foo'); - - // where we'll put the dump - var dumpTarget = toolTest.root + '/transitioning_to_and_from_WT_dump/'; - - // the db and collections we'll be using - var testDB = toolTest.db.getSiblingDB('test'); - var longName = "this_name_is_just_63_characters_because_that_is_the_name_limit"; - var testColl = testDB.coll; - testDB.createCollection("capped", {capped: true, size: 10000}); - var testCapped = testDB.capped; - // test database and collection lengths to make sure they work correctly in latest and with WT - var longDB = toolTest.db.getSiblingDB(longName); - var longColl = longDB.collection_name_is_lengthed_to_reach_namespace_max_of_123; - longColl.insert({x: 1}); - - // insert some documents - for (var i = 0; i < 50; i++) { - if (i < 10) { - testCapped.insert({x: i}); - } - testColl.insert({x: i}); - } - // create an index - testColl.ensureIndex({x: 1}, {name: "namedIndex"}); - - // sanity check the insert worked - var indexes = testColl.getIndexes(); - assert.eq(50, testColl.count()); - assert.eq(2, indexes.length); - assert(indexes[0].name === "namedIndex" || indexes[1].name === "namedIndex"); - assert.eq(10, testCapped.count()); - assert(testCapped.isCapped()); - assert.eq(1, longColl.count()); - - // Transition from the last stable version with mmapv1... 
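Each entry in the modes list below amounts to dump, restart mongod on a fresh dbpath with the listed binary and engine, then restore and re-verify. A minimal sketch of just the restart-on-a-fresh-dbpath step, with a placeholder path name:

// Minimal sketch: start a mongod with a chosen storage engine on an empty dbpath.
var sketchPath = MongoRunner.dataPath + "transition_sketch";
resetDbpath(sketchPath);
var sketchConn = MongoRunner.runMongod({dbpath: sketchPath, storageEngine: "wiredTiger"});
assert.neq(null, sketchConn, "mongod failed to start with wiredTiger");
MongoRunner.stopMongod(sketchConn);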
- var modes = [ - // to the latest version with wiredTiger - { - binVersion: "latest", - storageEngine: "wiredTiger", - }, - // back to the last stable version with mmapv1 - { - binVersion: "last-stable", - storageEngine: "mmapv1", - }, - // to the latest version with mmapv1 - { - binVersion: "latest", - storageEngine: "mmapv1", - }, - // to latest version with wiredTiger - { - binVersion: "latest", - storageEngine: "wiredTiger", - }, - // back to the latest version with mmapv1 - { - binVersion: "latest", - storageEngine: "mmapv1", - }, - // to the last stable version with mmapv1 and directory per db - { - binVersion: "last-stable", - storageEngine: "mmapv1", - directoryperdb: "", - }, - // to the latest version with wiredTiger - { - binVersion: "latest", - storageEngine: "wiredTiger", - }, - // back to the last stable version with mmapv1 and directory per db - { - binVersion: "last-stable", - storageEngine: "mmapv1", - directoryperdb: "", - }, - // to latest version with mmapv1 and directory per db - { - binVersion: "latest", - storageEngine: "mmapv1", - directoryperdb: "", - }, - // to the latest with wiredTiger - { - binVersion: "latest", - storageEngine: "wiredTiger", - }, - // back to latest version with mmapv1 and directory per db - { - binVersion: "latest", - storageEngine: "mmapv1", - directoryperdb: "", - }, - ]; - - modes.forEach(function(entry, idx) { - jsTestLog("moving to: " + tojson(entry)); - // dump the data - resetDbpath(dumpTarget); - var ret = toolTest.runTool('dump', '--out', dumpTarget); - assert.eq(0, ret); - - // stop previous mongod - toolTest.stop(); - - // set up new node configuration info - toolTest.options.binVersion = MongoRunner.getBinVersionFor(entry.binVersion); - toolTest.dbpath = - toolTest.root + "/" + idx + "-" + entry.binVersion + "-" + entry.storageEngine + "/"; - - if (entry.hasOwnProperty("storageEngine")) { - toolTest.options.storageEngine = entry.storageEngine; - } - - if (entry.hasOwnProperty("directoryperdb")) { - toolTest.options.directoryperdb = entry.directoryperdb; - } - - // create the unique dbpath for this instance and start the mongod - resetDbpath(toolTest.dbpath); - assert(mkdir(toolTest.dbpath)); - toolTest.startDB('foo'); - - // refresh the db and coll reference - testDB = toolTest.db.getSiblingDB('test'); - testCapped = testDB.capped; - testColl = testDB.coll; - longDB = toolTest.db.getSiblingDB(longName); - longColl = longDB.collection_name_is_lengthed_to_reach_namespace_max_of_123; - - // ensure the new mongod was started with an empty data dir - assert.eq(0, testColl.count()); - assert.eq(0, testCapped.count()); - assert.eq(0, longColl.count()); - - // restore the data - ret = toolTest.runTool('restore', dumpTarget); - assert.eq(0, ret); - - // make sure the data was restored - assert.eq(50, testColl.count()); - indexes = testColl.getIndexes(); - assert.eq(2, indexes.length); - assert(indexes[0].name === "namedIndex" || indexes[1].name === "namedIndex"); - assert.eq(10, testCapped.count()); - assert(testCapped.isCapped()); - for (var i = 0; i < 50; i++) { - if (i < 10) { - assert.eq(1, testCapped.count({x: i})); - } - assert.eq(1, testColl.count({x: i})); - } - assert.eq(1, longColl.count()); - }); - - // success - toolTest.stop(); -}()); diff --git a/jstests/noPassthrough/directoryperdb.js b/jstests/noPassthrough/directoryperdb.js index 8561b810156..ce123ae08fb 100644 --- a/jstests/noPassthrough/directoryperdb.js +++ b/jstests/noPassthrough/directoryperdb.js @@ -1,5 +1,5 @@ /** - * Tests that a mongod started with 
--directoryperdb will write data for database x into a direcotry + * Tests that a mongod started with --directoryperdb will write data for database x into a directory * named x inside the dbpath. * * This test does not make sense for in-memory storage engines, since they will not produce any data @@ -13,7 +13,7 @@ var baseDir = "jstests_directoryperdb"; var dbpath = MongoRunner.dataPath + baseDir + "/"; - var isDirectoryPerDBSupported = jsTest.options().storageEngine == "mmapv1" || + var isDirectoryPerDBSupported = jsTest.options().storageEngine == "wiredTiger" || !jsTest.options().storageEngine; var m = MongoRunner.runMongod({dbpath: dbpath, directoryperdb: ''}); diff --git a/jstests/noPassthrough/libs/backup_restore.js b/jstests/noPassthrough/libs/backup_restore.js index e5c80ad9907..f51cab7b7d6 100644 --- a/jstests/noPassthrough/libs/backup_restore.js +++ b/jstests/noPassthrough/libs/backup_restore.js @@ -256,7 +256,7 @@ var BackupRestoreTest = function(options) { var nodes = rst.startSet(); // Initialize replica set using default timeout. This should give us sufficient time to - // allocate 1GB oplogs on slow test hosts with mmapv1. + // allocate 1GB oplogs on slow test hosts. rst.initiate(); rst.awaitNodesAgreeOnPrimary(); var primary = rst.getPrimary(); diff --git a/jstests/noPassthrough/read_only_command_line.js b/jstests/noPassthrough/read_only_command_line.js deleted file mode 100644 index 1c88a61c50b..00000000000 --- a/jstests/noPassthrough/read_only_command_line.js +++ /dev/null @@ -1,25 +0,0 @@ -// Test that setting readOnly mode on the command line causes readOnly to be properly set in both -// isMaster and serverStatus output. -// -// This test requires mmapv1. -// @tags: [requires_mmapv1] -(function() { - "use strict"; - - // TODO: use configured storageEngine from testData once wiredTiger supports readOnly mode. - var mongod = MongoRunner.runMongod({storageEngine: "mmapv1"}); - var dbpath = mongod.dbpath; - - // ensure dbpath gets set up. - assert.writeOK(mongod.getDB("foo").x.insert({x: 1})); - - assert(!mongod.getDB("admin").isMaster().readOnly); - assert(!mongod.getDB("admin").serverStatus().storageEngine.readOnly); - MongoRunner.stopMongod(mongod); - - mongod = MongoRunner.runMongod( - {storageEngine: "mmapv1", queryableBackupMode: "", dbpath: dbpath, noCleanData: true}); - assert(mongod.getDB("admin").isMaster().readOnly); - assert(mongod.getDB("admin").serverStatus().storageEngine.readOnly); - MongoRunner.stopMongod(mongod); -}()); diff --git a/jstests/noPassthrough/retryable_writes_mmap.js b/jstests/noPassthrough/retryable_writes_mmap.js deleted file mode 100644 index 7efac46e625..00000000000 --- a/jstests/noPassthrough/retryable_writes_mmap.js +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Verify that retryable writes aren't allowed on mmapv1, because it doesn't have document-level - * locking. 
- * @tags: [requires_sharding] - */ -(function() { - "use strict"; - - if (jsTest.options().storageEngine !== "mmapv1") { - jsTestLog("Storage engine is not mmapv1, skipping test"); - return; - } - - const rst = new ReplSetTest({nodes: 1}); - rst.startSet(); - rst.initiate(); - - let testDB = rst.getPrimary().startSession({retryWrites: true}).getDatabase("test"); - - assert.commandFailedWithCode( - testDB.foo.insert({x: 1}), - ErrorCodes.IllegalOperation, - "expected command with txnNumber to fail without document-level locking"); - - rst.stopSet(); - - const st = new ShardingTest({shards: {rs0: {nodes: 1}}}); - - testDB = st.s.startSession({retryWrites: true}).getDatabase("test"); - - assert.commandFailedWithCode( - testDB.foo.insert({x: 1}), - ErrorCodes.IllegalOperation, - "expected command with txnNumber to fail without document-level locking"); - - st.stop(); -}()); diff --git a/jstests/noPassthrough/server_status.js b/jstests/noPassthrough/server_status.js deleted file mode 100644 index d19621c07de..00000000000 --- a/jstests/noPassthrough/server_status.js +++ /dev/null @@ -1,42 +0,0 @@ -// Checks storage-engine specific sections of db.severStatus() output. - -(function() { - 'use strict'; - - // 'backgroundFlushing' is mmapv1-specific. - var mongo = MongoRunner.runMongod({smallfiles: ""}); - var testDB = mongo.getDB('test'); - var serverStatus = assert.commandWorked(testDB.serverStatus()); - if (serverStatus.storageEngine.name == 'mmapv1') { - assert(serverStatus.backgroundFlushing, - 'mmapv1 db.serverStatus() result must contain backgroundFlushing document: ' + - tojson(serverStatus)); - } else { - assert(!serverStatus.backgroundFlushing, - 'Unexpected backgroundFlushing document in non-mmapv1 db.serverStatus() result: ' + - tojson(serverStatus)); - } - MongoRunner.stopMongod(mongo); - - // 'dur' is mmapv1-specific and should only be generated when journaling is enabled. 
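On any non-mmapv1 engine both storage-specific sections should simply be absent. A minimal sketch of that negative check against wiredTiger:

// Minimal sketch: wiredTiger serverStatus output carries neither mmapv1-specific section.
var wtConn = MongoRunner.runMongod({storageEngine: "wiredTiger"});
var wtStatus = assert.commandWorked(wtConn.getDB("test").serverStatus());
assert(!wtStatus.backgroundFlushing, "unexpected backgroundFlushing: " + tojson(wtStatus));
assert(!wtStatus.dur, "unexpected dur section: " + tojson(wtStatus));
MongoRunner.stopMongod(wtConn);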
- mongo = MongoRunner.runMongod({smallfiles: "", journal: ""}); - testDB = mongo.getDB('test'); - serverStatus = assert.commandWorked(testDB.serverStatus()); - if (serverStatus.storageEngine.name == 'mmapv1') { - assert( - serverStatus.dur, - 'mmapv1 db.serverStatus() result must contain "dur" document: ' + tojson(serverStatus)); - } else { - assert(!serverStatus.dur, - 'Unexpected "dur" document in non-mmapv1 db.serverStatus() result: ' + - tojson(serverStatus)); - } - MongoRunner.stopMongod(mongo); - mongo = MongoRunner.runMongod({smallfiles: "", nojournal: ""}); - testDB = mongo.getDB('test'); - serverStatus = assert.commandWorked(testDB.serverStatus()); - assert(!serverStatus.dur, - 'Unexpected "dur" document in db.serverStatus() result when journaling is disabled: ' + - tojson(serverStatus)); - MongoRunner.stopMongod(mongo); -}()); diff --git a/jstests/noPassthrough/utf8_paths.js b/jstests/noPassthrough/utf8_paths.js index 0487c3c2c15..49cb5a63bac 100644 --- a/jstests/noPassthrough/utf8_paths.js +++ b/jstests/noPassthrough/utf8_paths.js @@ -16,9 +16,8 @@ pidfilepath: path + "/pidfile", }; - // directoryperdb is only supported with the wiredTiger, and mmapv1 storage engines - if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger" || - jsTest.options().storageEngine === "mmapv1") { + // directoryperdb is only supported with the wiredTiger storage engine + if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") { options["directoryperdb"] = ""; } diff --git a/jstests/parallel/update_serializability2.js b/jstests/parallel/update_serializability2.js index c2672491177..f28ddf9bd5a 100644 --- a/jstests/parallel/update_serializability2.js +++ b/jstests/parallel/update_serializability2.js @@ -1,4 +1,4 @@ -function test() { +(function() { "use strict"; var t = db.update_serializability1; t.drop(); @@ -24,10 +24,4 @@ function test() { // both operations should happen on every document assert.eq(N, t.find({x: 2, y: 2}).count()); -} - -if (db.serverStatus().storageEngine.name == 'mmapv1') { - jsTest.log('skipping test on mmapv1'); // This is only guaranteed on other engines. -} else { - test(); -} +})(); diff --git a/jstests/replsets/config_server_checks.js b/jstests/replsets/config_server_checks.js deleted file mode 100644 index 3567e52925e..00000000000 --- a/jstests/replsets/config_server_checks.js +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Tests various combinations of the configsvr field in replica set configurations and the - * command line options that control whether a node can function as a member of a CSRS. - * - * This test requires mmapv1. 
- * @tags: [requires_mmapv1] - */ - -function expectState(rst, state) { - assert.soon(function() { - var status = rst.status(); - if (status.myState != state) { - print("Waiting for state " + state + " in replSetGetStatus output: " + tojson(status)); - } - return status.myState == state; - }); -} - -(function() { - "use strict"; - - (function() { - // Test that node with --configsvr cmd line and configsvr in replset config goes - // into REMOVED state if storage engine is not WiredTiger - jsTestLog("configsvr in rs config and --configsvr cmd line, but mmapv1"); - var rst = new ReplSetTest({ - name: "configrs3", - nodes: 1, - nodeOptions: {configsvr: "", journal: "", storageEngine: "mmapv1"} - }); - - rst.startSet(); - var conf = rst.getReplSetConfig(); - conf.configsvr = true; - try { - rst.nodes[0].adminCommand({replSetInitiate: conf}); - } catch (e) { - // expected since we close all connections after going into REMOVED - } - expectState(rst, ReplSetTest.State.REMOVED); - rst.stopSet(); - })(); - - (function() { - // Test that node with --configsvr cmd line and configsvr in replset config and using - // wiredTiger - // does NOT go into REMOVED state. - jsTestLog("configsvr in rs config and --configsvr cmd line, normal case"); - var rst = new ReplSetTest({ - name: "configrs5", - nodes: 1, - nodeOptions: {configsvr: "", journal: "", storageEngine: "wiredTiger"} - }); - - rst.startSet(); - var conf = rst.getReplSetConfig(); - conf.configsvr = true; - assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); - - rst.getPrimary(); - expectState(rst, ReplSetTest.State.PRIMARY); - - var conf = rst.getPrimary().getDB('local').system.replset.findOne(); - assert(conf.configsvr, tojson(conf)); - - rst.stopSet(); - })(); - - (function() { - // Test that node with --configsvr cmd line and initiated with an empty replset config - // will result in configsvr:true getting automatically added to the config (SERVER-20247). - jsTestLog("--configsvr cmd line, empty config to replSetInitiate"); - var rst = new ReplSetTest({ - name: "configrs6", - nodes: 1, - nodeOptions: {configsvr: "", journal: "", storageEngine: "wiredTiger"} - }); - - rst.startSet(); - assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: 1})); - - rst.getPrimary(); - expectState(rst, ReplSetTest.State.PRIMARY); - rst.stopSet(); - })(); - - (function() { - // Test that a set initialized without --configsvr but then restarted with --configsvr will - // fail to start up and won't automatically add "configsvr" to the replset config - // (SERVER-21236). - jsTestLog("set initiated without configsvr, restarted adding --configsvr cmd line"); - var rst = new ReplSetTest( - {name: "configrs7", nodes: 1, nodeOptions: {journal: "", storageEngine: "wiredTiger"}}); - - rst.startSet(); - var conf = rst.getReplSetConfig(); - assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); - - rst.getPrimary(); - expectState(rst, ReplSetTest.State.PRIMARY); - - var node = rst.nodes[0]; - var options = node.savedOptions; - options.configsvr = ""; - options.noCleanData = true; - options.waitForConnect = false; - - MongoRunner.stopMongod(node); - - var mongod = MongoRunner.runMongod(options); - var exitCode = waitProgram(mongod.pid); - assert.eq( - MongoRunner.EXIT_ABRUPT, exitCode, "Mongod should have failed to start, but didn't"); - - rst.stopSet(); - })(); - - (function() { - // Test that a set initialized with --configsvr but then restarted without --configsvr will - // fail to start up. 
- jsTestLog("set initiated with configsvr, restarted without --configsvr cmd line"); - var rst = new ReplSetTest({ - name: "configrs8", - nodes: 1, - nodeOptions: {configsvr: "", journal: "", storageEngine: "wiredTiger"} - }); - - rst.startSet(); - var conf = rst.getReplSetConfig(); - conf.configsvr = true; - assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf})); - - rst.getPrimary(); - expectState(rst, ReplSetTest.State.PRIMARY); - - var node = rst.nodes[0]; - var options = node.savedOptions; - delete options.configsvr; - options.noCleanData = true; - options.waitForConnect = false; - - MongoRunner.stopMongod(node); - - var mongod = MongoRunner.runMongod(options); - var exitCode = waitProgram(mongod.pid); - assert.eq( - MongoRunner.EXIT_ABRUPT, exitCode, "Mongod should have failed to start, but didn't"); - - rst.stopSet(); - })(); - -})(); diff --git a/jstests/replsets/double_rollback.js b/jstests/replsets/double_rollback.js deleted file mode 100644 index b62226d066b..00000000000 --- a/jstests/replsets/double_rollback.js +++ /dev/null @@ -1,150 +0,0 @@ -/* - * This test causes node 2 to enter rollback, reach the common point, and exit rollback, but before - * it can apply operations to bring it back to a consistent state, switch sync sources to the node - * that originally gave it the ops it is now rolling back (node 0). This test then verifies that - * node 2 refuses to use node 0 as a sync source because it doesn't contain the minValid document - * it needs to reach consistency. Node 2 is then allowed to reconnect to the node it was - * originally rolling back against (node 1) and finish its rollback. This is a regression test - * against the case where we *did* allow node 2 to sync from node 0 which gave it the very ops - * it rolled back, which could then lead to a double-rollback when node 2 was reconnected - * to node 1 and tried to apply its oplog despite not being in a consistent state. - */ - -// Rollback to a stable timestamp does not set a minValid and should be able to sync from any node. -// @tags: [requires_mmapv1] - -(function() { - 'use strict'; - - load("jstests/libs/check_log.js"); - load("jstests/replsets/rslib.js"); - - var name = "double_rollback"; - var dbName = "test"; - var collName = "double_rollback"; - - var rst = new ReplSetTest({ - name: name, - nodes: [ - {}, - {}, - {rsConfig: {priority: 0}}, - {rsConfig: {arbiterOnly: true}}, - {rsConfig: {arbiterOnly: true}} - ], - useBridge: true - }); - var nodes = rst.startSet(); - rst.initiate(); - - // SERVER-20844 ReplSetTest starts up a single node replica set then reconfigures to the correct - // size for faster startup, so nodes[0] is always the first primary. - jsTestLog("Make sure node 0 is primary."); - assert.eq(nodes[0], rst.getPrimary()); - // Wait for all data bearing nodes to get up to date. 
- assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert( - {a: 1}, {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}})); - - jsTestLog("Create two partitions: [1] and [0,2,3,4]."); - nodes[1].disconnect(nodes[0]); - nodes[1].disconnect(nodes[2]); - nodes[1].disconnect(nodes[3]); - nodes[1].disconnect(nodes[4]); - - jsTestLog("Do a write that is replicated to [0,2,3,4]."); - assert.writeOK(nodes[0].getDB(dbName).getCollection(collName + "2").insert({a: 2}, { - writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS} - })); - - jsTestLog("Repartition to: [0,2] and [1,3,4]."); - nodes[1].reconnect(nodes[3]); - nodes[1].reconnect(nodes[4]); - nodes[3].disconnect(nodes[0]); - nodes[3].disconnect(nodes[2]); - nodes[4].disconnect(nodes[0]); - nodes[4].disconnect(nodes[2]); - - jsTestLog("Ensure that 0 steps down and that 1 becomes primary."); - waitForState(nodes[0], ReplSetTest.State.SECONDARY); - waitForState(nodes[1], ReplSetTest.State.PRIMARY); - assert.eq(nodes[1], rst.getPrimary()); - - jsTestLog("Do a write to node 1 on the [1,3,4] side of the partition."); - assert.writeOK(nodes[1].getDB(dbName).getCollection(collName + "3").insert({a: 3})); - - // Turn on failpoint on node 2 to pause rollback after oplog is truncated and minValid is set. - assert.commandWorked(nodes[2].getDB('admin').runCommand( - {configureFailPoint: 'rollbackHangBeforeFinish', mode: 'alwaysOn'})); - - jsTestLog("Repartition to: [0] and [1,2,3,4]."); - nodes[2].disconnect(nodes[0]); - nodes[2].reconnect(nodes[1]); - nodes[2].reconnect(nodes[3]); - nodes[2].reconnect(nodes[4]); - - jsTestLog("Wait for node 2 to go into ROLLBACK and start syncing from node 1."); - // Since nodes 1 and 2 have now diverged, node 2 should go into rollback. - waitForState(nodes[2], ReplSetTest.State.ROLLBACK); - rst.awaitSyncSource(nodes[2], nodes[1]); - - jsTestLog("Wait for failpoint on node 2 to pause rollback before it finishes"); - // Wait for fail point message to be logged. - checkLog.contains(nodes[2], 'rollback - rollbackHangBeforeFinish fail point enabled'); - - jsTestLog("Repartition to: [1,3,4] and [0,2]."); - nodes[2].disconnect(nodes[1]); - nodes[2].reconnect(nodes[0]); - - // Turn off failpoint on node 2 to allow rollback to finish. - assert.commandWorked(nodes[2].getDB('admin').runCommand( - {configureFailPoint: 'rollbackHangBeforeFinish', mode: 'off'})); - - jsTestLog("Wait for node 2 exit ROLLBACK state and go into RECOVERING"); - waitForState(nodes[2], ReplSetTest.State.RECOVERING); - - // At this point node 2 has truncated its oplog back to the common point and is looking - // for a sync source it can use to reach minvalid and get back into SECONDARY state. Node 0 - // is the only node it can reach, but since node 0 doesn't contain node 2's minvalid oplog entry - // node 2 will refuse to use it as a sync source. - checkLog.contains( - nodes[2], "remote oplog does not contain entry with optime matching our required optime"); - - // Ensure our connection to node 0 is re-established, since our - // original connection should have gotten killed after node 0 stepped down. 
- reconnect(nodes[0]); - - var node0RBID = assert.commandWorked(nodes[0].adminCommand('replSetGetRBID')).rbid; - var node1RBID = assert.commandWorked(nodes[1].adminCommand('replSetGetRBID')).rbid; - - jsTestLog("Reconnect all nodes."); - nodes[0].reconnect(nodes[1]); - nodes[0].reconnect(nodes[3]); - nodes[0].reconnect(nodes[4]); - nodes[2].reconnect(nodes[1]); - nodes[2].reconnect(nodes[3]); - nodes[2].reconnect(nodes[4]); - - jsTestLog("Wait for nodes 0 to roll back and both node 0 and 2 to catch up to node 1"); - waitForState(nodes[0], ReplSetTest.State.SECONDARY); - waitForState(nodes[2], ReplSetTest.State.SECONDARY); - rst.awaitReplication(); - - // Ensure our connection to node 0 is re-established, since our connection should have gotten - // killed during node 0's transition to ROLLBACK. - reconnect(nodes[0]); - - // Check that rollback happened on node 0, but not on node 2 since it had already rolled back - // and just needed to finish applying ops to reach minValid. - assert.neq(node0RBID, assert.commandWorked(nodes[0].adminCommand('replSetGetRBID')).rbid); - assert.eq(node1RBID, assert.commandWorked(nodes[1].adminCommand('replSetGetRBID')).rbid); - - // Node 1 should still be primary, and should now be able to satisfy majority writes again. - assert.writeOK(nodes[1].getDB(dbName).getCollection(collName + "4").insert({a: 4}, { - writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS} - })); - - // Verify data consistency between nodes. - rst.checkReplicatedDataHashes(); - rst.checkOplogs(); - rst.stopSet(); -}()); diff --git a/jstests/replsets/double_rollback_early.js b/jstests/replsets/double_rollback_early.js deleted file mode 100644 index 85b07da7a72..00000000000 --- a/jstests/replsets/double_rollback_early.js +++ /dev/null @@ -1,158 +0,0 @@ -/* SERVER-27050 This test causes node 2 to enter rollback, then fail after setting minValid, but - * before truncating the oplog. It will then choose the same sync source (1) and retry the rollback. - * The upstream node itself rolls back at this point. Node 2 should detect this case and fail the - * rollback and refuse to choose node 1 as its sync source because it doesn't have the minValid. - */ - -// Rollback to a stable timestamp does not set a minValid and should be able to sync from any node. -// @tags: [requires_mmapv1] - -(function() { - 'use strict'; - - // Skip db hash check because replset is partitioned. - TestData.skipCheckDBHashes = true; - - load("jstests/libs/check_log.js"); - load("jstests/replsets/rslib.js"); - - var collName = "test.coll"; - var counter = 0; - - var rst = new ReplSetTest({ - nodes: [ - {}, - {}, - {rsConfig: {priority: 0}}, - {rsConfig: {arbiterOnly: true}}, - {rsConfig: {arbiterOnly: true}} - ], - useBridge: true - }); - var nodes = rst.startSet(); - rst.initiate(); - - function stepUp(rst, node) { - var primary = rst.getPrimary(); - if (primary != node) { - try { - assert.commandWorked(primary.adminCommand({replSetStepDown: 1, force: true})); - } catch (ex) { - print("Caught exception while stepping down from node '" + tojson(node.host) + - "': " + tojson(ex)); - } - } - waitForState(node, ReplSetTest.State.PRIMARY); - } - - jsTestLog("Make sure node 0 is primary."); - stepUp(rst, nodes[0]); - assert.eq(nodes[0], rst.getPrimary()); - // Wait for all data bearing nodes to get up to date. 
- assert.writeOK(nodes[0].getCollection(collName).insert( - {a: counter++}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMs}})); - - jsTestLog("Create two partitions: [1] and [0,2,3,4]."); - nodes[1].disconnect(nodes[0]); - nodes[1].disconnect(nodes[2]); - nodes[1].disconnect(nodes[3]); - nodes[1].disconnect(nodes[4]); - - jsTestLog("Do a write that is replicated to [0,2,3,4]."); - assert.writeOK(nodes[0].getCollection(collName).insert( - {a: counter++}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMs}})); - - jsTestLog("Repartition to: [0,2] and [1,3,4]."); - nodes[1].reconnect(nodes[3]); - nodes[1].reconnect(nodes[4]); - nodes[3].disconnect(nodes[0]); - nodes[3].disconnect(nodes[2]); - nodes[4].disconnect(nodes[0]); - nodes[4].disconnect(nodes[2]); - - jsTestLog("Ensure that 0 steps down and that 1 becomes primary."); - waitForState(nodes[0], ReplSetTest.State.SECONDARY); - waitForState(nodes[1], ReplSetTest.State.PRIMARY); - assert.eq(nodes[1], rst.getPrimary()); - - jsTestLog("Do a write to node 1 on the [1,3,4] side of the partition."); - // We track this object to ensure it gets rolled back on node 1 later. - assert.writeOK(nodes[1].getCollection(collName).insert({a: counter++, tracked: true})); - - // Turn on failpoint on node 2 to pause rollback after oplog is truncated and minValid is set. - assert.commandWorked(nodes[2].adminCommand( - {configureFailPoint: 'rollbackHangThenFailAfterWritingMinValid', mode: 'alwaysOn'})); - - jsTestLog("Repartition to: [0] and [1,2,3,4]."); - nodes[2].disconnect(nodes[0]); - nodes[2].reconnect(nodes[1]); - nodes[2].reconnect(nodes[3]); - nodes[2].reconnect(nodes[4]); - - jsTestLog("Wait for node 2 to go into ROLLBACK and start syncing from node 1."); - // Since nodes 1 and 2 have now diverged, node 2 should go into rollback. - waitForState(nodes[2], ReplSetTest.State.ROLLBACK); - rst.awaitSyncSource(nodes[2], nodes[1]); - - jsTestLog("Wait for failpoint on node 2 to pause rollback after it writes minValid"); - // Wait for fail point message to be logged. - checkLog.contains(nodes[2], - 'rollback - rollbackHangThenFailAfterWritingMinValid fail point enabled'); - - // Switch failpoints, causing rollback to fail then pause when it retries. It is important to - // enable the new one before disabling the current one. - assert.commandWorked( - nodes[2].adminCommand({configureFailPoint: 'rollbackHangBeforeStart', mode: 'alwaysOn'})); - assert.commandWorked(nodes[2].adminCommand( - {configureFailPoint: 'rollbackHangThenFailAfterWritingMinValid', mode: 'off'})); - jsTestLog("Wait for failpoint on node 2 to pause rollback after it restarts"); - // Wait for fail point message to be logged. - checkLog.contains(nodes[2], 'rollback - rollbackHangBeforeStart fail point enabled'); - - jsTestLog("Repartition to: [0,3,4] and [1,2]."); - nodes[3].disconnect(nodes[1]); - nodes[3].reconnect(nodes[0]); - nodes[4].disconnect(nodes[1]); - nodes[4].reconnect(nodes[0]); - - jsTestLog("Ensure that 0 becomes primary."); - waitForState(nodes[0], ReplSetTest.State.PRIMARY); - waitForState(nodes[1], ReplSetTest.State.SECONDARY); - assert.eq(nodes[0], rst.getPrimary()); - // Do a write so that node 0 is definitely ahead of node 1. 
- assert.writeOK(nodes[0].getCollection(collName).insert({a: counter++})); - - jsTestLog("Repartition to: [0,1,3,4] and [2] so 1 rolls back and replicates from 0."); - assert.eq(nodes[1].getCollection(collName).count({tracked: true}), 1); - nodes[1].reconnect(nodes[0]); - waitForState(nodes[1], ReplSetTest.State.SECONDARY); - jsTestLog("w:2 write to node 0"); - assert.writeOK(nodes[0].getCollection(collName).insert( - {a: counter++}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMs}})); - reconnect(nodes[1]); // rollback drops connections. - assert.eq(nodes[1].getCollection(collName).count({tracked: true}), 0); - - // Turn off failpoint on node 2 to allow rollback to finish its attempt at rollback from node 1. - // It should fail with a rbid error and get stuck. - jsTestLog("Repartition to: [0,3,4] and [1,2]."); - nodes[1].reconnect(nodes[2]); - assert.adminCommandWorkedAllowingNetworkError( - nodes[2], {configureFailPoint: 'rollbackHangBeforeStart', mode: 'off'}); - - jsTestLog("Wait for node 2 exit ROLLBACK state and go into RECOVERING"); - waitForState(nodes[2], ReplSetTest.State.RECOVERING); - - // At this point node 2 has truncated its oplog back to the common point and is looking - // for a sync source it can use to reach minvalid and get back into SECONDARY state. Node 1 - // is the only node it can reach, but since node 1 doesn't contain node 2's minvalid oplog entry - // node 2 will refuse to use it as a sync source. - checkLog.contains(nodes[2], "Upstream node rolled back. Need to retry our rollback."); - waitForState(nodes[2], ReplSetTest.State.RECOVERING); - - // This log message means that it will not be willing to use node 1 as the sync source when it - // retries. - checkLog.contains( - nodes[2], "remote oplog does not contain entry with optime matching our required optime"); - - rst.stopSet(); -}()); diff --git a/jstests/replsets/mmap_disallows_rc_majority.js b/jstests/replsets/mmap_disallows_rc_majority.js deleted file mode 100644 index 59b5c9fdd4a..00000000000 --- a/jstests/replsets/mmap_disallows_rc_majority.js +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Starting in MongoDB v3.6, `--enableMajorityReadConcern` is always on. Previously the startup - * option parsing would disallow the flag being set with a storage engine that does not support - * the feature. This has now become a runtime check on every query that requests the majority read - * concern. - * - * This test makes sure an MMAP replica set node will return an error when asked to respond to a - * read concern majority request. - * - * This test requires mmapv1, but does not rely on the tag to ensure this. 
- */ -(function() { - "use strict"; - - { - var testServer = MongoRunner.runMongod(); - if (testServer.getDB('admin').serverStatus().storageEngine.supportsCommittedReads) { - jsTest.log("skipping test since storage engine supports committed reads"); - MongoRunner.stopMongod(testServer); - return; - } - MongoRunner.stopMongod(testServer); - } - - let numNodes = 2; - let rst = new ReplSetTest({name: "mmap_disallows_rc_majority", nodes: numNodes}); - rst.startSet(); - rst.initiate(); - - let collName = "test.foo"; - assert.writeOK( - rst.getPrimary().getCollection(collName).insert({}, {writeConcern: {w: numNodes}})); - - assert.throws(function() { - rst.getPrimary().getCollection(collName).findOne({}, {}, {}, "majority"); - }, [], "Expected `findOne` to throw an exception on the failed {readConcern: majority}"); - assert.throws(function() { - rst.getSecondary().getCollection(collName).findOne({}, {}, {}, "majority"); - }, [], "Expected `findOne` to throw an exception on the failed {readConcern: majority}"); - - rst.stopSet(); -})(); diff --git a/jstests/replsets/oplog_replay_on_startup.js b/jstests/replsets/oplog_replay_on_startup.js deleted file mode 100644 index 987505ac58a..00000000000 --- a/jstests/replsets/oplog_replay_on_startup.js +++ /dev/null @@ -1,321 +0,0 @@ -// SERVER-7200 On startup, replica set nodes delete oplog state past the oplog delete point and -// apply any remaining unapplied ops before coming up as a secondary. -// -// This test requires mmapv1 because rollback to a stable timestamp does not allow arbitrary -// writes to the minValid document. This has been replaced by unittests. -// @tags: [requires_persistence, requires_mmapv1] -(function() { - "use strict"; - - var ns = "test.coll"; - - var rst = new ReplSetTest({ - nodes: 1, - }); - - rst.startSet(); - rst.initiate(); - - var conn = rst.getPrimary(); // Waits for PRIMARY state. - var nojournal = Array.contains(conn.adminCommand({getCmdLineOpts: 1}).argv, '--nojournal'); - var storageEngine = jsTest.options().storageEngine; - var term = conn.getCollection('local.oplog.rs').find().sort({$natural: -1}).limit(1).next().t; - - function runTest({ - oplogEntries, - collectionContents, - deletePoint, - begin, - minValid, - expectedState, - expectedApplied, - }) { - if (nojournal && (storageEngine === 'mmapv1') && expectedState === 'FATAL') { - // We can't test fatal states on mmap without a journal because it won't be able - // to start up again. - return; - } - - if (term != -1) { - term++; // Each test gets a new term on PV1 to ensure OpTimes always move forward. - } - - conn = rst.restart(0, {noReplSet: true}); // Restart as a standalone node. - assert.neq(null, conn, "failed to restart"); - var oplog = conn.getCollection('local.oplog.rs'); - var minValidColl = conn.getCollection('local.replset.minvalid'); - var oplogTruncateAfterColl = conn.getCollection('local.replset.oplogTruncateAfterPoint'); - var coll = conn.getCollection(ns); - - // Reset state to empty. - assert.commandWorked(oplog.runCommand('emptycapped')); - coll.drop(); - assert.commandWorked(coll.runCommand('create')); - - var ts = (num) => num === null ? 
Timestamp() : Timestamp(1000, num); - - oplogEntries.forEach((num) => { - assert.writeOK(oplog.insert({ - ts: ts(num), - t: NumberLong(term), - h: NumberLong(1), - op: 'i', - ns: ns, - v: 2, - o: {_id: num}, - })); - }); - - collectionContents.forEach((num) => { - assert.writeOK(coll.insert({_id: num})); - }); - - var injectedMinValidDoc = { - _id: ObjectId(), - - // appliedThrough - begin: { - ts: ts(begin), - t: NumberLong(term), - }, - - // minvalid: - t: NumberLong(term), - ts: ts(minValid), - }; - - var injectedOplogTruncateAfterPointDoc = { - _id: "oplogTruncateAfterPoint", - oplogTruncateAfterPoint: ts(deletePoint) - }; - - // This weird mechanism is the only way to bypass mongod's attempt to fill in null - // Timestamps. - assert.writeOK(minValidColl.remove({})); - assert.writeOK(minValidColl.update({}, {$set: injectedMinValidDoc}, {upsert: true})); - assert.eq(minValidColl.findOne(), - injectedMinValidDoc, - "If the Timestamps differ, the server may be filling in the null timestamps"); - - assert.writeOK(oplogTruncateAfterColl.remove({})); - assert.writeOK(oplogTruncateAfterColl.update( - {}, {$set: injectedOplogTruncateAfterPointDoc}, {upsert: true})); - assert.eq(oplogTruncateAfterColl.findOne(), - injectedOplogTruncateAfterPointDoc, - "If the Timestamps differ, the server may be filling in the null timestamps"); - - rst.stop(0); - - if (expectedState === 'FATAL') { - try { - rst.start(0, {waitForConnect: true}, true); - } catch (e) { - } - rst.stop(0, undefined, {allowedExitCode: MongoRunner.EXIT_ABRUPT}); - return; - } else { - conn = rst.start(0, {waitForConnect: true}, true); - } - - // Wait for the node to go to SECONDARY if it is able. - assert.soon( - () => - conn.adminCommand('serverStatus').metrics.repl.apply.attemptsToBecomeSecondary > 0, - () => conn.adminCommand('serverStatus').metrics.repl.apply.attemptsToBecomeSecondary); - - var isMaster = conn.adminCommand('ismaster'); - switch (expectedState) { - case 'SECONDARY': - // Primary is also acceptable since once a node becomes secondary, it will try to - // become primary if it is eligible and has enough votes (which this node does). - // This is supposed to test that we reach secondary, not that we stay there. - assert(isMaster.ismaster || isMaster.secondary, - 'not PRIMARY or SECONDARY: ' + tojson(isMaster)); - break; - - case 'RECOVERING': - assert(!isMaster.ismaster && !isMaster.secondary, - 'not in RECOVERING: ' + tojson(isMaster)); - - // Restart as a standalone node again so we can read from the collection. - conn = rst.restart(0, {noReplSet: true}); - break; - - case 'FATAL': - doassert("server startup didn't fail when it should have"); - break; - - default: - doassert(`expectedState ${expectedState} is not supported`); - } - - // Ensure the oplog has the entries it should have and none that it shouldn't. - assert.eq(conn.getCollection('local.oplog.rs') - .find({ns: ns, op: 'i'}) - .sort({$natural: 1}) - .map((op) => op.o._id), - expectedApplied); - - // Ensure that all ops that should have been applied were. 
- conn.setSlaveOk(true); - assert.eq(conn.getCollection(ns).find().sort({_id: 1}).map((obj) => obj._id), - expectedApplied); - } - - // - // Normal 3.4 cases - // - - runTest({ - oplogEntries: [1, 2, 3], - collectionContents: [1, 2, 3], - deletePoint: null, - begin: null, - minValid: null, - expectedState: 'SECONDARY', - expectedApplied: [1, 2, 3], - }); - - runTest({ - oplogEntries: [1, 2, 3], - collectionContents: [1, 2, 3], - deletePoint: null, - begin: null, - minValid: 2, - expectedState: 'SECONDARY', - expectedApplied: [1, 2, 3], - }); - - runTest({ - oplogEntries: [1, 2, 3], - collectionContents: [1, 2, 3], - deletePoint: null, - begin: null, - minValid: 3, - expectedState: 'SECONDARY', - expectedApplied: [1, 2, 3], - }); - - runTest({ - oplogEntries: [1, 2, 3], - collectionContents: [1, 2, 3], - deletePoint: null, - begin: 3, - minValid: 3, - expectedState: 'SECONDARY', - expectedApplied: [1, 2, 3], - }); - - runTest({ - oplogEntries: [1, 2, 3], - collectionContents: [1, 2, 3], - deletePoint: 4, - begin: 3, - minValid: 3, - expectedState: 'SECONDARY', - expectedApplied: [1, 2, 3], - }); - - runTest({ - oplogEntries: [1, 2, 3, 4, 5, 6], - collectionContents: [1, 2, 3], - deletePoint: 4, - begin: 3, - minValid: 3, - expectedState: 'SECONDARY', - expectedApplied: [1, 2, 3], - }); - - runTest({ - oplogEntries: [1, 2, 3, /*4,*/ 5, 6], - collectionContents: [1, 2, 3], - deletePoint: 4, - begin: 3, - minValid: 3, - expectedState: 'SECONDARY', - expectedApplied: [1, 2, 3], - }); - - runTest({ - oplogEntries: [1, 2, 3, 4, 5, 6], - collectionContents: [1, 2, 3], - deletePoint: null, - begin: 3, - minValid: 3, - expectedState: 'SECONDARY', - expectedApplied: [1, 2, 3, 4, 5, 6], - }); - - runTest({ - oplogEntries: [1, 2, 3, 4, 5, 6], - collectionContents: [1, 2, 3], - deletePoint: null, - begin: 3, - minValid: 6, - expectedState: 'SECONDARY', - expectedApplied: [1, 2, 3, 4, 5, 6], - }); - - // - // These states should be impossible to get into. - // - - runTest({ - oplogEntries: [1, 2, 3], - collectionContents: [1, 2, 3, 4], - deletePoint: null, - begin: 4, - minValid: null, // doesn't matter. - expectedState: 'FATAL', - }); - - runTest({ - oplogEntries: [4, 5, 6], - collectionContents: [1, 2], - deletePoint: 2, - begin: 3, - minValid: null, // doesn't matter. - expectedState: 'FATAL', - }); - - runTest({ - oplogEntries: [4, 5, 6], - collectionContents: [1, 2], - deletePoint: null, - begin: 3, - minValid: null, // doesn't matter. 
- expectedState: 'FATAL', - }); - - runTest({ - oplogEntries: [1, 2, 3, 4, 5, 6], - collectionContents: [1, 2, 3, 4, 5], - deletePoint: null, - begin: 5, - minValid: 3, - expectedState: 'SECONDARY', - expectedApplied: [1, 2, 3, 4, 5, 6], - }); - - runTest({ - oplogEntries: [1, 2, 3, 4, 5, 6], - collectionContents: [1, 2, 3, 4, 5], - deletePoint: null, - begin: 5, - minValid: null, - expectedState: 'SECONDARY', - expectedApplied: [1, 2, 3, 4, 5, 6], - }); - - runTest({ - oplogEntries: [1, 2, 3, 4, 5], - collectionContents: [1], - deletePoint: 4, - begin: 1, - minValid: 3, - expectedState: 'SECONDARY', - expectedApplied: [1, 2, 3], - }); - - rst.stopSet(); -})(); diff --git a/jstests/replsets/oplog_replay_on_startup_update_and_delete.js b/jstests/replsets/oplog_replay_on_startup_update_and_delete.js deleted file mode 100644 index a7e90bd405e..00000000000 --- a/jstests/replsets/oplog_replay_on_startup_update_and_delete.js +++ /dev/null @@ -1,50 +0,0 @@ -// SERVER-7200 On startup, replica set nodes delete oplog state past the oplog delete point and -// apply any remaining unapplied ops before coming up as a secondary. This test specifically tests -// having an update and a delete of the same document in the same batch. This is a bit of an edge -// case because if the delete has been applied already, the update won't find any documents. -// -// This test requires mmapv1 because rollback to a stable timestamp does not allow arbitrary -// writes to the minValid document. This has been replaced by unittests. -// @tags: [requires_persistence, requires_mmapv1] -(function() { - "use strict"; - - var ns = "test.coll"; - var id = ObjectId(); - - var rst = new ReplSetTest({ - nodes: 1, - }); - - rst.startSet(); - rst.initiate(); - - var conn = rst.getPrimary(); // Waits for PRIMARY state. - - // Do the insert update and delete operations. - var coll = conn.getCollection(ns); - assert.writeOK(coll.insert({_id: id})); - assert.writeOK(coll.update({_id: id}, {$inc: {a: 1}})); - assert.writeOK(coll.remove({_id: id})); - assert.eq(coll.findOne({_id: id}), null); - - // Set the appliedThrough point back to the insert so the update and delete are replayed. - conn = rst.restart(0, {noReplSet: true}); // Restart as a standalone node. - assert.neq(null, conn, "failed to restart"); - var oplog = conn.getCollection('local.oplog.rs'); - oplog.find().forEach(printjsononeline); - assert.eq(oplog.count({ns: ns, op: 'i'}), 1); - var insertOp = oplog.findOne({ns: ns, op: 'i'}); - var term = 't' in insertOp ? insertOp.t : -1; - var minValidColl = conn.getCollection('local.replset.minvalid'); - assert.writeOK(minValidColl.update({}, {$set: {begin: {ts: insertOp.ts, t: term}}})); - printjson({minValidDoc: minValidColl.findOne()}); - - // Make sure it starts up fine again and doesn't have the document. - conn = rst.restart(0); // Restart in replSet mode again. - conn = rst.getPrimary(); // Waits for PRIMARY state. - coll = conn.getCollection(ns); - assert.eq(coll.findOne({_id: id}), null); - - rst.stopSet(); -})(); diff --git a/jstests/replsets/oplog_truncated_on_recovery.js b/jstests/replsets/oplog_truncated_on_recovery.js deleted file mode 100644 index 20b2e325c64..00000000000 --- a/jstests/replsets/oplog_truncated_on_recovery.js +++ /dev/null @@ -1,112 +0,0 @@ -/** - * This test will ensure that recovery from a failed batch application will remove the oplog - * entries from that batch. 
- * - * To do this we: - * -- Create single node replica set - * -- Set minvalid manually on primary way ahead (5 days) - * -- Write some oplog entries newer than minvalid.start - * -- Ensure restarted primary comes up in recovering and truncates the oplog - * -- Success! - * - * This test requires persistence for two reasons: - * 1. To test that a restarted primary will stay in the RECOVERING state when minvalid is set to - * the future. An ephemeral storage engine will not have a minvalid after a restart, so the node - * will start an initial sync in this scenario, invalidating the test. - * 2. It uses a single node replica set, which cannot be restarted in any meaningful way with an - * ephemeral storage engine. - * - * This test requires mmapv1 because rollback to a stable timestamp does not allow arbitrary - * writes to the minValid document. This has been replaced by unittests. - * @tags: [requires_persistence, requires_mmapv1] - */ -(function() { - "use strict"; - - function tsToDate(ts) { - return new Date(ts.getTime() * 1000); - } - - function log(arg) { - jsTest.log(tojson(arg)); - } - - var replTest = new ReplSetTest({name: "oplog_truncated_on_recovery", nodes: 1}); - - var nodes = replTest.startSet(); - replTest.initiate(); - var master = replTest.getPrimary(); - var testDB = master.getDB("test"); - var localDB = master.getDB("local"); - var minvalidColl = localDB["replset.minvalid"]; - var oplogTruncateAfterColl = localDB["replset.oplogTruncateAfterPoint"]; - - // Write op - log(assert.writeOK(testDB.foo.save({_id: 1, a: 1}, {writeConcern: {w: 1}}))); - - // Set minvalid to something far in the future for the current primary, to simulate recovery. - // Note: This is so far in the future (5 days) that it will never become secondary. - var farFutureTS = new Timestamp( - Math.floor(new Date().getTime() / 1000) + (60 * 60 * 24 * 5 /* in five days */), 0); - var rsgs = assert.commandWorked(localDB.adminCommand("replSetGetStatus")); - log(rsgs); - var primaryOpTime = rsgs.members[0].optime; - log(primaryOpTime); - - // Set the start of the failed batch - // TODO this test should restart in stand-alone mode to futz with the state rather than trying - // to do it on a running primary. - - jsTest.log("future TS: " + tojson(farFutureTS) + ", date:" + tsToDate(farFutureTS)); - var divergedTS = new Timestamp(primaryOpTime.ts.t, primaryOpTime.ts.i + 1); - // We do an update in case there is a minvalid document on the primary already. - // If the doc doesn't exist then upsert:true will create it, and the writeConcern ensures - // that update returns details of the write, like whether an update or insert was performed. - log(assert.writeOK(minvalidColl.update({}, - { - ts: farFutureTS, - t: NumberLong(-1), - begin: primaryOpTime, - }, - {upsert: true, writeConcern: {w: 1}}))); - - log(assert.writeOK(oplogTruncateAfterColl.update({_id: "oplogTruncateAfterPoint"}, - {oplogTruncateAfterPoint: divergedTS}, - {upsert: true, writeConcern: {w: 1}}))); - - // Insert a diverged oplog entry that will be truncated after restart. 
- log(assert.writeOK(localDB.oplog.rs.insert({ - _id: ObjectId(), - ns: "", - ts: divergedTS, - op: "n", - h: NumberLong(0), - t: NumberLong(-1), - o: {} - }))); - log(localDB.oplog.rs.find().toArray()); - log(assert.commandWorked(localDB.adminCommand("replSetGetStatus"))); - log("restart primary"); - replTest.restart(master); - replTest.waitForState(master, ReplSetTest.State.RECOVERING); - - assert.soon(function() { - var mv; - try { - mv = minvalidColl.findOne(); - } catch (e) { - return false; - } - var msg = "ts !=, " + farFutureTS + "(" + tsToDate(farFutureTS) + "), mv:" + tojson(mv) + - " - " + tsToDate(mv.ts); - assert.eq(farFutureTS, mv.ts, msg); - - var lastTS = localDB.oplog.rs.find().sort({$natural: -1}).limit(-1).next().ts; - log(localDB.oplog.rs.find().toArray()); - assert.eq(primaryOpTime.ts, lastTS); - return true; - }); - - // Shut down the set and finish the test. - replTest.stopSet(); -})(); diff --git a/jstests/replsets/rollback_cmd_unrollbackable.js b/jstests/replsets/rollback_cmd_unrollbackable.js deleted file mode 100644 index 5bdd16539fb..00000000000 --- a/jstests/replsets/rollback_cmd_unrollbackable.js +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Test that a rollback of a non-rollbackable command causes a message to be logged - * - * If all data-bearing nodes in a replica set are using an ephemeral storage engine, the set will - * not be able to survive a scenario where all data-bearing nodes are down simultaneously. In such a - * scenario, none of the members will have any data, and upon restart will each look for a member to - * initial sync from, so no primary will be elected. This test induces such a scenario, so cannot be - * run on ephemeral storage engines. - * - * This only tests rollback via refetch behavior and thus only runs on mmapv1. - * @tags: [requires_persistence, requires_mmapv1] -*/ - -// Sets up a replica set and grabs things for later. -var name = "rollback_cmd_unrollbackable"; -var replTest = new ReplSetTest({name: name, nodes: 3}); -var nodes = replTest.nodeList(); -var conns = replTest.startSet(); -replTest.initiate({ - "_id": name, - "members": [ - {"_id": 0, "host": nodes[0], priority: 3}, - {"_id": 1, "host": nodes[1]}, - {"_id": 2, "host": nodes[2], arbiterOnly: true} - ] -}); -var a_conn = conns[0]; -var b_conn = conns[1]; -var AID = replTest.getNodeId(a_conn); -var BID = replTest.getNodeId(b_conn); - -// Gets master and do an initial write. -replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY); -var master = replTest.getPrimary(); -assert(master === conns[0], "conns[0] assumed to be master"); -assert(a_conn.host === master.host, "a_conn assumed to be master"); -var options = {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}, upsert: true}; -assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); - -// Shuts down the master. -replTest.stop(AID); - -// Inserts a fake oplog entry with a non-rollbackworthy command. -master = replTest.getPrimary(); -assert(b_conn.host === master.host, "b_conn assumed to be master"); -options = { - writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}, - upsert: true -}; -// Inserts another oplog entry to set minValid ahead. 
-assert.writeOK(b_conn.getDB(name).foo.insert({x: 123})); -var oplog_entry = b_conn.getDB("local").oplog.rs.find().sort({$natural: -1})[0]; -oplog_entry["ts"] = Timestamp(oplog_entry["ts"].t, oplog_entry["ts"].i + 1); -oplog_entry["op"] = "c"; -oplog_entry["o"] = { - "emptycapped": 1 -}; -assert.writeOK(b_conn.getDB("local").oplog.rs.insert(oplog_entry)); - -// Shuts down B and brings back the original master. -replTest.stop(BID); -replTest.restart(AID); -master = replTest.getPrimary(); -assert(a_conn.host === master.host, "a_conn assumed to be master"); - -// Does a write so that B will have to roll back. -options = { - writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}, - upsert: true -}; -assert.writeOK(a_conn.getDB(name).foo.insert({x: 2}, options)); - -// Restarts B, which should attempt to rollback but then fassert. -jsTestLog("Restarting node B (" + b_conn.host + ") and waiting for it to fassert."); -clearRawMongoProgramOutput(); - -try { - b_conn = replTest.start(BID, {waitForConnect: true}, true /*restart*/); -} catch (e) { - // We swallow the exception from ReplSetTest#start() because it means that the server - // fassert()'d before the mongo shell could connect to it. -} -// Wait for node B to fassert -assert.soon(function() { - try { - b_conn.getDB("local").runCommand({ping: 1}); - } catch (e) { - return true; - } - return false; -}, "Node did not fassert", 60 * 1000); - -replTest.stop(BID, undefined, {allowedExitCode: MongoRunner.EXIT_ABRUPT}); - -var msg = RegExp("Can't roll back this command yet: "); -assert(rawMongoProgramOutput().match(msg), - "Did not see a log entry about skipping the nonrollbackable command during rollback"); - -replTest.stopSet(); diff --git a/jstests/replsets/rollback_collMod_fatal.js b/jstests/replsets/rollback_collMod_fatal.js deleted file mode 100644 index cc6d9dc7f95..00000000000 --- a/jstests/replsets/rollback_collMod_fatal.js +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Test that a rollback of collMod altering TTL will cause the node to go into a FATAL state - * - * If all data-bearing nodes in a replica set are using an ephemeral storage engine, the set will - * not be able to survive a scenario where all data-bearing nodes are down simultaneously. In such a - * scenario, none of the members will have any data, and upon restart will each look for a member to - * initial sync from, so no primary will be elected. This test induces such a scenario, so cannot be - * run on ephemeral storage engines. - - * This only tests rollback via refetch behavior and thus only runs on mmapv1. - * @tags: [requires_persistence, requires_mmapv1] - */ - -// Sets up a replica set and grabs things for later. -var name = "rollback_collMod_fatal"; -var replTest = new ReplSetTest({name: name, nodes: 3}); -var nodes = replTest.nodeList(); -var conns = replTest.startSet(); -replTest.initiate({ - "_id": name, - "members": [ - {"_id": 0, "host": nodes[0], priority: 3}, - {"_id": 1, "host": nodes[1]}, - {"_id": 2, "host": nodes[2], arbiterOnly: true} - ] -}); -var a_conn = conns[0]; -var b_conn = conns[1]; -var AID = replTest.getNodeId(a_conn); -var BID = replTest.getNodeId(b_conn); - -replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY); - -// Gets master and does an initial write. 
-var master = replTest.getPrimary(); -assert(master === conns[0], "conns[0] assumed to be master"); -assert(a_conn.host === master.host, "a_conn assumed to be master"); -var options = {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}, upsert: true}; -a_conn.getDB(name).foo.ensureIndex({x: 1}, {expireAfterSeconds: 3600}); -assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options)); - -// Shuts down the master. -replTest.stop(AID); - -// Does a collMod altering TTL which should cause FATAL when rolled back. -master = replTest.getPrimary(); -assert(b_conn.host === master.host, "b_conn assumed to be master"); -assert.commandWorked(b_conn.getDB(name).runCommand( - {collMod: "foo", index: {keyPattern: {x: 1}, expireAfterSeconds: 10}})); - -// Shuts down B and brings back the original master. -replTest.stop(BID); -replTest.restart(AID); -master = replTest.getPrimary(); -assert(a_conn.host === master.host, "a_conn assumed to be master"); - -// Does a write so that B will have to roll back. -options = { - writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}, - upsert: true -}; -assert.writeOK(a_conn.getDB(name).foo.insert({x: 2}, options)); - -// Restarts B, which should attempt rollback but then fassert. -clearRawMongoProgramOutput(); -try { - b_conn = replTest.start(BID, {waitForConnect: true}, true /*restart*/); -} catch (e) { - // We swallow the exception from ReplSetTest#start() because it means that the server - // fassert()'d before the mongo shell could connect to it. -} -// Wait for node B to fassert -assert.soon(function() { - try { - b_conn.getDB("local").runCommand({ping: 1}); - } catch (e) { - return true; - } - return false; -}, "Node did not fassert", 60 * 1000); - -replTest.stop(BID, undefined, {allowedExitCode: MongoRunner.EXIT_ABRUPT}); - -assert(rawMongoProgramOutput().match("Cannot roll back a collMod command"), "B failed to fassert"); - -replTest.stopSet(); diff --git a/jstests/replsets/rollback_transaction_table.js b/jstests/replsets/rollback_transaction_table.js index af32b327a88..53f1feda455 100644 --- a/jstests/replsets/rollback_transaction_table.js +++ b/jstests/replsets/rollback_transaction_table.js @@ -15,13 +15,14 @@ * - The txnNumber for the first session id is the original value. * - There is no record for the second session id. * - A record for the third session id was created during oplog replay. - * - * TODO(SERVER-33879): Unblacklist this test from WiredTiger. - * @tags: [requires_mmapv1] */ (function() { "use strict"; + // TODO(SERVER-35654): Re-enable once fixed. + if (true) + return; + load("jstests/libs/retryable_writes_util.js"); if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) { diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js index a6e8389df3a..d2e46ff1ba4 100644 --- a/jstests/sharding/auth.js +++ b/jstests/sharding/auth.js @@ -3,8 +3,7 @@ * authentication is used * * This test is labeled resource intensive because its total io_write is 30MB compared to a median - * of 5MB across all sharding tests in wiredTiger. Its total io_write is 630MB compared to a median - * of 135MB in mmapv1. + * of 5MB across all sharding tests in wiredTiger. 
* @tags: [resource_intensive] */ (function() { diff --git a/jstests/sharding/auto_rebalance_parallel_replica_sets.js b/jstests/sharding/auto_rebalance_parallel_replica_sets.js index fdcd6457fe4..35c9132b061 100644 --- a/jstests/sharding/auto_rebalance_parallel_replica_sets.js +++ b/jstests/sharding/auto_rebalance_parallel_replica_sets.js @@ -1,9 +1,5 @@ /** * Tests that the cluster is balanced in parallel in one balancer round (replica sets). - * - * This test is labeled resource intensive because its total io_write is 900MB compared to a median - * of 135MB across all sharding tests in mmapv1. - * @tags: [resource_intensive] */ (function() { 'use strict'; diff --git a/jstests/sharding/bouncing_count.js b/jstests/sharding/bouncing_count.js index 090c6f7f627..f4deb4335e5 100644 --- a/jstests/sharding/bouncing_count.js +++ b/jstests/sharding/bouncing_count.js @@ -1,9 +1,5 @@ /** * Tests whether new sharding is detected on insert by mongos - * - * This test is labeled resource intensive because its total io_write is 650MB compared to a median - * of 135MB across all sharding tests in mmapv1. - * @tags: [resource_intensive] */ (function() { 'use strict'; diff --git a/jstests/sharding/commands_that_write_accept_wc_configRS.js b/jstests/sharding/commands_that_write_accept_wc_configRS.js index ba2f3f559c6..9fc95563eab 100644 --- a/jstests/sharding/commands_that_write_accept_wc_configRS.js +++ b/jstests/sharding/commands_that_write_accept_wc_configRS.js @@ -8,8 +8,7 @@ * commands fail, they should return an actual error, not just a writeConcernError. * * This test is labeled resource intensive because its total io_write is 70MB compared to a median - * of 5MB across all sharding tests in wiredTiger. Its total io_write is 1900MB compared to a median - * of 135MB in mmapv1. + * of 5MB across all sharding tests in wiredTiger. * @tags: [resource_intensive] */ load('jstests/libs/write_concern_util.js'); diff --git a/jstests/sharding/commands_that_write_accept_wc_shards.js b/jstests/sharding/commands_that_write_accept_wc_shards.js index 37d95c3c189..2d41ef9bace 100644 --- a/jstests/sharding/commands_that_write_accept_wc_shards.js +++ b/jstests/sharding/commands_that_write_accept_wc_shards.js @@ -6,8 +6,7 @@ * on config servers. * * This test is labeled resource intensive because its total io_write is 58MB compared to a median - * of 5MB across all sharding tests in wiredTiger. Its total io_write is 4200MB compared to a median - * of 135MB in mmapv1. + * of 5MB across all sharding tests in wiredTiger. * @tags: [resource_intensive] */ load('jstests/libs/write_concern_util.js'); diff --git a/jstests/sharding/large_chunk.js b/jstests/sharding/large_chunk.js index 786ac576a0d..5862483138a 100644 --- a/jstests/sharding/large_chunk.js +++ b/jstests/sharding/large_chunk.js @@ -2,8 +2,7 @@ * Where we test operations dealing with large chunks * * This test is labeled resource intensive because its total io_write is 220MB compared to a median - * of 5MB across all sharding tests in wiredTiger. Its total io_write is 1160MB compared to a median - * of 135MB in mmapv1. + * of 5MB across all sharding tests in wiredTiger. 
* @tags: [resource_intensive] */ (function() { diff --git a/jstests/sharding/migrateBig_balancer.js b/jstests/sharding/migrateBig_balancer.js index 0b0b231afb7..37cba54f498 100644 --- a/jstests/sharding/migrateBig_balancer.js +++ b/jstests/sharding/migrateBig_balancer.js @@ -1,7 +1,6 @@ /** * This test is labeled resource intensive because its total io_write is 95MB compared to a median - * of 5MB across all sharding tests in wiredTiger. Its total io_write is 1086MB compared to a median - * of 135MB in mmapv1. + * of 5MB across all sharding tests in wiredTiger. * @tags: [resource_intensive] */ (function() { diff --git a/jstests/sharding/move_chunk_wc.js b/jstests/sharding/move_chunk_wc.js index 3d7e1e9edb9..601b327b76e 100644 --- a/jstests/sharding/move_chunk_wc.js +++ b/jstests/sharding/move_chunk_wc.js @@ -6,10 +6,6 @@ * It then passes a writeConcern too high for the to shard and sees that it fails. It then passes * a writeConcern too high for the from shard and sees that that fails. moveChunk does not yield * a writeConcernError. It should simply fail when the writeConcern is not met on the shards. - * - * This test is labeled resource intensive because its total io_write is 617MB compared to a median - * of 135MB across all sharding tests in mmapv1. - * @tags: [resource_intensive] */ load('jstests/libs/write_concern_util.js'); diff --git a/jstests/sharding/read_pref.js b/jstests/sharding/read_pref.js index 6f0db25f90e..99e662d57b1 100644 --- a/jstests/sharding/read_pref.js +++ b/jstests/sharding/read_pref.js @@ -1,10 +1,6 @@ /** * Integration test for read preference and tagging. The more comprehensive unit test can be found * in dbtests/replica_set_monitor_test.cpp. - * - * This test is labeled resource intensive because its total io_write is 706MB compared to a median - * of 135MB across all sharding tests in mmapv1. - * @tags: [resource_intensive] */ // Checking UUID consistency involves talking to a shard node, which in this test is shutdown diff --git a/jstests/sharding/read_pref_cmd.js b/jstests/sharding/read_pref_cmd.js index 0576d71231a..8c2704a3886 100644 --- a/jstests/sharding/read_pref_cmd.js +++ b/jstests/sharding/read_pref_cmd.js @@ -1,7 +1,6 @@ /** * This test is labeled resource intensive because its total io_write is 47MB compared to a median - * of 5MB across all sharding tests in wiredTiger. Its total io_write is 1540MB compared to a median - * of 135MB in mmapv1. + * of 5MB across all sharding tests in wiredTiger. * @tags: [resource_intensive] */ load("jstests/replsets/rslib.js"); diff --git a/jstests/sharding/recovering_slaveok.js b/jstests/sharding/recovering_slaveok.js index 182bdff6f33..5e2c0429f95 100644 --- a/jstests/sharding/recovering_slaveok.js +++ b/jstests/sharding/recovering_slaveok.js @@ -1,10 +1,6 @@ /** * This tests that slaveOk'd queries in sharded setups get correctly routed when a slave goes into * RECOVERING state, and don't break - * - * This test is labeled resource intensive because its total io_write is 748MB compared to a median - * of 135MB across all sharding tests in mmapv1. - * @tags: [resource_intensive] */ // Shard secondaries are restarted, which may cause that shard's primary to stepdown while it does diff --git a/jstests/sharding/remove2.js b/jstests/sharding/remove2.js index 05f2b1e0368..55258dec663 100644 --- a/jstests/sharding/remove2.js +++ b/jstests/sharding/remove2.js @@ -2,8 +2,7 @@ * Test that removing and re-adding shard works correctly. 
* * This test is labeled resource intensive because its total io_write is 59MB compared to a median - * of 5MB across all sharding tests in wiredTiger. Its total io_write is 918MB compared to a median - * of 135MB in mmapv1. + * of 5MB across all sharding tests in wiredTiger. * @tags: [resource_intensive] */ load("jstests/replsets/rslib.js"); diff --git a/jstests/sharding/sharding_migrate_cursor1.js b/jstests/sharding/sharding_migrate_cursor1.js index fe90a05ec46..0fef085d5c6 100644 --- a/jstests/sharding/sharding_migrate_cursor1.js +++ b/jstests/sharding/sharding_migrate_cursor1.js @@ -2,8 +2,7 @@ * SERVER-2068 * * This test is labeled resource intensive because its total io_write is 131MB compared to a median - * of 5MB across all sharding tests in wiredTiger. Its total io_write is 1230MB compared to a median - * of 135MB in mmapv1. + * of 5MB across all sharding tests in wiredTiger. * @tags: [resource_intensive] */ (function() { diff --git a/jstests/sharding/sharding_rs1.js b/jstests/sharding/sharding_rs1.js index 07af5b03862..c5022348fa6 100644 --- a/jstests/sharding/sharding_rs1.js +++ b/jstests/sharding/sharding_rs1.js @@ -1,9 +1,5 @@ /** * tests sharding with replica sets - * - * This test is labeled resource intensive because its total io_write is 798MB compared to a median - * of 135MB across all sharding tests in mmapv1. - * @tags: [resource_intensive] */ (function() { 'use strict'; diff --git a/jstests/sharding/sharding_system_namespaces.js b/jstests/sharding/sharding_system_namespaces.js index 9d0886105dc..b46487dacd8 100644 --- a/jstests/sharding/sharding_system_namespaces.js +++ b/jstests/sharding/sharding_system_namespaces.js @@ -7,9 +7,6 @@ // system.namespaces on the donor, which is empty on wiredtiger. // As a result, the new collection created on receiver has different // options. -// -// P.S. wiredtiger options are not valid for MMAPv1, but MMAPv1 will -// keep and ignore them. var st = new ShardingTest({shards: 2}); diff --git a/jstests/sharding/user_flags_sharded.js b/jstests/sharding/user_flags_sharded.js deleted file mode 100644 index 4ddb11cbc4b..00000000000 --- a/jstests/sharding/user_flags_sharded.js +++ /dev/null @@ -1,65 +0,0 @@ -// Test that when user flags are set on a collection, -// then collection is sharded, flags get carried over. -(function() { - - if (jsTest.options().storageEngine === "mmapv1") { - // the dbname and collection we'll be working with - var dbname = "testDB"; - var coll = "userFlagsColl"; - var ns = dbname + "." + coll; - - // First create fresh collection on a new standalone mongod that will become a shard. 
- var newShardConn = MongoRunner.runMongod({"shardsvr": ""}); - var db1 = newShardConn.getDB(dbname); - var t = db1.getCollection(coll); - print(t); - db1.getCollection(coll).drop(); // in case collection already existed - db1.createCollection(coll); - - // Then verify the new collection has userFlags set to 0 - var collstats = db1.getCollection(coll).stats(); - print("*************** Fresh Collection Stats ************"); - printjson(collstats); - assert.eq(collstats.userFlags, 1, "fresh collection doesn't have userFlags = 1 "); - - // Now we modify the collection with the usePowerOf2Sizes flag - var res = db1.runCommand({"collMod": coll, "usePowerOf2Sizes": false}); - assert.eq(res.ok, 1, "collMod failed"); - - // and insert some stuff, for the hell of it - var numdocs = 20; - for (i = 0; i < numdocs; i++) { - assert.writeOK(db1.getCollection(coll).insert({_id: i})); - } - - // Next verify that userFlags has changed to 0 - collstats = db1.getCollection(coll).stats(); - print("*************** Collection Stats After CollMod ************"); - printjson(collstats); - assert.eq(collstats.userFlags, 0, "modified collection should have userFlags = 0 "); - - // start up a new sharded cluster, and add previous mongod - var s = new ShardingTest({name: "user_flags", shards: 1}); - assert(s.admin.runCommand({addshard: newShardConn.host, name: "myShard"}).ok, - "did not accept new shard"); - - // enable sharding of the collection. Only 1 chunk initially, so move it to - // other shard to create the collection on that shard - s.adminCommand({enablesharding: dbname}); - s.adminCommand({shardcollection: ns, key: {_id: 1}}); - s.adminCommand( - {moveChunk: ns, find: {_id: 1}, to: s.shard0.shardName, _waitForDelete: true}); - - print("*************** Collection Stats On Other Shard ************"); - var shard2 = s._connections[0].getDB(dbname); - shard2stats = shard2.getCollection(coll).stats(); - printjson(shard2stats); - - assert.eq(shard2stats.count, numdocs, "moveChunk didn't succeed"); - assert.eq(shard2stats.userFlags, 0, "new shard should also have userFlags = 0 "); - - MongoRunner.stopMongod(newShardConn); - s.stop(); - } - -})(); diff --git a/jstests/sharding/zbigMapReduce.js b/jstests/sharding/zbigMapReduce.js index adfa7297679..10541ecd16b 100644 --- a/jstests/sharding/zbigMapReduce.js +++ b/jstests/sharding/zbigMapReduce.js @@ -2,8 +2,7 @@ * This test is skipped on 32-bit platforms * * This test is labeled resource intensive because its total io_write is 625MB compared to a median - * of 5MB across all sharding tests in wiredTiger. Its total io_write is 3387MB compared to a median - * of 135MB in mmapv1. + * of 5MB across all sharding tests in wiredTiger. * @tags: [resource_intensive] */ function setupTest() { |