path: root/jstests/disk
author     Geert Bosch <geert@mongodb.com>  2018-06-12 17:27:01 -0400
committer  Geert Bosch <geert@mongodb.com>  2018-06-25 16:47:18 -0400
commit     ab0c426e60c4bdcc49b5a48a93f84828414d5ba6 (patch)
tree       59eea717b0cae54e39b4981d654fd58d31d7b286 /jstests/disk
parent     5339c9a55181662545652ab7106c8f4e55109327 (diff)
download   mongo-ab0c426e60c4bdcc49b5a48a93f84828414d5ba6.tar.gz
SERVER-35591 Remove MMAPv1 testing
Diffstat (limited to 'jstests/disk')
-rw-r--r--  jstests/disk/datafile_options.js                                37
-rw-r--r--  jstests/disk/directoryperdb.js                                  26
-rw-r--r--  jstests/disk/diskfull.js                                        15
-rw-r--r--  jstests/disk/filesize.js                                        44
-rw-r--r--  jstests/disk/newcollection.js                                   39
-rw-r--r--  jstests/disk/parallel_collection_scan_on_capped_collection.js   71
-rw-r--r--  jstests/disk/preallocate.js                                     51
-rw-r--r--  jstests/disk/preallocate2.js                                    21
-rw-r--r--  jstests/disk/preallocate_directoryperdb.js                      55
-rw-r--r--  jstests/disk/quota.js                                           51
-rw-r--r--  jstests/disk/quota2.js                                          36
-rw-r--r--  jstests/disk/quota3.js                                          23
-rw-r--r--  jstests/disk/repair.js                                          66
-rw-r--r--  jstests/disk/repair2.js                                        147
-rw-r--r--  jstests/disk/repair3.js                                         77
-rw-r--r--  jstests/disk/repair4.js                                         53
-rw-r--r--  jstests/disk/repair5.js                                         57
17 files changed, 6 insertions, 863 deletions
diff --git a/jstests/disk/datafile_options.js b/jstests/disk/datafile_options.js
deleted file mode 100644
index 13d8a359693..00000000000
--- a/jstests/disk/datafile_options.js
+++ /dev/null
@@ -1,37 +0,0 @@
-// This test fiddles with preallocation, an mmap only behavior.
-// @tags: [requires_mmapv1]
-
-var baseName = "jstests_disk_datafile_options";
-
-load('jstests/libs/command_line/test_parsed_options.js');
-
-jsTest.log("Testing \"noprealloc\" command line option");
-var expectedResult = {"parsed": {"storage": {"mmapv1": {"preallocDataFiles": false}}}};
-testGetCmdLineOptsMongod({noprealloc: ""}, expectedResult);
-
-jsTest.log("Testing \"storage.mmapv1.preallocDataFiles\" config file option");
-expectedResult = {
- "parsed": {
- "config": "jstests/libs/config_files/enable_prealloc.json",
- "storage": {"mmapv1": {"preallocDataFiles": true}}
- }
-};
-testGetCmdLineOptsMongod({config: "jstests/libs/config_files/enable_prealloc.json"},
- expectedResult);
-
-jsTest.log("Testing with no explicit data file option setting");
-expectedResult = {
- "parsed": {"storage": {}}
-};
-testGetCmdLineOptsMongod({}, expectedResult);
-
-// Test that we preserve switches explicitly set to false in config files. See SERVER-13439.
-jsTest.log("Testing explicitly disabled \"noprealloc\" config file option");
-expectedResult = {
- "parsed": {
- "config": "jstests/libs/config_files/disable_noprealloc.ini",
- "storage": {"mmapv1": {"preallocDataFiles": true}}
- }
-};
-testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_noprealloc.ini"},
- expectedResult);
diff --git a/jstests/disk/directoryperdb.js b/jstests/disk/directoryperdb.js
index 25fc2292521..285b5588115 100644
--- a/jstests/disk/directoryperdb.js
+++ b/jstests/disk/directoryperdb.js
@@ -5,14 +5,11 @@ var storageEngine = db.serverStatus().storageEngine.name;
// The pattern which matches the names of database files
var dbFileMatcher;
-if (storageEngine == 'mmapv1') {
- // Matches mmapv1 *.ns and *.0, *.1, etc files.
- dbFileMatcher = /\.(ns|\d+)$/;
-} else if (storageEngine == 'wiredTiger') {
+if (storageEngine == 'wiredTiger') {
// Matches wiredTiger collection-*.wt and index-*.wt files
dbFileMatcher = /(collection|index)-.+\.wt$/;
} else {
- assert(false, 'This test must be run against mmapv1 or wiredTiger');
+ assert(false, 'This test must be run against wiredTiger');
}
// Set up helper functions.
@@ -27,9 +24,7 @@ assertDocumentCount = function(db, count) {
* Returns the current connection which gets restarted with wiredtiger.
*/
checkDBFilesInDBDirectory = function(conn, dbToCheck) {
- if (storageEngine == 'mmapv1') {
- conn.adminCommand({fsync: 1});
- } else if (storageEngine == 'wiredTiger') {
+ if (storageEngine == 'wiredTiger') {
MongoRunner.stopMongod(conn);
conn = MongoRunner.runMongod({dbpath: dbpath, directoryperdb: '', restart: true});
}
@@ -64,9 +59,7 @@ checkDBFilesInDBDirectory = function(conn, dbToCheck) {
* Returns the restarted connection with wiredtiger.
*/
checkDBDirectoryNonexistent = function(conn, dbToCheck) {
- if (storageEngine == 'mmapv1') {
- conn.adminCommand({fsync: 1});
- } else if (storageEngine == 'wiredTiger') {
+ if (storageEngine == 'wiredTiger') {
MongoRunner.stopMongod(conn);
conn = MongoRunner.runMongod({dbpath: dbpath, directoryperdb: '', restart: true});
}
@@ -82,14 +75,7 @@ checkDBDirectoryNonexistent = function(conn, dbToCheck) {
}
// Check db directories to ensure db files in them have been destroyed.
- // mmapv1 removes the database directory, pending SERVER-1379.
- if (storageEngine == 'mmapv1') {
- var files = listFiles(dbpath);
- var fileNotFound = true;
- for (f in files) {
- assert(files[f].name != dbToCheck, 'Directory ' + dbToCheck + ' still exists');
- }
- } else if (storageEngine == 'wiredTiger') {
+ if (storageEngine == 'wiredTiger') {
var dir = dbpath + dbToCheck;
// The KV catalog escapes non alpha-numeric characters with its UTF-8 byte sequence in
// decimal when creating the directory on disk.
@@ -169,4 +155,4 @@ if (!_isWindows()) {
assertDocumentCount(dbUU, 1);
m = checkDBFilesInDBDirectory(m, dbUU);
}
-MongoRunner.stopMongod(m);
\ No newline at end of file
+MongoRunner.stopMongod(m);
diff --git a/jstests/disk/diskfull.js b/jstests/disk/diskfull.js
deleted file mode 100644
index 78a31f609ba..00000000000
--- a/jstests/disk/diskfull.js
+++ /dev/null
@@ -1,15 +0,0 @@
-// Enable failpoint
-
-// The `allocateDiskFull` failpoint is mmap only.
-// @tags: [requires_mmapv1]
-assert.commandWorked(db.adminCommand({configureFailPoint: "allocateDiskFull", mode: "alwaysOn"}));
-
-var d = db.getSisterDB("DiskFullTestDB");
-var c = d.getCollection("DiskFullTestCollection");
-
-var writeError1 = c.insert({a: 6}).getWriteError();
-assert.eq(12520, writeError1.code);
-
-// All subsequent requests should fail
-var writeError2 = c.insert({a: 6}).getWriteError();
-assert.eq(12520, writeError2.code);
diff --git a/jstests/disk/filesize.js b/jstests/disk/filesize.js
deleted file mode 100644
index 709c82612ed..00000000000
--- a/jstests/disk/filesize.js
+++ /dev/null
@@ -1,44 +0,0 @@
-// Test for SERVER-7430: Warning about smallfiles should include filename
-
-// `--smallfiles` is mmap only.
-// @tags: [requires_mmapv1]
-var baseName = "filesize";
-
-// Start mongod with --smallfiles
-var m = MongoRunner.runMongod({nojournal: "", smallfiles: ""});
-
-var db = m.getDB(baseName);
-
-// Skip on 32 bits, since 32-bit servers don't warn about small files
-if (db.serverBuildInfo().bits == 32) {
- print("Skip on 32-bit");
- MongoRunner.stopMongod(m);
-} else {
- // Restart mongod without --smallFiles
- MongoRunner.stopMongod(m);
- m = MongoRunner.runMongod({
- restart: true,
- cleanData: false,
- dbpath: m.dbpath,
- port: m.port,
- nojournal: "",
- });
-
- db = m.getDB(baseName);
- var log = db.adminCommand({getLog: "global"}).log;
-
- // Find log message like:
- // "openExisting file size 16777216 but
- // mmapv1GlobalOptions.smallfiles=false: /data/db/filesize/local.0"
- var found = false, logline = '';
- for (i = log.length - 1; i >= 0; i--) {
- logline = log[i];
- if (logline.indexOf("openExisting file") >= 0 && logline.indexOf("local.0") >= 0) {
- found = true;
- break;
- }
- }
-
- assert(found);
- MongoRunner.stopMongod(m);
-}
diff --git a/jstests/disk/newcollection.js b/jstests/disk/newcollection.js
deleted file mode 100644
index fda2a59e498..00000000000
--- a/jstests/disk/newcollection.js
+++ /dev/null
@@ -1,39 +0,0 @@
-// SERVER-594 test
-
-// When `capped: false`, the `size` option on `createCollection` is only considered by mmapv1.
-// @tags: [requires_mmapv1]
-
-var baseName = "jstests_disk_newcollection";
-var m = MongoRunner.runMongod({noprealloc: "", smallfiles: ""});
-db = m.getDB("test");
-
-var t = db[baseName];
-var getTotalNonLocalNonAdminSize = function() {
- var totalNonLocalNonAdminDBSize = 0;
- m.getDBs().databases.forEach(function(dbStats) {
- // We accept the local database's and admin database's space overhead.
- if (dbStats.name == "local" || dbStats.name == "admin")
- return;
-
-        // Databases with "sizeOnDisk=1" and "empty=true" don't actually take up space on disk.
- // See SERVER-11051.
- if (dbStats.sizeOnDisk == 1 && dbStats.empty)
- return;
- totalNonLocalNonAdminDBSize += dbStats.sizeOnDisk;
- });
- return totalNonLocalNonAdminDBSize;
-};
-
-for (var pass = 0; pass <= 1; pass++) {
- db.createCollection(baseName, {size: 15.8 * 1024 * 1024});
- if (pass == 0)
- t.drop();
-
- size = getTotalNonLocalNonAdminSize();
- t.save({});
- assert.eq(size, getTotalNonLocalNonAdminSize());
- assert(size <= 32 * 1024 * 1024);
-
- t.drop();
-}
-MongoRunner.stopMongod(m);
\ No newline at end of file
diff --git a/jstests/disk/parallel_collection_scan_on_capped_collection.js b/jstests/disk/parallel_collection_scan_on_capped_collection.js
deleted file mode 100644
index cc95c32a2b5..00000000000
--- a/jstests/disk/parallel_collection_scan_on_capped_collection.js
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
-* Tests that calling the 'parallelCollectionScan' command on a capped collection
-* always only returns one cursor and that the document insertion order is maintained
-* when iterating over that cursor.
-*
-* This test requires the use of mmapv1 as the storage engine. The 'parallelCollectionScan'
-* command is not yet fully supported for wiredTiger and currently will always return only
-* one cursor regardless of the type of collection the command is run on.
-* @tags: [requires_mmapv1]
-*/
-
-(function() {
- 'use strict';
- let nonCappedCollName = 'noncapped_coll';
- let cappedCollName = 'capped_coll';
-
- // Create a non-capped collection.
- assert.commandWorked(db.runCommand({create: nonCappedCollName}));
- // Create a capped collection with the size of 4096 bytes.
- assert.commandWorked(db.runCommand({create: cappedCollName, capped: true, size: 4096}));
-
- let nonCappedBulk = db[nonCappedCollName].initializeUnorderedBulkOp();
- let cappedBulk = db[cappedCollName].initializeUnorderedBulkOp();
-
- // Add enough documents to each collection to ensure that more than one extent
- // on disk is populated. The 'parallelCollectionScan' command on non-capped
- // collections returns up to one cursor per extent.
- for (let i = 0; i < 500; i++) {
- nonCappedBulk.insert({key: i});
- cappedBulk.insert({key: i});
- }
- assert.writeOK(nonCappedBulk.execute());
- assert.writeOK(cappedBulk.execute());
-
- // Tests that calling 'parallelCollectionScan' with 'numCursors'>=1 on a
- // non-capped collection will return multiple cursors.
- let cmd = {parallelCollectionScan: nonCappedCollName, numCursors: 2};
- let res = assert.commandWorked(db.runCommand(cmd), 'Command failed: ' + tojson(cmd));
- assert.eq(res.cursors.length, 2);
-
- // Tests that calling 'parallelCollectionScan' on a capped collection will return only
- // one cursor for the case where 'numCursors'>=1.
- let maxCursors = 3;
- for (let numCursors = 1; numCursors < maxCursors; numCursors++) {
- cmd = {parallelCollectionScan: cappedCollName, numCursors: numCursors};
- res = assert.commandWorked(db.runCommand(cmd), 'Command failed: ' + tojson(cmd));
- assert.eq(res.cursors.length, 1);
- }
-
- // Tests that the document return order of 'parallelCollectionScan' on a capped collection
- // is consistent with the document insertion order.
- cmd = {parallelCollectionScan: cappedCollName, numCursors: 1};
- let pcsResult = assert.commandWorked(db.runCommand(cmd), 'Command failed: ' + tojson(cmd));
- assert.eq(pcsResult.cursors.length, 1);
- let pcsCursor = pcsResult.cursors[0].cursor;
- let pcsGetMore = {
- getMore: pcsResult.cursors[0].cursor.id,
- collection: cappedCollName,
- batchSize: 1
- };
- let pcsGetMoreResult =
- assert.commandWorked(db.runCommand(pcsGetMore), 'Command failed: ' + tojson(pcsGetMore));
- // The sequence of values being returned should be monotonically increasing by one until the
- // last batch.
- let initKey = pcsGetMoreResult.cursor.nextBatch[0].key;
- for (let i = initKey; i < (initKey + db[cappedCollName].count()); i++) {
- assert.eq(pcsGetMoreResult.cursor.nextBatch[0].key, i);
- pcsGetMoreResult = assert.commandWorked(db.runCommand(pcsGetMore),
- 'Command Failed: ' + tojson(pcsGetMore));
- }
-}());
diff --git a/jstests/disk/preallocate.js b/jstests/disk/preallocate.js
deleted file mode 100644
index e8ff9961a14..00000000000
--- a/jstests/disk/preallocate.js
+++ /dev/null
@@ -1,51 +0,0 @@
-// check that there is preallocation, and there are 2 files
-
-// Preallocation is an mmap only behavior.
-// @tags: [requires_mmapv1]
-
-var baseName = "jstests_preallocate";
-
-var m = MongoRunner.runMongod({});
-
-var getTotalNonLocalNonAdminSize = function() {
- var totalNonLocalNonAdminDBSize = 0;
- m.getDBs().databases.forEach(function(dbStats) {
- // We accept the local database's and admin database's space overhead.
- if (dbStats.name == "local" || dbStats.name == "admin")
- return;
-
-        // Databases with "sizeOnDisk=1" and "empty=true" don't actually take up space on disk.
- // See SERVER-11051.
- if (dbStats.sizeOnDisk == 1 && dbStats.empty)
- return;
- totalNonLocalNonAdminDBSize += dbStats.sizeOnDisk;
- });
- return totalNonLocalNonAdminDBSize;
-};
-
-assert.eq(0, getTotalNonLocalNonAdminSize());
-
-m.getDB(baseName).createCollection(baseName + "1");
-
-// Windows does not currently use preallocation
-expectedMB = 64 + 16;
-if (m.getDB(baseName).serverBits() < 64)
- expectedMB /= 4;
-
-assert.soon(function() {
- return getTotalNonLocalNonAdminSize() >= expectedMB * 1024 * 1024;
-}, "\n\n\nFAIL preallocate.js expected second file to bring total size over " + expectedMB + "MB");
-
-MongoRunner.stopMongod(m);
-
-m = MongoRunner.runMongod({restart: true, cleanData: false, dbpath: m.dbpath});
-
-size = getTotalNonLocalNonAdminSize();
-
-m.getDB(baseName).createCollection(baseName + "2");
-
-sleep(2000); // give prealloc a chance
-
-assert.eq(size, getTotalNonLocalNonAdminSize());
-
-MongoRunner.stopMongod(m);
diff --git a/jstests/disk/preallocate2.js b/jstests/disk/preallocate2.js
deleted file mode 100644
index ee12a610fa2..00000000000
--- a/jstests/disk/preallocate2.js
+++ /dev/null
@@ -1,21 +0,0 @@
-// check that there is preallocation on insert
-
-// Preallocation is an mmap only behavior.
-// @tags: [requires_mmapv1]
-
-var baseName = "jstests_preallocate2";
-
-var m = MongoRunner.runMongod({});
-
-m.getDB(baseName)[baseName].save({i: 1});
-
-// Windows does not currently use preallocation
-expectedMB = (_isWindows() ? 70 : 100);
-if (m.getDB(baseName).serverBits() < 64)
- expectedMB /= 4;
-
-assert.soon(function() {
- return m.getDBs().totalSize > expectedMB * 1000000;
-}, "\n\n\nFAIL preallocate.js expected second file to bring total size over " + expectedMB + "MB");
-
-MongoRunner.stopMongod(m);
\ No newline at end of file
diff --git a/jstests/disk/preallocate_directoryperdb.js b/jstests/disk/preallocate_directoryperdb.js
deleted file mode 100644
index 5121a709050..00000000000
--- a/jstests/disk/preallocate_directoryperdb.js
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Test for SERVER-2417 - should not preallocate a database file while we are
- * dropping its directory in directoryperdb mode.
- */
-
-// Preallocation is an mmap only behavior.
-// @tags: [requires_mmapv1]
-
-var baseDir = "jstests_disk_preallocate_directoryperdb";
-var baseName = "preallocate_directoryperdb";
-var baseName2 = "preallocate_directoryperdb2";
-var baseName3 = "preallocate_directoryperdb3";
-dbpath = MongoRunner.dataPath + baseDir + "/";
-
-function checkDb2DirAbsent() {
- files = listFiles(dbpath);
- // printjson( files );
- for (var f in files) {
- var name = files[f].name;
- assert.eq(-1, name.indexOf(dbpath + baseName2), "baseName2 dir still present");
- }
-}
-
-var m = MongoRunner.runMongod(
- {smallfiles: "", directoryperdb: "", dbpath: dbpath, bind_ip: "127.0.0.1"});
-db = m.getDB(baseName);
-db2 = m.getDB(baseName2);
-var bulk = db[baseName].initializeUnorderedBulkOp();
-var bulk2 = db2[baseName2].initializeUnorderedBulkOp();
-var big = new Array(5000).toString();
-for (var i = 0; i < 3000; ++i) {
- bulk.insert({b: big});
- bulk2.insert({b: big});
-}
-assert.writeOK(bulk.execute());
-assert.writeOK(bulk2.execute());
-
-// Due to our write pattern, we expect db2's .3 file to be queued up in the file
-// allocator behind db's .3 file at the time db2 is dropped. This will
-// (incorrectly) cause db2's dir to be recreated until SERVER-2417 is fixed.
-db2.dropDatabase();
-
-checkDb2DirAbsent();
-
-db.dropDatabase();
-
-// Try writing a new database, to ensure file allocator is still working.
-db3 = m.getDB(baseName3);
-c3 = db[baseName3];
-assert.writeOK(c3.insert({}));
-assert.eq(1, c3.count());
-
-checkDb2DirAbsent();
-
-MongoRunner.stopMongod(m);
\ No newline at end of file
diff --git a/jstests/disk/quota.js b/jstests/disk/quota.js
deleted file mode 100644
index 2305ed7bd75..00000000000
--- a/jstests/disk/quota.js
+++ /dev/null
@@ -1,51 +0,0 @@
-// Check functioning of --quotaFiles parameter, including with respect to SERVER-3293 ('local'
-// database).
-
-// `--quotaFiles` is mmap only.
-// @tags: [requires_mmapv1]
-
-baseName = "jstests_disk_quota";
-
-var m = MongoRunner.runMongod({quotaFiles: 2, smallfiles: ""});
-db = m.getDB(baseName);
-
-big = new Array(10000).toString();
-
-// Insert documents until quota is exhausted.
-var coll = db[baseName];
-var res = coll.insert({b: big});
-while (!res.hasWriteError()) {
- res = coll.insert({b: big});
-}
-
-dotTwoDataFile = baseName + ".2";
-files = listFiles(m.dbpath);
-for (i in files) {
- // Since only one data file is allowed, a .0 file is expected and a .1 file may be preallocated
- // (SERVER-3410) but no .2 file is expected.
- assert.neq(dotTwoDataFile, files[i].baseName);
-}
-
-dotTwoDataFile = "local" + ".2";
-// Check that quota does not apply to local db, and a .2 file can be created.
-l = m.getDB("local")[baseName];
-for (i = 0; i < 10000; ++i) {
- assert.writeOK(l.insert({b: big}));
- dotTwoFound = false;
- if (i % 100 != 0) {
- continue;
- }
- files = listFiles(m.dbpath);
- for (f in files) {
- if (files[f].baseName == dotTwoDataFile) {
- dotTwoFound = true;
- }
- }
- if (dotTwoFound) {
- break;
- }
-}
-
-assert(dotTwoFound);
-
-MongoRunner.stopMongod(m);
diff --git a/jstests/disk/quota2.js b/jstests/disk/quota2.js
deleted file mode 100644
index 07215567b02..00000000000
--- a/jstests/disk/quota2.js
+++ /dev/null
@@ -1,36 +0,0 @@
-// Test for quotaFiles off by one file limit issue - SERVER-3420.
-
-// `--quotaFiles` is mmap only.
-// @tags: [requires_mmapv1]
-
-if (0) { // SERVER-3420
-
- baseName = "jstests_disk_quota2";
-
- var m = MongoRunner.runMongod({quotaFiles: 2, smallfiles: ""});
- db = m.getDB(baseName);
-
- big = new Array(10000).toString();
-
- // Insert documents until quota is exhausted.
- var coll = db[baseName];
- var res = coll.insert({b: big});
- while (!res.hasWriteError()) {
- res = coll.insert({b: big});
- }
-
- // Trigger allocation of an additional file for a 'special' namespace.
- for (n = 0; !db.getLastError(); ++n) {
- db.createCollection('' + n);
- }
-
- // Check that new docs are saved in the .0 file.
- for (i = 0; i < n; ++i) {
- c = db['' + i];
- res = c.insert({b: big});
- if (!res.hasWriteError()) {
- var recordId = c.find().showRecord()[0].$recordId;
- assert.eq(0, recordId >> 32);
- }
- }
-}
diff --git a/jstests/disk/quota3.js b/jstests/disk/quota3.js
deleted file mode 100644
index 27117e7e741..00000000000
--- a/jstests/disk/quota3.js
+++ /dev/null
@@ -1,23 +0,0 @@
-// Test for quotaFiles being ignored allocating a large collection - SERVER-3511.
-
-// `--quotaFiles` is mmap only.
-// @tags: [requires_mmapv1]
-
-if (0) { // SERVER-3511
-
- baseName = "jstests_disk_quota3";
- dbpath = MongoRunner.dataPath + baseName;
-
- var m = MongoRunner.runMongod({dbpath: dbpath, quotaFiles: 3, smallfiles: ""});
- db = m.getDB(baseName);
-
- db.createCollection(baseName, {size: 128 * 1024 * 1024});
- assert(db.getLastError());
-
- dotFourDataFile = dbpath + "/" + baseName + ".4";
- files = listFiles(dbpath);
- for (i in files) {
- // .3 file may be preallocated but not .4
- assert.neq(dotFourDataFile, files[i].name);
- }
-}
diff --git a/jstests/disk/repair.js b/jstests/disk/repair.js
deleted file mode 100644
index 49b9725c56f..00000000000
--- a/jstests/disk/repair.js
+++ /dev/null
@@ -1,66 +0,0 @@
-// check --repairpath and --repair
-
-// `--repairpath` is mmap only.
-// @tags: [requires_mmapv1]
-
-var baseName = "jstests_disk_repair";
-var dbpath = MongoRunner.dataPath + baseName + "/";
-var repairpath = dbpath + "repairDir/";
-
-resetDbpath(dbpath);
-resetDbpath(repairpath);
-
-var m = MongoRunner.runMongod({
- dbpath: dbpath,
- repairpath: repairpath,
- noCleanData: true,
-});
-db = m.getDB(baseName);
-db[baseName].save({});
-assert.commandWorked(db.runCommand({repairDatabase: 1, backupOriginalFiles: true}));
-function check() {
- files = listFiles(dbpath);
- for (f in files) {
- assert(!new RegExp("^" + dbpath + "backup_").test(files[f].name), "backup dir in dbpath");
- }
-
- assert.eq.automsg("1", "db[ baseName ].count()");
-}
-check();
-MongoRunner.stopMongod(m);
-
-resetDbpath(repairpath);
-m = MongoRunner.runMongod({
- port: m.port,
- dbpath: dbpath,
- noCleanData: true,
-});
-db = m.getDB(baseName);
-assert.commandWorked(db.runCommand({repairDatabase: 1}));
-check();
-MongoRunner.stopMongod(m);
-
-resetDbpath(repairpath);
-rc = runMongoProgram(
- "mongod", "--repair", "--port", m.port, "--dbpath", dbpath, "--repairpath", repairpath);
-assert.eq.automsg("0", "rc");
-m = MongoRunner.runMongod({
- port: m.port,
- dbpath: dbpath,
- noCleanData: true,
-});
-db = m.getDB(baseName);
-check();
-MongoRunner.stopMongod(m);
-
-resetDbpath(repairpath);
-rc = runMongoProgram("mongod", "--repair", "--port", m.port, "--dbpath", dbpath);
-assert.eq.automsg("0", "rc");
-m = MongoRunner.runMongod({
- port: m.port,
- dbpath: dbpath,
- noCleanData: true,
-});
-db = m.getDB(baseName);
-check();
-MongoRunner.stopMongod(m);
diff --git a/jstests/disk/repair2.js b/jstests/disk/repair2.js
deleted file mode 100644
index 11be561ef71..00000000000
--- a/jstests/disk/repair2.js
+++ /dev/null
@@ -1,147 +0,0 @@
-// repair with --directoryperdb
-
-// `--repairpath` is mmap only.
-// @tags: [requires_mmapv1]
-
-var baseName = "jstests_disk_repair2";
-
-function check() {
- files = listFiles(dbpath);
- for (f in files) {
- assert(!new RegExp("^" + dbpath + "backup_").test(files[f].name),
- "backup dir " + files[f].name + " in dbpath");
- }
-
- assert.eq.automsg("1", "db[ baseName ].count()");
-}
-
-var dbpath = MongoRunner.dataPath + baseName + "/";
-var repairpath = dbpath + "repairDir/";
-var longDBName = Array(61).join('a');
-var longRepairPath = dbpath + Array(61).join('b') + '/';
-
-resetDbpath(dbpath);
-resetDbpath(repairpath);
-
-var m = MongoRunner.runMongod({
- directoryperdb: "",
- dbpath: dbpath,
- repairpath: repairpath,
- noCleanData: true,
-});
-db = m.getDB(baseName);
-db[baseName].save({});
-assert.commandWorked(db.runCommand({repairDatabase: 1, backupOriginalFiles: true}));
-
-// Check that repair files exist in the repair directory, and nothing else
-db.adminCommand({fsync: 1});
-files = listFiles(repairpath + "/backup_repairDatabase_0/" + baseName);
-var fileCount = 0;
-for (f in files) {
- print(files[f].name);
- if (files[f].isDirectory)
- continue;
- fileCount += 1;
- assert(/\.bak$/.test(files[f].name),
- "In database repair directory, found unexpected file: " + files[f].name);
-}
-assert(fileCount > 0, "Expected more than zero nondirectory files in the database directory");
-
-check();
-MongoRunner.stopMongod(m);
-
-resetDbpath(repairpath);
-m = MongoRunner.runMongod({
- port: m.port,
- directoryperdb: "",
- dbpath: dbpath,
- noCleanData: true,
-});
-db = m.getDB(baseName);
-assert.commandWorked(db.runCommand({repairDatabase: 1}));
-check();
-MongoRunner.stopMongod(m);
-
-// Test long database names
-resetDbpath(repairpath);
-m = MongoRunner.runMongod({
- port: m.port,
- directoryperdb: "",
- dbpath: dbpath,
- noCleanData: true,
-});
-db = m.getDB(longDBName);
-assert.writeOK(db[baseName].save({}));
-assert.commandWorked(db.runCommand({repairDatabase: 1}));
-MongoRunner.stopMongod(m);
-
-// Test long repairPath
-resetDbpath(longRepairPath);
-m = MongoRunner.runMongod({
- port: m.port,
- directoryperdb: "",
- dbpath: dbpath,
- repairpath: longRepairPath,
- noCleanData: true,
-});
-db = m.getDB(longDBName);
-assert.commandWorked(db.runCommand({repairDatabase: 1, backupOriginalFiles: true}));
-check();
-MongoRunner.stopMongod(m);
-
-// Test database name and repairPath with --repair
-resetDbpath(longRepairPath);
-var returnCode = runMongoProgram("mongod",
- "--port",
- m.port,
- "--repair",
- "--directoryperdb",
- "--dbpath",
- dbpath,
- "--repairpath",
- longRepairPath);
-assert.eq(returnCode, 0);
-m = MongoRunner.runMongod({
- port: m.port,
- directoryperdb: "",
- dbpath: dbpath,
- noCleanData: true,
-});
-db = m.getDB(longDBName);
-check();
-MongoRunner.stopMongod(m);
-
-resetDbpath(repairpath);
-returnCode = runMongoProgram("mongod",
- "--port",
- m.port,
- "--repair",
- "--directoryperdb",
- "--dbpath",
- dbpath,
- "--repairpath",
- repairpath);
-assert.eq(returnCode, 0);
-m = MongoRunner.runMongod({
- port: m.port,
- directoryperdb: "",
- dbpath: dbpath,
- repairpath: repairpath,
- noCleanData: true,
-});
-db = m.getDB(baseName);
-check();
-MongoRunner.stopMongod(m);
-
-resetDbpath(repairpath);
-returnCode =
- runMongoProgram("mongod", "--port", m.port, "--repair", "--directoryperdb", "--dbpath", dbpath);
-assert.eq(returnCode, 0);
-m = MongoRunner.runMongod({
- port: m.port,
- directoryperdb: "",
- dbpath: dbpath,
- noCleanData: true,
-});
-db = m.getDB(baseName);
-check();
diff --git a/jstests/disk/repair3.js b/jstests/disk/repair3.js
deleted file mode 100644
index f339a666a70..00000000000
--- a/jstests/disk/repair3.js
+++ /dev/null
@@ -1,77 +0,0 @@
-// test --repairpath on another partition
-
-// `--repairpath` is mmap only.
-// @tags: [requires_mmapv1]
-
-var baseName = "jstests_disk_repair3";
-var repairbase = MongoRunner.dataDir + "/repairpartitiontest";
-var repairpath = repairbase + "/dir";
-
-doIt = false;
-files = listFiles(MongoRunner.dataDir);
-for (i in files) {
- if (files[i].name == repairbase) {
- doIt = true;
- }
-}
-
-if (!doIt) {
- print("path " + repairpath + " missing, skipping repair3 test");
- doIt = false;
-}
-
-if (doIt) {
- var dbpath = MongoRunner.dataPath + baseName + "/";
-
- resetDbpath(dbpath);
- resetDbpath(repairpath);
-
- var m = MongoRunner.runMongod({
- nssize: 8,
- noprealloc: "",
- smallfiles: "",
- dbpath: dbpath,
- repairpath: repairpath,
- });
- db = m.getDB(baseName);
- db[baseName].save({});
- assert.commandWorked(db.runCommand({repairDatabase: 1, backupOriginalFiles: false}));
- function check() {
- files = listFiles(dbpath);
- for (f in files) {
- assert(!new RegExp("^" + dbpath + "backup_").test(files[f].name),
- "backup dir in dbpath");
- }
-
- assert.eq.automsg("1", "db[ baseName ].count()");
- }
-
- check();
- MongoRunner.stopMongod(m);
-
- resetDbpath(repairpath);
- var rc = runMongoProgram("mongod",
- "--nssize",
- "8",
- "--noprealloc",
- "--smallfiles",
- "--repair",
- "--port",
- m.port,
- "--dbpath",
- dbpath,
- "--repairpath",
- repairpath);
- assert.eq.automsg("0", "rc");
- m = MongoRunner.runMongod({
- nssize: 8,
- noprealloc: "",
- smallfiles: "",
- port: m.port,
- dbpath: dbpath,
- repairpath: repairpath,
- });
- db = m.getDB(baseName);
- check();
- MongoRunner.stopMongod(m);
-}
diff --git a/jstests/disk/repair4.js b/jstests/disk/repair4.js
deleted file mode 100644
index 15033dc5986..00000000000
--- a/jstests/disk/repair4.js
+++ /dev/null
@@ -1,53 +0,0 @@
-// test that disk space check happens on --repairpath partition
-
-// `--repairpath` is mmap only.
-// @tags: [requires_mmapv1]
-
-var baseName = "jstests_disk_repair4";
-var smallbase = MongoRunner.dataDir + "/repairpartitiontest";
-var smallpath = smallbase + "/dir";
-
-doIt = false;
-files = listFiles(MongoRunner.dataDir);
-for (i in files) {
- if (files[i].name == smallbase) {
- doIt = true;
- }
-}
-
-if (!doIt) {
- print("path " + smallpath + " missing, skipping repair4 test");
- doIt = false;
-}
-
-if (doIt) {
- var repairpath = MongoRunner.dataPath + baseName + "/";
-
- resetDbpath(smallpath);
- resetDbpath(repairpath);
-
- var m = MongoRunner.runMongod({
- nssize: "8",
- noprealloc: "",
- smallfiles: "",
- dbpath: smallpath,
- repairpath: repairpath,
- bind_ip: "127.0.0.1",
- });
-
- db = m.getDB(baseName);
- db[baseName].save({});
- assert.commandWorked(db.runCommand({repairDatabase: 1, backupOriginalFiles: true}));
- function check() {
- files = listFiles(smallpath);
- for (f in files) {
- assert(!new RegExp("^" + smallpath + "backup_").test(files[f].name),
- "backup dir in dbpath");
- }
-
- assert.eq.automsg("1", "db[ baseName ].count()");
- }
-
- check();
- MongoRunner.stopMongod(m);
-}
diff --git a/jstests/disk/repair5.js b/jstests/disk/repair5.js
deleted file mode 100644
index f9b43300070..00000000000
--- a/jstests/disk/repair5.js
+++ /dev/null
@@ -1,57 +0,0 @@
-// SERVER-2351 Test killop with repair command.
-
-// `repairDatabase` on WiredTiger does not respond to `killop`.
-// @tags: [requires_mmapv1]
-(function() {
- 'use strict';
- var baseName = "jstests_disk_repair5";
-
- var dbpath = MongoRunner.dataPath + baseName + "/";
-
- resetDbpath(dbpath);
-
- var m = MongoRunner.runMongod({
- dbpath: dbpath,
- restart: true,
- cleanData: false
- }); // So that the repair dir won't get removed
-
- var dbTest = m.getDB(baseName);
-
- // Insert a lot of data so repair runs a long time
- var bulk = dbTest[baseName].initializeUnorderedBulkOp();
- var big = new Array(5000).toString();
- for (var i = 0; i < 20000; ++i) {
- bulk.insert({i: i, b: big});
- }
- assert.writeOK(bulk.execute());
-
- function killRepair() {
- while (1) {
- var p = db.currentOp().inprog;
- for (var i in p) {
- var o = p[i];
- printjson(o);
-
- // Find the active 'repairDatabase' op and kill it.
- if (o.active && o.command && o.command.repairDatabase) {
- db.killOp(o.opid);
- return;
- }
- }
- }
- }
-
- var s = startParallelShell(killRepair.toString() + "; killRepair();", m.port);
- sleep(100); // make sure shell is actually running, lame
-
- // Repair should fail due to killOp.
- assert.commandFailed(dbTest.runCommand({repairDatabase: 1}));
-
- s();
-
- assert.eq(20000, dbTest[baseName].find().itcount());
- assert(dbTest[baseName].validate().valid);
-
- MongoRunner.stopMongod(m);
-})();