Diffstat (limited to 'jstests/noPassthrough/backup_restore.js')
-rw-r--r--  jstests/noPassthrough/backup_restore.js  110
1 file changed, 41 insertions(+), 69 deletions(-)
diff --git a/jstests/noPassthrough/backup_restore.js b/jstests/noPassthrough/backup_restore.js
index 40d283873e7..e994aeda66c 100644
--- a/jstests/noPassthrough/backup_restore.js
+++ b/jstests/noPassthrough/backup_restore.js
@@ -17,52 +17,39 @@
(function() {
"use strict";
- function runCmd (cmd) {
- runProgram('bash', '-c', cmd);
+ function runCmd(cmd) {
+ runProgram('bash', '-c', cmd);
}
function crudClient(host, dbName, coll) {
// Launch CRUD client
- var crudClientCmds = "var bulkNum = 1000;" +
- "var baseNum = 100000;" +
+ var crudClientCmds = "var bulkNum = 1000;" + "var baseNum = 100000;" +
"var coll = db.getSiblingDB('" + dbName + "')." + coll + ";" +
- "coll.ensureIndex({x: 1});" +
- "var largeValue = new Array(1024).join('L');" +
+ "coll.ensureIndex({x: 1});" + "var largeValue = new Array(1024).join('L');" +
"Random.setRandomSeed();" +
// run indefinitely
- "while (true) {" +
- " try {" +
- " var op = Random.rand();" +
- " var match = Math.floor(Random.rand() * baseNum);" +
- " if (op < 0.2) {" +
+ "while (true) {" + " try {" + " var op = Random.rand();" +
+ " var match = Math.floor(Random.rand() * baseNum);" + " if (op < 0.2) {" +
// 20% of the operations: bulk insert bulkNum docs
" var bulk = coll.initializeUnorderedBulkOp();" +
" for (var i = 0; i < bulkNum; i++) {" +
" bulk.insert({x: (match * i) % baseNum," +
" doc: largeValue.substring(0, match % largeValue.length)});" +
- " }" +
- " assert.writeOK(bulk.execute());" +
+ " }" + " assert.writeOK(bulk.execute());" +
" } else if (op < 0.4) {" +
// 20% of the operations: update docs;
" var updateOpts = {upsert: true, multi: true};" +
- " assert.writeOK(coll.update(" +
- " {x: {$gte: match}}," +
+ " assert.writeOK(coll.update(" + " {x: {$gte: match}}," +
" {$inc: {x: baseNum}, $set: {n: 'hello'}}," +
- " updateOpts));" +
- " } else if (op < 0.9) {" +
+ " updateOpts));" + " } else if (op < 0.9) {" +
// 50% of the operations: find matchings docs
// itcount() consumes the cursor
- " coll.find({x: {$gte: match}}).itcount();" +
- " } else {" +
+ " coll.find({x: {$gte: match}}).itcount();" + " } else {" +
// 10% of the operations: remove matching docs
- " assert.writeOK(coll.remove({x: {$gte: match}}));" +
- " }" +
+ " assert.writeOK(coll.remove({x: {$gte: match}}));" + " }" +
" } catch(e) {" +
" if (e instanceof ReferenceError || e instanceof TypeError) {" +
- " throw e;" +
- " }" +
- " }" +
- "}";
+ " throw e;" + " }" + " }" + "}";
// Returns the pid of the started mongo shell so the CRUD test client can be terminated
// without waiting for its execution to finish.
@@ -75,26 +62,17 @@
// started without any cluster options. Since the shell running this test was started with
// --nodb, another mongo shell is used to allow implicit connections to be made to the
// primary of the replica set.
- var fsmClientCmds = "'use strict';" +
- "load('jstests/concurrency/fsm_libs/runner.js');" +
- "var dir = 'jstests/concurrency/fsm_workloads';" +
- "var blacklist = [" +
- " 'agg_group_external.js'," +
- " 'agg_sort_external.js'," +
- " 'auth_create_role.js'," +
- " 'auth_create_user.js'," +
- " 'auth_drop_role.js'," +
- " 'auth_drop_user.js'," +
- " 'reindex_background.js'," +
- " 'yield_sort.js'," +
- "].map(function(file) { return dir + '/' + file; });" +
- "Random.setRandomSeed();" +
+ var fsmClientCmds = "'use strict';" + "load('jstests/concurrency/fsm_libs/runner.js');" +
+ "var dir = 'jstests/concurrency/fsm_workloads';" + "var blacklist = [" +
+ " 'agg_group_external.js'," + " 'agg_sort_external.js'," +
+ " 'auth_create_role.js'," + " 'auth_create_user.js'," +
+ " 'auth_drop_role.js'," + " 'auth_drop_user.js'," +
+ " 'reindex_background.js'," + " 'yield_sort.js'," +
+ "].map(function(file) { return dir + '/' + file; });" + "Random.setRandomSeed();" +
// run indefinitely
- "while (true) {" +
- " try {" +
+ "while (true) {" + " try {" +
" var workloads = Array.shuffle(ls(dir).filter(function(file) {" +
- " return !Array.contains(blacklist, file);" +
- " }));" +
+ " return !Array.contains(blacklist, file);" + " }));" +
// Run workloads one at a time, so we ensure replication completes
" workloads.forEach(function(workload) {" +
" runWorkloadsSerially([workload]," +
@@ -104,14 +82,10 @@
" var result = db.getSiblingDB('test').fsm_teardown.insert({ a: 1 }, wc);" +
" assert.writeOK(result, 'teardown insert failed: ' + tojson(result));" +
" result = db.getSiblingDB('test').fsm_teardown.drop();" +
- " assert(result, 'teardown drop failed');" +
- " });" +
+ " assert(result, 'teardown drop failed');" + " });" +
" } catch(e) {" +
" if (e instanceof ReferenceError || e instanceof TypeError) {" +
- " throw e;" +
- " }" +
- " }" +
- "}";
+ " throw e;" + " }" + " }" + "}";
// Returns the pid of the started mongo shell so the FSM test client can be terminated
// without waiting for its execution to finish.
@@ -132,9 +106,10 @@
// Backup type (must be specified)
var allowedBackupKeys = ['fsyncLock', 'stopStart', 'rolling'];
assert(options.backup, "Backup option not supplied");
- assert.contains(options.backup, allowedBackupKeys,
- 'invalid option: ' + tojson(options.backup) +
- '; valid options are: ' + tojson(allowedBackupKeys));
+ assert.contains(options.backup,
+ allowedBackupKeys,
+ 'invalid option: ' + tojson(options.backup) + '; valid options are: ' +
+ tojson(allowedBackupKeys));
// Number of nodes in initial replica set (default 3)
var numNodes = options.nodes || 3;
@@ -152,11 +127,7 @@
var rst = new ReplSetTest({
name: replSetName,
nodes: numNodes,
- nodeOptions: {
- oplogSize: 1024,
- storageEngine: storageEngine,
- dbpath: dbpathFormat
- }
+ nodeOptions: {oplogSize: 1024, storageEngine: storageEngine, dbpath: dbpathFormat}
});
var nodes = rst.startSet();
@@ -179,12 +150,12 @@
// Perform fsync to create checkpoint. We doublecheck if the storage engine
// supports fsync here.
- var ret = primary.adminCommand({fsync : 1});
+ var ret = primary.adminCommand({fsync: 1});
if (!ret.ok) {
assert.commandFailedWithCode(ret, ErrorCodes.CommandNotSupported);
- jsTestLog("Skipping test of " + options.backup
- + " for " + storageEngine + ' as it does not support fsync');
+ jsTestLog("Skipping test of " + options.backup + " for " + storageEngine +
+ ' as it does not support fsync');
return;
}
@@ -211,8 +182,8 @@
var ret = secondary.getDB("admin").fsyncLock();
if (!ret.ok) {
assert.commandFailedWithCode(ret, ErrorCodes.CommandNotSupported);
- jsTestLog("Skipping test of " + options.backup
- + " for " + storageEngine + ' as it does not support fsync');
+ jsTestLog("Skipping test of " + options.backup + " for " + storageEngine +
+ ' as it does not support fsync');
return;
}
@@ -223,8 +194,8 @@
copiedFiles = ls(hiddenDbpath);
print("Copied files:", tojson(copiedFiles));
assert.gt(copiedFiles.length, 0, testName + ' no files copied');
- assert.commandWorked(secondary.getDB("admin").fsyncUnlock(), testName +
- ' failed to fsyncUnlock');
+ assert.commandWorked(secondary.getDB("admin").fsyncUnlock(),
+ testName + ' failed to fsyncUnlock');
} else if (options.backup == 'rolling') {
var rsyncCmd = "rsync -aKkz --del " + sourcePath + " " + destPath;
// Simulate a rolling rsync, do it 3 times before stopping process
@@ -271,7 +242,8 @@
// Note the dbhash can only run when the DB is inactive to get a result
// that can be compared, which is only in the fsyncLock/fsynUnlock case
if (dbHash !== undefined) {
- assert.eq(dbHash, rst.nodes[numNodes].getDB(crudDb).runCommand({dbhash: 1}).md5,
+ assert.eq(dbHash,
+ rst.nodes[numNodes].getDB(crudDb).runCommand({dbhash: 1}).md5,
testName + ' dbHash');
}
@@ -285,8 +257,8 @@
hidden: true
};
rsConfig.members.push(hiddenMember);
- assert.commandWorked(primary.adminCommand({replSetReconfig : rsConfig}), testName +
- ' failed to reconfigure replSet ' + tojson(rsConfig));
+ assert.commandWorked(primary.adminCommand({replSetReconfig: rsConfig}),
+ testName + ' failed to reconfigure replSet ' + tojson(rsConfig));
// Wait up to 60 seconds until the new hidden node is in state RECOVERING.
rst.waitForState(rst.nodes[numNodes],
@@ -314,7 +286,7 @@
// Main
// Add storage engines which are to be skipped entirely to this array
- var noBackupTests = [ 'inMemoryExperiment' ];
+ var noBackupTests = ['inMemoryExperiment'];
// Grab the storage engine, default is wiredTiger
var storageEngine = jsTest.options().storageEngine || "wiredTiger";
@@ -338,7 +310,7 @@
}
}
- // Run the fsyncLock test.  Will return before testing for any engine that doesn't
+ // Run the fsyncLock test. Will return before testing for any engine that doesn't
// support fsyncLock
runTest({
name: storageEngine + ' fsyncLock/fsyncUnlock',