diff options
Diffstat (limited to 'src/mongo/gotools/test/qa-tests/jstests/restore')
89 files changed, 3787 insertions, 0 deletions
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/15k_collections.js b/src/mongo/gotools/test/qa-tests/jstests/restore/15k_collections.js new file mode 100644 index 00000000000..7bdbaceab60 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/15k_collections.js @@ -0,0 +1,38 @@ +// this tests that we can restore a large number of collections, resolving +// an issue raised by TOOLS-1088 +// @tags: [requires_many_files, requires_large_ram] +(function() { + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + var toolTest = getToolTest('15k_collections'); + var commonToolArgs = getCommonToolArguments(); + + var dbOne = toolTest.db.getSiblingDB('dbOne'); + + for (var i=0; i<=15000; i++) { + collName = "Coll" + i; + dbOne.createCollection(collName); + } + + // dump it + var dumpTarget = '15k_collections_dump'; + resetDbpath(dumpTarget); + var ret = toolTest.runTool.apply(toolTest, ['dump'] + .concat(getDumpTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // drop the database so it's empty + dbOne.dropDatabase(); + + // restore it + ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret, "restore to empty DB should have returned successfully"); + + // success + toolTest.stop(); +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/24_to_28.js b/src/mongo/gotools/test/qa-tests/jstests/restore/24_to_28.js new file mode 100644 index 00000000000..02dea923cfa --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/24_to_28.js @@ -0,0 +1,70 @@ +// This test requires mongo 2.4.x, and mongo 3.0.0 releases +// @tags: [requires_mongo_24, requires_mongo_30] +(function() { + + load("jstests/configs/standard_dump_targets.config.js"); + + // skip tests requiring wiredTiger storage engine on pre 3.0 mongod + if (TestData && TestData.storageEngine === 'wiredTiger') { + return; + } + + // 
Skip this test if running with SSL turned on, because the common tool args are not + // compatible with 2.4 servers. + if (TestData && TestData.useSSL) { + return; + } + // Tests using mongorestore to restore a dump from a 2.4 mongod to a 3.0 mongod. + + jsTest.log('Testing running mongorestore restoring data from a 2.4 mongod to'+ + ' a 3.0 mongod'); + + var toolTest = new ToolTest('24_to_28', {binVersion: '2.4'}); + toolTest.startDB('foo'); + + // where we'll put the dump + var dumpTarget = '24_to_28_dump'; + resetDbpath(dumpTarget); + + // the db and collection we'll be using + var testDB = toolTest.db.getSiblingDB('test'); + var testColl = testDB.coll; + + // insert some documents + for (var i = 0; i < 50; i++) { + testColl.insert({_id: i}); + } + // sanity check the insert worked + assert.eq(50, testColl.count()); + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump'].concat(getDumpTarget(dumpTarget))); + assert.eq(0, ret); + + // drop the database + testDB.dropDatabase(); + + // restart the mongod as a 3.0 + stopMongod(toolTest.port); + toolTest.m = null; + toolTest.db = null; + delete toolTest.options.binVersion; + toolTest.startDB('foo'); + + // refresh the db and coll reference + testDB = toolTest.db.getSiblingDB('test'); + testColl = testDB.coll; + + // restore the data + ret = toolTest.runTool.apply(toolTest, ['restore'].concat(getRestoreTarget(dumpTarget))); + assert.eq(0, ret); + + // make sure the data was restored + assert.eq(50, testColl.count()); + for (i = 0; i < 50; i++) { + assert.eq(1, testColl.count({_id: i})); + } + + // success + toolTest.stop(); +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/26_to_28.js b/src/mongo/gotools/test/qa-tests/jstests/restore/26_to_28.js new file mode 100644 index 00000000000..3e03aa6bf18 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/26_to_28.js @@ -0,0 +1,65 @@ +// This test requires mongo 2.6.x, and mongo 3.0.0 releases +// @tags: [requires_mongo_26, 
requires_mongo_30] +(function() { + + load("jstests/configs/standard_dump_targets.config.js"); + + // skip tests requiring wiredTiger storage engine on pre 3.0 mongod + if (TestData && TestData.storageEngine === 'wiredTiger') { + return; + } + + // Tests using mongorestore to restore a dump from a 2.6 mongod to a 3.0 mongod. + jsTest.log('Testing running mongorestore restoring data from a 2.6 mongod to'+ + ' a 3.0 mongod'); + + var toolTest = new ToolTest('26_to_28', {binVersion: '2.6'}); + toolTest.startDB('foo'); + + // where we'll put the dump + var dumpTarget = '26_to_28_dump'; + resetDbpath(dumpTarget); + + // the db and collection we'll be using + var testDB = toolTest.db.getSiblingDB('test'); + var testColl = testDB.coll; + + // insert some documents + for (var i = 0; i < 50; i++) { + testColl.insert({_id: i}); + } + // sanity check the insert worked + assert.eq(50, testColl.count()); + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump'].concat(getDumpTarget(dumpTarget))); + assert.eq(0, ret); + + // drop the database + testDB.dropDatabase(); + + // restart the mongod as a 3.0 + stopMongod(toolTest.port); + toolTest.m = null; + toolTest.db = null; + delete toolTest.options.binVersion; + toolTest.startDB('foo'); + + // refresh the db and coll reference + testDB = toolTest.db.getSiblingDB('test'); + testColl = testDB.coll; + + // restore the data + ret = toolTest.runTool.apply(toolTest, ['restore'].concat(getRestoreTarget(dumpTarget))); + assert.eq(0, ret); + + // make sure the data was restored + assert.eq(50, testColl.count()); + for (i = 0; i < 50; i++) { + assert.eq(1, testColl.count({_id: i})); + } + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/28_to_26.js b/src/mongo/gotools/test/qa-tests/jstests/restore/28_to_26.js new file mode 100644 index 00000000000..01b2a50a24a --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/28_to_26.js @@ -0,0 +1,66 @@ +// This test 
requires mongo 2.6.x, and mongo 3.0.0 releases +// @tags: [requires_mongo_26, requires_mongo_30] +(function() { + load("jstests/configs/standard_dump_targets.config.js"); + + // skip tests requiring wiredTiger storage engine on pre 2.8 mongod + if (TestData && TestData.storageEngine === 'wiredTiger') { + return; + } + + // Tests using mongorestore to restore a dump from a 2.8 mongod to a 2.6 mongod. + + jsTest.log('Testing running mongorestore restoring data from a 2.8 mongod to'+ + ' a 2.6 mongod'); + + var toolTest = new ToolTest('28_to_26'); + toolTest.startDB('foo'); + + // where we'll put the dump + var dumpTarget = '28_to_26_dump'; + resetDbpath(dumpTarget); + + // the db and collection we'll be using + var testDB = toolTest.db.getSiblingDB('test'); + var testColl = testDB.coll; + + // insert some documents + for (var i = 0; i < 50; i++) { + testColl.insert({_id: i}); + } + // sanity check the insert worked + assert.eq(50, testColl.count()); + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump'].concat(getDumpTarget(dumpTarget))); + assert.eq(0, ret); + + // drop the database + testDB.dropDatabase(); + + // restart the mongod as a 2.6 + stopMongod(toolTest.port); + toolTest.m = null; + toolTest.db = null; + toolTest.options = toolTest.options || {}; + toolTest.options.binVersion = '2.6'; + resetDbpath(toolTest.dbpath); + toolTest.startDB('foo'); + + // refresh the db and coll reference + testDB = toolTest.db.getSiblingDB('test'); + testColl = testDB.coll; + + // restore the data + ret = toolTest.runTool.apply(toolTest, ['restore'].concat(getRestoreTarget(dumpTarget))); + assert.eq(0, ret); + + // make sure the data was restored + assert.eq(50, testColl.count()); + for (i = 0; i < 50; i++) { + assert.eq(1, testColl.count({_id: i})); + } + + // success + toolTest.stop(); +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/archive_stdout.js b/src/mongo/gotools/test/qa-tests/jstests/restore/archive_stdout.js new file mode 
100644 index 00000000000..c8566d7f606 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/archive_stdout.js @@ -0,0 +1,50 @@ +(function() { + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + var toolTest = getToolTest('archive_stdout'); + var baseArgs = getCommonToolArguments(); + baseArgs = baseArgs.concat('--port', toolTest.port); + + if (toolTest.useSSL) { + baseArgs = baseArgs.concat([ + '--ssl', + '--sslPEMKeyFile', 'jstests/libs/server.pem', + '--sslCAFile', 'jstests/libs/ca.pem', + '--sslAllowInvalidHostnames']); + } + if (dump_targets === 'gzip') { + baseArgs = baseArgs.concat('--gzip'); + } + var dumpArgs = ['mongodump', '--archive'].concat(baseArgs); + var restoreArgs = ['mongorestore', '--archive', '--drop'].concat(baseArgs); + + dumpArgs[0] = 'PATH=.:$PATH ' + dumpArgs[0]; + restoreArgs[0] = 'PATH=.:$PATH ' + restoreArgs[0]; + if (_isWindows()) { + dumpArgs[0] += '.exe'; + restoreArgs[0] += '.exe'; + } + + var testDb = toolTest.db; + testDb.dropDatabase(); + for (var i = 0; i < 500; i++) { + testDb.foo.insert({i: i}); + testDb.bar.insert({i: i*5}); + } + assert.eq(500, testDb.foo.count(), 'foo should have our test documents'); + assert.eq(500, testDb.bar.count(), 'bar should have our test documents'); + + var ret = runProgram('bash', '-c', dumpArgs.concat('|', restoreArgs).join(' ')); + assert.eq(0, ret, "bash execution should succeed"); + + for (i = 0; i < 500; i++) { + assert.eq(1, testDb.foo.find({i: i}).count(), 'document #'+i+' not in foo'); + assert.eq(1, testDb.bar.find({i: i*5}).count(), 'document #'+i+' not in bar'); + } + assert.eq(500, testDb.foo.count(), 'foo should have our test documents'); + assert.eq(500, testDb.bar.count(), 'bar should have our test documents'); + + testDb.dropDatabase(); + toolTest.stop(); +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/bad_options.js b/src/mongo/gotools/test/qa-tests/jstests/restore/bad_options.js new file mode 100644 
index 00000000000..1639dfa645f --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/bad_options.js @@ -0,0 +1,54 @@ +(function() { + + load("jstests/configs/standard_dump_targets.config.js"); + + // Tests running mongorestore with bad command line options. + + jsTest.log('Testing running mongorestore with bad'+ + ' command line options'); + + var toolTest = new ToolTest('incompatible_flags'); + toolTest.startDB('foo'); + + // run restore with both --objcheck and --noobjcheck specified + var ret = toolTest.runTool.apply(toolTest, ['restore', + '--objcheck', '--noobjcheck'] + .concat(getRestoreTarget('restore/testdata/dump_empty'))); + assert.neq(0, ret); + + // run restore with --oplogLimit with a bad timestamp + ret = toolTest.runTool.apply(toolTest, ['restore', + '--oplogReplay', '--oplogLimit', + 'xxx'] + .concat(getRestoreTarget('restore/testdata/dump_with_oplog'))); + assert.neq(0, ret); + + // run restore with a negative --w value + ret = toolTest.runTool.apply(toolTest, ['restore', + '--w', '-1'] + .concat(getRestoreTarget('jstests/restore/testdata/dump_empty'))); + assert.neq(0, ret); + + // run restore with an invalid db name + ret = toolTest.runTool.apply(toolTest, ['restore', + '--db', 'billy.crystal'] + .concat(getRestoreTarget('jstests/restore/testdata/blankdb'))); + assert.neq(0, ret); + + // run restore with an invalid collection name + ret = toolTest.runTool.apply(toolTest, ['restore', + '--db', 'test', + '--collection', '$money'] + .concat(getRestoreTarget('jstests/restore/testdata/blankcoll/blank.bson'))); + assert.neq(0, ret); + + // run restore with an invalid verbosity value + ret = toolTest.runTool.apply(toolTest, ['restore', + '-v', 'torvalds'] + .concat(getRestoreTarget('restore/testdata/dump_empty'))); + assert.neq(0, ret); + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/blank_collection_bson.js b/src/mongo/gotools/test/qa-tests/jstests/restore/blank_collection_bson.js 
new file mode 100644 index 00000000000..e3d2f62f037 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/blank_collection_bson.js @@ -0,0 +1,43 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + if (dump_targets !== "standard") { + print('skipping test incompatable with archiving or compression'); + return assert(true); + } + + // Tests using mongorestore to restore data from a blank collection + // file, with both a missing and blank metadata file. + + jsTest.log('Testing restoration from a blank collection file'); + + var toolTest = getToolTest('blank_collection_bson'); + var commonToolArgs = getCommonToolArguments(); + + // run the restore with the blank collection file and no + // metadata file. it should succeed, but insert nothing. + var ret = toolTest.runTool.apply(toolTest, ['restore', + '--db', 'test', + '--collection', 'blank'] + .concat(getRestoreTarget('jstests/restore/testdata/blankcoll/blank.bson')) + .concat(commonToolArgs)); + assert.eq(0, ret); + assert.eq(0, toolTest.db.getSiblingDB('test').blank.count()); + + // run the restore with the blank collection file and a blank + // metadata file. it should succeed, but insert nothing. 
+ ret = toolTest.runTool.apply(toolTest, ['restore', + '--db', 'test', + '--collection', 'blank'] + .concat(getRestoreTarget('jstests/restore/testdata/blankcoll/blank_metadata.bson')) + .concat(commonToolArgs)); + assert.eq(0, ret); + assert.eq(0, toolTest.db.getSiblingDB('test').blank.count()); + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/blank_db.js b/src/mongo/gotools/test/qa-tests/jstests/restore/blank_db.js new file mode 100644 index 00000000000..1d3c85e3e0b --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/blank_db.js @@ -0,0 +1,29 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + if (dump_targets === "archive") { + print('skipping test incompatable with archiving'); + return assert(true); + } + + // Tests using mongorestore to restore data from a blank db directory. + + jsTest.log('Testing restoration from a blank db directory'); + + var toolTest = getToolTest('blank_db'); + var commonToolArgs = getCommonToolArguments(); + + // run the restore with the blank db directory. it should succeed, but + // insert nothing. + var ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test'] + .concat(getRestoreTarget('jstests/restore/testdata/blankdb')) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/conflicting_auth_schema_version.js b/src/mongo/gotools/test/qa-tests/jstests/restore/conflicting_auth_schema_version.js new file mode 100644 index 00000000000..638a170c5d8 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/conflicting_auth_schema_version.js @@ -0,0 +1,136 @@ +// This test requires mongo 2.6.x releases +// @tags: [requires_mongo_26] +(function() { + + load("jstests/configs/standard_dump_targets.config.js"); + + // Tests using mongorestore to restore a dump containing users. 
If there is + // conflicting authSchemaVersion in the admin.system.version document, it + // should be ignored, and the restore should complete successfully. + + jsTest.log('Testing restoring a dump with a potentially conflicting'+ + ' authSchemaVersion in the database'); + + if (dump_targets !== "standard") { + print('skipping test incompatable with archiving or compression'); + return assert(true); + } + + var runTest = function(sourceDBVersion, dumpVersion, restoreVersion, destDBVersion, shouldSucceed) { + + jsTest.log('Running with sourceDBVersion=' + (sourceDBVersion || 'latest') + + ', dumpVersion=' + (dumpVersion || 'latest') + ', restoreVersion=' + + (restoreVersion || 'latest') + ', and destDBVersion=' + + (destDBVersion || 'latest') + ', expected to pass=' + shouldSucceed); + + var toolTest = new ToolTest('conflicting_auth_schema_version', + {binVersion: sourceDBVersion, auth: ''}); + toolTest.startDB('foo'); + + // where we'll put the dump + var dumpTarget = 'conflicting_auth_schema_version_dump'; + resetDbpath(dumpTarget); + + // the admin db, and the non-admin db we'll be using + var adminDB = toolTest.db.getSiblingDB('admin'); + var testDB = toolTest.db.getSiblingDB('test'); + + // create a user admin + adminDB.createUser({ + user: 'admin', + pwd: 'password', + roles: [ + {role: 'userAdminAnyDatabase', db: 'admin'}, + {role: 'readWriteAnyDatabase', db: 'admin'}, + {role: 'backup', db: 'admin'}, + ], + }); + var authInfo = {user: 'admin', pwd: 'password'}; + if (sourceDBVersion === "2.6") { + authInfo.mechanism = "MONGODB-CR"; + } + assert.eq(1, adminDB.auth(authInfo)); + + // add some data + for (var i = 0; i < 10; i++) { + testDB.data.insert({_id: i}); + } + + // sanity check the data was inserted + assert.eq(10, testDB.data.count()); + + // dump all the data + args = ['mongodump' + (dumpVersion ? 
('-'+dumpVersion) : ''), + '--username', 'admin', + '--password', 'password', '--port', toolTest.port] + .concat(getDumpTarget(dumpTarget)); + if (sourceDBVersion === "2.6") { + args.push("--authenticationMechanism=MONGODB-CR"); + } + var ret = runMongoProgram.apply(this, args); + assert.eq(0, ret); + + // restart the mongod, with a clean db path + stopMongod(toolTest.port); + resetDbpath(toolTest.dbpath); + toolTest.m = null; + toolTest.db = null; + toolTest.options.binVersion = destDBVersion; + toolTest.startDB('foo'); + + // refresh the db references + adminDB = toolTest.db.getSiblingDB('admin'); + testDB = toolTest.db.getSiblingDB('test'); + + // create a new user admin + adminDB.createUser({ + user: 'admin28', + pwd: 'password', + roles: [ + {role: 'userAdminAnyDatabase', db: 'admin'}, + {role: 'readWriteAnyDatabase', db: 'admin'}, + {role: 'restore', db: 'admin'}, + ], + }); + + var authInfoDest = {user: 'admin28', pwd: 'password'}; + if (destDBVersion === "2.6") { + authInfoDest.mechanism = "MONGODB-CR"; + } + assert.eq(1, adminDB.auth(authInfoDest)); + + // do a full restore + args = ['mongorestore' + (restoreVersion ? 
('-'+restoreVersion) : ''), + '--username', 'admin28', + '--password', 'password', + '--port', toolTest.port, + '--stopOnError'] + .concat(getRestoreTarget(dumpTarget)); + + ret = runMongoProgram.apply(this, args); + + if (shouldSucceed) { + assert.eq(0, ret); + // make sure the data and users are all there + assert.eq(10, testDB.data.count()); + for (i = 0; i < 10; i++) { + assert.eq(1, testDB.data.count({_id: i})); + } + var users = adminDB.getUsers(); + assert.eq(2, users.length); + assert(users[0].user === 'admin' || users[1].user === 'admin'); + assert(users[0].user === 'admin28' || users[1].user === 'admin28'); + } else { + assert.neq(0, ret); + } + // success + toolTest.stop(); + }; + + // 'undefined' triggers latest + runTest('2.6', '2.6', undefined, '2.6', true); + runTest('2.6', '2.6', undefined, undefined, true); + runTest('2.6', undefined, undefined, undefined, true); + runTest(undefined, undefined, undefined, '2.6', false); + runTest(undefined, undefined, undefined, undefined, true); +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/different_collection.js b/src/mongo/gotools/test/qa-tests/jstests/restore/different_collection.js new file mode 100644 index 00000000000..444b8d8115a --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/different_collection.js @@ -0,0 +1,89 @@ +(function() { + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + // Tests using mongorestore to restore data to a different collection + // then it was dumped from. 
+ + jsTest.log('Testing restoration to a different collection'); + + if (dump_targets === 'archive') { + jsTest.log('Skipping test unsupported against archive targets'); + return assert(true); + } + + var toolTest = getToolTest('different_collection'); + var commonToolArgs = getCommonToolArguments(); + + // where we'll put the dump + var dumpTarget = 'different_collection_dump'; + resetDbpath(dumpTarget); + + // the db we will dump from + var sourceDB = toolTest.db.getSiblingDB('source'); + // the collection we will dump from + var sourceCollName = 'sourceColl'; + + // insert a bunch of data + for (var i = 0; i < 500; i++) { + sourceDB[sourceCollName].insert({_id: i}); + } + // sanity check the insertion worked + assert.eq(500, sourceDB[sourceCollName].count()); + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump'].concat(getDumpTarget(dumpTarget))); + assert.eq(0, ret); + + // restore just the collection into a different collection + // in the same database + var destCollName = 'destColl'; + ret = toolTest.runTool.apply(toolTest, ['restore', + '--db', 'source', + '--collection', destCollName] + .concat(getRestoreTarget(dumpTarget+'/source/sourceColl.bson')) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // make sure the data was restored correctly + assert.eq(500, sourceDB[destCollName].count()); + for (i = 0; i < 500; i++) { + assert.eq(1, sourceDB[destCollName].count({_id: i})); + } + + // restore just the collection into a similarly-named collection + // in a different database + var destDB = toolTest.db.getSiblingDB('dest'); + ret = toolTest.runTool.apply(toolTest, ['restore', + '--db', 'dest', + '--collection', sourceCollName] + .concat(getRestoreTarget(dumpTarget+'/source/sourceColl.bson')) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // make sure the data was restored correctly + assert.eq(500, destDB[sourceCollName].count()); + for (i = 0; i < 500; i++) { + assert.eq(1, destDB[sourceCollName].count({_id: i})); + } + + // 
restore just the collection into a different collection + // in a different database + ret = toolTest.runTool.apply(toolTest, ['restore', + '--db', 'dest', + '--collection', destCollName] + .concat(getRestoreTarget(dumpTarget+'/source/sourceColl.bson')) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // make sure the data was restored correctly + assert.eq(500, destDB[destCollName].count()); + for (i = 0; i < 500; i++) { + assert.eq(1, destDB[destCollName].count({_id: i})); + } + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/different_db.js b/src/mongo/gotools/test/qa-tests/jstests/restore/different_db.js new file mode 100644 index 00000000000..da55fed41c7 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/different_db.js @@ -0,0 +1,84 @@ +(function() { + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + // Tests using mongorestore to restore data to a different db than + // it was dumped from. 
+ + jsTest.log('Testing restoration to a different db'); + + if (dump_targets === 'archive') { + jsTest.log('Skipping test unsupported against archive targets'); + return assert(true); + } + + var toolTest = getToolTest('different_db'); + var commonToolArgs = getCommonToolArguments(); + + // where we'll put the dump + var dumpTarget = 'different_db_dump'; + resetDbpath(dumpTarget); + + // the db we will dump from + var sourceDB = toolTest.db.getSiblingDB('source'); + // the db we will restore to + var destDB = toolTest.db.getSiblingDB('dest'); + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump'] + .concat(getDumpTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // we'll use two collections + var collNames = ['coll1', 'coll2']; + + // insert a bunch of data + collNames.forEach(function(collName) { + for (var i = 0; i < 500; i++) { + sourceDB[collName].insert({_id: i+'_'+collName}); + } + // sanity check the insertion worked + assert.eq(500, sourceDB[collName].count()); + }); + + // dump the data + ret = toolTest.runTool.apply(toolTest, ['dump'] + .concat(getDumpTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // restore the data to a different db + ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'dest'] + .concat(getRestoreTarget(dumpTarget+'/source')) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // make sure the data was restored + collNames.forEach(function(collName) { + assert.eq(500, destDB[collName].count()); + for (var i = 0; i < 500; i++) { + assert.eq(1, destDB[collName].count({_id: i+'_'+collName})); + } + }); + + // restore the data to another different db + ret = toolTest.runTool.apply(toolTest, ['restore', + '--nsFrom', '$db$.$collection$', + '--nsTo', 'otherdest.$collection$_$db$'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + destDB = toolTest.db.getSiblingDB('otherdest'); + collNames.forEach(function(collName) { + 
assert.eq(500, destDB[collName+'_source'].count()); + for (var i = 0; i < 500; i++) { + assert.eq(1, destDB[collName+'_source'].count({_id: i+'_'+collName})); + } + }); + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/drop_authenticated_user.js b/src/mongo/gotools/test/qa-tests/jstests/restore/drop_authenticated_user.js new file mode 100644 index 00000000000..12923e868aa --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/drop_authenticated_user.js @@ -0,0 +1,107 @@ +(function() { + + load("jstests/configs/standard_dump_targets.config.js"); + + // Tests running mongorestore with --drop and --restoreDbUsersAndRoles, + // in addition to --auth, and makes sure the authenticated user does not + // get dropped before it can complete the restore job. + + jsTest.log('Testing dropping the authenticated user with mongorestore'); + + var toolTest = new ToolTest('drop_authenticated_user', {auth: ''}); + toolTest.startDB('foo'); + + // where we'll put the dump + var dumpTarget = 'drop_authenticated_user_dump'; + resetDbpath(dumpTarget); + + // we'll use the admin db so that the user we are restoring as + // is part of the db we are restoring + var adminDB = toolTest.db.getSiblingDB('admin'); + + // create the users we'll need for the dump + adminDB.createUser({ + user: 'admin', + pwd: 'password', + roles: [ + {role: 'userAdmin', db: 'admin'}, + {role: 'readWrite', db: 'admin'}, + ], + }); + adminDB.auth('admin', 'password'); + + adminDB.createUser({ + user: 'backup', + pwd: 'password', + roles: [{role: 'backup', db: 'admin'}], + }); + + // create a role + adminDB.createRole({ + role: 'extraRole', + privileges: [{ + resource: {db: 'admin', collection: ''}, + actions: ['find'], + }], + roles: [], + }); + + // insert some data + for (var i = 0; i < 10; i++) { + adminDB.data.insert({_id: i}); + } + // sanity check the insertion worked + assert.eq(10, adminDB.data.count()); + + // dump the data + var ret = 
toolTest.runTool.apply(toolTest, ['dump', + '--username', 'backup', + '--password', 'password'] + .concat(getDumpTarget(dumpTarget))); + assert.eq(0, ret); + + // drop all the data, but not the users or roles + adminDB.data.remove({}); + // sanity check the removal worked + assert.eq(0, adminDB.data.count()); + + // now create the restore user, so that we can use it for the restore but it is + // not part of the dump + adminDB.createUser({ + user: 'restore', + pwd: 'password', + roles: [{role: 'restore', db: 'admin'}], + }); + + // insert some data to be removed when --drop is run + for (i = 10; i < 20; i++) { + adminDB.data.insert({_id: i}); + } + // sanity check the insertion worked + assert.eq(10, adminDB.data.count()); + + // restore the data, specifying --drop + ret = toolTest.runTool.apply(toolTest, ['restore', + '--drop', + '--username', 'restore', + '--password', 'password'] + .concat(getRestoreTarget(dumpTarget))); + assert.eq(0, ret); + + // make sure the existing data was removed, and replaced with the dumped data + assert.eq(10, adminDB.data.count()); + for (i = 0; i < 10; i++) { + assert.eq(1, adminDB.data.count({_id: i})); + } + + // make sure the correct roles and users exist - that the restore user was dropped + var users = adminDB.getUsers(); + assert.eq(2, users.length); + assert(users[0].user === 'backup' || users[1].user === 'backup'); + assert(users[0].user === 'admin' || users[1].user === 'admin'); + assert.eq(1, adminDB.getRoles().length); + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/drop_nonexistent_db.js b/src/mongo/gotools/test/qa-tests/jstests/restore/drop_nonexistent_db.js new file mode 100644 index 00000000000..fded2c8706e --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/drop_nonexistent_db.js @@ -0,0 +1,56 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + // Tests that running mongorestore 
with --drop on a database with + // nothing to drop does not error out, and completes the + // restore successfully. + + jsTest.log('Testing restoration with --drop on a nonexistent db'); + + var toolTest = getToolTest('drop_nonexistent_db'); + var commonToolArgs = getCommonToolArguments(); + + // where we'll put the dump + var dumpTarget = 'drop_nonexistent_db_dump'; + resetDbpath(dumpTarget); + + // the db we will use + var testDB = toolTest.db.getSiblingDB('test'); + + // insert a bunch of data + for (var i = 0; i < 500; i++) { + testDB.coll.insert({_id: i}); + } + // sanity check the insertion worked + assert.eq(500, testDB.coll.count()); + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump'] + .concat(getDumpTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // drop the database we are using + testDB.dropDatabase(); + // sanity check the drop worked + assert.eq(0, testDB.coll.count()); + + // restore the data with --drop + ret = toolTest.runTool.apply(toolTest, ['restore', '--drop'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // make sure the data was restored + assert.eq(500, testDB.coll.count()); + for (i = 0; i < 500; i++) { + assert.eq(1, testDB.coll.count({_id: i})); + } + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/drop_one_collection.js b/src/mongo/gotools/test/qa-tests/jstests/restore/drop_one_collection.js new file mode 100644 index 00000000000..4f1c16fee3c --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/drop_one_collection.js @@ -0,0 +1,86 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + // Tests that running mongorestore with --drop and --collection leaves data + // in other collections untouched (that --drop only applies to the + // specified collection). 
+ + jsTest.log('Testing restoration with --drop and --collection, with data in'+ + ' other collections'); + + var toolTest = getToolTest('drop_one_collection'); + var commonToolArgs = getCommonToolArguments(); + + // where we'll put the dump + var dumpTarget = 'drop_one_collection_dump'; + resetDbpath(dumpTarget); + + // the db we will take the dump from + var sourceDB = toolTest.db.getSiblingDB('source'); + + // dump from two different collections, even though we'll + // only be restoring one. + var collNames = ['coll1', 'coll2']; + collNames.forEach(function(collName) { + for (var i = 0; i < 500; i++) { + sourceDB[collName].insert({_id: i+'_'+collName}); + } + // sanity check the insertion worked + assert.eq(500, sourceDB[collName].count()); + }); + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump'] + .concat(getDumpTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // drop and replace the data + collNames.forEach(function(collName) { + sourceDB[collName].drop(); + // sanity check the drop worked + assert.eq(0, sourceDB[collName].count()); + + // insert a disjoint set of data from the dump + for (var i = 500; i < 600; i++) { + sourceDB[collName].insert({_id: i+'_'+collName}); + } + // sanity check the insertion worked + assert.eq(100, sourceDB[collName].count()); + }); + + // insert data into the same collections in a different db + var otherDB = toolTest.db.getSiblingDB('other'); + collNames.forEach(function(collName) { + for (var i = 500; i < 600; i++) { + otherDB[collName].insert({_id: i+'_'+collName}); + } + // sanity check the insertion worked + assert.eq(100, otherDB[collName].count()); + }); + + // restore with --drop and --collection + ret = toolTest.runTool.apply(toolTest, ['restore', '--drop', + '--db', 'source', + '--collection', 'coll1'] + .concat(getRestoreTarget(dumpTarget+'/source/coll1.bson')) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // make sure that the dumped data replaced the old data in 
only + // the specified collection, and all other data was left untouched + assert.eq(500, sourceDB.coll1.count()); + for (var i = 0; i < 500; i++) { + assert.eq(1, sourceDB.coll1.count({_id: i+'_coll1'})); + } + assert.eq(100, sourceDB.coll2.count()); + assert.eq(100, otherDB.coll1.count()); + assert.eq(100, otherDB.coll2.count()); + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/drop_with_data.js b/src/mongo/gotools/test/qa-tests/jstests/restore/drop_with_data.js new file mode 100644 index 00000000000..9c43d105d88 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/drop_with_data.js @@ -0,0 +1,73 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + // Tests that running mongorestore with --drop drops existing data + // before restoring. + + jsTest.log('Testing restoration with --drop on existing data'); + + var toolTest = getToolTest('drop_with_data'); + var commonToolArgs = getCommonToolArguments(); + + // where we'll put the dump + var dumpTarget = 'drop_with_data_dump'; + resetDbpath(dumpTarget); + + // the db we will use + var testDB = toolTest.db.getSiblingDB('test'); + + // we'll use two collections, to make sure they both + // get dropped appropriately + var collNames = ['coll1', 'coll2']; + + // insert a bunch of data to be dumped + collNames.forEach(function(collName) { + for (var i = 0; i < 500; i++) { + testDB[collName].insert({_id: i+'_'+collName}); + } + // sanity check the insertion worked + assert.eq(500, testDB[collName].count()); + }); + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump'] + .concat(getDumpTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // drop all the data, and replace it with different data + collNames.forEach(function(collName) { + testDB[collName].drop(); + // sanity check the drop worked + assert.eq(0, testDB[collName].count()); + + for (var i = 
500; i < 600; i++) { + testDB[collName].insert({_id: i+'_'+collName}); + } + // sanity check the insertion worked + assert.eq(100, testDB[collName].count()); + }); + + // restore with --drop. the current data in all collections should + // be removed and replaced with the dumped data + ret = toolTest.runTool.apply(toolTest, ['restore', '--drop'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // make sure the dumped data was restored, and the old data + // was dropped + collNames.forEach(function(collName) { + assert.eq(500, testDB[collName].count()); + for (var i = 0; i < 500; i++) { + assert.eq(1, testDB[collName].count({_id: i+'_'+collName})); + } + }); + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/duplicate_keys.js b/src/mongo/gotools/test/qa-tests/jstests/restore/duplicate_keys.js new file mode 100644 index 00000000000..3ae4fb0f9ac --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/duplicate_keys.js @@ -0,0 +1,73 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + // Tests using mongorestore to restore a mix of existing and + // non-existing documents to a collection, so we can make sure + // all new documents are actually added. 
+ + jsTest.log('Testing restoration of a dump on top of existing documents'); + + var toolTest = getToolTest('dupe_restore'); + var commonToolArgs = getCommonToolArguments(); + + // where we'll put the dump + var dumpTarget = 'dupe_restore_dump'; + resetDbpath(dumpTarget); + + // we'll insert data into three collections spread across two dbs + var dbOne = toolTest.db.getSiblingDB('dbOne'); + var testColl = dbOne.duplicates; + + // insert a bunch of data + for (var i = 0; i < 50; i++) { + testColl.insert({_id: i}); + } + // sanity check the insertion worked + assert.eq(50, testColl.count()); + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump'] + .concat(getDumpTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // remove a few random documents + var removeDocs = function() { + testColl.remove({_id: 0}); + testColl.remove({_id: 5}); + testColl.remove({_id: 6}); + testColl.remove({_id: 9}); + testColl.remove({_id: 12}); + testColl.remove({_id: 27}); + testColl.remove({_id: 40}); + testColl.remove({_id: 46}); + testColl.remove({_id: 47}); + testColl.remove({_id: 49}); + assert.eq(40, testColl.count()); + }; + removeDocs(); + + // restore the db with default settings + ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // make sure the restore worked, and all of the removed keys were restored + assert.eq(50, testColl.count(), "some documents were not restored with default settings"); + + // now check an array of batch sizes + for (i = 1; i < 100; i++) { + removeDocs(); + ret = toolTest.runTool.apply(toolTest, ['restore', "--batchSize", String(i)] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + assert.eq(50, testColl.count(), "some documents were not restored for batchSize="+i); + } + + toolTest.stop(); +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/empty_users_and_roles.js 
b/src/mongo/gotools/test/qa-tests/jstests/restore/empty_users_and_roles.js new file mode 100644 index 00000000000..24a3032aab5 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/empty_users_and_roles.js @@ -0,0 +1,33 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + if (dump_targets === "archive") { + print('skipping test incompatable with archiving'); + return assert(true); + } + + // Tests running mongorestore with --restoreDbUsersAndRoles, with + // no users or roles in the dump. + + jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles with'+ + ' no users or roles in the dump'); + + var toolTest = getToolTest('empty_users_and_roles'); + var commonToolArgs = getCommonToolArguments(); + + // run the restore with no users or roles. it should succeed, but create no + // users or roles + var ret = toolTest.runTool.apply(toolTest, ['restore', + '--db', 'test', + '--restoreDbUsersAndRoles'] + .concat(getRestoreTarget('jstests/restore/testdata/blankdb')) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/extended_json_metadata.js b/src/mongo/gotools/test/qa-tests/jstests/restore/extended_json_metadata.js new file mode 100644 index 00000000000..59d9997262e --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/extended_json_metadata.js @@ -0,0 +1,42 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + if (dump_targets !== "standard") { + print('skipping test incompatable with archiving or compression'); + return assert(true); + } + + // Tests that using mongorestore on a collection with extended json types + // in the metadata (both indexes and options) is handled gracefully. 
+ + jsTest.log('Testing that restoration of extended JSON collection options works.'); + + var toolTest = getToolTest('extended_json_metadata_restore'); + var commonToolArgs = getCommonToolArguments(); + var testDB = toolTest.db.getSiblingDB('test'); + assert.eq(testDB.changelog.exists(), null, "collection already exists in db"); + + // run a restore against the mongos + var ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(getRestoreTarget('jstests/restore/testdata/dump_extended_json_options')) + .concat(commonToolArgs)); + assert.eq(0, ret, "the restore does not crash"); + + var collectionOptionsFromDB = testDB.changelog.exists(); + printjson(collectionOptionsFromDB); + assert.eq(collectionOptionsFromDB.options.capped, true, "capped option should be restored"); + // Mongodb might fudge the collection max values for different storage engines, + // so we need some wiggle room. + var delta = 1000; + var size = 10 * 1000 * 1000; + assert.lte(collectionOptionsFromDB.options.size, size+delta, "size should be ~10000000"); + assert.gte(collectionOptionsFromDB.options.size, size-delta, "size should be ~10000000"); + + var indexes = testDB.changelog.getIndexes(); + printjson(indexes); + assert.eq(indexes[0].key._id, 1, "index is read properly"); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/indexes.js b/src/mongo/gotools/test/qa-tests/jstests/restore/indexes.js new file mode 100644 index 00000000000..bb50f70b848 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/indexes.js @@ -0,0 +1,96 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + // Tests that mongorestore handles restoring different types of + // indexes correctly. 
+ + jsTest.log('Testing restoration of different types of indexes'); + + var toolTest = getToolTest('indexes'); + var commonToolArgs = getCommonToolArguments(); + + // where we'll put the dump + var dumpTarget = 'indexes_dump'; + resetDbpath(dumpTarget); + + // the db and collection we will use + var testDB = toolTest.db.getSiblingDB('test'); + var testColl = testDB.coll; + + // create a bunch of indexes of different types + testColl.ensureIndex({a: 1}); + testColl.ensureIndex({b: 1}, {sparse: true, unique: true}); + testColl.ensureIndex({a: 1, b: -1}); + testColl.ensureIndex({b: NumberLong("1"), a: NumberLong("1")}); + testColl.ensureIndex({listField: 1}); + testColl.ensureIndex({textField: 'text'}, {language: 'spanish'}); + testColl.ensureIndex({geoField: '2dsphere'}); + + // store the getIndexes() output, to compare with the output + // after dumping and restoring + var indexesPre = testColl.getIndexes(); + + // insert some data + for (var i = 0; i < 5; i++) { + testColl.insert({a: i, b: i+1, listField: [i, i+1]}); + testColl.insert({textField: 'hola '+i}); + testColl.insert({geoField: {type: 'Point', coordinates: [i, i+1]}}); + } + // sanity check the data was inserted + assert.eq(15, testColl.count()); + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump'] + .concat(getDumpTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // drop the collection + testColl.drop(); + // sanity check that the drop worked + assert.eq(0, testColl.count()); + assert.eq(0, testColl.getIndexes().length); + + // restore the data + ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // make sure the data was restored correctly + assert.eq(15, testColl.count()); + + // make sure the indexes were restored correctly + var indexesPost = testColl.getIndexes(); + assert.eq(indexesPre.length, indexesPost.length); + + if (dump_targets === "archive") { + 
jsTest.log('skipping bson file restore test while running with archiving'); + } else { + // drop the collection again + testColl.drop(); + // sanity check that the drop worked + assert.eq(0, testColl.count()); + + assert.eq(0, testColl.getIndexes().length); + + // restore the data, but this time mentioning the bson file specifically + ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(getRestoreTarget(dumpTarget+"/test/coll.bson")) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // make sure the data was restored correctly + assert.eq(15, testColl.count()); + + // make sure the indexes were restored correctly + indexesPost = testColl.getIndexes(); + assert.eq(indexesPre.length, indexesPost.length); + } + + // success + toolTest.stop(); +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/invalid_dump_target.js b/src/mongo/gotools/test/qa-tests/jstests/restore/invalid_dump_target.js new file mode 100644 index 00000000000..89ecaca7ddc --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/invalid_dump_target.js @@ -0,0 +1,32 @@ +(function() { + + load("jstests/configs/standard_dump_targets.config.js"); + + // Tests running mongorestore with invalid specified dumps (directories when + // files are expected, and visa versa). 
+ + jsTest.log('Testing running mongorestore with a invalid dump targets'); + + var toolTest = new ToolTest('invalid_dump_target'); + toolTest.startDB('foo'); + + // run restore with a file, not a directory, specified as the dump location + var ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(getRestoreTarget('jstests/restore/testdata/blankdb/README'))); + assert.neq(0, ret); + + // run restore with --db specified and a file, not a directory, as the db dump + ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test'] + .concat(getRestoreTarget('jstests/restore/testdata/blankdb/README'))); + assert.neq(0, ret); + + // run restore with --collection specified and a directory, not a file, + // as the dump file + ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test', '--collection', 'blank'] + .concat(getRestoreTarget('jstests/restore/testdata/blankdb'))); + assert.neq(0, ret); + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/invalid_metadata.js b/src/mongo/gotools/test/qa-tests/jstests/restore/invalid_metadata.js new file mode 100644 index 00000000000..5630d8648e6 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/invalid_metadata.js @@ -0,0 +1,22 @@ +(function() { + + load("jstests/configs/standard_dump_targets.config.js"); + + // Tests using mongorestore to restore data from a collection whose .metadata.json + // file contains invalid indexes. 
+ + jsTest.log('Testing restoration from a metadata file with invalid indexes'); + + var toolTest = new ToolTest('invalid_metadata'); + toolTest.startDB('foo'); + + // run restore, targeting a collection whose metadata file contains an invalid index + var ret = toolTest.runTool.apply(toolTest, ['restore', + '--db', 'dbOne', + '--collection', 'invalid_metadata'] + .concat(getRestoreTarget('jstests/restore/testdata/dump_with_invalid/dbOne/invalid_metadata.bson'))); + assert.neq(0, ret); + + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/keep_index_version.js b/src/mongo/gotools/test/qa-tests/jstests/restore/keep_index_version.js new file mode 100644 index 00000000000..8de84517b7c --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/keep_index_version.js @@ -0,0 +1,88 @@ +(function() { + + load('jstests/common/check_version.js'); + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + // Tests that running mongorestore with --keepIndexVersion does not + // update the index version, and that running it without + // --keepIndexVersion does. 
+ + jsTest.log('Testing mongorestore with --keepIndexVersion'); + + var toolTest = getToolTest('keep_index_version'); + var commonToolArgs = getCommonToolArguments(); + + // where we'll put the dump + var dumpTarget = 'keep_index_version_dump'; + resetDbpath(dumpTarget); + + // the db and collection we will use + var testDB = toolTest.db.getSiblingDB('test'); + var testColl = testDB.coll; + + if (isAtLeastVersion(testDB.version(), '3.1.0')) { + jsTest.log("skipping test on "+testDB.version()); + return; + } + if (TestData && TestData.storageEngine === 'wiredTiger') { + jsTest.log("skipping test on "+testDB.version()+" when storage engine is wiredTiger"); + return; + } + + // create a version 0 index on the collection + testColl.ensureIndex({num: 1}, {v: 0}); + + // insert some data + for (var i = 0; i < 10; i++) { + testColl.insert({num: i}); + } + // sanity check the insert worked + assert.eq(10, testColl.count()); + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump'] + .concat(getDumpTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // drop the db + testDB.dropDatabase(); + + // restore the data + ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // make sure the data was restored correctly + assert.eq(10, testColl.count()); + + // make sure the index version was updated + var indexes = testColl.getIndexes(); + assert.eq(2, indexes.length); + assert.eq(1, indexes[1].v); + + // drop the db + testDB.dropDatabase(); + + // restore the data with --keepIndexVersion specified + ret = toolTest.runTool.apply(toolTest, ['restore', '--keepIndexVersion'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // make sure the data was restored correctly + assert.eq(10, testColl.count()); + + // make sure the index version was not updated + indexes = testColl.getIndexes(); + assert.eq(2, 
indexes.length); + assert.eq(0, indexes[1].v); + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/large_bulk.js b/src/mongo/gotools/test/qa-tests/jstests/restore/large_bulk.js new file mode 100644 index 00000000000..2eec217b455 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/large_bulk.js @@ -0,0 +1,52 @@ +(function() { + + // this test tests that the bulk api doesn't create BSON documents greater then the + // 16MB limit, as was discovered in TOOLS-939. + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + var toolTest = getToolTest('large_bulk'); + var commonToolArgs = getCommonToolArguments(); + + var dbOne = toolTest.db.getSiblingDB('dbOne'); + // create a test collection + + var oneK=""; + var oneM=""; + var i; + for (i=0; i<=1024; i++) { + oneK+="X"; + } + for (i=0; i<=1024; i++) { + oneM+=oneK; + } + + for (i=0; i<=32; i++) { + dbOne.test.insert({data: oneM}); + } + + // dump it + var dumpTarget = 'large_bulk_dump'; + resetDbpath(dumpTarget); + var ret = toolTest.runTool.apply(toolTest, ['dump'] + .concat(getDumpTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // drop the database so it's empty + dbOne.dropDatabase(); + + // restore it + // 32 records are well under the 1k batch size + // so this should test wether the physcial size limit is respected + ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret, "restore to empty DB should have returned successfully"); + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/malformed_bson.js b/src/mongo/gotools/test/qa-tests/jstests/restore/malformed_bson.js new file mode 100644 index 00000000000..41844f58407 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/malformed_bson.js @@ -0,0 +1,20 @@ +(function() { + + 
load("jstests/configs/standard_dump_targets.config.js"); + // Tests using mongorestore to restore data from a malformed bson file. + + jsTest.log('Testing restoration from a malformed bson file'); + + var toolTest = new ToolTest('malformed_bson'); + toolTest.startDB('foo'); + + // run restore, targeting a malformed bson file + var ret = toolTest.runTool.apply(toolTest, ['restore', + '--db', 'dbOne', + '--collection', 'malformed_coll'] + .concat(getRestoreTarget('jstests/restore/testdata/dump_with_malformed/dbOne/malformed_coll.bson'))); + assert.neq(0, ret); + + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/malformed_metadata.js b/src/mongo/gotools/test/qa-tests/jstests/restore/malformed_metadata.js new file mode 100644 index 00000000000..f724a15c620 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/malformed_metadata.js @@ -0,0 +1,22 @@ +(function() { + + load("jstests/configs/standard_dump_targets.config.js"); + // Tests using mongorestore to restore data from a collection with + // a malformed metadata file. + + jsTest.log('Testing restoration from a malformed metadata file'); + + var toolTest = new ToolTest('malformed_metadata'); + toolTest.startDB('foo'); + + // run restore, targeting a collection with a malformed + // metadata.json file. 
+ var ret = toolTest.runTool.apply(toolTest, ['restore', + '--db', 'dbOne', + '--collection', 'malformed_metadata'] + .concat(getRestoreTarget('jstests/restore/testdata/dump_with_malformed/dbOne/malformed_metadata.bson'))); + assert.neq(0, ret); + + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/missing_dump.js b/src/mongo/gotools/test/qa-tests/jstests/restore/missing_dump.js new file mode 100644 index 00000000000..0d8ff685105 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/missing_dump.js @@ -0,0 +1,32 @@ +(function() { + + load("jstests/configs/standard_dump_targets.config.js"); + // Tests running mongorestore with a missing dump files and directories. + + jsTest.log('Testing running mongorestore with missing dump files and directories'); + + var toolTest = new ToolTest('missing_dump'); + toolTest.startDB('foo'); + + // run restore with a missing dump directory + var ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(getRestoreTarget('xxxxxxxx'))); + assert.neq(0, ret); + + // run restore with --db and a missing dump directory + ret = toolTest.runTool.apply(toolTest, ['restore', + '--db', 'test'] + .concat(getRestoreTarget('xxxxxxxx'))); + assert.neq(0, ret); + + // specify --collection with a missing file + ret = toolTest.runTool.apply(toolTest, ['restore', + '--db', 'test', + '--collection', 'data'] + .concat(getRestoreTarget('jstests/restore/testdata/blankdb/xxxxxxxx.bson'))); + assert.neq(0, ret); + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/multiple_dbs.js b/src/mongo/gotools/test/qa-tests/jstests/restore/multiple_dbs.js new file mode 100644 index 00000000000..3fbaa7c4670 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/multiple_dbs.js @@ -0,0 +1,72 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + // Tests using mongorestore to restore data to 
multiple dbs. + + jsTest.log('Testing restoration to multiple dbs'); + + var toolTest = getToolTest('multiple_dbs'); + var commonToolArgs = getCommonToolArguments(); + + // where we'll put the dump + var dumpTarget = 'multiple_dbs_dump'; + resetDbpath(dumpTarget); + + // the dbs we will be using + var dbOne = toolTest.db.getSiblingDB('dbOne'); + var dbTwo = toolTest.db.getSiblingDB('dbTwo'); + + // we'll use two collections in each db, with one of + // the collection names common across the dbs + var oneOnlyCollName = 'dbOneColl'; + var twoOnlyCollName = 'dbTwoColl'; + var sharedCollName = 'bothColl'; + + // insert a bunch of data + for (var i = 0; i < 50; i++) { + dbOne[oneOnlyCollName].insert({_id: i+'_'+oneOnlyCollName}); + dbTwo[twoOnlyCollName].insert({_id: i+'_'+twoOnlyCollName}); + dbOne[sharedCollName].insert({_id: i+'_dbOne_'+sharedCollName}); + dbTwo[sharedCollName].insert({_id: i+'_dbTwo_'+sharedCollName}); + } + // sanity check the insertion worked + assert.eq(50, dbOne[oneOnlyCollName].count()); + assert.eq(50, dbTwo[twoOnlyCollName].count()); + assert.eq(50, dbOne[sharedCollName].count()); + assert.eq(50, dbTwo[sharedCollName].count()); + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump'] + .concat(getDumpTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // drop the databases + dbOne.dropDatabase(); + dbTwo.dropDatabase(); + + // restore the data + ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // make sure the data was restored properly + assert.eq(50, dbOne[oneOnlyCollName].count()); + assert.eq(50, dbTwo[twoOnlyCollName].count()); + assert.eq(50, dbOne[sharedCollName].count()); + assert.eq(50, dbTwo[sharedCollName].count()); + for (i = 0; i < 50; i++) { + assert.eq(1, dbOne[oneOnlyCollName].count({_id: i+'_'+oneOnlyCollName})); + assert.eq(1, dbTwo[twoOnlyCollName].count({_id: i+'_'+twoOnlyCollName})); + 
assert.eq(1, dbOne[sharedCollName].count({_id: i+'_dbOne_'+sharedCollName})); + assert.eq(1, dbTwo[sharedCollName].count({_id: i+'_dbTwo_'+sharedCollName})); + } + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/namespaces.js b/src/mongo/gotools/test/qa-tests/jstests/restore/namespaces.js new file mode 100644 index 00000000000..cdad2a667d2 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/namespaces.js @@ -0,0 +1,152 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + jsTest.log('Testing namespaces escludes, includes, and mappings during restore'); + + var toolTest = getToolTest('namespaces'); + var commonToolArgs = getCommonToolArguments(); + + // where we'll put the dump + var dumpTarget = 'namespaces_dump'; + + // the db we will dump from + var source1DB = toolTest.db.getSiblingDB('source1'); + var source2DB = toolTest.db.getSiblingDB('source2'); + var source3DB = toolTest.db.getSiblingDB('source3'); + // the db we will restore to + var destDB = toolTest.db.getSiblingDB('dest'); + + function performRestoreWithArgs(...args) { + return toolTest.runTool.apply(toolTest, ['restore'] + .concat(args) + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + } + + function addTestDataTo(db, colls) { + colls.forEach(function(coll) { + var data = []; + for (var i = 0; i < 500; i++) { + data.push({_id: i+'_'+db.getName()+'.'+coll}); + } + db[coll].insertMany(data); + // sanity check the insertion worked + assert.eq(500, db[coll].count()); + // Add an index + var index = {}; + index[db.getName()+'.'+coll] = 1; + db[coll].createIndex(index); + }); + } + + function verifyDataIn(collection, sourceNS) { + if (sourceNS === null) { + assert.eq(0, collection.count()); + return; + } + assert.eq(500, collection.count()); + for (var i = 0; i < 500; i++) { + assert.eq(1, collection.count({_id: i+'_'+sourceNS})); + } + assert.eq(1, 
collection.getIndexes()[1].key[sourceNS]); + } + + addTestDataTo(source1DB, ['coll1', 'coll2', 'coll3']); + verifyDataIn(source1DB.coll1, 'source1.coll1'); + verifyDataIn(source1DB.coll2, 'source1.coll2'); + verifyDataIn(source1DB.coll3, 'source1.coll3'); + + addTestDataTo(source2DB, ['coll1', 'coll2', 'coll3']); + verifyDataIn(source2DB.coll1, 'source2.coll1'); + verifyDataIn(source2DB.coll2, 'source2.coll2'); + verifyDataIn(source2DB.coll3, 'source2.coll3'); + + addTestDataTo(source3DB, ['coll3', 'coll4']); + verifyDataIn(source3DB.coll3, 'source3.coll3'); + verifyDataIn(source3DB.coll4, 'source3.coll4'); + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump'] + .concat(getDumpTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // Get rid of the source databases + source1DB.dropDatabase(); + source2DB.dropDatabase(); + source3DB.dropDatabase(); + + // Exclude *.coll1 + ret = performRestoreWithArgs('--nsExclude', '*.coll1', '--nsFrom', 'source$db-num$.coll$coll-num$', '--nsTo', 'dest.coll_$db-num$_$coll-num$'); + assert.eq(0, ret); + + verifyDataIn(destDB.coll_1_1, null); + verifyDataIn(destDB.coll_1_2, 'source1.coll2'); + verifyDataIn(destDB.coll_1_3, 'source1.coll3'); + verifyDataIn(destDB.coll_2_1, null); + verifyDataIn(destDB.coll_2_2, 'source2.coll2'); + verifyDataIn(destDB.coll_2_3, 'source2.coll3'); + verifyDataIn(destDB.coll_3_1, null); + verifyDataIn(destDB.coll_3_2, null); + verifyDataIn(destDB.coll_3_3, 'source3.coll3'); + verifyDataIn(destDB.coll_3_4, 'source3.coll4'); + + destDB.dropDatabase(); + + // Inclode only *.coll1 + ret = performRestoreWithArgs('--nsInclude', '*.coll1', '--nsFrom', 'source$db-num$.coll$coll-num$', '--nsTo', 'dest.coll_$db-num$_$coll-num$'); + assert.eq(0, ret); + + verifyDataIn(destDB.coll_1_1, 'source1.coll1'); + verifyDataIn(destDB.coll_1_2, null); + verifyDataIn(destDB.coll_1_3, null); + verifyDataIn(destDB.coll_2_1, 'source2.coll1'); + verifyDataIn(destDB.coll_2_2, null); + 
verifyDataIn(destDB.coll_2_3, null); + verifyDataIn(destDB.coll_3_1, null); + verifyDataIn(destDB.coll_3_2, null); + verifyDataIn(destDB.coll_3_3, null); + verifyDataIn(destDB.coll_3_4, null); + + destDB.dropDatabase(); + + // Exclude collections beginning with 'coll' (which is all of them) + ret = performRestoreWithArgs('--excludeCollectionsWithPrefix', 'coll', '--nsFrom', 'source$db-num$.coll$coll-num$', '--nsTo', 'dest.coll_$db-num$_$coll-num$'); + assert.eq(0, ret); + + verifyDataIn(destDB.coll_1_1, null); + verifyDataIn(destDB.coll_1_2, null); + verifyDataIn(destDB.coll_1_3, null); + verifyDataIn(destDB.coll_2_1, null); + verifyDataIn(destDB.coll_2_2, null); + verifyDataIn(destDB.coll_2_3, null); + verifyDataIn(destDB.coll_3_1, null); + verifyDataIn(destDB.coll_3_2, null); + verifyDataIn(destDB.coll_3_3, null); + verifyDataIn(destDB.coll_3_4, null); + + destDB.dropDatabase(); + + // Swap source1 and source2 databases + ret = performRestoreWithArgs('--nsFrom', 'source1.*', '--nsTo', 'source2.*', '--nsFrom', 'source2.*', '--nsTo', 'source1.*'); + assert.eq(0, ret); + + verifyDataIn(source1DB.coll1, 'source2.coll1'); + verifyDataIn(source1DB.coll2, 'source2.coll2'); + verifyDataIn(source1DB.coll3, 'source2.coll3'); + verifyDataIn(source2DB.coll1, 'source1.coll1'); + verifyDataIn(source2DB.coll2, 'source1.coll2'); + verifyDataIn(source2DB.coll3, 'source1.coll3'); + verifyDataIn(source3DB.coll3, 'source3.coll3'); + verifyDataIn(source3DB.coll4, 'source3.coll4'); + + source1DB.dropDatabase(); + source2DB.dropDatabase(); + source3DB.dropDatabase(); + + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/no_index_restore.js b/src/mongo/gotools/test/qa-tests/jstests/restore/no_index_restore.js new file mode 100644 index 00000000000..8e2c5a26155 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/no_index_restore.js @@ -0,0 +1,75 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + 
(function() {
  if (typeof getToolTest === 'undefined') {
    load('jstests/configs/plain_28.config.js');
  }

  // Verifies that mongorestore invoked with --noIndexRestore restores the
  // documents of a dump but recreates none of the secondary indexes; only
  // the implicit _id index should exist afterwards.

  jsTest.log('Testing restoration with --noIndexRestore');

  var toolTest = getToolTest('no_index_restore');
  var commonToolArgs = getCommonToolArguments();

  // directory receiving the dump
  var dumpTarget = 'no_index_restore_dump';
  resetDbpath(dumpTarget);

  var testDb = toolTest.db.getSiblingDB('test');

  // one collection without secondary indexes, one with them
  var collections = ['coll1', 'coll2'];

  // seed both collections and sanity-check the writes
  var c;
  var i;
  for (c = 0; c < collections.length; c++) {
    var seeded = testDb[collections[c]];
    for (i = 0; i < 10; i++) {
      seeded.insert({_id: i, num: i+1, s: ''+i});
    }
    assert.eq(10, seeded.count());
  }

  // give the second collection two secondary indexes (3 total with _id)
  testDb.coll2.ensureIndex({num: 1});
  testDb.coll2.ensureIndex({num: 1, s: -1});
  assert.eq(3, testDb.coll2.getIndexes().length);

  // take the dump
  var ret = toolTest.runTool.apply(toolTest, ['dump']
    .concat(getDumpTarget(dumpTarget))
    .concat(commonToolArgs));
  assert.eq(0, ret);

  // remove the collections so the restore starts from nothing
  for (c = 0; c < collections.length; c++) {
    testDb[collections[c]].drop();
    assert.eq(0, testDb[collections[c]].count());
    assert.eq(0, testDb[collections[c]].getIndexes().length);
  }

  // bring the data back, suppressing index creation
  ret = toolTest.runTool.apply(toolTest, ['restore', '--noIndexRestore']
    .concat(getRestoreTarget(dumpTarget))
    .concat(commonToolArgs));
  assert.eq(0, ret);

  // every document must be back, but only the _id index may remain
  for (c = 0; c < collections.length; c++) {
    var restored = testDb[collections[c]];
    assert.eq(10, restored.count());
    for (i = 0; i < 10; i++) {
      assert.eq(1, restored.count({_id: i}));
    }
    assert.eq(1, restored.getIndexes().length);
  }

  // success
  toolTest.stop();

}());
(function() {

  if (typeof getToolTest === 'undefined') {
    load('jstests/configs/plain_28.config.js');
  }

  // Tests that running mongorestore with --noOptionsRestore does
  // not restore collection options, and that running it without
  // --noOptionsRestore does restore collection options.

  // Using the collection options command is the way to get full
  // collection options as of 2.8, so we use this helper to
  // pull the options from a listCollections cursor.
  var extractCollectionOptions = function(db, name) {
    var res = db.runCommand("listCollections");
    for (var i = 0; i < res.cursor.firstBatch.length; i++) {
      if (res.cursor.firstBatch[i].name === name) {
        return res.cursor.firstBatch[i].options;
      }
    }
    return {};
  };

  jsTest.log('Testing restoration with --noOptionsRestore');

  var toolTest = getToolTest('no_options_restore');
  var commonToolArgs = getCommonToolArguments();

  // where we'll put the dump
  var dumpTarget = 'no_options_restore_dump';
  resetDbpath(dumpTarget);

  // the db we'll use
  var testDB = toolTest.db.getSiblingDB('test');

  // we'll use three different collections - the first will have
  // options set, the second won't, the third will be capped

  // create the noPadding collection
  var noPaddingOptions = {noPadding: true};
  testDB.createCollection('withOptions', noPaddingOptions);

  // create the capped collection
  var cappedOptions = {capped: true, size: 4096, autoIndexId: true};
  testDB.createCollection('capped', cappedOptions);

  // insert some data into all three collections
  ['withOptions', 'withoutOptions', 'capped'].forEach(function(collName) {
    for (var i = 0; i < 50; i++) {
      testDB[collName].insert({_id: i});
    }
    // sanity check the insertions worked
    assert.eq(50, testDB[collName].count());
  });

  // add options to the appropriate collection
  // FIX: cmdRet was previously assigned without 'var', leaking an
  // implicit global into the shell's scope.
  var cmdRet = testDB.runCommand({'collMod': 'withOptions', usePowerOf2Sizes: true});
  assert.eq(1, cmdRet.ok);

  // store the default options, because they change based on storage engine
  var baseCappedOptionsFromDB = extractCollectionOptions(testDB, 'capped');
  var baseWithOptionsFromDB = extractCollectionOptions(testDB, 'withOptions');
  var baseWithoutOptionsFromDB = extractCollectionOptions(testDB, 'withoutOptions');

  // dump the data
  var ret = toolTest.runTool.apply(toolTest, ['dump']
    .concat(getDumpTarget(dumpTarget))
    .concat(commonToolArgs));
  assert.eq(0, ret);

  // drop the data
  testDB.dropDatabase();

  // restore the data
  ret = toolTest.runTool.apply(toolTest, ['restore']
    .concat(getRestoreTarget(dumpTarget))
    .concat(commonToolArgs));
  assert.eq(0, ret);

  // make sure the data was restored correctly
  ['withOptions', 'withoutOptions', 'capped'].forEach(function(collName) {
    assert.eq(50, testDB[collName].count());
  });

  // make sure the options were restored correctly
  var cappedOptionsFromDB = extractCollectionOptions(testDB, 'capped');
  assert.eq(baseCappedOptionsFromDB, cappedOptionsFromDB);
  var withOptionsFromDB = extractCollectionOptions(testDB, 'withOptions');
  assert.eq(baseWithOptionsFromDB, withOptionsFromDB);
  var withoutOptionsFromDB = extractCollectionOptions(testDB, 'withoutOptions');
  assert.eq(baseWithoutOptionsFromDB, withoutOptionsFromDB);

  // drop the data
  testDB.dropDatabase();

  // restore the data, without the options
  ret = toolTest.runTool.apply(toolTest, ['restore', '--noOptionsRestore']
    .concat(getRestoreTarget(dumpTarget))
    .concat(commonToolArgs));
  assert.eq(0, ret);

  // make sure the data was restored correctly
  ['withOptions', 'withoutOptions', 'capped'].forEach(function(collName) {
    assert.eq(50, testDB[collName].count());
  });

  // make sure the options were not restored: every collection should now
  // report only the storage engine's default options
  cappedOptionsFromDB = extractCollectionOptions(testDB, 'capped');
  assert.eq(baseWithoutOptionsFromDB, cappedOptionsFromDB);
  withOptionsFromDB = extractCollectionOptions(testDB, 'withOptions');
  assert.eq(baseWithoutOptionsFromDB, withOptionsFromDB);
  withoutOptionsFromDB = extractCollectionOptions(testDB, 'withoutOptions');
  assert.eq(baseWithoutOptionsFromDB, withoutOptionsFromDB);

  // additional check that the capped collection is no longer capped
  var cappedStats = testDB.capped.stats();
  assert(!cappedStats.capped);

  // success
  toolTest.stop();

}());
(function() {

  load("jstests/configs/standard_dump_targets.config.js");

  // Tests running mongorestore to restore users while the admin.tempusers
  // collection already contains documents. mongorestore stages users in a
  // temp collection during restore; leftover data there must not break it,
  // so the restore is expected to succeed.
  // FIX: the log message claimed the restore "should fail", contradicting
  // the comment and the assertions below, which expect success.

  jsTest.log('Testing restoring users with a nonempty temp users collection.'+
    ' The restore should succeed');

  var toolTest = new ToolTest('nonempty_temp_users');
  toolTest.startDB('foo');

  // where we'll put the dump
  var dumpTarget = 'nonempty_temp_users_dump';
  resetDbpath(dumpTarget);

  // the admin db
  var adminDB = toolTest.db.getSiblingDB('admin');

  // create a user on the admin database
  adminDB.createUser({
    user: 'adminUser',
    pwd: 'password',
    roles: [{role: 'read', db: 'admin'}],
  });

  // dump the data
  // FIX: assert a zero exit code instead of merely "not 1", which let
  // other non-zero failure codes slip through unnoticed.
  var ret = toolTest.runTool.apply(toolTest, ['dump']
    .concat(getDumpTarget(dumpTarget)));
  assert.eq(0, ret);

  // clear out the user
  adminDB.dropAllUsers();

  // insert into the tempusers collection
  adminDB.tempusers.insert({_id: 'corruption'});

  // restore the data. It should succeed
  ret = toolTest.runTool.apply(toolTest, ['restore']
    .concat(getRestoreTarget(dumpTarget)));
  assert.eq(0, ret);

  // success
  toolTest.stop();

}());
(function() {

  if (typeof getToolTest === 'undefined') {
    load('jstests/configs/plain_28.config.js');
  }

  // Verifies that mongorestore never recreates the system.profile
  // collection: after dumping a database whose profiler was active,
  // restoring it brings back the data but none of the profile entries.

  var toolTest = getToolTest('norestore_profile');
  var commonToolArgs = getCommonToolArguments();

  var dbOne = toolTest.db.getSiblingDB('dbOne');
  // turn on the profiler so system.profile accumulates entries
  dbOne.setProfilingLevel(2);

  // seed documents with _id 0..100
  var i = 0;
  while (i <= 100) {
    dbOne.test.insert({_id: i, x: i*i});
    i++;
  }
  // issue a few reads so the profiler has something to record
  [3, 30, 50].forEach(function(id) {
    dbOne.test.find({_id: id});
  });

  assert.gt(dbOne.system.profile.count(), 0, "profiler still empty after running test setup");

  // take a dump of the database
  var dumpTarget = 'norestore_profile';
  resetDbpath(dumpTarget);
  var ret = toolTest.runTool.apply(toolTest, ['dump']
    .concat(getDumpTarget(dumpTarget))
    .concat(commonToolArgs));
  assert.eq(0, ret);

  // disable profiling and clear out the profile collection
  dbOne.setProfilingLevel(0);
  dbOne.system.profile.drop();
  assert.eq(dbOne.system.profile.count(), 0);

  // wipe the database entirely
  dbOne.dropDatabase();

  // restore: everything *except* the profile collection should come back
  ret = toolTest.runTool.apply(toolTest, ['restore']
    .concat(getRestoreTarget(dumpTarget))
    .concat(commonToolArgs));
  assert.eq(0, ret, "restore to empty DB should have returned successfully");

  // the documents made it back
  assert.gt(dbOne.test.count(), 100);

  // but the profiler data did not
  assert.eq(dbOne.system.profile.count(), 0);

  // success
  toolTest.stop();

}());
+ + jsTest.log('Testing restoration with --objcheck'); + + var toolTest = new ToolTest('objcheck_valid_bson'); + toolTest.startDB('foo'); + + // where we'll put the dump + var dumpTarget = 'objcheck_valid_bson_dump'; + resetDbpath(dumpTarget); + + // the db and collection we will use + var testDB = toolTest.db.getSiblingDB('test'); + var testColl = testDB.coll; + + // insert some data + for (var i = 0; i < 50; i++) { + testColl.insert({_id: i}); + } + // sanity check the insert worked + assert.eq(50, testColl.count()); + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump'].concat(getDumpTarget(dumpTarget))); + assert.eq(0, ret); + + // drop the data + testDB.dropDatabase(); + + // restore the data, with --objcheck + ret = toolTest.runTool.apply(toolTest, ['restore'].concat(getRestoreTarget(dumpTarget))); + assert.eq(0, ret); + + // make sure the restore completed succesfully + assert.eq(50, testColl.count()); + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_and_limit.js b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_and_limit.js new file mode 100644 index 00000000000..378e018f155 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_and_limit.js @@ -0,0 +1,78 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + if (dump_targets !== "standard") { + print('skipping test incompatable with archiving or compression'); + return assert(true); + } + + // Tests using mongorestore with the --oplogReplay and --oplogLimit flags. + + jsTest.log('Testing restoration with the --oplogReplay and --oplogLimit options'); + + var toolTest = getToolTest('oplog_replay_and_limit'); + var commonToolArgs = getCommonToolArguments(); + + // this test uses the testdata/dump_with_oplog directory. 
(function() {

  if (typeof getToolTest === 'undefined') {
    load('jstests/configs/plain_28.config.js');
  }

  if (dump_targets !== "standard") {
    print('skipping test incompatable with archiving or compression');
    return assert(true);
  }

  // Exercises mongorestore's --oplogReplay and --oplogLimit flags using the
  // canned testdata/dump_with_oplog directory, which contains:
  //   - a test/ subdirectory restoring { _id: 0 } .. { _id: 9 } to test.data
  //   - an oplog.bson inserting { _id: 10 } .. { _id: 14 } into test.data;
  //     entries 10-13 carry timestamps 1416342265:2 through 1416342265:5,
  //     and the { _id: 14 } entry carries timestamp 1500000000:1.

  jsTest.log('Testing restoration with the --oplogReplay and --oplogLimit options');

  var toolTest = getToolTest('oplog_replay_and_limit');
  var commonToolArgs = getCommonToolArguments();

  // the db and collection we'll be using
  var testDB = toolTest.db.getSiblingDB('test');
  var testColl = testDB.data;

  // helper: run mongorestore against the canned dump with extra flags
  var runRestore = function(extraArgs) {
    return toolTest.runTool.apply(toolTest, ['restore']
      .concat(extraArgs)
      .concat(getRestoreTarget('jstests/restore/testdata/dump_with_oplog'))
      .concat(commonToolArgs));
  };

  // helper: exactly the _ids [0, n) are present, each once
  var checkIds = function(n) {
    assert.eq(n, testColl.count());
    for (var i = 0; i < n; i++) {
      assert.eq(1, testColl.count({_id: i}));
    }
  };

  // without --oplogReplay only the collection files restore: _ids 0-9
  var ret = runRestore([]);
  assert.eq(0, ret);
  checkIds(10);

  testDB.dropDatabase();

  // with --oplogReplay the oplog's inserts of _ids 10-14 apply as well
  ret = runRestore(['--oplogReplay']);
  assert.eq(0, ret);
  checkIds(15);

  testDB.dropDatabase();

  // an --oplogLimit between the two timestamp groups filters out the
  // { _id: 14 } entry
  ret = runRestore(['--oplogReplay', '--oplogLimit', '1416342266:0']);
  assert.eq(0, ret);
  checkIds(14);

  // success
  toolTest.stop();

}());
/**
 * oplog_replay_conflict.js
 *
 * Tests mongorestore with --oplogReplay when the user supplies two top
 * priority oplogs (one via --oplogFile, one inside the dump directory);
 * mongorestore must refuse to run and exit with an error.
 */
(function() {
  'use strict';
  if (typeof getToolTest === 'undefined') {
    load('jstests/configs/plain_28.config.js');
  }

  var commonToolArgs = getCommonToolArguments();
  var restoreTarget = 'jstests/restore/testdata/dump_oplog_conflict';

  var toolTest = getToolTest('oplog_replay_conflict');

  // the test db and collection the restore would write into
  var testDB = toolTest.db.getSiblingDB('test');
  testDB.createCollection('data');
  var testColl = testDB.data;

  // attempt a replay with both an explicit --oplogFile and a dump-local oplog
  var restoreArgs = ['restore',
    '--oplogReplay',
    '--oplogFile', 'jstests/restore/testdata/extra_oplog.bson',
    restoreTarget].concat(commonToolArgs);
  var ret = toolTest.runTool.apply(toolTest, restoreArgs);

  assert.eq(0, testColl.count(),
    "no original entries should be restored");
  assert.eq(1, ret, "restore operation succeeded when it shouldn't have");
  toolTest.stop();
}());
/**
 * oplog_replay_local_main.js
 *
 * Tests mongorestore with --oplogReplay when the oplog lives in the
 * 'oplog.$main' collection of the 'local' database, as it does under
 * master-slave replication.
 */
(function() {
  'use strict';

  var dumpTarget = 'oplog_replay_local_main';
  var rt = new ReplTest('oplog_replay_local_main');
  var master = rt.start(true);

  // point at local.oplog.$main to imitate a master-slave oplog
  var localDB = master.getDB('local');
  var oplogColl = localDB.oplog.$main;
  var restoreDB = master.getDB('test');
  var restoreColl = restoreDB.op;
  resetDbpath(dumpTarget);

  // timestamp seconds of the newest pre-existing oplog entry
  var lastTS = oplogColl.find().sort({$natural: -1}).next().ts.t;
  var oplogSize = 100;

  // forge an oplog of 100 insert entries via godinsert
  for (var i = 0; i < oplogSize; i++) {
    var entry = {
      ts: new Timestamp(lastTS, i),
      op: 'i',
      o: {_id: i, x: 'a' + i},
      ns: 'test.op'
    };
    assert.commandWorked(localDB.runCommand({godinsert: 'oplog.$main', obj: entry}));
  }

  // dump the forged oplog
  var ret = runMongoProgram('mongodump',
    '--port', rt.ports[0],
    '--db', 'local',
    '-c', 'oplog.$main',
    '--out', dumpTarget);
  assert.eq(0, ret, "dump operation failed");

  // start from an empty test.op collection
  restoreColl.drop();
  restoreDB.createCollection("op");
  assert.eq(0, restoreColl.count());

  // replay the dumped oplog
  ret = runMongoProgram('mongorestore',
    '--port', rt.ports[0],
    '--oplogReplay',
    dumpTarget);
  assert.eq(0, ret, "restore operation failed");

  assert.eq(oplogSize, restoreColl.count(), "all oplog entries should be inserted");
  rt.stop(true);
}());
/**
 * oplog_replay_local_rs.js
 *
 * Tests mongorestore with --oplogReplay when the oplog lives in the
 * 'oplog.rs' collection of the 'local' database, as it does when a
 * replica set provides replication.
 */
(function() {
  'use strict';
  if (typeof getToolTest === 'undefined') {
    load('jstests/configs/plain_28.config.js');
  }

  var commonToolArgs = getCommonToolArguments();
  var dumpTarget = 'oplog_replay_local_rs';

  var toolTest = getToolTest('oplog_replay_local_rs');

  // fake a replica-set oplog at local.oplog.rs
  var localDB = toolTest.db.getSiblingDB('local');
  var oplogColl = localDB['oplog.rs'];
  var restoreDB = toolTest.db.getSiblingDB('test');
  var restoreColl = restoreDB.op;
  resetDbpath(dumpTarget);

  var oplogSize = 100;
  localDB.createCollection('oplog.rs', {capped: true, size: 100000});

  // forge an oplog holding 100 insert operations
  for (var i = 0; i < oplogSize; i++) {
    var writeResult = oplogColl.insert({
      ts: new Timestamp(0, i),
      op: "i",
      o: {_id: i, x: 'a' + i},
      ns: "test.op",
    });
    assert.eq(1, writeResult.nInserted, "insert failed");
  }

  // dump the forged oplog
  var ret = toolTest.runTool.apply(toolTest, ['dump',
    '--db', 'local',
    '-c', 'oplog.rs',
    '--out', dumpTarget]
    .concat(commonToolArgs));
  assert.eq(0, ret, "dump operation failed");

  // drop the source so only the replay can repopulate anything
  oplogColl.drop();
  assert.eq(0, oplogColl.count(), "all original entries should be dropped");

  // start from an empty test.op collection
  restoreColl.drop();
  restoreDB.createCollection("op");
  assert.eq(0, restoreColl.count());

  // replay the dumped oplog
  ret = toolTest.runTool.apply(toolTest, ['restore',
    '--oplogReplay',
    dumpTarget]
    .concat(commonToolArgs));
  assert.eq(0, ret, "restore operation failed");

  assert.eq(oplogSize, restoreColl.count(),
    "all oplog entries should be inserted");
  toolTest.stop();
}());
(function() {

  load("jstests/configs/standard_dump_targets.config.js");

  // Tests using mongorestore with --oplogReplay when no oplog.bson file is present.

  jsTest.log('Testing restoration with --oplogReplay and no oplog.bson file');

  var toolTest = new ToolTest('oplog_replay_no_oplog');
  toolTest.startDB('foo');

  // run the restore, with a dump directory that has no oplog.bson file.
  // FIX: use the 'jstests/'-prefixed path like every sibling test; the old
  // 'restore/testdata/dump_empty' pointed at a nonexistent directory, so
  // the expected failure occurred for the wrong reason.
  var ret = toolTest.runTool.apply(toolTest, ['restore', '--oplogReplay']
    .concat(getRestoreTarget('jstests/restore/testdata/dump_empty')));
  assert.neq(0, ret);

  // success
  toolTest.stop();

}());
(function() {

  if (typeof getToolTest === 'undefined') {
    load('jstests/configs/plain_28.config.js');
  }

  if (dump_targets !== "standard") {
    print('skipping test incompatable with archiving or compression');
    return assert(true);
  }

  // Tests mongorestore --oplogReplay against an oplog.bson containing noop
  // entries, verifying the noops are skipped while the real insert that
  // follows them is still applied.

  jsTest.log('Testing restoration with --oplogReplay and noops');

  var toolTest = getToolTest('oplog_replay_noop');
  var commonToolArgs = getCommonToolArguments();

  // the db and collection the replay writes to
  var testDB = toolTest.db.getSiblingDB('test');
  var dataColl = testDB.data;

  // replay the canned dump, whose oplog is padded with noops
  var ret = toolTest.runTool.apply(toolTest, ['restore', '--oplogReplay']
    .concat(getRestoreTarget('jstests/restore/testdata/dump_with_noop_in_oplog'))
    .concat(commonToolArgs));
  assert.eq(0, ret);

  // the single real document behind the noops must be present
  assert.eq(1, dataColl.count());
  assert.eq(1, dataColl.count({a: 1}));

  toolTest.stop();

}());
/**
 * oplog_replay_priority_oplog.js
 *
 * Tests mongorestore with --oplogReplay when two oplogs are supplied: the
 * one named via --oplogFile outranks the oplog found inside the dump, and
 * only the higher priority one is replayed.
 */
(function() {
  'use strict';
  if (typeof getToolTest === 'undefined') {
    load('jstests/configs/plain_28.config.js');
  }

  var commonToolArgs = getCommonToolArguments();
  var restoreTarget = 'jstests/restore/testdata/dump_local_oplog';

  var toolTest = getToolTest('oplog_replay_priority_oplog');

  // collections the two competing oplogs would write into
  var testDB = toolTest.db.getSiblingDB('test');
  testDB.createCollection('data');
  var highPriorityColl = testDB.data;
  testDB.createCollection('op');
  var lowPriorityColl = testDB.op;

  // replay with an explicit --oplogFile alongside the dump's own oplog
  var ret = toolTest.runTool.apply(toolTest, ['restore',
    '--oplogReplay',
    '--oplogFile', 'jstests/restore/testdata/extra_oplog.bson',
    restoreTarget]
    .concat(commonToolArgs));
  assert.eq(0, ret, "restore operation failed");

  // extra_oplog has 5 entries, as explained in oplog_replay_and_limit.js
  assert.eq(5, highPriorityColl.count(),
    "all original entries from high priority oplog should be restored");
  assert.eq(0, lowPriorityColl.count(),
    "no original entries from low priority oplog should be restored");
  toolTest.stop();
}());
(function() {

  if (typeof getToolTest === 'undefined') {
    load('jstests/configs/plain_28.config.js');
  }

  var commonToolArgs = getCommonToolArguments();
  var dumpTarget = 'oplog_replay_sizes';

  // Runs one dump + --oplogReplay restore cycle over a forged oplog with
  // `oplogSize` entries of roughly `documentSize` bytes each, asserting
  // that every entry is replayed.
  function tryOplogReplay(oplogSize, documentSize) {
    var toolTest = getToolTest('oplog_replay_sizes');
    // source collection holding the forged oplog
    var sourceDB = toolTest.db.getSiblingDB('test_oplog');
    var sourceColl = sourceDB.oplog;
    // collection the replayed inserts land in
    var targetDB = toolTest.db.getSiblingDB('test');
    var targetColl = targetDB.op;
    resetDbpath(dumpTarget);

    var debugString = 'with ' + oplogSize + ' ops of size ' + documentSize;
    jsTest.log('Testing --oplogReplay ' + debugString);

    // forge the oplog: oplogSize inserts padded to ~documentSize bytes
    var padding = new Array(documentSize).join("x");
    for (var i = 0; i < oplogSize; i++) {
      sourceColl.insert({
        ts: new Timestamp(0, i),
        op: "i",
        o: {_id: i, x: padding},
        ns: "test.op"
      });
    }

    // dump the forged oplog
    var ret = toolTest.runTool.apply(toolTest, ['dump',
      '--db', 'test_oplog',
      '-c', 'oplog',
      '--out', dumpTarget]
      .concat(commonToolArgs));
    assert.eq(0, ret, "dump operation failed " + debugString);

    // start from an empty test.op collection
    targetColl.drop();
    targetDB.createCollection("op");
    assert.eq(0, targetColl.count());

    // trick restore into replaying the "oplog" forged above
    ret = toolTest.runTool.apply(toolTest, ['restore',
      '--oplogReplay', dumpTarget+'/test_oplog']
      .concat(commonToolArgs));
    assert.eq(0, ret, "restore operation failed " + debugString);
    assert.eq(oplogSize, targetColl.count(),
      "all oplog entries should be inserted " + debugString);
    toolTest.stop();
  }

  // exercise a spread of oplog lengths and op sizes
  tryOplogReplay(1024, 1024); // sanity check
  tryOplogReplay(1024*1024, 1); // millions of micro ops
  tryOplogReplay(8, 16*1024*1023); // 8 ~16MB ops
  tryOplogReplay(32, 1024*1024); // 32 ~1MB ops
  tryOplogReplay(32*1024, 1024); // many ~1KB ops

}());
+ */ +(function() { + 'use strict'; + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + var commonToolArgs = getCommonToolArguments(); + var dumpTarget = 'oplog_replay_specify_file'; + + var toolTest = getToolTest('oplog_replay_specify_file'); + + // The test db and collections we'll be using. + var testDB = toolTest.db.getSiblingDB('test_oplog'); + var testColl = testDB.foo; + var testRestoreDB = toolTest.db.getSiblingDB('test'); + var testRestoreColl = testRestoreDB.op; + resetDbpath(dumpTarget); + + var oplogSize = 100; + + // Create a fake oplog consisting of 100 inserts. + for (var i = 0; i < oplogSize; i++) { + testColl.insert({ + ts: new Timestamp(0, i), + op: "i", + o: {_id: i, x: 'a' + i}, + ns: "test.op" + }); + } + + // Dump the fake oplog. + var ret = toolTest.runTool.apply(toolTest, ['dump', + '--db', 'test_oplog', + '-c', 'foo', + '--out', dumpTarget] + .concat(commonToolArgs)); + assert.eq(0, ret, "dump operation failed"); + + // Dump original data. + testColl.drop(); + assert.eq(0, testColl.count(), + "all original entries should be dropped"); + + // Create the test.op collection. 
+ testRestoreColl.drop(); + testRestoreDB.createCollection("op"); + assert.eq(0, testRestoreColl.count()); + + // Replay the oplog from the provided oplog + ret = toolTest.runTool.apply(toolTest, ['restore', + '--oplogReplay', + '--oplogFile', dumpTarget + '/test_oplog/foo.bson', + dumpTarget] + .concat(commonToolArgs)); + assert.eq(0, ret, "restore operation failed"); + + assert.eq(oplogSize, testRestoreColl.count(), + "all oplog entries should be inserted"); + assert.eq(oplogSize, testColl.count(), + "all original entries should be restored"); + toolTest.stop(); +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/partial_restore.js b/src/mongo/gotools/test/qa-tests/jstests/restore/partial_restore.js new file mode 100644 index 00000000000..0335f94f53b --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/partial_restore.js @@ -0,0 +1,77 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + // Tests using mongorestore to restore only a subset of a dump (either a + // single db or a single collection) from a larger dump. 
(function() {

  if (typeof getToolTest === 'undefined') {
    load('jstests/configs/plain_28.config.js');
  }

  // Tests mongorestore restoring only a subset of a dump: first a single
  // database out of two, then a single collection out of that database.

  jsTest.log('Testing restoration of a subset of a dump');

  var toolTest = getToolTest('partial_restore');
  var commonToolArgs = getCommonToolArguments();

  // where the dump lands
  var dumpTarget = 'partial_restore_dump';
  resetDbpath(dumpTarget);

  // three collections spread across two dbs
  var dbOne = toolTest.db.getSiblingDB('dbOne');
  var dbTwo = toolTest.db.getSiblingDB('dbTwo');
  var collOne = dbOne.collOne;
  var collTwo = dbOne.collTwo;
  var collThree = dbTwo.collThree;

  // seed all three collections
  for (var i = 0; i < 50; i++) {
    collOne.insert({_id: i+'_collOne'});
    collTwo.insert({_id: i+'_collTwo'});
    collThree.insert({_id: i+'_collThree'});
  }
  // sanity check the writes
  [collOne, collTwo, collThree].forEach(function(coll) {
    assert.eq(50, coll.count());
  });

  // dump everything
  var ret = toolTest.runTool.apply(toolTest, ['dump']
    .concat(getDumpTarget(dumpTarget))
    .concat(commonToolArgs));
  assert.eq(0, ret);

  // wipe both databases
  dbOne.dropDatabase();
  dbTwo.dropDatabase();

  // restore just dbOne
  ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'dbOne']
    .concat(getRestoreTarget(dumpTarget+'/dbOne'))
    .concat(commonToolArgs));
  assert.eq(0, ret);

  // only dbOne's collections should be back
  assert.eq(50, collOne.count());
  assert.eq(50, collTwo.count());
  assert.eq(0, collThree.count());

  // wipe again
  dbOne.dropDatabase();

  // restore just dbOne.collTwo
  ret = toolTest.runTool.apply(toolTest, ['restore',
    '--db', 'dbOne',
    '--collection', 'collTwo']
    .concat(getRestoreTarget(dumpTarget+'/dbOne/collTwo.bson'))
    .concat(commonToolArgs));
  assert.eq(0, ret);

  // only that single collection should be back
  assert.eq(0, collOne.count());
  assert.eq(50, collTwo.count());
  assert.eq(0, collThree.count());

  // success
  toolTest.stop();

}());
(function() {

  load("jstests/configs/standard_dump_targets.config.js");

  jsTest.log('Testing that the order of fields is preserved in the oplog');

  var toolTest = new ToolTest('ordered_oplog');
  toolTest.startDB('foo');

  // Replays an "update" oplog entry whose _id is a subdocument with many
  // fields, e.g.:
  //   { "h":{"$numberLong":"7987029173745013482"},"ns":"test.foobar",
  //     "o":{"_id":{"a":1,"b":2,"c":3,"d":5,"e":6,"f":7,"g":8},"foo":"bar"},
  //     "o2":{"_id":{"a":1,"b":2,"c":3,"d":5,"e":6,"f":7,"g":8}},"op":"u",
  //     "ts":{"$timestamp":{"t":1439225650,"i":1}},"v":NumberInt(2) }
  // mongod rejects the update when the _id in "o" and the _id in "o2" do
  // not match, which is what happens if field order is not preserved. A
  // single run could pass by chance (a randomly chosen but correct order),
  // so the replay is repeated to shrink the odds of a false positive.
  var attempt = 0;
  while (attempt < 10) {
    assert.eq(0, toolTest.runTool('restore', '--oplogReplay', 'jstests/restore/testdata/dump_with_complex_id_oplog'));
    attempt++;
  }

  toolTest.stop();

}());
It both checks that when + * validation is turned on invalid documents are not restored and that when a user indicates + * they want to bypass validation, that all documents are restored. + */ + +(function() { + 'use strict'; + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + /** + * Part 1: Test that restore follows document validation rules. + */ + jsTest.log('Testing that restore reacts well to document validation'); + + var toolTest = getToolTest('document_validation'); + var commonToolArgs = getCommonToolArguments(); + + // where we'll put the dump + var dumpTarget = 'doc_validation'; + resetDbpath(dumpTarget); + + // the db we will use + var testDB = toolTest.db.getSiblingDB('test'); + + // create 1000 documents, half of which will pass the validation + for (var i = 0; i < 1000; i++) { + if (i%2 === 0) { + testDB.bar.insert({_id: i, num: i+1, s: String(i)}); + } else { + testDB.bar.insert({_id: i, num: i+1, s: String(i), baz: i}); + } + } + // sanity check the insertion worked + assert.eq(1000, testDB.bar.count(), 'all documents should be inserted'); + + var ret = toolTest.runTool.apply(toolTest, ['dump', '-v'] + .concat(getDumpTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret, 'dumping should run successfully'); + + testDB.dropDatabase(); + assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen'); + + // sanity check that we can restore the data without validation + ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + assert.eq(1000, testDB.bar.count(), 'after the restore, all documents should be seen'); + + testDB.dropDatabase(); + assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen'); + + // turn on validation + var r = testDB.createCollection('bar', {validator: {baz: {$exists: true}}}); + assert.eq(r, {ok: 1}, 'create collection with validation should 
work'); + + // test that it's working + r = testDB.bar.insert({num: 10000}); + assert.eq(r.nInserted, 0, "invalid documents shouldn't be inserted"); + + // restore the 1000 records of which only 500 are valid + ret = toolTest.runTool.apply(toolTest, ['restore', '-v'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret, 'restoring against a collection with validation on should still succeed'); + + assert.eq(500, testDB.bar.count(), 'only the valid documents should be restored'); + + /** + * Part 2: Test that restore can bypass document validation rules. + */ + jsTest.log('Testing that bypass document validation works'); + + testDB.dropDatabase(); + + // turn on validation + r = testDB.createCollection('bar', {validator: {baz: {$exists: true}}}); + assert.eq(r, {ok: 1}, 'create collection with validation should work'); + + // test that we cannot insert an 'invalid' document + r = testDB.bar.insert({num: 10000}); + assert.eq(r.nInserted, 0, 'invalid documents should not be inserted'); + + // restore the 1000 records again with bypassDocumentValidation turned on + ret = toolTest.runTool.apply(toolTest, ['restore', '--bypassDocumentValidation'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret, 'restoring documents should work with bypass document validation set'); + assert.eq(1000, testDB.bar.count(), + 'all documents should be restored with bypass document validation set'); + + /** + * Part 3: Test that restore can restore the document validation rules, + * if they're dumped with the collection. 
+ */ + jsTest.log('Testing that dump and restore restores the validation rules themselves'); + + // clear out the database, including the validation rules + testDB.dropDatabase(); + assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen'); + + // test that we can insert an 'invalid' document + r = testDB.bar.insert({num: 10000}); + assert.eq(r.nInserted, 1, + 'invalid documents should be inserted after validation rules are dropped'); + + testDB.dropDatabase(); + assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen'); + + // restore the 1000 records again + ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + assert.eq(1000, testDB.bar.count()); + + // turn on validation on a existing collection + testDB.runCommand({'collMod': 'bar', 'validator': {baz: {$exists: true}}}); + + // re-dump everything, this time dumping the validation rules themselves + ret = toolTest.runTool.apply(toolTest, ['dump', '-v'] + .concat(getDumpTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret, 'the dump should run successfully'); + + // clear out the database, including the validation rules + testDB.dropDatabase(); + assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen'); + + // test that we can insert an 'invalid' document + r = testDB.bar.insert({num: 10000}); + assert.eq(r.nInserted, 1, + 'invalid documents should be inserted after we drop validation rules'); + + testDB.dropDatabase(); + assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen'); + + // restore the 1000 records again + ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret, 'restoring rules and some invalid documents should run successfully'); + assert.eq(500, testDB.bar.count(), + 'restoring the validation rules and documents should only 
restore valid documents'); + + /** + * Part 4: Test that restore can bypass the document validation rules, + * even if they're dumped with the collection and restored with the collection. + */ + jsTest.log('Testing that bypass document validation works when restoring the rules as well'); + + // clear out the database, including the validation rules + testDB.dropDatabase(); + assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen'); + + // test that we can insert an 'invalid' document + r = testDB.bar.insert({num: 10000}); + assert.eq(r.nInserted, 1, + 'invalid documents should be inserted after validation rules are dropped'); + + testDB.dropDatabase(); + assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen'); + + // restore the 1000 records again with bypassDocumentValidation turned on + ret = toolTest.runTool.apply(toolTest, ['restore', '--bypassDocumentValidation'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret, 'restoring documents should work with bypass document validation set'); + assert.eq(1000, testDB.bar.count(), + 'all documents should be restored with bypass document validation set'); +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/sharded_fullrestore.js b/src/mongo/gotools/test/qa-tests/jstests/restore/sharded_fullrestore.js new file mode 100644 index 00000000000..0cff1cc2845 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/sharded_fullrestore.js @@ -0,0 +1,43 @@ +(function() { + if (typeof getToolTest === 'undefined') { + load('jstests/configs/sharding_28.config.js'); + } + + if (dump_targets === "archive") { + print('skipping test incompatable with archiving'); + return assert(true); + } + + var targetPath = 'restore_full_restore'; + var toolTest = getToolTest('fullrestore'); + var commonToolArgs = getCommonToolArguments(); + + var sourceDB = toolTest.db.getSiblingDB('blahblah'); + + // put in some sample data + for (var i=0; 
i<100; i++) { + sourceDB.test.insert({x: 1}); + } + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump'] + .concat(getDumpTarget(targetPath)) + .concat(commonToolArgs)); + assert.eq(ret, 0, "dump of full sharded system should have succeeded"); + + // a full restore should fail + ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(getRestoreTarget(targetPath)) + .concat(commonToolArgs)); + assert.neq(ret, 0, "restore of full sharded system should have failed"); + + // delete the config dir + resetDbpath(targetPath + "/config"); + + // *now* the restore should succeed + ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(getRestoreTarget(targetPath)) + .concat(commonToolArgs)); + assert.eq(ret, 0, "restore of sharded system without config db should have succeeded"); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/stop_on_error.js b/src/mongo/gotools/test/qa-tests/jstests/restore/stop_on_error.js new file mode 100644 index 00000000000..b91b391a617 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/stop_on_error.js @@ -0,0 +1,48 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + var toolTest = getToolTest('stop_on_error'); + var commonToolArgs = getCommonToolArguments(); + + var dbOne = toolTest.db.getSiblingDB('dbOne'); + // create a test collection + for (var i=0; i<=100; i++) { + dbOne.test.insert({_id: i, x: i*i}); + } + + // dump it + var dumpTarget = 'stop_on_error_dump'; + resetDbpath(dumpTarget); + var ret = toolTest.runTool.apply(toolTest, ['dump'] + .concat(getDumpTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // drop the database so it's empty + dbOne.dropDatabase(); + + // restore it - database was just dropped, so this should work successfully + ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret, 
"restore to empty DB should have returned successfully"); + + // restore it again with --stopOnError - this one should fail since there are dup keys + ret = toolTest.runTool.apply(toolTest, ['restore', '--stopOnError', '-vvvv'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.neq(0, ret); + + // restore it one more time without --stopOnError - there are dup keys but they will be ignored + ret = toolTest.runTool.apply(toolTest, ['restore', '-vvvv'] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/symlinks.js b/src/mongo/gotools/test/qa-tests/jstests/restore/symlinks.js new file mode 100644 index 00000000000..a27ef8b94c3 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/symlinks.js @@ -0,0 +1,46 @@ +(function() { + + // Tests using mongorestore on a dump directory containing symlinks + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + if (dump_targets !== "standard") { + print('skipping test incompatable with archiving or compression'); + return assert(true); + } + + jsTest.log('Testing restoration from a dump containing symlinks'); + + var toolTest = getToolTest('symlinks'); + + // this test uses the testdata/dump_with_soft_link. within that directory, + // the dbTwo directory is a soft link to testdata/soft_linked_db and the + // dbOne/data.bson file is a soft link to testdata/soft_linked_collection.bson. + // the file not_a_dir is a softlink to a bson file, and is there to make + // sure that softlinked regular files are not treated as directories. 
+ + // the two dbs we'll be using + var dbOne = toolTest.db.getSiblingDB('dbOne'); + var dbTwo = toolTest.db.getSiblingDB('dbTwo'); + var notADir = toolTest.db.getSiblingDB('not_a_dir'); + + // restore the data + var ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(getRestoreTarget('jstests/restore/testdata/dump_with_soft_links'))); + assert.eq(0, ret); + + // make sure the data was restored properly + assert.eq(10, dbOne.data.count()); + assert.eq(10, dbTwo.data.count()); + assert.eq(0, notADir.data.count()); + for (var i = 0; i < 10; i++) { + assert.eq(1, dbOne.data.count({_id: i+'_dbOne'})); + assert.eq(1, dbTwo.data.count({_id: i+'_dbTwo'})); + } + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank.bson new file mode 100644 index 00000000000..e69de29bb2d --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank_metadata.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank_metadata.bson new file mode 100644 index 00000000000..e69de29bb2d --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank_metadata.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank_metadata.metadata.json b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank_metadata.metadata.json new file mode 100644 index 00000000000..0967ef424bc --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank_metadata.metadata.json @@ -0,0 +1 @@ +{} diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankdb/README b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankdb/README new file mode 100644 index 00000000000..8a13ce0a00c --- 
/dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankdb/README @@ -0,0 +1 @@ +This exists so that this directory can remain blank of .bson files but still be checked into version control. diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_empty/db_empty/coll_empty.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_empty/db_empty/coll_empty.bson new file mode 100644 index 00000000000..e69de29bb2d --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_empty/db_empty/coll_empty.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_extended_json_options/test/changelog.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_extended_json_options/test/changelog.bson Binary files differnew file mode 100644 index 00000000000..3799a6f04b6 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_extended_json_options/test/changelog.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_extended_json_options/test/changelog.metadata.json b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_extended_json_options/test/changelog.metadata.json new file mode 100644 index 00000000000..64d0433a836 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_extended_json_options/test/changelog.metadata.json @@ -0,0 +1 @@ +{ "options" : { "create" : "changelog", "size" : { "$numberLong" : "10000000" }, "capped" : true }, "indexes" : [ { "v" : 1, "key" : { "_id" : { "$numberLong" : "1"}}, "ns" : "config.changelog", "name" : "_id_" }, {"v":1,"key":{"pos":"2d"},"name":"position_2d","ns":"config.changelog","min":{"$numberLong":"0"},"max":{"$numberLong":"1000"},"bits":{"$numberLong":"32"}} ] } diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_local_oplog/local/oplog.rs.bson 
b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_local_oplog/local/oplog.rs.bson Binary files differnew file mode 100644 index 00000000000..6051944948e --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_local_oplog/local/oplog.rs.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_local_oplog/local/oplog.rs.metadata.json b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_local_oplog/local/oplog.rs.metadata.json new file mode 100644 index 00000000000..9e28c8db056 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_local_oplog/local/oplog.rs.metadata.json @@ -0,0 +1 @@ +{"options":{"capped":true,"size":100096},"indexes":[]}
\ No newline at end of file diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_oplog_conflict/oplog.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_oplog_conflict/oplog.bson Binary files differnew file mode 100644 index 00000000000..a9ada58715f --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_oplog_conflict/oplog.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_complex_id_oplog/oplog.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_complex_id_oplog/oplog.bson Binary files differnew file mode 100644 index 00000000000..9a47fca217f --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_complex_id_oplog/oplog.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_invalid/dbOne/invalid_metadata.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_invalid/dbOne/invalid_metadata.bson new file mode 100644 index 00000000000..e69de29bb2d --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_invalid/dbOne/invalid_metadata.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_invalid/dbOne/invalid_metadata.metadata.json b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_invalid/dbOne/invalid_metadata.metadata.json new file mode 100644 index 00000000000..e0ea3257e88 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_invalid/dbOne/invalid_metadata.metadata.json @@ -0,0 +1 @@ +{"indexes":[{"v":1,"name":"_id_","ns":"dbOne.invalid_metadata"},{"v":1,"name":"a_1","ns":"dbOne.invalid_metadata"}]} diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_coll.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_coll.bson new file mode 100644 index 
00000000000..dd6d86a43dc --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_coll.bson @@ -0,0 +1 @@ +XXX diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_metadata.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_metadata.bson new file mode 100644 index 00000000000..e69de29bb2d --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_metadata.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_metadata.metadata.json b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_metadata.metadata.json new file mode 100644 index 00000000000..dd6d86a43dc --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_metadata.metadata.json @@ -0,0 +1 @@ +XXX diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/missing_metadata.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/missing_metadata.bson new file mode 100644 index 00000000000..e69de29bb2d --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/missing_metadata.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_noop_in_oplog/oplog.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_noop_in_oplog/oplog.bson Binary files differnew file mode 100644 index 00000000000..29172294c0f --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_noop_in_oplog/oplog.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/oplog.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/oplog.bson 
Binary files differnew file mode 100644 index 00000000000..a9ada58715f --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/oplog.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/data.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/data.bson Binary files differnew file mode 100644 index 00000000000..c570d917b76 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/data.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/data.metadata.json b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/data.metadata.json new file mode 100644 index 00000000000..65e5d967f00 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/data.metadata.json @@ -0,0 +1 @@ +{"options":{"flags":1},"indexes":[{"v":1,"key":{"_id":1},"name":"_id_","ns":"test.data"}]}
\ No newline at end of file diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/system.indexes.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/system.indexes.bson Binary files differnew file mode 100644 index 00000000000..324f8e270df --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/system.indexes.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/data.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/data.bson Binary files differnew file mode 100644 index 00000000000..ff0d2e6bc31 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/data.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/data.metadata.json b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/data.metadata.json new file mode 100644 index 00000000000..98eb8799771 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/data.metadata.json @@ -0,0 +1 @@ +{"options":{"flags":1},"indexes":[{"v":1,"key":{"_id":1},"name":"_id_","ns":"dbOne.data"}]}
\ No newline at end of file diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/system.indexes.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/system.indexes.bson Binary files differnew file mode 100644 index 00000000000..f1247e928c3 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/system.indexes.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/data.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/data.bson Binary files differnew file mode 100644 index 00000000000..0d5439cec2e --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/data.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/data.metadata.json b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/data.metadata.json new file mode 100644 index 00000000000..8fa8534bde5 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/data.metadata.json @@ -0,0 +1 @@ +{"options":{"flags":1},"indexes":[{"v":1,"key":{"_id":1},"name":"_id_","ns":"dbTwo.data"}]}
\ No newline at end of file diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/system.indexes.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/system.indexes.bson Binary files differnew file mode 100644 index 00000000000..cea78cca0a5 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/system.indexes.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/not_a_dir b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/not_a_dir Binary files differnew file mode 100644 index 00000000000..a9ada58715f --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/not_a_dir diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/extra_oplog.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/extra_oplog.bson Binary files differnew file mode 100644 index 00000000000..a9ada58715f --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/extra_oplog.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_collection.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_collection.bson Binary files differnew file mode 100644 index 00000000000..ff0d2e6bc31 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_collection.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/data.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/data.bson Binary files differnew file mode 100644 index 00000000000..0d5439cec2e --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/data.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/data.metadata.json 
b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/data.metadata.json new file mode 100644 index 00000000000..8fa8534bde5 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/data.metadata.json @@ -0,0 +1 @@ +{"options":{"flags":1},"indexes":[{"v":1,"key":{"_id":1},"name":"_id_","ns":"dbTwo.data"}]}
\ No newline at end of file diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/system.indexes.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/system.indexes.bson Binary files differnew file mode 100644 index 00000000000..cea78cca0a5 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/system.indexes.bson diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles.js b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles.js new file mode 100644 index 00000000000..2cdd595d090 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles.js @@ -0,0 +1,85 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + // Tests running mongorestore with --restoreDbUsersAndRoles + + jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles'); + + var toolTest = getToolTest('users_and_roles'); + var commonToolArgs = getCommonToolArguments(); + + // where we'll put the dump + var dumpTarget = 'users_and_roles_dump'; + resetDbpath(dumpTarget); + + // the db we'll be using + var testDB = toolTest.db.getSiblingDB('test'); + + // create some users and roles on the database + testDB.createUser({ + user: 'userOne', + pwd: 'pwdOne', + roles: [{role: 'read', db: 'test'}], + }); + testDB.createRole({ + role: 'roleOne', + privileges: [{ + resource: {db: 'test', collection: ''}, + actions: ['find'], + }], + roles: [], + }); + testDB.createUser({ + user: 'userTwo', + pwd: 'pwdTwo', + roles: [{role: 'roleOne', db: 'test'}], + }); + + // insert some data + for (var i = 0; i < 10; i++) { + testDB.data.insert({_id: i}); + } + // sanity check the insertion worked + assert.eq(10, testDB.data.count()); + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump', '--db', 'test', '--dumpDbUsersAndRoles'] + .concat(getDumpTarget(dumpTarget)) + 
.concat(commonToolArgs)); + assert.eq(0, ret); + + // drop the database, users, and roles + testDB.dropDatabase(); + testDB.dropAllUsers(); + testDB.dropAllRoles(); + + // restore the data, specifying --restoreDBUsersAndRoles + ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test', '--restoreDbUsersAndRoles'] + .concat(getRestoreTarget(dumpTarget+'/test')) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // make sure the data was restored + assert.eq(10, testDB.data.count()); + for (i = 0; i < 10; i++) { + assert.eq(1, testDB.data.count({_id: i})); + } + + // make sure the users were restored + var users = testDB.getUsers(); + assert.eq(2, users.length); + assert(users[0].user === 'userOne' || users[1].user === 'userOne'); + assert(users[0].user === 'userTwo' || users[1].user === 'userTwo'); + + // make sure the role was restored + var roles = testDB.getRoles(); + assert.eq(1, roles.length); + assert.eq('roleOne', roles[0].role); + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_26_to_28_to_26.js b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_26_to_28_to_26.js new file mode 100644 index 00000000000..e0f9cf3dd1c --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_26_to_28_to_26.js @@ -0,0 +1,143 @@ +// This test requires mongo 2.6.x, and mongo 3.0.0 releases +// @tags: [requires_mongo_26, requires_mongo_30] +(function() { + + load("jstests/configs/standard_dump_targets.config.js"); + // skip tests requiring wiredTiger storage engine on pre 2.8 mongod + if (TestData && TestData.storageEngine === 'wiredTiger') { + return; + } + + // Tests using mongorestore with --restoreDbUsersAndRoles, using a dump from + // a 2.6 mongod and restoring to a 2.8 mongod, then dumping again and + // restoring to a 2.6 mongod. 
+ + jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles,'+ + ' restoring a 2.6 dump to a 2.8 mongod, then back to a 2.6 mongod'); + + var toolTest = new ToolTest('users_and_roles_26_to_28_to_26', {binVersion: '2.6'}); + toolTest.startDB('foo'); + + // where we'll put the dump + var dumpTarget = 'users_and_roles_26_to_28_to_26_dump'; + resetDbpath(dumpTarget); + + // the db we'll be using + var testDB = toolTest.db.getSiblingDB('test'); + + // create some users and roles on the database + testDB.createUser({ + user: 'userOne', + pwd: 'pwdOne', + roles: [{role: 'read', db: 'test'}], + }); + testDB.createRole({ + role: 'roleOne', + privileges: [{ + resource: {db: 'test', collection: ''}, + actions: ['find'], + }], + roles: [], + }); + testDB.createUser({ + user: 'userTwo', + pwd: 'pwdTwo', + roles: [{role: 'roleOne', db: 'test'}], + }); + + // insert some data + for (var i = 0; i < 10; i++) { + testDB.data.insert({_id: i}); + } + // sanity check the insertion worked + assert.eq(10, testDB.data.count()); + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump', '--db', 'test', '--dumpDbUsersAndRoles'] + .concat(getDumpTarget(dumpTarget))); + assert.eq(0, ret); + + // drop the database, users, and roles + testDB.dropDatabase(); + testDB.dropAllUsers(); + testDB.dropAllRoles(); + + // restart the mongod as a 2.8 + stopMongod(toolTest.port); + toolTest.m = null; + toolTest.db = null; + delete toolTest.options.binVersion; + toolTest.startDB('foo'); + + // refresh the db reference + testDB = toolTest.db.getSiblingDB('test'); + + // restore the data, specifying --restoreDBUsersAndRoles + ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test', '--restoreDbUsersAndRoles'] + .concat(getRestoreTarget(dumpTarget+'/test'))); + assert.eq(0, ret); + + // make sure the data was restored + assert.eq(10, testDB.data.count()); + for (i = 0; i < 10; i++) { + assert.eq(1, testDB.data.count({_id: i})); + } + + // make sure the users were 
restored + var users = testDB.getUsers(); + assert.eq(2, users.length); + assert(users[0].user === 'userOne' || users[1].user === 'userOne'); + assert(users[0].user === 'userTwo' || users[1].user === 'userTwo'); + + // make sure the role was restored + var roles = testDB.getRoles(); + assert.eq(1, roles.length); + assert.eq('roleOne', roles[0].role); + + // dump the data again, to a slightly different target + ret = toolTest.runTool.apply(toolTest, ['dump', '--db', 'test', '--dumpDbUsersAndRoles'] + .concat(getDumpTarget(dumpTarget+'_second'))); + assert.eq(0, ret); + + // drop the database, users, and roles + testDB.dropDatabase(); + testDB.dropAllUsers(); + testDB.dropAllRoles(); + + // restart the mongod as a 2.6 + stopMongod(toolTest.port); + toolTest.m = null; + toolTest.db = null; + toolTest.options = toolTest.options || {}; + toolTest.options.binVersion = '2.6'; + toolTest.startDB('foo'); + + // refresh the db reference + testDB = toolTest.db.getSiblingDB('test'); + + // restore the data, specifying --restoreDBUsersAndRoles + ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test', '--restoreDbUsersAndRoles'] + .concat(getRestoreTarget(dumpTarget+'_second/test'))); + assert.eq(0, ret); + + // make sure the data was restored + assert.eq(10, testDB.data.count()); + for (i = 0; i < 10; i++) { + assert.eq(1, testDB.data.count({_id: i})); + } + + // make sure the users were restored + users = testDB.getUsers(); + assert.eq(2, users.length); + assert(users[0].user === 'userOne' || users[1].user === 'userOne'); + assert(users[0].user === 'userTwo' || users[1].user === 'userTwo'); + + // make sure the role was restored + roles = testDB.getRoles(); + assert.eq(1, roles.length); + assert.eq('roleOne', roles[0].role); + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_28_to_26.js b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_28_to_26.js new file mode 100644 index 
00000000000..abc37867c6f --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_28_to_26.js @@ -0,0 +1,157 @@ +// This test requires mongo 2.6.x, and mongo 3.0.0 releases +// @tags: [requires_mongo_26, requires_mongo_30] +(function() { + + load("jstests/configs/standard_dump_targets.config.js"); + + // skip tests requiring wiredTiger storage engine on pre 2.8 mongod + if (TestData && TestData.storageEngine === 'wiredTiger') { + return; + } + + // Tests using mongorestore with --restoreDbUsersAndRoles, using a dump from + // a 2.8 mongod and restoring to a 2.6 mongod, which should fail. + + jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles,'+ + ' restoring a 2.8 dump to a 2.6 mongod'); + + var toolTest = new ToolTest('users_and_roles_28_to_26'); + resetDbpath(toolTest.dbpath); + toolTest.startDB('foo'); + + // where we'll put the dump + var dumpTarget = 'users_and_roles_28_to_26_dump'; + resetDbpath(dumpTarget); + + // the db we'll be using + var testDB = toolTest.db.getSiblingDB('test'); + + // create some users and roles on the database + testDB.createUser({ + user: 'userOne', + pwd: 'pwdOne', + roles: [{role: 'read', db: 'test'}], + }); + testDB.createRole({ + role: 'roleOne', + privileges: [{ + resource: {db: 'test', collection: ''}, + actions: ['find'], + }], + roles: [], + }); + testDB.createUser({ + user: 'userTwo', + pwd: 'pwdTwo', + roles: [{role: 'roleOne', db: 'test'}], + }); + + // insert some data + for (var i = 0; i < 10; i++) { + testDB.data.insert({_id: i}); + } + // sanity check the insertion worked + assert.eq(10, testDB.data.count()); + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump', '--db', 'test', '--dumpDbUsersAndRoles'] + .concat(getDumpTarget(dumpTarget))); + assert.eq(0, ret); + + // drop the database, users, and roles + testDB.dropDatabase(); + testDB.dropAllUsers(); + testDB.dropAllRoles(); + + // restart the mongod as a 2.6 + stopMongod(toolTest.port); + toolTest.m 
= null; + toolTest.db = null; + toolTest.options = toolTest.options || {}; + toolTest.options.binVersion = '2.6'; + resetDbpath(toolTest.dbpath); + toolTest.startDB('foo'); + + // refresh the db reference + testDB = toolTest.db.getSiblingDB('test'); + + // restore the data, specifying --restoreDBUsersAndRoles. it should fail + // since the auth version is too new + ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test', '--restoreDbUsersAndRoles'] + .concat(getRestoreTarget(dumpTarget+'/test'))); + assert.neq(0, ret); + + // success + toolTest.stop(); + jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles,'+ + ' restoring a 2.8 dump to a 2.6 mongod'); + + toolTest = new ToolTest('users_and_roles_28_to_26'); + resetDbpath(toolTest.dbpath); + toolTest.startDB('foo'); + + // where we'll put the dump + dumpTarget = 'users_and_roles_28_to_26_dump'; + + // the db we'll be using + testDB = toolTest.db.getSiblingDB('test'); + + // create some users and roles on the database + testDB.createUser({ + user: 'userOne', + pwd: 'pwdOne', + roles: [{role: 'read', db: 'test'}], + }); + testDB.createRole({ + role: 'roleOne', + privileges: [{ + resource: {db: 'test', collection: ''}, + actions: ['find'], + }], + roles: [], + }); + testDB.createUser({ + user: 'userTwo', + pwd: 'pwdTwo', + roles: [{role: 'roleOne', db: 'test'}], + }); + + // insert some data + for (i = 0; i < 10; i++) { + testDB.data.insert({_id: i}); + } + // sanity check the insertion worked + assert.eq(10, testDB.data.count()); + + // dump the data + ret = toolTest.runTool.apply(toolTest, ['dump', '--db', 'test', '--dumpDbUsersAndRoles'] + .concat(getDumpTarget(dumpTarget))); + assert.eq(0, ret); + + // drop the database, users, and roles + testDB.dropDatabase(); + testDB.dropAllUsers(); + testDB.dropAllRoles(); + + // restart the mongod as a 2.6 + stopMongod(toolTest.port); + toolTest.m = null; + toolTest.db = null; + toolTest.options = toolTest.options || {}; + 
toolTest.options.binVersion = '2.6'; + resetDbpath(toolTest.dbpath); + toolTest.startDB('foo'); + + // refresh the db reference + testDB = toolTest.db.getSiblingDB('test'); + + // restore the data, specifying --restoreDBUsersAndRoles. it should fail + // since the auth version is too new + ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test', '--restoreDbUsersAndRoles'] + .concat(getRestoreTarget(dumpTarget+'/test'))); + assert.neq(0, ret); + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_full_dump.js b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_full_dump.js new file mode 100644 index 00000000000..97b95377a45 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_full_dump.js @@ -0,0 +1,142 @@ +// This test requires mongo 2.6.x releases +// @tags: [requires_mongo_26] +(function() { + + // Tests running mongorestore with --restoreDbUsersAndRoles against + // a full dump. 
+ + + jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles against'+ + ' a full dump'); + + if (typeof getDumpTarget === 'undefined') { + load('jstests/configs/standard_dump_targets.config.js'); + } + + if (dump_targets !== "standard") { + print('skipping test incompatable with archiving or compression'); + return assert(true); + } + + var runTest = function(sourceDBVersion, dumpVersion, restoreVersion, destDBVersion) { + + jsTest.log('Running with sourceDBVersion=' + (sourceDBVersion || 'latest') + + ', dumpVersion=' + (dumpVersion || 'latest') + ', restoreVersion=' + + (restoreVersion || 'latest') + ', and destDBVersion=' + + (destDBVersion || 'latest')); + + var toolTest = new ToolTest('users_and_roles_full_dump', + {binVersion: sourceDBVersion}); + toolTest.startDB('foo'); + + // where we'll put the dump + var dumpTarget = 'users_and_roles_full_dump_dump'; + resetDbpath(dumpTarget); + + // the db we'll be using, and the admin db + var adminDB = toolTest.db.getSiblingDB('admin'); + var testDB = toolTest.db.getSiblingDB('test'); + + // create a user and role on the admin database + adminDB.createUser({ + user: 'adminUser', + pwd: 'password', + roles: [{role: 'read', db: 'admin'}], + }); + adminDB.createRole({ + role: 'adminRole', + privileges: [{ + resource: {db: 'admin', collection: ''}, + actions: ['find'], + }], + roles: [], + }); + + // create some users and roles on the database + testDB.createUser({ + user: 'userOne', + pwd: 'pwdOne', + roles: [{role: 'read', db: 'test'}], + }); + testDB.createRole({ + role: 'roleOne', + privileges: [{ + resource: {db: 'test', collection: ''}, + actions: ['find'], + }], + roles: [], + }); + testDB.createUser({ + user: 'userTwo', + pwd: 'pwdTwo', + roles: [{role: 'roleOne', db: 'test'}], + }); + + // insert some data + for (var i = 0; i < 10; i++) { + testDB.data.insert({_id: i}); + } + // sanity check the insertion worked + assert.eq(10, testDB.data.count()); + + // dump the data + var args = ['mongodump' 
+ (dumpVersion ? ('-'+dumpVersion) : ''), + '--port', toolTest.port] + .concat(getDumpTarget(dumpTarget)); + var ret = runMongoProgram.apply(this, args); + assert.eq(0, ret); + + // restart the mongod, with a clean db path + stopMongod(toolTest.port); + resetDbpath(toolTest.dbpath); + toolTest.m = null; + toolTest.db = null; + toolTest.options.binVersion = destDBVersion; + toolTest.startDB('foo'); + + // refresh the db references + adminDB = toolTest.db.getSiblingDB('admin'); + testDB = toolTest.db.getSiblingDB('test'); + + // do a full restore + args = ['mongorestore' + (restoreVersion ? ('-'+restoreVersion) : ''), + '--port', toolTest.port] + .concat(getRestoreTarget(dumpTarget)); + ret = runMongoProgram.apply(this, args); + assert.eq(0, ret); + + // make sure the data was restored + assert.eq(10, testDB.data.count()); + for (i = 0; i < 10; i++) { + assert.eq(1, testDB.data.count({_id: i})); + } + + // make sure the users were restored + var users = testDB.getUsers(); + assert.eq(2, users.length); + assert(users[0].user === 'userOne' || users[1].user === 'userOne'); + assert(users[0].user === 'userTwo' || users[1].user === 'userTwo'); + var adminUsers = adminDB.getUsers(); + assert.eq(1, adminUsers.length); + assert.eq('adminUser', adminUsers[0].user); + + // make sure the roles were restored + var roles = testDB.getRoles(); + assert.eq(1, roles.length); + assert.eq('roleOne', roles[0].role); + var adminRoles = adminDB.getRoles(); + assert.eq(1, adminRoles.length); + assert.eq('adminRole', adminRoles[0].role); + + // success + toolTest.stop(); + + }; + + // 'undefined' triggers latest + runTest('2.6', '2.6', undefined, '2.6'); + runTest('2.6', '2.6', undefined, undefined); + runTest('2.6', undefined, undefined, undefined); + runTest('2.6', undefined, undefined, '2.6'); + runTest(undefined, undefined, undefined, undefined); +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_temp_collections.js 
b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_temp_collections.js new file mode 100644 index 00000000000..fdbf236e8f8 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_temp_collections.js @@ -0,0 +1,104 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + + // Tests running mongorestore with --restoreDbUsersAndRoles + + jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles'); + + var toolTest = getToolTest('users_and_roles_temp_collections'); + var commonToolArgs = getCommonToolArguments(); + + // where we'll put the dump + var dumpTarget = 'users_and_roles_temp_collections_dump'; + resetDbpath(dumpTarget); + + // the db we'll be using + var testDB = toolTest.db.getSiblingDB('test'); + + // create some users and roles on the database + testDB.createUser({ + user: 'userOne', + pwd: 'pwdOne', + roles: [{role: 'read', db: 'test'}], + }); + testDB.createRole({ + role: 'roleOne', + privileges: [{ + resource: {db: 'test', collection: ''}, + actions: ['find'], + }], + roles: [], + }); + testDB.createUser({ + user: 'userTwo', + pwd: 'pwdTwo', + roles: [{role: 'roleOne', db: 'test'}], + }); + + // insert some data + for (var i = 0; i < 10; i++) { + testDB.data.insert({_id: i}); + } + // sanity check the insertion worked + assert.eq(10, testDB.data.count()); + + // dump the data + var ret = toolTest.runTool.apply(toolTest, ['dump', '--db', 'test', '--dumpDbUsersAndRoles'] + .concat(getDumpTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + // drop the database, users, and roles + testDB.dropDatabase(); + testDB.dropAllUsers(); + testDB.dropAllRoles(); + + // insert to the default temp collections so we hit them later + var adminDB = toolTest.db.getSiblingDB('admin'); + adminDB.tempusers.insert({_id: 1}); + adminDB.temproles.insert({_id: 1}); + + // try to restore the data + ret = toolTest.runTool.apply(toolTest, 
['restore', '--db', 'test', '--restoreDbUsersAndRoles'] + .concat(getRestoreTarget(dumpTarget+'/test')) + .concat(commonToolArgs)); + + // we should succeed with default temp collections + assert.eq(0, ret); + + // try to restore the data with new temp collections + ret = toolTest.runTool.apply(toolTest, ['restore', + '--db', 'test', + '--tempUsersColl', 'tempU', + '--tempRolesColl', 'tempR', + '--restoreDbUsersAndRoles'] + .concat(getRestoreTarget(dumpTarget+'/test')) + .concat(commonToolArgs)); + + // we should succeed with new temp collections + assert.eq(0, ret); + + // make sure the data was restored + assert.eq(10, testDB.data.count()); + for (i = 0; i < 10; i++) { + assert.eq(1, testDB.data.count({_id: i})); + } + + // make sure the users were restored + var users = testDB.getUsers(); + assert.eq(2, users.length); + assert(users[0].user === 'userOne' || users[1].user === 'userOne'); + assert(users[0].user === 'userTwo' || users[1].user === 'userTwo'); + + // make sure the role was restored + var roles = testDB.getRoles(); + assert.eq(1, roles.length); + assert.eq('roleOne', roles[0].role); + + // success + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/write_concern.js b/src/mongo/gotools/test/qa-tests/jstests/restore/write_concern.js new file mode 100644 index 00000000000..ecdcddbcb18 --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/write_concern.js @@ -0,0 +1,64 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + var toolTest = new ToolTest('write_concern', null); + var commonToolArgs = getCommonToolArguments(); + + var rs = new ReplSetTest({ + name: "rpls", + nodes: 3, + useHostName: true, + settings: {chainingAllowed: false}, + }); + + rs.startSet(); + rs.initiate(); + rs.awaitReplication(); + toolTest.port = rs.getPrimary().port; + var dbOne = rs.getPrimary().getDB("dbOne"); + + // create a test collection + for (var i=0; i<=100; 
i++) { + dbOne.test.insert({_id: i, x: i*i}); + } + rs.awaitReplication(); + + // dump the data that we'll + var dumpTarget = 'write_concern_dump'; + resetDbpath(dumpTarget); + var ret = toolTest.runTool.apply(toolTest, ['dump'] + .concat(getDumpTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + function writeConcernTestFunc(exitCode, writeConcern, name) { + jsTest.log(name); + var ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(writeConcern) + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(exitCode, ret, name); + dbOne.dropDatabase(); + } + + function noConnectTest() { + return startMongoProgramNoConnect.apply(null, ['mongorestore', + '--writeConcern={w:3}', '--host', rs.getPrimary().host] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + } + + // drop the database so it's empty + dbOne.dropDatabase(); + + // load and run the write concern suite + load('jstests/libs/wc_framework.js'); + runWCTest("mongorestore", rs, toolTest, writeConcernTestFunc, noConnectTest); + + dbOne.dropDatabase(); + rs.stopSet(); + toolTest.stop(); + +}()); diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/write_concern_mongos.js b/src/mongo/gotools/test/qa-tests/jstests/restore/write_concern_mongos.js new file mode 100644 index 00000000000..593004daaae --- /dev/null +++ b/src/mongo/gotools/test/qa-tests/jstests/restore/write_concern_mongos.js @@ -0,0 +1,69 @@ +(function() { + + if (typeof getToolTest === 'undefined') { + load('jstests/configs/plain_28.config.js'); + } + var toolTest = new ToolTest('write_concern', null); + var st = new ShardingTest({ + shards: { + rs0: { + nodes: 3, + useHostName: true, + settings: {chainingAllowed: false}, + }, + }, + mongos: 1, + config: 1, + configReplSetTestOptions: { + settings: {chainingAllowed: false}, + }, + }); + var rs = st.rs0; + rs.awaitReplication(); + toolTest.port = st.s.port; + var commonToolArgs = getCommonToolArguments(); + var dbOne = 
st.s.getDB("dbOne"); + + // create a test collection + for (var i=0; i<=100; i++) { + dbOne.test.insert({_id: i, x: i*i}); + } + rs.awaitReplication(); + + // dump the data that we'll + var dumpTarget = 'write_concern_mongos_dump'; + resetDbpath(dumpTarget); + var ret = toolTest.runTool.apply(toolTest, ['dump', '-d', 'dbOne'] + .concat(getDumpTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(0, ret); + + function writeConcernTestFunc(exitCode, writeConcern, name) { + jsTest.log(name); + var ret = toolTest.runTool.apply(toolTest, ['restore'] + .concat(writeConcern) + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + assert.eq(exitCode, ret, name); + dbOne.dropDatabase(); + } + + function noConnectTest() { + return startMongoProgramNoConnect.apply(null, ['mongorestore', + '--writeConcern={w:3}', '--host', st.s.host] + .concat(getRestoreTarget(dumpTarget)) + .concat(commonToolArgs)); + } + + // drop the database so it's empty + dbOne.dropDatabase(); + + // load and run the write concern suite + load('jstests/libs/wc_framework.js'); + runWCTest("mongorestore", rs, toolTest, writeConcernTestFunc, noConnectTest); + + dbOne.dropDatabase(); + rs.stopSet(); + toolTest.stop(); + +}()); |