author    | Daniel Gottlieb <daniel.gottlieb@mongodb.com> | 2018-09-20 15:00:47 -0400
committer | Daniel Gottlieb <daniel.gottlieb@mongodb.com> | 2018-09-20 15:00:47 -0400
commit    | 630eabac0591f207b29b6be014257387a9a7a904 (patch)
tree      | 463bd23d64f3dec795b2fcf769d88a8fbe10ef50 /jstests
parent    | 4ec12c35a07a8c0f3a30692aec413a71fdab30de (diff)
download  | mongo-630eabac0591f207b29b6be014257387a9a7a904.tar.gz
SERVER-37192: Move $backupCursor to enterprise.
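The $backupCursor aggregation stage itself moves to the enterprise module; the community-side change below keeps the auth test entry but lets it skip itself when the stage is unavailable. A minimal sketch of how the new skipTest hook is consumed, assuming a trivial standalone runner (the entry mirrors the one added to jstests/auth/lib/commands_lib.js in this diff; the runner function is illustrative only, not the real runOneTest implementation):

```javascript
// Sketch: a test entry using the "skipTest" hook added by this commit.
const testEntry = {
    testname: "aggregate_$backupCursor",
    command: {aggregate: 1, cursor: {}, pipeline: [{$backupCursor: {}}]},
    // Only the enterprise module registers the $backupCursor stage, so skip
    // this test entirely on community builds.
    skipTest: (conn) =>
        !conn.getDB("admin").runCommand({buildInfo: 1}).modules.includes("enterprise"),
};

// Hypothetical runner showing the skip check the harness performs before
// dispatching the command.
function maybeRunTest(conn, t) {
    if (t.skipTest && t.skipTest(conn)) {
        return [];  // Skipped: nothing to run or report.
    }
    return [conn.getDB("admin").runCommand(t.command)];
}
```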
Diffstat (limited to 'jstests')
-rw-r--r-- | jstests/auth/lib/commands_lib.js                      |  12
-rw-r--r-- | jstests/noPassthrough/aggregation_backup_cursor.js    | 125
-rw-r--r-- | jstests/noPassthrough/backup_restore_backup_cursor.js |  29
3 files changed, 12 insertions, 154 deletions
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js
index 0a485990027..4f402dad1cd 100644
--- a/jstests/auth/lib/commands_lib.js
+++ b/jstests/auth/lib/commands_lib.js
@@ -84,6 +84,11 @@ authorization failure.
 Set of options to be passed to your 'command' function. Can be used to send different versions of
 the command depending on the testcase being run.
 
+10) skipTest
+
+Add "skipTest: <function>" to not run the test for more complex reasons. The function is passed
+one argument, the connection object.
+
 */
 
 // constants
@@ -5945,6 +5950,10 @@ var authCommandsLib = {
           testname: "aggregate_$backupCursor",
           command: {aggregate: 1, cursor: {}, pipeline: [{$backupCursor: {}}]},
           skipSharded: true,
+          // Only enterprise knows of this aggregation stage.
+          skipTest:
+              (conn) =>
+                  !conn.getDB("admin").runCommand({buildInfo: 1}).modules.includes("enterprise"),
           testcases: [{
               runOnDb: adminDbName,
               roles: roles_hostManager,
@@ -5988,6 +5997,9 @@ var authCommandsLib = {
     runOneTest: function(conn, t, impls) {
         jsTest.log("Running test: " + t.testname);
 
+        if (t.skipTest && t.skipTest(conn)) {
+            return [];
+        }
         // some tests shouldn't run in a sharded environment
         if (t.skipSharded && this.isMongos(conn)) {
             return [];
diff --git a/jstests/noPassthrough/aggregation_backup_cursor.js b/jstests/noPassthrough/aggregation_backup_cursor.js
deleted file mode 100644
index 0e6e7551b09..00000000000
--- a/jstests/noPassthrough/aggregation_backup_cursor.js
+++ /dev/null
@@ -1,125 +0,0 @@
-/**
- * Test the basic operation of a `$backupCursor` aggregation stage.
- *
- * @tags: [requires_persistence, requires_wiredtiger]
- */
-(function() {
-    "use strict";
-
-    let conn = MongoRunner.runMongod();
-    let db = conn.getDB("test");
-
-    let backupCursor = db.aggregate([{$backupCursor: {}}]);
-    // There should be about 14 files in total, but being precise would be unnecessarily fragile.
-    assert.gt(backupCursor.itcount(), 6);
-    assert(!backupCursor.isExhausted());
-    backupCursor.close();
-
-    // Open a backup cursor. Use a small batch size to ensure a getMore retrieves additional
-    // results.
-    let response = assert.commandWorked(
-        db.runCommand({aggregate: 1, pipeline: [{$backupCursor: {}}], cursor: {batchSize: 2}}));
-    assert.eq("test.$cmd.aggregate", response.cursor.ns);
-    assert.eq(2, response.cursor.firstBatch.length);
-    let cursorId = response.cursor.id;
-
-    response =
-        assert.commandWorked(db.runCommand({getMore: cursorId, collection: "$cmd.aggregate"}));
-    // Sanity check the results.
-    assert.neq(0, response.cursor.id);
-    assert.gt(response.cursor.nextBatch.length, 4);
-
-    // The $backupCursor is a tailable cursor. Even though we've exhausted the results, running a
-    // getMore should succeed.
-    response =
-        assert.commandWorked(db.runCommand({getMore: cursorId, collection: "$cmd.aggregate"}));
-    assert.neq(0, response.cursor.id);
-    assert.eq(0, response.cursor.nextBatch.length);
-
-    // Because the backup cursor is still open, trying to open a second cursor should fail.
-    assert.commandFailed(
-        db.runCommand({aggregate: 1, pipeline: [{$backupCursor: {}}], cursor: {}}));
-
-    // Kill the backup cursor.
-    response =
-        assert.commandWorked(db.runCommand({killCursors: "$cmd.aggregate", cursors: [cursorId]}));
-    assert.eq(1, response.cursorsKilled.length);
-    assert.eq(cursorId, response.cursorsKilled[0]);
-
-    // Open another backup cursor with a batch size of 0. The underlying backup cursor should be
-    // created.
-    response = assert.commandWorked(
-        db.runCommand({aggregate: 1, pipeline: [{$backupCursor: {}}], cursor: {batchSize: 0}}));
-    assert.neq(0, response.cursor.id);
-    assert.eq(0, response.cursor.firstBatch.length);
-
-    // Attempt to open a second backup cursor to demonstrate the original underlying cursor was
-    // opened.
-    assert.commandFailed(
-        db.runCommand({aggregate: 1, pipeline: [{$backupCursor: {}}], cursor: {}}));
-
-    // Close the cursor to reset state.
-    response = assert.commandWorked(
-        db.runCommand({killCursors: "$cmd.aggregate", cursors: [response.cursor.id]}));
-    assert.eq(1, response.cursorsKilled.length);
-
-    // Set a failpoint which will generate a uassert after the backup cursor is open.
-    assert.commandWorked(
-        db.adminCommand({configureFailPoint: "backupCursorErrorAfterOpen", mode: "alwaysOn"}));
-    assert.commandFailed(
-        db.runCommand({aggregate: 1, pipeline: [{$backupCursor: {}}], cursor: {}}));
-    assert.commandWorked(
-        db.adminCommand({configureFailPoint: "backupCursorErrorAfterOpen", mode: "off"}));
-
-    // Demonstrate query cursor timeouts will kill backup cursors, closing the underlying resources.
-    assert.commandWorked(
-        db.runCommand({aggregate: 1, pipeline: [{$backupCursor: {}}], cursor: {}}));
-    assert.commandWorked(db.adminCommand({setParameter: 1, cursorTimeoutMillis: 1}));
-    assert.soon(() => {
-        return db.runCommand({aggregate: 1, pipeline: [{$backupCursor: {}}], cursor: {}})['ok'] ==
-            1;
-    });
-
-    MongoRunner.stopMongod(conn);
-
-    if (jsTest.options().noJournal) {
-        print("This test is being run with nojournal. Skipping ReplicaSet part.");
-        return;
-    }
-
-    // Run a replica set to verify the contents of the `metadata` document.
-    let rst = new ReplSetTest({name: "aggBackupCursor", nodes: 1});
-    rst.startSet();
-    rst.initiate();
-    db = rst.getPrimary().getDB("test");
-
-    backupCursor = db.aggregate([{$backupCursor: {}}]);
-    // The metadata document should be returned first.
-    let metadataDocEnvelope = backupCursor.next();
-    assert(metadataDocEnvelope.hasOwnProperty("metadata"));
-
-    let metadataDoc = metadataDocEnvelope["metadata"];
-    let oplogStart = metadataDoc["oplogStart"];
-    let oplogEnd = metadataDoc["oplogEnd"];
-    let checkpointTimestamp = metadataDoc["checkpointTimestamp"];
-
-    // When replication is run, there will always be an oplog with a start/end.
-    assert(oplogStart);
-    assert(oplogEnd);
-    // The first opTime will likely have term -1 (repl initiation).
-    assert.gte(oplogStart["t"], -1);
-    // The last opTime's term must be a positive value larger than the first.
-    assert.gte(oplogEnd["t"], oplogStart["t"]);
-    assert.gte(oplogEnd["t"], 1);
-    // The timestamp of the last optime must be larger than the first.
-    assert.gte(oplogEnd["ts"], oplogStart["ts"]);
-
-    // The checkpoint timestamp may or may not exist. If it exists, it must be between the start
-    // and end.
-    if (checkpointTimestamp != null) {
-        assert.gte(checkpointTimestamp, oplogStart["ts"]);
-        assert.gte(oplogEnd["ts"], checkpointTimestamp);
-    }
-
-    rst.stopSet();
-})();
diff --git a/jstests/noPassthrough/backup_restore_backup_cursor.js b/jstests/noPassthrough/backup_restore_backup_cursor.js
deleted file mode 100644
index de9fc8df88b..00000000000
--- a/jstests/noPassthrough/backup_restore_backup_cursor.js
+++ /dev/null
@@ -1,29 +0,0 @@
-/**
- * Test the backup/restore process:
- * - 3 node replica set
- * - Mongo CRUD client
- * - Mongo FSM client
- * - open a $backupCursor on the Secondary
- * - cp files returned by the $backupCursor
- * - close the $backupCursor
- * - Start mongod as hidden secondary
- * - Wait until new hidden node becomes secondary
- *
- * Some methods for backup used in this test checkpoint the files in the dbpath. This technique will
- * not work for ephemeral storage engines, as they do not store any data in the dbpath.
- * @tags: [requires_persistence, requires_replication]
- */
-
-load("jstests/noPassthrough/libs/backup_restore.js");
-
-(function() {
-    "use strict";
-
-    if (_isWindows()) {
-        return;
-    }
-
-    // Run the fsyncLock test. Will return before testing for any engine that doesn't
-    // support fsyncLock
-    new BackupRestoreTest({backup: 'backupCursor'}).run();
-}());
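For reference, the basic interaction the removed aggregation_backup_cursor.js test exercised looks roughly like the sketch below (an enterprise mongod and the shell's MongoRunner test helper are assumed; after this change, community servers reject the stage):

```javascript
// Sketch of the core $backupCursor flow covered by the removed test.
const conn = MongoRunner.runMongod();
const db = conn.getDB("test");

// Opening the cursor pins the backup; only one may be open at a time.
const backupCursor = db.aggregate([{$backupCursor: {}}]);

// On a replica set member the first document is a metadata envelope with
// oplogStart/oplogEnd (and possibly checkpointTimestamp); the remaining
// documents name the files to copy for the backup.
while (backupCursor.hasNext()) {
    printjson(backupCursor.next());
}

// Close the cursor to release the underlying backup resources.
backupCursor.close();
MongoRunner.stopMongod(conn);
```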