author    David Hows <howsdav@gmail.com>  2015-09-14 17:21:30 +1000
committer David Hows <howsdav@gmail.com>  2015-09-14 17:21:30 +1000
commit    3325cd25cd45ca7bdf856f62a740497ba7eedf36 (patch)
tree      87d4f4e80c3a787cd83b002d9273df7d89490849
parent    e1d7404eae3a2c33fa4f15105b931f17d0c3d502 (diff)
download  mongo-3325cd25cd45ca7bdf856f62a740497ba7eedf36.tar.gz
Revert "SERVER-18899 - Add beginBackup and endBackup to storage API"
This reverts commit e1d7404eae3a2c33fa4f15105b931f17d0c3d502.
-rw-r--r--  jstests/core/fsync.js                                      159
-rw-r--r--  jstests/noPassthrough/backup_restore.js                     65
-rw-r--r--  src/mongo/db/commands/fsync.cpp                             20
-rw-r--r--  src/mongo/db/storage/kv/kv_engine.h                         15
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine.cpp               17
-rw-r--r--  src/mongo/db/storage/kv/kv_storage_engine.h                  7
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp              8
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_engine.h                3
-rw-r--r--  src/mongo/db/storage/storage_engine.h                       35
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp   19
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h      6
11 files changed, 81 insertions(+), 273 deletions(-)
diff --git a/jstests/core/fsync.js b/jstests/core/fsync.js
index b3cc0a5992e..14c5d323591 100644
--- a/jstests/core/fsync.js
+++ b/jstests/core/fsync.js
@@ -1,110 +1,49 @@
-/**
- * Test fsyncLock functionality
- * - Skip for all storage engines which don't support fsync
- * - Run the fsyncLock command, confirm we lock correctly with currentOp
- * - Confirm that backup cursors are created and released in WT
- * - Confirm that we cannot insert during fsyncLock
- * - Confirm that writes can progress after fsyncUnlock
- * - Confirm that the command can be run repeatedly without breaking things
- * - Confirm that the pseudo commands and eval can perform fsyncLock/Unlock
- */
-(function() {
- "use strict";
-
- // Start with a clean DB
- var fsyncLockDB = db.getSisterDB('fsyncLockTestDB');
- fsyncLockDB.dropDatabase();
-
- // Tests the db.fsyncLock/fsyncUnlock features
- var storageEngine = db.serverStatus().storageEngine.name;
-
- // As of SERVER-18899 fsyncLock/fsyncUnlock will error when called on a storage engine
- // that does not support the begin/end backup commands, so probe for support by
- // attempting the command before running the rest of the test.
- var supportsFsync = db.fsyncLock();
-
- if ( supportsFsync.ok != 1) {
- jsTestLog("Skipping test for " + storageEngine + " as it does not support fsync");
- return;
- } else {
- db.fsyncUnlock();
- }
-
- var resFail = fsyncLockDB.runCommand({fsync:1, lock:1});
-
- // Start with a clean DB
- var fsyncLockDB = db.getSisterDB('fsyncLockTestDB');
- fsyncLockDB.dropDatabase();
-
- // Test it doesn't work unless invoked against the admin DB
- var resFail = fsyncLockDB.runCommand({fsync:1, lock:1});
- assert(!resFail.ok, "fsyncLock command succeeded against DB other than admin.");
-
- // Uses admin automatically and locks the server for writes
- var fsyncLockRes = db.fsyncLock();
- assert(fsyncLockRes.ok, "fsyncLock command failed against admin DB");
- assert(db.currentOp().fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server");
-
- // Make sure writes are blocked. Spawn a write operation in a separate shell and make sure it
- // is blocked. There is really no way to do that currently, so just check that the write didn't
- // go through.
- var writeOpHandle = startParallelShell("db.getSisterDB('fsyncLockTestDB').coll.insert({x:1});");
- sleep(1000);
-
- // Make sure reads can still run even though there is a pending write and also that the write
- // didn't get through
- assert.eq(0, fsyncLockDB.coll.count({}));
-
- // Unlock and make sure the insert succeeded
- var fsyncUnlockRes = db.fsyncUnlock();
- assert(fsyncUnlockRes.ok, "fsyncUnlock command failed");
- assert(db.currentOp().fsyncLock == null, "fsyncLock in db.currentOp should be null after fsyncUnlock");
-
- // Make sure the db is unlocked and the initial write made it through.
- writeOpHandle();
- fsyncLockDB.coll.insert({x:2});
-
- assert.eq(2, fsyncLockDB.coll.count({}));
-
- // Issue the fsyncLock and fsyncUnlock a second time, to ensure that we can
- // run this command repeatedly with no problems. Additionally check that the WT
- // backup session and cursor are closed when we unlock.
- var beforeSS = db.serverStatus();
- var fsyncLockRes = db.fsyncLock();
- assert(fsyncLockRes.ok, "Second execution of fsyncLock command failed");
-
- // Under WiredTiger we will open a backup session and cursor. Confirm that these are opened.
- if (storageEngine == "wiredTiger") {
- var afterSS = db.serverStatus().wiredTiger.session
- beforeSS = beforeSS.wiredTiger.session
- assert.gt(afterSS["open session count"], beforeSS["open session count"],
- "WiredTiger did not open a backup session as expected");
- assert.gt(afterSS["open cursor count"], beforeSS["open cursor count"],
- "WiredTiger did not open a backup cursor as expected");
- }
-
- var fsyncUnlockRes = db.fsyncUnlock();
- assert(fsyncUnlockRes.ok, "Second execution of fsyncUnlock command failed");
-
- if (storageEngine == "wiredTiger") {
- var finalSS = db.serverStatus().wiredTiger.session;
- assert.eq(beforeSS["open session count"], finalSS["open session count"],
- "WiredTiger did not close its backup session as expected");
- assert.eq(beforeSS["open cursor count"], finalSS["open cursor count"],
- "WiredTiger did not close its backup cursor as expected after ");
- }
-
- // Ensure eval is not allowed to invoke fsyncLock
- assert(!db.eval('db.fsyncLock()').ok, "eval('db.fsyncLock()') should fail.");
-
- // Check that the fsyncUnlock pseudo-command (a lookup on $cmd.sys.unlock)
- // still has the same effect as a legitimate 'fsyncUnlock' command
- // TODO: remove this in the release following MongoDB 3.2 when pseudo-commands
- // are removed
- var fsyncCommandRes = db.fsyncLock();
- assert(fsyncCommandRes.ok, "fsyncLock command failed against admin DB");
- assert(db.currentOp().fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server");
- var fsyncPseudoCommandRes = db.getSiblingDB("admin").$cmd.sys.unlock.findOne();
- assert(fsyncPseudoCommandRes.ok, "fsyncUnlock pseudo-command failed");
- assert(db.currentOp().fsyncLock == null, "fsyncLock in db.currentOp should be null after fsyncUnlock");
-}());
+// Tests the db.fsyncLock/fsyncUnlock features
+
+// Start with a clean DB
+var fsyncLockDB = db.getSisterDB('fsyncLockTestDB');
+fsyncLockDB.dropDatabase();
+
+// Test it doesn't work unless invoked against the admin DB
+var resFail = fsyncLockDB.runCommand({fsync:1, lock:1});
+assert(!resFail.ok, "fsyncLock command succeeded against DB other than admin.");
+
+// Uses admin automatically and locks the server for writes
+var fsyncLockRes = db.fsyncLock();
+assert(fsyncLockRes.ok, "fsyncLock command failed against admin DB");
+assert(db.currentOp().fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server");
+
+// Make sure writes are blocked. Spawn a write operation in a separate shell and make sure it
+// is blocked. There is really no way to do that currently, so just check that the write didn't
+// go through.
+var writeOpHandle = startParallelShell("db.getSisterDB('fsyncLockTestDB').coll.insert({x:1});");
+sleep(1000);
+
+// Make sure reads can still run even though there is a pending write and also that the write
+// didn't get through
+assert.eq(0, fsyncLockDB.coll.count({}));
+
+// Unlock and make sure the insert succeeded
+var fsyncUnlockRes = db.fsyncUnlock();
+assert(fsyncUnlockRes.ok, "fsyncUnlock command failed");
+assert(db.currentOp().fsyncLock == null, "fsyncLock in db.currentOp should be null after fsyncUnlock");
+
+// Make sure the db is unlocked and the initial write made it through.
+writeOpHandle();
+fsyncLockDB.coll.insert({x:2});
+
+assert.eq(2, fsyncLockDB.coll.count({}));
+
+// Ensure eval is not allowed to invoke fsyncLock
+assert(!db.eval('db.fsyncLock()').ok, "eval('db.fsyncLock()') should fail.");
+
+// Check that the fsyncUnlock pseudo-command (a lookup on $cmd.sys.unlock)
+// still has the same effect as a legitimate 'fsyncUnlock' command
+// TODO: remove this in the release following MongoDB 3.2 when pseudo-commands
+// are removed
+var fsyncCommandRes = db.fsyncLock();
+assert(fsyncCommandRes.ok, "fsyncLock command failed against admin DB");
+assert(db.currentOp().fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server");
+var fsyncPseudoCommandRes = db.getSiblingDB("admin").$cmd.sys.unlock.findOne();
+assert(fsyncPseudoCommandRes.ok, "fsyncUnlock pseudo-command failed");
+assert(db.currentOp().fsyncLock == null, "fsyncLock in db.currentOp should be null after fsyncUnlock");
diff --git a/jstests/noPassthrough/backup_restore.js b/jstests/noPassthrough/backup_restore.js
index f9af2162f63..416ebb6b4e7 100644
--- a/jstests/noPassthrough/backup_restore.js
+++ b/jstests/noPassthrough/backup_restore.js
@@ -283,51 +283,46 @@
}
// Main
-
- // Add storage engines which are to be skipped entirely to this array
- var noBackupTests = [ 'inMemoryExperiment' ];
-
- // Add storage engines that do not support fsyncLock to this array
- var noFsyncTests = [ 'rocksdb' ];
-
- // Add storage engines that do not support rolling backup test to this array
- var noRollingTests = [ 'inMemoryExperiment', 'rocksdb', 'mmapv1' ]
-
- // Grab the storage engine, default is wiredTiger
var storageEngine = jsTest.options().storageEngine || "wiredTiger";
- if (noBackupTests.indexOf(storageEngine) != -1 ) {
+ if (storageEngine === "wiredTiger") {
+ // fsyncLock does not work for wiredTiger (SERVER-18899)
+ // runTest({
+ // name: storageEngine + ' fsyncLock/fsyncUnlock',
+ // storageEngine: storageEngine,
+ // backup: 'fsyncLock',
+ // clientTime: 30000
+ // });
+ runTest({
+ name: storageEngine + ' stop/start',
+ storageEngine: storageEngine,
+ backup: 'stopStart',
+ clientTime: 30000
+ });
+ // if rsync is not available on the host, then this test is skipped
+ if (!runProgram('bash', '-c', 'which rsync')) {
+ runTest({
+ name: storageEngine + ' rolling',
+ storageEngine: storageEngine,
+ backup: 'rolling',
+ clientTime: 30000
+ });
+ } else {
+ jsTestLog("Skipping test for " + storageEngine + ' rolling');
+ }
+ } else if (storageEngine === 'inMemoryExperiment') {
jsTestLog("Skipping test for " + storageEngine);
- return;
- }
-
- // Skip fsyncLock/Unlock on rocksdb
- if (noFsyncTests.indexOf(storageEngine) != -1 ) {
+ } else {
runTest({
name: storageEngine + ' fsyncLock/fsyncUnlock',
storageEngine: storageEngine,
backup: 'fsyncLock'
});
- } else {
- jsTestLog("Skipping test for " + storageEngine + ' fsyncLock/fsyncUnlock');
- }
- runTest({
- name: storageEngine + ' stop/start',
- storageEngine: storageEngine,
- backup: 'stopStart',
- clientTime: 30000
- });
- // if rsync is not available on the host, then this test is skipped
- if (noRollingTests.indexOf(storageEngine) != -1 &&
- !runProgram('bash', '-c', 'which rsync')) {
runTest({
- name: storageEngine + ' rolling',
+ name: storageEngine + ' stop/start',
storageEngine: storageEngine,
- backup: 'rolling',
- clientTime: 30000
+ backup: 'stopStart'
});
- } else {
- jsTestLog("Skipping test for " + storageEngine + ' rolling');
}
-}());
+}());
\ No newline at end of file
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index 16fb2d9541b..348637d062a 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -39,7 +39,6 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/audit.h"
-#include "mongo/db/db.h"
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_manager.h"
@@ -48,7 +47,6 @@
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/concurrency/d_concurrency.h"
-#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/operation_context_impl.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/mmap_v1/dur.h"
@@ -249,10 +247,11 @@ void FSyncLockThread::doRealWork() {
fsyncCmd.locked = false;
return;
}
+
txn.lockState()->downgradeGlobalXtoSForMMAPV1();
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
try {
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
storageEngine->flushAllFiles(true);
} catch (std::exception& e) {
error() << "error doing flushAll: " << e.what() << endl;
@@ -261,18 +260,6 @@ void FSyncLockThread::doRealWork() {
fsyncCmd.locked = false;
return;
}
- try {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- uassertStatusOK(storageEngine->beginBackup(&txn));
- }
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(&txn, "beginBackup", "global");
- } catch (const DBException& e) {
- error() << "storage engine unable to begin backup : " << e.toString() << endl;
- fsyncCmd.err = e.toString();
- fsyncCmd._threadSync.notify_one();
- fsyncCmd.locked = false;
- return;
- }
invariant(!fsyncCmd.locked);
fsyncCmd.locked = true;
@@ -282,9 +269,6 @@ void FSyncLockThread::doRealWork() {
while (!fsyncCmd.pendingUnlock) {
fsyncCmd._unlockSync.wait(fsyncCmd.m);
}
-
- storageEngine->endBackup(&txn);
-
fsyncCmd.pendingUnlock = false;
fsyncCmd.locked = false;
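
The hunk above removes the MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END pair that guarded beginBackup. As a rough illustration of what that macro pair does, here is a minimal stand-alone sketch of the retry-on-write-conflict pattern; WriteConflictException and retryOnWriteConflict are illustrative names, not the MongoDB definitions.

```cpp
// Minimal sketch of the retry pattern behind MONGO_WRITE_CONFLICT_RETRY_LOOP.
// WriteConflictException and retryOnWriteConflict are illustrative stand-ins.
#include <exception>

struct WriteConflictException : std::exception {};

template <typename Op>
void retryOnWriteConflict(Op&& op, int maxAttempts = 10) {
    for (int attempt = 0; attempt < maxAttempts; ++attempt) {
        try {
            op();    // e.g. uassertStatusOK(storageEngine->beginBackup(&txn))
            return;  // success: stop retrying
        } catch (const WriteConflictException&) {
            // A conflicting writer interleaved; loop and try the op again.
            // The real macro additionally logs the attempt and backs off.
        }
    }
    throw WriteConflictException();  // attempts exhausted; surface the conflict
}
```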
diff --git a/src/mongo/db/storage/kv/kv_engine.h b/src/mongo/db/storage/kv/kv_engine.h
index db481341bf0..056a8921b0c 100644
--- a/src/mongo/db/storage/kv/kv_engine.h
+++ b/src/mongo/db/storage/kv/kv_engine.h
@@ -94,21 +94,6 @@ public:
return 0;
}
- /**
- * See StorageEngine::beginBackup for details
- */
- virtual Status beginBackup(OperationContext* txn) {
- return Status(ErrorCodes::CommandNotSupported,
- "The current storage engine doesn't support backup mode");
- }
-
- /**
- * See StorageEngine::endBackup for details
- */
- virtual void endBackup(OperationContext* txn) {
- MONGO_UNREACHABLE;
- }
-
virtual bool isDurable() const = 0;
/**
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.cpp b/src/mongo/db/storage/kv/kv_storage_engine.cpp
index 7fd66424256..e296b9d9963 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.cpp
+++ b/src/mongo/db/storage/kv/kv_storage_engine.cpp
@@ -247,23 +247,6 @@ int KVStorageEngine::flushAllFiles(bool sync) {
return _engine->flushAllFiles(sync);
}
-Status KVStorageEngine::beginBackup(OperationContext* txn) {
- // We should not proceed if we are already in backup mode
- if (_inBackupMode)
- return Status(ErrorCodes::BadValue, "Already in Backup Mode");
- Status status = _engine->beginBackup(txn);
- if (status.isOK())
- _inBackupMode = true;
- return status;
-}
-
-void KVStorageEngine::endBackup(OperationContext* txn) {
- // We should never reach here if we aren't already in backup mode
- invariant(_inBackupMode);
- _engine->endBackup(txn);
- _inBackupMode = false;
-}
-
bool KVStorageEngine::isDurable() const {
return _engine->isDurable();
}
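
The deleted KVStorageEngine code tracks backup state with a bare _inBackupMode flag and relies on callers to pair beginBackup with endBackup by hand. A caller-side RAII wrapper would make that pairing automatic; the sketch below is hypothetical (BackupGuard is not a MongoDB class) and assumes the StorageEngine and OperationContext interfaces shown elsewhere in this patch.

```cpp
// Hypothetical RAII guard: once beginBackup succeeds, endBackup is guaranteed
// to run on every exit path, including exceptions and early returns.
class BackupGuard {
public:
    BackupGuard(StorageEngine* engine, OperationContext* txn)
        : _engine(engine), _txn(txn) {
        uassertStatusOK(_engine->beginBackup(_txn));  // throws on failure
    }
    ~BackupGuard() {
        _engine->endBackup(_txn);  // always leave backup mode
    }
    BackupGuard(const BackupGuard&) = delete;
    BackupGuard& operator=(const BackupGuard&) = delete;

private:
    StorageEngine* _engine;
    OperationContext* _txn;
};
```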
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.h b/src/mongo/db/storage/kv/kv_storage_engine.h
index e172bc72fab..6c45d208d01 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.h
+++ b/src/mongo/db/storage/kv/kv_storage_engine.h
@@ -80,10 +80,6 @@ public:
virtual int flushAllFiles(bool sync);
- virtual Status beginBackup(OperationContext* txn);
-
- virtual void endBackup(OperationContext* txn);
-
virtual bool isDurable() const;
virtual Status repairRecordStore(OperationContext* txn, const std::string& ns);
@@ -124,8 +120,5 @@ private:
typedef std::map<std::string, KVDatabaseCatalogEntry*> DBMap;
DBMap _dbs;
mutable stdx::mutex _dbsLock;
-
- // Flag variable that states if the storage engine is in backup mode.
- bool _inBackupMode = false;
};
}
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
index cc8c0640f9c..b1fd028a1d5 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
@@ -323,14 +323,6 @@ int MMAPV1Engine::flushAllFiles(bool sync) {
return MongoFile::flushAll(sync);
}
-Status MMAPV1Engine::beginBackup(OperationContext* txn) {
- return Status::OK();
-}
-
-void MMAPV1Engine::endBackup(OperationContext* txn) {
- return;
-}
-
bool MMAPV1Engine::isDurable() const {
return getDur().isDurable();
}
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
index ad672f7d3de..25c38500831 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
@@ -49,10 +49,7 @@ public:
RecoveryUnit* newRecoveryUnit();
void listDatabases(std::vector<std::string>* out) const;
-
int flushAllFiles(bool sync);
- Status beginBackup(OperationContext* txn);
- void endBackup(OperationContext* txn);
DatabaseCatalogEntry* getDatabaseCatalogEntry(OperationContext* opCtx, StringData db);
diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h
index 1cedafc2ba8..3eac6b32311 100644
--- a/src/mongo/db/storage/storage_engine.h
+++ b/src/mongo/db/storage/storage_engine.h
@@ -192,41 +192,6 @@ public:
virtual int flushAllFiles(bool sync) = 0;
/**
- * Transitions the storage engine into backup mode.
- *
- * During backup mode the storage engine must stabilize its on-disk files, and avoid
- * any internal processing that may involve file I/O, such as online compaction, so
- * a filesystem level backup may be performed.
- *
- * Storage engines that do not support this feature should use the default implementation.
- * Storage engines that implement this must also implement endBackup().
- *
- * For storage engines that implement beginBackup, the _inBackupMode variable is provided
- * to avoid multiple instances entering/leaving backup mode concurrently.
- *
- * If this function returns an OK status, MongoDB can call endBackup to signal the storage
- * engine that filesystem writes may continue. This function should return a non-OK status if
- * filesystem changes cannot be stopped to allow for online backup, or if the call should
- * simply be retried. This function may throw a WriteConflictException, which should
- * trigger a retry by the caller. All other exceptions should be treated as errors.
- */
- virtual Status beginBackup(OperationContext* txn) {
- return Status(ErrorCodes::CommandNotSupported,
- "The current storage engine doesn't support backup mode");
- }
-
- /**
- * Transitions the storage engine out of backup mode.
- *
- * Storage engines that do not support this feature should use the default implementation.
- *
- * Storage engines implementing this feature should fassert when unable to leave backup mode.
- */
- virtual void endBackup(OperationContext* txn) {
- return;
- }
-
- /**
* Recover as much data as possible from a potentially corrupt RecordStore.
* This only recovers the record data, not indexes or anything else.
*
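
The doc comment deleted above defines the backup-mode contract: stabilize on-disk files, refuse nested entry, and leave backup mode only from within it. A toy engine obeying that contract could look like the following sketch; all names are illustrative and none of this is MongoDB source.

```cpp
// Toy engine honoring the deleted contract: beginBackup refuses nested entry,
// endBackup asserts balanced use.
#include <cassert>
#include <string>

struct ToyStatus {
    bool ok;
    std::string reason;
};

class ToyEngine {
public:
    ToyStatus beginBackup() {
        if (_inBackupMode)
            return {false, "already in backup mode"};
        // A real engine would quiesce file I/O here (checkpoint, pause
        // compaction) so a filesystem-level copy sees stable files.
        _inBackupMode = true;
        return {true, ""};
    }

    void endBackup() {
        assert(_inBackupMode);  // contract: only callable while in backup mode
        _inBackupMode = false;
    }

private:
    bool _inBackupMode = false;
};
```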
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 1d3a6b30dc5..c6200c63de4 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -250,25 +250,6 @@ int WiredTigerKVEngine::flushAllFiles(bool sync) {
return 1;
}
-Status WiredTigerKVEngine::beginBackup(OperationContext* txn) {
- invariant(!_backupSession);
-
- // This cursor will be freed by the backupSession being closed as the session is uncached
- auto session = stdx::make_unique<WiredTigerSession>(_conn);
- WT_CURSOR* c = NULL;
- WT_SESSION* s = session->getSession();
- int ret = WT_OP_CHECK(s->open_cursor(s, "backup:", NULL, NULL, &c));
- if (ret != 0) {
- return wtRCToStatus(ret);
- }
- _backupSession = std::move(session);
- return Status::OK();
-}
-
-void WiredTigerKVEngine::endBackup(OperationContext* txn) {
- _backupSession.reset();
-}
-
void WiredTigerKVEngine::syncSizeInfo(bool sync) const {
if (!_sizeStorer)
return;
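
The WiredTigerKVEngine::beginBackup being removed opens a "backup:" cursor and holds it until endBackup. Under WiredTiger's documented backup API, each key yielded by that cursor names a file that is safe to copy while the cursor stays open; the following is a minimal sketch of that usage (error handling elided).

```cpp
// Sketch of WiredTiger's documented backup-cursor usage: while a "backup:"
// cursor is open, each key it returns names a file that may be copied safely.
// conn is assumed to be an already-opened WT_CONNECTION*.
#include <stdio.h>
#include <wiredtiger.h>

void listBackupFiles(WT_CONNECTION* conn) {
    WT_SESSION* session = NULL;
    WT_CURSOR* cursor = NULL;
    conn->open_session(conn, NULL, NULL, &session);
    session->open_cursor(session, "backup:", NULL, NULL, &cursor);

    const char* filename = NULL;
    while (cursor->next(cursor) == 0) {
        cursor->get_key(cursor, &filename);
        printf("copy: %s\n", filename);  // e.g. hand the path to cp/rsync
    }

    cursor->close(cursor);          // closing the cursor ends the backup window
    session->close(session, NULL);
}
```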
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index e2c3093e03f..a428905461a 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -96,10 +96,6 @@ public:
virtual int flushAllFiles(bool sync);
- virtual Status beginBackup(OperationContext* txn);
-
- virtual void endBackup(OperationContext* txn);
-
virtual int64_t getIdentSize(OperationContext* opCtx, StringData ident);
virtual Status repairIdent(OperationContext* opCtx, StringData ident);
@@ -161,7 +157,5 @@ private:
mutable ElapsedTracker _sizeStorerSyncTracker;
mutable Date_t _previousCheckedDropsQueued;
-
- std::unique_ptr<WiredTigerSession> _backupSession;
};
}