diff options
author:    Dianna Hohensee <dianna.hohensee@mongodb.com>  2020-11-05 17:20:31 -0500
committer: Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-11-12 17:54:28 +0000
commit:    ccd024d9fb5c0587e33a3a3321e7f9a0430d0190 (patch)
tree:      e4035ba5b8f90bc300b7645d8d1ae4cf3df3c401
parent:    c1ad4eab08f4679ca5071d76cfa845a9652881fe (diff)
download:  mongo-ccd024d9fb5c0587e33a3a3321e7f9a0430d0190.tar.gz
SERVER-52562 Enable two-phase drop for standalone mode; allow Lock-Free reads for standalone mode.
-rw-r--r--  jstests/disk/directoryperdb.js                                     | 22
-rw-r--r--  jstests/noPassthrough/directoryperdb.js                            | 50
-rw-r--r--  jstests/noPassthrough/disable_lock_free_reads_server_parameter.js  | 42
-rw-r--r--  src/mongo/db/mongod_options.cpp                                    | 10
-rw-r--r--  src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp           |  2
-rw-r--r--  src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h             |  3
-rw-r--r--  src/mongo/db/storage/storage_engine_impl.cpp                       | 10
-rw-r--r--  src/mongo/db/storage/storage_engine_impl.h                         |  6
-rw-r--r--  src/mongo/db/storage/storage_util.cpp                              | 14
9 files changed, 96 insertions, 63 deletions
diff --git a/jstests/disk/directoryperdb.js b/jstests/disk/directoryperdb.js index 0d4c23fe3e3..49c2c89bb0f 100644 --- a/jstests/disk/directoryperdb.js +++ b/jstests/disk/directoryperdb.js @@ -15,6 +15,27 @@ assertDocumentCount = function(db, count) { }; /** + * Wait for the sub-directory for database 'dbName' in the MongoDB file directory 'dbDirPath' to be + * deleted. MongoDB does not always delete data immediately with a catalog change. + */ +const waitForDatabaseDirectoryRemoval = function(dbName, dbDirPath) { + assert.soon( + function() { + const files = listFiles(dbDirPath).filter(function(path) { + return path.name.endsWith(dbName); + }); + if (files.length == 0) { + return true; + } else { + return false; + } + }, + "dbpath contained '" + dbName + + "' directory when it should have been removed: " + tojson(listFiles(dbDirPath)), + 10 * 1000); // The periodic task to run data table cleanup runs once a second. +}; + +/** * Returns the current connection which gets restarted with wiredtiger. */ checkDBFilesInDBDirectory = function(conn, dbToCheck) { @@ -94,6 +115,7 @@ dbBase = m.getDB(baseName); // Drop a database created with directoryperdb. 
assert.commandWorked(dbBase.runCommand({dropDatabase: 1})); +waitForDatabaseDirectoryRemoval(baseName, dbpath); assertDocumentCount(dbBase, 0); m = checkDBDirectoryNonexistent(m, baseName); dbBase = m.getDB(baseName); diff --git a/jstests/noPassthrough/directoryperdb.js b/jstests/noPassthrough/directoryperdb.js index 417f844e103..5e1111f61cd 100644 --- a/jstests/noPassthrough/directoryperdb.js +++ b/jstests/noPassthrough/directoryperdb.js @@ -12,6 +12,7 @@ const baseDir = "jstests_directoryperdb"; const dbpath = MongoRunner.dataPath + baseDir + "/"; +const dbname = "foo"; const isDirectoryPerDBSupported = jsTest.options().storageEngine == "wiredTiger" || !jsTest.options().storageEngine; @@ -25,44 +26,51 @@ if (!isDirectoryPerDBSupported) { assert(m, 'storage engine with directoryperdb support failed to start up'); } -const getFooDir = function() { - return listFiles(dbpath).filter(function(path) { - return path.name.endsWith("/foo"); +const getDir = function(dbName, dbDirPath) { + return listFiles(dbDirPath).filter(function(path) { + return path.name.endsWith(dbName); }); }; -const checkFooDirExists = function() { - const files = getFooDir(); - assert.eq( - 1, - files.length, - "dbpath did not contain 'foo' directory when it should have: " + tojson(listFiles(dbpath))); +const checkDirExists = function(dbName, dbDirPath) { + const files = getDir(dbName, dbDirPath); + assert.eq(1, + files.length, + "dbpath did not contain '" + dbName + + "' directory when it should have: " + tojson(listFiles(dbDirPath))); assert.gt(listFiles(files[0].name).length, 0); }; -const checkFooDirRemoved = function() { - checkLog.containsJson(db.getMongo(), 4888200, {db: "foo"}); - const files = getFooDir(); - assert.eq(0, - files.length, - "dbpath contained 'foo' directory when it should have been removed: " + - tojson(listFiles(dbpath))); +const checkDirRemoved = function(dbName, dbDirPath) { + checkLog.containsJson(db.getMongo(), 4888200, {db: dbName}); + assert.soon( + function() { + 
const files = getDir(dbName, dbDirPath); + if (files.length == 0) { + return true; + } else { + return false; + } + }, + "dbpath contained '" + dbName + + "' directory when it should have been removed:" + tojson(listFiles(dbDirPath)), + 10 * 1000); // The periodic task to run data table cleanup runs once a second. }; -const db = m.getDB("foo"); +const db = m.getDB(dbname); assert.commandWorked(db.bar.insert({x: 1})); -checkFooDirExists(); +checkDirExists(dbname, dbpath); // Test that dropping the last collection in the database causes the database directory to be // removed. assert(db.bar.drop()); -checkFooDirRemoved(); +checkDirRemoved(dbname, dbpath); // Test that dropping the entire database causes the database directory to be removed. assert.commandWorked(db.bar.insert({x: 1})); -checkFooDirExists(); +checkDirExists(dbname, dbpath); assert.commandWorked(db.dropDatabase()); -checkFooDirRemoved(); +checkDirRemoved(dbname, dbpath); MongoRunner.stopMongod(m); diff --git a/jstests/noPassthrough/disable_lock_free_reads_server_parameter.js b/jstests/noPassthrough/disable_lock_free_reads_server_parameter.js index 05e9687324c..4733365a0f1 100644 --- a/jstests/noPassthrough/disable_lock_free_reads_server_parameter.js +++ b/jstests/noPassthrough/disable_lock_free_reads_server_parameter.js @@ -2,7 +2,6 @@ * Tests the 'disableLockFreeReads' startup setParameter. * * User set disableLockFreeReads will be overridden to true (disabled) if: - * - in standalone mode * - with enableMajorityReadConcern=false * Otherwise, the default for disableLockFreeReads is true. 
* @@ -19,13 +18,21 @@ const replSetName = 'disable_lock_free_reads_server_parameter'; -jsTest.log("Starting server with disableLockFreeReads=false in standalone mode: this should " + - "override the setting to true."); +jsTest.log("Starting server with disableLockFreeReads=false in standalone mode: this should turn " + + "on lock-free reads."); let conn = MongoRunner.runMongod({setParameter: "disableLockFreeReads=false"}); -assert.neq(conn, null); -checkLog.contains(conn, "disabling lock-free reads"); -checkLog.contains(conn, "Lock-free reads is disabled"); +assert(conn); +checkLog.containsJson(conn, 4788403); // Logging that lock-free reads is enabled. +MongoRunner.stopMongod(conn); + +jsTest.log("Starting server with disableLockFreeReads=true in standalone mode: this is the " + + "default and nothing should happen."); + +conn = MongoRunner.runMongod({setParameter: "disableLockFreeReads=true"}); +assert(conn); +assert(!checkLog.checkContainsOnce(conn, "disabling lock-free reads")); +checkLog.containsJson(conn, 4788402); // Logging that lock-free reads is disabled. MongoRunner.stopMongod(conn); jsTest.log("Starting server with disableLockFreeReads=false and enableMajorityReadConcern=false: " + @@ -36,34 +43,25 @@ conn = MongoRunner.runMongod({ enableMajorityReadConcern: false, setParameter: "disableLockFreeReads=false" }); -assert.neq(conn, null); -checkLog.contains(conn, "disabling lock-free reads"); -checkLog.contains(conn, "Lock-free reads is disabled"); -MongoRunner.stopMongod(conn); - -jsTest.log("Starting server in standalone mode with disableLockFreeReads=true: this is the " + - "default and nothing should happen."); - -conn = MongoRunner.runMongod({setParameter: "disableLockFreeReads=true"}); -assert.neq(conn, null); -assert(!checkLog.checkContainsOnce(conn, "disabling lock-free reads")); -checkLog.contains(conn, "Lock-free reads is disabled"); +assert(conn); +checkLog.containsJson(conn, 4788401); // Logging eMRCf disables lock-free reads. 
+checkLog.containsJson(conn, 4788402); // Logging that lock-free reads is disabled. MongoRunner.stopMongod(conn); jsTest.log("Starting server as a replica set member with disableLockFreeReads=false: this should " + "turn on lock-free reads."); conn = MongoRunner.runMongod({replSet: replSetName, setParameter: "disableLockFreeReads=false"}); -assert.neq(conn, null); -checkLog.contains(conn, "Lock-free reads is enabled"); +assert(conn); +checkLog.containsJson(conn, 4788403); // Logging that lock-free reads is enabled. MongoRunner.stopMongod(conn); jsTest.log("Starting server as a replica set member with disableLockFreeReads=true: this is the " + "default and nothing should happen."); conn = MongoRunner.runMongod({replSet: replSetName, setParameter: "disableLockFreeReads=true"}); -assert.neq(conn, null); +assert(conn); assert(!checkLog.checkContainsOnce(conn, "disabling lock-free reads")); -checkLog.contains(conn, "Lock-free reads is disabled"); +checkLog.containsJson(conn, 4788402); // Logging that lock-free reads is disabled. MongoRunner.stopMongod(conn); }()); diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp index c4a364fee1d..04d88cfbf5d 100644 --- a/src/mongo/db/mongod_options.cpp +++ b/src/mongo/db/mongod_options.cpp @@ -505,16 +505,6 @@ Status storeMongodOptions(const moe::Environment& params) { // storage engines will continue to perform regular capped collection handling for the oplog // collection, regardless of this parameter setting. storageGlobalParams.allowOplogTruncation = false; - - // Standalone mode does not currently support lock-free reads, so we disable it. If the user - // tries to explicitly enable it by specifying --disableLockFreeReads=false, log a warning - // so that the user knows the feature will not run in standalone mode. 
- if (!storageGlobalParams.disableLockFreeReads) { - LOGV2_WARNING( - 4788400, - "Lock-free reads is not supported in standalone mode: disabling lock-free reads."); - storageGlobalParams.disableLockFreeReads = true; - } } if (params.count("replication.enableMajorityReadConcern")) { diff --git a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp index e507ad57e06..f7ab3ec27c1 100644 --- a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp +++ b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp @@ -94,7 +94,7 @@ void KVDropPendingIdentReaper::dropIdentsOlderThan(OperationContext* opCtx, cons { stdx::lock_guard<Latch> lock(_mutex); for (auto it = _dropPendingIdents.cbegin(); - it != _dropPendingIdents.cend() && it->first < ts; + it != _dropPendingIdents.cend() && (it->first < ts || it->first == Timestamp::min()); ++it) { // This collection/index satisfies the 'ts' requirement to be safe to drop, but we must // also check that there are no active operations remaining that still retain a diff --git a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h index db9668fa507..320bbfda213 100644 --- a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h +++ b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h @@ -98,7 +98,8 @@ public: /** * Notifies this class that the storage engine has advanced its oldest timestamp. - * Drops all unreferenced drop-pending idents with drop timestamps before 'ts'. + * Drops all unreferenced drop-pending idents with drop timestamps before 'ts', as well as all + * unreferenced idents with Timestamp::min() drop timestamps (untimestamped on standalones). 
*/ void dropIdentsOlderThan(OperationContext* opCtx, const Timestamp& ts); diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp index 5c9f90bc56f..9a1ea732079 100644 --- a/src/mongo/db/storage/storage_engine_impl.cpp +++ b/src/mongo/db/storage/storage_engine_impl.cpp @@ -1048,13 +1048,9 @@ void StorageEngineImpl::checkpoint() { } void StorageEngineImpl::_onMinOfCheckpointAndOldestTimestampChanged(const Timestamp& timestamp) { - if (timestamp.isNull()) { - return; - } - // No drop-pending idents present if getEarliestDropTimestamp() returns boost::none. if (auto earliestDropTimestamp = _dropPendingIdentReaper.getEarliestDropTimestamp()) { - if (timestamp > *earliestDropTimestamp) { + if (timestamp >= *earliestDropTimestamp) { LOGV2(22260, "Removing drop-pending idents with drop timestamps before timestamp", "timestamp"_attr = timestamp); @@ -1145,6 +1141,10 @@ void StorageEngineImpl::TimestampMonitor::startup() { _currentTimestamps.minOfCheckpointAndOldest != minOfCheckpointAndOldest) { listener->notify(minOfCheckpointAndOldest); + } else if (stable == Timestamp::min()) { + // Special case notification of all listeners when writes do not have + // timestamps. This handles standalone mode. + listener->notify(Timestamp::min()); } } } diff --git a/src/mongo/db/storage/storage_engine_impl.h b/src/mongo/db/storage/storage_engine_impl.h index 9233a6120b3..912f31cef8a 100644 --- a/src/mongo/db/storage/storage_engine_impl.h +++ b/src/mongo/db/storage/storage_engine_impl.h @@ -194,6 +194,9 @@ public: * * The TimestampListener must be registered in the TimestampMonitor in order to be notified * of timestamp changes and react to changes for the duration it's part of the monitor. + * + * Listeners expected to run in standalone mode should handle Timestamp::min() notifications + * appropriately. 
*/ class TimestampListener { public: @@ -255,7 +258,8 @@ public: ~TimestampMonitor(); /** - * Monitor changes in timestamps and to notify the listeners on change. + * Monitor changes in timestamps and to notify the listeners on change. Notifies all + * listeners on Timestamp::min() in order to support standalone mode that is untimestamped. */ void startup(); diff --git a/src/mongo/db/storage/storage_util.cpp b/src/mongo/db/storage/storage_util.cpp index 684df92aa9a..1556b3469c4 100644 --- a/src/mongo/db/storage/storage_util.cpp +++ b/src/mongo/db/storage/storage_util.cpp @@ -84,7 +84,11 @@ void removeIndex(OperationContext* opCtx, nss, indexNameStr = indexName.toString(), ident](boost::optional<Timestamp> commitTimestamp) { - if (storageEngine->supportsPendingDrops() && commitTimestamp) { + if (storageEngine->supportsPendingDrops()) { + if (!commitTimestamp) { + // Standalone mode will not provide a timestamp. + commitTimestamp = Timestamp::min(); + } LOGV2(22206, "Deferring table drop for index '{index}' on collection " "'{namespace}{uuid}. Ident: '{ident}', commit timestamp: '{commitTimestamp}'", @@ -96,6 +100,8 @@ void removeIndex(OperationContext* opCtx, "commitTimestamp"_attr = commitTimestamp); storageEngine->addDropPendingIdent(*commitTimestamp, nss, ident); } else { + // Intentionally ignoring failure here. Since we've removed the metadata pointing to + // the collection, we should never see it again anyway. auto kvEngine = storageEngine->getEngine(); kvEngine->dropIdent(recoveryUnit, ident->getIdent()).ignore(); } @@ -155,7 +161,11 @@ Status dropCollection(OperationContext* opCtx, } }; - if (storageEngine->supportsPendingDrops() && commitTimestamp) { + if (storageEngine->supportsPendingDrops()) { + if (!commitTimestamp) { + // Standalone mode will not provide a timestamp. + commitTimestamp = Timestamp::min(); + } LOGV2(22214, "Deferring table drop for collection", logAttrs(nss), |