-rw-r--r--  jstests/noPassthrough/ensure_size_storer_flushes_periodically.js  74
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp           6
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h             7
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp       4
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h         3
-rw-r--r--  src/mongo/util/elapsed_tracker.h                                   13
6 files changed, 105 insertions, 2 deletions
diff --git a/jstests/noPassthrough/ensure_size_storer_flushes_periodically.js b/jstests/noPassthrough/ensure_size_storer_flushes_periodically.js
new file mode 100644
index 00000000000..65826603346
--- /dev/null
+++ b/jstests/noPassthrough/ensure_size_storer_flushes_periodically.js
@@ -0,0 +1,74 @@
+/**
+ * Test that the size storer flushes periodically, so that fast count data survives a server
+ * crash rather than being persisted only on clean shutdown.
+ *
+ * This test requires persistence to ensure data survives a restart.
+ * @tags: [requires_persistence]
+ */
+
+(function() {
+"use strict";
+
+// Set up the data files to be reused across server restarts.
+const dbpath = MongoRunner.dataPath + jsTestName();
+resetDbpath(dbpath);
+const mongodArgs = {
+ dbpath: dbpath,
+ noCleanData: true
+};
+
+let conn = MongoRunner.runMongod(mongodArgs);
+let testDB = conn.getDB(jsTestName());
+let testColl = testDB.test;
+
+// Set up the collection with some data. The fsync command will flush the size storer.
+assert.commandWorked(testColl.insert({y: "insertedDataInitialize"}));
+assert.commandWorked(testDB.adminCommand({fsync: 1}));
+
+// First, test that fast count data is lost. The size storer flushes every 100,000 operations or
+// 60 seconds. Inserting 10 documents should not take long, so the 60-second flush should not
+// trigger.
+for (let i = 0; i < 10; ++i) {
+ assert.commandWorked(testColl.insert({x: i}));
+}
+assert.eq(11, testColl.count());
+
+MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+conn = MongoRunner.runMongod(mongodArgs);
+assert.neq(conn, null, 'mongod was unable to restart after receiving a SIGKILL');
+testDB = conn.getDB(jsTestName());
+testColl = testDB.test;
+jsTestLog("Recovery after first crash. Fast count: " + testColl.count() +
+ ", number of docs: " + tojson(testColl.find({}).toArray().length));
+
+assert.eq(1,
+          testColl.count(),
+          "Fast count should be incorrect after a server crash. Fast count: " + testColl.count());
+
+// Second, ensure that fast count data saved after 60 seconds is present after a server crash.
+for (let i = 0; i < 100; ++i) {
+ assert.commandWorked(testColl.insert({x: i}));
+}
+assert.eq(testColl.count(), 101, "Fast count should be 100 + 1. Fast count: " + testColl.count());
+
+jsTestLog("Sleep > 60 seconds to wait for the size storer to be ready to flush.");
+sleep(65 * 1000);
+jsTestLog("Awake. Doing one more write to trigger a flush, if some internal op didn't already.");
+// The fast count should definitely be at least 101, but the fast count update to 102 for the
+// {y: "triggeringSizeStorerFlush"} write may or may not be persisted, depending on whether this
+// write triggered the flush or some internal write already did and reset the 60-second timer.
+assert.commandWorked(testColl.insert({y: "triggeringSizeStorerFlush"}));
+
+MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+conn = MongoRunner.runMongod(mongodArgs);
+assert.neq(conn, null, 'mongod was unable to restart after receiving a SIGKILL');
+testDB = conn.getDB(jsTestName());
+testColl = testDB.test;
+jsTestLog("Recovery after second crash. Fast count: " + testColl.count() +
+ ", number of docs: " + tojson(testColl.find({}).toArray().length));
+
+assert.gte(testColl.count(),
+           101,
+           "Fast count should be at least 100 + 1 after the crash. Fast count: " + testColl.count());
+
+MongoRunner.stopMongod(conn);
+}()); \ No newline at end of file
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 108da46e6c9..734be9b934a 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -2732,4 +2732,10 @@ StatusWith<BSONObj> WiredTigerKVEngine::getSanitizedStorageOptionsForSecondaryRe
return options;
}
+void WiredTigerKVEngine::sizeStorerPeriodicFlush() {
+ if (_sizeStorerSyncTracker.intervalHasElapsed()) {
+ syncSizeInfo(false);
+ }
+}
+
} // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index 911913c2080..ef9a80f6a1e 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -423,6 +423,12 @@ public:
StatusWith<BSONObj> getSanitizedStorageOptionsForSecondaryReplication(
const BSONObj& options) const override;
+ /**
+ * Flushes any WiredTigerSizeStorer updates to the storage engine if enough time has elapsed or
+ * enough operations have accumulated, as dictated by the _sizeStorerSyncTracker.
+ */
+ void sizeStorerPeriodicFlush();
+
private:
class WiredTigerSessionSweeper;
@@ -506,6 +512,7 @@ private:
std::unique_ptr<WiredTigerSizeStorer> _sizeStorer;
std::string _sizeStorerUri;
mutable ElapsedTracker _sizeStorerSyncTracker;
+
bool _ephemeral; // whether we are using the in-memory mode of the WT engine
const bool _inRepairMode;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
index 5fbfe2b8a7c..3225e05ef0a 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
@@ -519,6 +519,10 @@ void WiredTigerSessionCache::releaseSession(WiredTigerSession* session) {
if (!returnedToCache)
delete session;
+
+ if (_engine) {
+ _engine->sizeStorerPeriodicFlush();
+ }
}
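
Note: taken together, the wiredtiger_kv_engine.cpp and wiredtiger_session_cache.cpp hunks form a cheap hot-path gate: every session release pings the engine, and the engine only pays for a size storer flush once its tracker's interval has elapsed. The following free-standing C++ sketch models that control flow; it is illustrative, not MongoDB source. KVEngineModel and SessionCacheModel are hypothetical stand-ins, the hit threshold is shrunk from the 100,000 operations mentioned in the test's comment so the demo fires quickly, and syncSizeInfo() is simulated.

// Free-standing model (not MongoDB source) of the control flow this commit adds.
#include <chrono>
#include <iostream>

class KVEngineModel {
public:
    void sizeStorerPeriodicFlush() {
        if (intervalHasElapsed())
            syncSizeInfo(false);  // simulated; the real call persists cached collection sizes
    }

private:
    // Stand-in for ElapsedTracker: fire after 5 hits (real engine: 100,000
    // operations, per the test's comment) or 60 seconds, whichever comes first.
    bool intervalHasElapsed() {
        const auto now = std::chrono::steady_clock::now();
        if (++_hits >= 5 || now - _lastMark >= std::chrono::seconds(60)) {
            _hits = 0;
            _lastMark = now;
            return true;
        }
        return false;
    }

    void syncSizeInfo(bool /*sync*/) { std::cout << "size storer flushed\n"; }

    int _hits = 0;
    std::chrono::steady_clock::time_point _lastMark = std::chrono::steady_clock::now();
};

class SessionCacheModel {
public:
    explicit SessionCacheModel(KVEngineModel* engine) : _engine(engine) {}

    void releaseSession() {
        // ... session returned to the cache or deleted, as in the patch ...
        if (_engine)
            _engine->sizeStorerPeriodicFlush();  // cheap check on every release
    }

private:
    KVEngineModel* _engine;
};

int main() {
    KVEngineModel engine;
    SessionCacheModel cache(&engine);
    for (int i = 0; i < 12; ++i)
        cache.releaseSession();  // prints on the 5th and 10th releases
}

The common case costs only a counter increment and a clock read; just one release per threshold window (or one per minute) actually touches storage.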
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
index 3b28b12da27..98b5e01ed2e 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h
@@ -124,6 +124,9 @@ public:
* cache exceeds wiredTigerCursorCacheSize.
* The exact cursor config that was used to create the cursor must be provided or subsequent
* users will retrieve cursors with incorrect configurations.
+ *
+ * Additionally calls into the WiredTigerKVEngine to check whether the SizeStorer needs to be
+ * flushed. The SizeStorer is flushed periodically.
*/
void releaseCursor(uint64_t id, WT_CURSOR* cursor, const std::string& config);
diff --git a/src/mongo/util/elapsed_tracker.h b/src/mongo/util/elapsed_tracker.h
index 5a05d5ea31d..72489a845a6 100644
--- a/src/mongo/util/elapsed_tracker.h
+++ b/src/mongo/util/elapsed_tracker.h
@@ -38,14 +38,23 @@ namespace mongo {
class ClockSource;
-/** Keep track of elapsed time. After a set amount of time, tells you to do something. */
+/**
+ * Keeps track of elapsed time. After a set amount of time, or a set number of iterations, tells you
+ * to do something.
+ */
class ElapsedTracker {
public:
+ /**
+ * intervalHasElapsed() returns true once either 'hitsBetweenMarks' calls to it have occurred
+ * or 'msBetweenMarks' time has elapsed, whichever comes first.
+ */
ElapsedTracker(ClockSource* cs, int32_t hitsBetweenMarks, Milliseconds msBetweenMarks);
/**
* Call this for every iteration.
- * @return true if one of the triggers has gone off.
+ *
+ * Returns true after either _hitsBetweenMarks calls have occurred or _msBetweenMarks time has
+ * elapsed since the last true response. Both triggers are reset whenever true is returned.
*/
bool intervalHasElapsed();
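
For illustration, a standalone sketch of the contract documented above; hypothetical code, not the real ElapsedTracker (which reads time from a ClockSource rather than std::chrono directly). Either trigger flips intervalHasElapsed() to true, and a true return resets both the hit counter and the timer.

#include <cassert>
#include <chrono>
#include <cstdint>

class TrackerDemo {
public:
    TrackerDemo(int32_t hitsBetweenMarks, std::chrono::milliseconds msBetweenMarks)
        : _hitsBetweenMarks(hitsBetweenMarks),
          _msBetweenMarks(msBetweenMarks),
          _lastMark(std::chrono::steady_clock::now()) {}

    bool intervalHasElapsed() {
        const auto now = std::chrono::steady_clock::now();
        if (++_hits >= _hitsBetweenMarks || now - _lastMark >= _msBetweenMarks) {
            _hits = 0;        // a true return resets the hit counter...
            _lastMark = now;  // ...and restarts the timer
            return true;
        }
        return false;
    }

private:
    const int32_t _hitsBetweenMarks;
    const std::chrono::milliseconds _msBetweenMarks;
    int32_t _hits = 0;
    std::chrono::steady_clock::time_point _lastMark;
};

int main() {
    TrackerDemo tracker(3, std::chrono::hours(1));  // time trigger effectively disabled
    assert(!tracker.intervalHasElapsed());          // hit 1
    assert(!tracker.intervalHasElapsed());          // hit 2
    assert(tracker.intervalHasElapsed());           // hit 3: hits trigger fires
    assert(!tracker.intervalHasElapsed());          // counter was reset: hit 1 again
}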