diff options
author | Louis Williams <louis.williams@mongodb.com> | 2022-02-02 06:14:45 -0500 |
---|---|---|
committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2022-02-02 11:41:39 +0000 |
commit | c45f8885f8c2aa87a8498add9228969600600de0 (patch) | |
tree | 2aaf91df8336d236dc2f382362ea7edd87a25c98 | |
parent | cb6654f0251852820a0c4af4901f31fe21597377 (diff) | |
download | mongo-c45f8885f8c2aa87a8498add9228969600600de0.tar.gz |
Revert "SERVER-62650 Limit cache wait time when initializing RecordIds" (tag: r5.3.0-alpha1)
This reverts commit 6d509615d2d6ef7af38e1b982b6272a54e9b591c.
-rw-r--r-- | jstests/core/txns/commit_and_abort_large_unprepared_transactions.js | 27 | ||||
-rw-r--r-- | src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp | 19 |
2 files changed, 9 insertions, 37 deletions
diff --git a/jstests/core/txns/commit_and_abort_large_unprepared_transactions.js b/jstests/core/txns/commit_and_abort_large_unprepared_transactions.js index aff44fe900d..feb09ef4656 100644 --- a/jstests/core/txns/commit_and_abort_large_unprepared_transactions.js +++ b/jstests/core/txns/commit_and_abort_large_unprepared_transactions.js @@ -27,25 +27,14 @@ const sessionDB = session.getDatabase(dbName); const sessionColl = sessionDB.getCollection(collName); // Test committing an unprepared large transaction with two 10MB inserts. -try { - let doc1 = createLargeDocument(1); - let doc2 = createLargeDocument(2); - - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc1)); - assert.commandWorked(sessionColl.insert(doc2)); - assert.commandWorked(session.commitTransaction_forTesting()); - assert.sameMembers(sessionColl.find().toArray(), [doc1, doc2]); -} catch (e) { - // It may be possible for this test to run in a passthrough where such a large transaction fills - // up the cache and cannot commit. The transaction will be rolled-back with a WriteConflict as a - // result. - if (e.code === ErrorCodes.WriteConflict && e.errmsg.startsWith("Cache full")) { - jsTestLog("Ignoring WriteConflict due to large transaction's size"); - } else { - throw e; - } -} +let doc1 = createLargeDocument(1); +let doc2 = createLargeDocument(2); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc1)); +assert.commandWorked(sessionColl.insert(doc2)); + +assert.commandWorked(session.commitTransaction_forTesting()); +assert.sameMembers(sessionColl.find().toArray(), [doc1, doc2]); // Test aborting an unprepared large transaction with two 10MB inserts. 
let doc3 = createLargeDocument(3); diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp index d988707fbb3..314b806b83b 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp @@ -1906,17 +1906,6 @@ void WiredTigerRecordStore::_initNextIdIfNeeded(OperationContext* opCtx) { // required by the largest_key API. WiredTigerSessionCache* cache = WiredTigerRecoveryUnit::get(opCtx)->getSessionCache(); auto sessRaii = cache->getSession(); - - // We must limit the amount of time spent blocked on cache eviction to avoid a deadlock with - // ourselves. The calling operation may have a session open that has written a large amount of - // data, and by creating a new session, we are preventing WT from being able to roll back that - // transaction to free up cache space. If we do block on cache eviction here, we must consider - // that the other session owned by this thread may be the one that needs to be rolled back. If - // this does time out, we will receive a WT_CACHE_FULL and throw an error. - auto wtSession = sessRaii->getSession(); - invariantWTOK(wtSession->reconfigure(wtSession, "cache_max_wait_ms=1000")); - ON_BLOCK_EXIT([&] { wtSession->reconfigure(wtSession, ""); }); - auto cachedCursor = sessRaii->getCachedCursor(_tableId, ""); auto cursor = cachedCursor ? cachedCursor : sessRaii->getNewCursor(_uri); ON_BLOCK_EXIT([&] { sessRaii->releaseCursor(_tableId, cursor, ""); }); @@ -1925,13 +1914,7 @@ void WiredTigerRecordStore::_initNextIdIfNeeded(OperationContext* opCtx) { // largest_key API returns the largest key in the table regardless of visibility. This ensures // we don't re-use RecordIds that are not visible. int ret = cursor->largest_key(cursor); - if (ret == WT_CACHE_FULL) { - // Force the caller to rollback its transaction if we can't make progess with eviction. 
- // TODO (SERVER-60839): Convert this to a different error code that is distinguishable from - // a true write conflict. - throw WriteConflictException( - fmt::format("Cache full while performing initial write to '{}'", _ns)); - } else if (ret != WT_NOTFOUND) { + if (ret != WT_NOTFOUND) { invariantWTOK(ret); auto recordId = getKey(cursor); nextId = recordId.getLong() + 1; |