diff options
author | Audrey Fang <audrey.fang@10gen.com> | 2018-06-08 16:23:24 -0400 |
---|---|---|
committer | Audrey Fang <audrey.fang@10gen.com> | 2018-08-15 15:33:38 -0400 |
commit | d2360d5243fa8a19673473c63d114fdf03028a81 (patch) | |
tree | 32e9962a2ff531073ecfa15c43e09bc6bcdf4bf0 | |
parent | 73b02dc629b72d835534ee5e555603f5eade26be (diff) | |
download | mongo-d2360d5243fa8a19673473c63d114fdf03028a81.tar.gz |
SERVER-26387: Replace noPassthrough/indexbg2.js with test that uses
failpoints
(cherry picked from commit 1ba4fd11653654df741b5c399a85f38617b21ec8)
-rw-r--r-- | jstests/noPassthrough/indexbg2.js | 240 | ||||
-rw-r--r-- | src/mongo/db/catalog/index_create.cpp | 16 |
2 files changed, 175 insertions, 81 deletions
diff --git a/jstests/noPassthrough/indexbg2.js b/jstests/noPassthrough/indexbg2.js index 34b35eb16d1..815a4e35df3 100644 --- a/jstests/noPassthrough/indexbg2.js +++ b/jstests/noPassthrough/indexbg2.js @@ -1,97 +1,175 @@ // Test background index creation w/ constraints // @tags: [requires_document_locking] -load("jstests/libs/slow_weekly_util.js"); - -var testServer = new SlowWeeklyMongod("indexbg2"); -var db = testServer.getDB("test"); -var baseName = "jstests_index12"; - -var parallel = function() { - return db[baseName + "_parallelStatus"]; -}; - -var resetParallel = function() { - parallel().drop(); -}; - -// Return the PID to call `waitpid` on for clean shutdown. -var doParallel = function(work) { - resetParallel(); - return startMongoProgramNoConnect( - "mongo", - "--eval", - work + "; db." + baseName + "_parallelStatus.save( {done:1} );", - db.getMongo().host); -}; - -var doneParallel = function() { - return !!parallel().findOne(); -}; - -var waitParallel = function() { - assert.soon(function() { - return doneParallel(); - }, "parallel did not finish in time", 300000, 1000); -}; - -var doTest = function() { +(function() { "use strict"; - var size = 10000; - var bgIndexBuildPid; - while (1) { // if indexing finishes before we can run checks, try indexing w/ more data - print("size: " + size); - var fullName = "db." + baseName; - var t = db[baseName]; - t.drop(); - - for (var i = 0; i < size; ++i) { - db.jstests_index12.save({i: i}); + + load("jstests/libs/check_log.js"); + + const conn = MongoRunner.runMongod({smallfiles: "", nojournal: ""}); + assert.neq(null, conn, "mongod failed to start."); + + let db = conn.getDB("test"); + let baseName = "jstests_index12"; + + let parallel = function() { + return db[baseName + "_parallelStatus"]; + }; + + let resetParallel = function() { + parallel().drop(); + }; + + // Return the PID to call `waitpid` on for clean shutdown. 
+ let doParallel = function(work) { + resetParallel(); + return startMongoProgramNoConnect( + "mongo", + "--eval", + work + "; db." + baseName + "_parallelStatus.save( {done:1} );", + db.getMongo().host); + }; + + let indexBuild = function() { + let fullName = "db." + baseName; + return doParallel(fullName + ".ensureIndex( {i:1}, {background:true, unique:true} )"); + }; + + let doneParallel = function() { + return !!parallel().findOne(); + }; + + let waitParallel = function() { + assert.soon(function() { + return doneParallel(); + }, "parallel did not finish in time", 300000, 1000); + }; + + let turnFailPointOn = function(failPointName, i) { + assert.commandWorked(conn.adminCommand( + {configureFailPoint: failPointName, mode: "alwaysOn", data: {"i": i}})); + }; + + let turnFailPointOff = function(failPointName) { + assert.commandWorked(conn.adminCommand({configureFailPoint: failPointName, mode: "off"})); + }; + + // Unique background index build fails when there exist duplicate indexed values + // for the duration of the build. + let failOnExistingDuplicateValue = function(coll) { + let duplicateKey = 0; + assert.writeOK(coll.save({i: duplicateKey})); + + let bgIndexBuildPid = indexBuild(); + waitProgram(bgIndexBuildPid); + assert.eq(1, coll.getIndexes().length, "Index should fail. There exist duplicate values."); + + // Revert to unique key set + coll.deleteOne({i: duplicateKey}); + }; + + // Unique background index build fails when started with a unique key set, + // but a document with a duplicate key is inserted prior to that key being indexed. 
+ let failOnInsertedDuplicateValue = function(coll) { + let duplicateKey = 7; + + turnFailPointOn("hangBeforeIndexBuildOf", duplicateKey); + + let bgIndexBuildPid; + try { + bgIndexBuildPid = indexBuild(); + jsTestLog("Waiting to hang before index build of i=" + duplicateKey); + checkLog.contains(conn, "Hanging before index build of i=" + duplicateKey); + + assert.writeOK(coll.save({i: duplicateKey})); + } finally { + turnFailPointOff("hangBeforeIndexBuildOf"); } - assert.eq(size, t.count()); - bgIndexBuildPid = - doParallel(fullName + ".ensureIndex( {i:1}, {background:true, unique:true} )"); + waitProgram(bgIndexBuildPid); + assert.eq(1, + coll.getIndexes().length, + "Index should fail. Duplicate key is inserted prior to that key being indexed."); + + // Revert to unique key set + coll.deleteOne({i: duplicateKey}); + }; + + // Unique background index build succeeds: + // 1) when a document is inserted with a key that has already been indexed + // (with the insert failing on duplicate key error). + // 2) when a document with a key not present in the initial set is inserted twice + // (with the initial insert succeeding and the second failing on duplicate key error). 
+ let succeedWithWriteErrors = function(coll, newKey) { + let duplicateKey = 3; + + turnFailPointOn("hangAfterIndexBuildOf", duplicateKey); + + let bgIndexBuildPid; try { - // wait for indexing to start - assert.soon(function() { - return 2 === t.getIndexes().length; - }, "no index created", 30000, 50); - assert.writeError(t.save({i: 0, n: true})); // duplicate key violation - assert.writeOK(t.save({i: size - 1, n: true})); + bgIndexBuildPid = indexBuild(); + + jsTestLog("Waiting to hang after index build of i=" + duplicateKey); + checkLog.contains(conn, "Hanging after index build of i=" + duplicateKey); + + assert.writeError(coll.save({i: duplicateKey, n: true})); + + // First insert on key not present in initial set + assert.writeOK(coll.save({i: newKey, n: true})); } catch (e) { - // only a failure if we're still indexing - // wait for parallel status to update to reflect indexing status - sleep(1000); - if (!doneParallel()) { - waitProgram(bgIndexBuildPid); - throw e; - } + turnFailPointOff("hangAfterIndexBuildOf"); + throw e; } - if (!doneParallel()) { - // Ensure the shell has exited cleanly. Otherwise the test harness may send a SIGTERM - // which can lead to a false test failure. - waitProgram(bgIndexBuildPid); - break; + + try { + // We are currently hanging after indexing document with {i: duplicateKey}. + // To perform next check, we need to hang after indexing document with {i: newKey}. + // Add a hang before indexing document {i: newKey}, then turn off current hang + // so we are always in a known state and don't skip over the indexing of {i: newKey}. 
+ turnFailPointOn("hangBeforeIndexBuildOf", newKey); + turnFailPointOff("hangAfterIndexBuildOf"); + turnFailPointOn("hangAfterIndexBuildOf", newKey); + turnFailPointOff("hangBeforeIndexBuildOf"); + + // Second insert on key not present in initial set fails with duplicate key error + jsTestLog("Waiting to hang after index build of i=" + newKey); + checkLog.contains(conn, "Hanging after index build of i=" + newKey); + + assert.writeError(coll.save({i: newKey, n: true})); + } finally { + turnFailPointOff("hangBeforeIndexBuildOf"); + turnFailPointOff("hangAfterIndexBuildOf"); } - print("indexing finished too soon, retrying..."); - // Although the index build finished, ensure the shell has exited. + waitProgram(bgIndexBuildPid); - size *= 2; - assert(size < 5000000, "unable to run checks in parallel with index creation"); - } + assert.eq(2, coll.getIndexes().length, "Index build should succeed"); + }; + + let doTest = function() { + "use strict"; + const size = 10; + + let coll = db[baseName]; + coll.drop(); + + for (let i = 0; i < size; ++i) { + assert.writeOK(coll.save({i: i})); + } + assert.eq(size, coll.count()); + assert.eq(1, coll.getIndexes().length, "_id index should already exist"); + + failOnExistingDuplicateValue(coll); + assert.eq(size, coll.count()); - waitParallel(); + failOnInsertedDuplicateValue(coll); + assert.eq(size, coll.count()); - /* it could be that there is more than size now but the index failed - to build - which is valid. we check index isn't there. 
- */ - if (t.count() != size) { - assert.eq(1, t.getIndexes().length, "change in # of elems yet index is there"); - } + succeedWithWriteErrors(coll, size); -}; + waitParallel(); + }; -doTest(); + doTest(); -testServer.stop(); + MongoRunner.stopMongod(conn); +})(); diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp index 17a3d981d5d..9b244bf5f71 100644 --- a/src/mongo/db/catalog/index_create.cpp +++ b/src/mongo/db/catalog/index_create.cpp @@ -65,6 +65,8 @@ using std::endl; MONGO_FP_DECLARE(crashAfterStartingIndexBuild); MONGO_FP_DECLARE(hangAfterStartingIndexBuild); MONGO_FP_DECLARE(hangAfterStartingIndexBuildUnlocked); +MONGO_FP_DECLARE(hangBeforeIndexBuildOf); +MONGO_FP_DECLARE(hangAfterIndexBuildOf); std::atomic<std::int32_t> maxIndexBuildMemoryUsageMegabytes(500); // NOLINT @@ -282,6 +284,16 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj return indexInfoObjs; } +void failPointHangDuringBuild(FailPoint* fp, StringData where, const BSONObj& doc) { + MONGO_FAIL_POINT_BLOCK(*fp, data) { + int i = doc.getIntField("i"); + if (data.getData()["i"].numberInt() == i) { + log() << "Hanging " << where << " index build of i=" << i; + MONGO_FAIL_POINT_PAUSE_WHILE_SET((*fp)); + } + } +} + Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsOut) { const char* curopMessage = _buildInBackground ? "Index Build (background)" : "Index Build"; const auto numRecords = _collection->numRecords(_txn); @@ -323,6 +335,8 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsO // Done before insert so we can retry document if it WCEs. 
progress->setTotalWhileRunning(_collection->numRecords(_txn)); + failPointHangDuringBuild(&hangBeforeIndexBuildOf, "before", objToIndex.value()); + WriteUnitOfWork wunit(_txn); Status ret = insert(objToIndex.value(), loc); if (_buildInBackground) @@ -340,6 +354,8 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsO if (_buildInBackground) exec->restoreState(); // Handles any WCEs internally. + failPointHangDuringBuild(&hangAfterIndexBuildOf, "after", objToIndex.value()); + // Go to the next document progress->hit(); n++; |