author     Vincent Do <do.vincent@live.com>    2016-05-17 16:11:09 -0400
committer  Vincent Do <do.vincent@live.com>    2016-05-20 14:16:26 -0400
commit     bd81003cf222ac705ed8cfa476f952d87d5d04ba (patch)
tree       87cf1031ed20dcf3f6575c0b77d472f332348195
parent     225a2a57350c79a190c13054a038b6c3c559a558 (diff)
SERVER-24070 Make index_delete.js not timing dependent
-rw-r--r--  jstests/replsets/index_delete.js        63
-rw-r--r--  src/mongo/db/catalog/index_create.cpp    8
2 files changed, 39 insertions, 32 deletions
diff --git a/jstests/replsets/index_delete.js b/jstests/replsets/index_delete.js
index 9013f8d4ab0..f7c45e2f2e0 100644
--- a/jstests/replsets/index_delete.js
+++ b/jstests/replsets/index_delete.js
@@ -1,37 +1,27 @@
/**
- * TODO: SERVER-13204
- * This tests inserts a huge number of documents, initiates a background index build
- * and tries to perform another task in parallel while the background index task is
- * active. The problem is that this is timing dependent and the current test setup
- * tries to achieve this by inserting insane amount of documents.
- */
-
-/**
 * Starts a replica set with an arbiter, builds an index,
 * drops the index once the secondary starts building it;
 * the index should not exist on the secondary afterwards.
*/
-var checkOp = function(checkDB) {
-    var curOp = checkDB.currentOp(true);
-    for (var i = 0; i < curOp.inprog.length; i++) {
-        try {
-            if (curOp.inprog[i].query.background) {
-                // should throw something when string contains > 90%
-                printjson(curOp.inprog[i].msg);
+function indexBuildInProgress(checkDB) {
+    var inprog = checkDB.currentOp().inprog;
+    var indexOps = inprog.filter(function(op) {
+        if (op.msg && op.msg.includes('Index Build')) {
+            if (op.progress && (op.progress.done / op.progress.total) > 0.20) {
+                printjson(op);
                return true;
            }
-        } catch (e) {
-            // catchem if you can
        }
-    }
-    return false;
-};
+    });
+    return indexOps.length > 0;
+}
+
// Set up replica set
var replTest = new ReplSetTest({name: 'fgIndex', nodes: 3});
var nodes = replTest.nodeList();
-// We need an arbiter to ensure that the primary doesn't step down when we restart the secondary
+// We need an arbiter to ensure that the primary doesn't step down when we restart the secondary.
replTest.startSet();
replTest.initiate({
"_id": "fgIndex",
@@ -47,9 +37,12 @@ var second = replTest.getSecondary();
var masterDB = master.getDB('fgIndexSec');
var secondDB = second.getDB('fgIndexSec');
-var size = 50000;
+var size = 100;
+
+// Make sure that the index build does not terminate on the secondary.
+assert.commandWorked(
+    secondDB.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'alwaysOn'}));
-jsTest.log("creating test data " + size + " documents");
var bulk = masterDB.jstests_fgsec.initializeUnorderedBulkOp();
for (var i = 0; i < size; ++i) {
bulk.insert({i: i});
@@ -60,22 +53,28 @@ jsTest.log("Creating index");
masterDB.jstests_fgsec.ensureIndex({i: 1});
assert.eq(2, masterDB.jstests_fgsec.getIndexes().length);
-// Wait for the secondary to get the index entry
-assert.soon(function() {
- return 2 == secondDB.jstests_fgsec.getIndexes().length;
-}, "index not created on secondary", 1000 * 60 * 10, 50);
+try {
+    assert.soon(function() {
+        return indexBuildInProgress(secondDB);
+    }, "index not started on secondary", 30000, 50);
+} finally {
+    // Turn off the failpoint and let the index build resume.
+    assert.commandWorked(
+        secondDB.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'off'}));
+}
jsTest.log("Index created on secondary");
masterDB.runCommand({dropIndexes: "jstests_fgsec", index: "i_1"});
+
jsTest.log("Waiting on replication");
replTest.awaitReplication();
-assert.soon(function() {
- return !checkOp(secondDB);
-}, "index not cancelled on secondary", 30000, 50);
+
masterDB.jstests_fgsec.getIndexes().forEach(printjson);
secondDB.jstests_fgsec.getIndexes().forEach(printjson);
assert.soon(function() {
return 1 == secondDB.jstests_fgsec.getIndexes().length;
}, "Index not dropped on secondary", 30000, 50);
-
-jsTest.log("index-restart-secondary.js complete");
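
For context, the synchronization pattern the revised test relies on can be summarized in a standalone sketch: pause the secondary's index build with the new failpoint, wait until currentOp reports the build in progress, then release it. This is illustrative only; it assumes the masterDB/secondDB connections from the test above, that test commands are enabled so configureFailPoint is accepted, and the helper name waitForIndexBuildToStart is made up for the example.

// Illustrative sketch, not part of the patch.
function waitForIndexBuildToStart(db) {
    assert.soon(function() {
        // Replicated index builds report an "Index Build" message in currentOp.
        return db.currentOp().inprog.some(function(op) {
            return op.msg && op.msg.includes('Index Build');
        });
    }, "index build did not start", 30000, 50);
}

// Pause index builds on the secondary before they can finish.
assert.commandWorked(
    secondDB.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'alwaysOn'}));
try {
    masterDB.jstests_fgsec.ensureIndex({i: 1});  // build starts on the primary and replicates
    waitForIndexBuildToStart(secondDB);          // deterministic: the secondary's build cannot finish yet
} finally {
    // Always release the failpoint so the build (or a later drop) can make progress.
    assert.commandWorked(
        secondDB.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'off'}));
}
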
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index b18e90ac880..72c0bc6a28a 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -62,6 +62,7 @@ using std::string;
using std::endl;
MONGO_FP_DECLARE(crashAfterStartingIndexBuild);
+MONGO_FP_DECLARE(hangAfterStartingIndexBuild);
/**
* On rollback sets MultiIndexBlock::_needToCleanup to true.
@@ -311,6 +312,13 @@ Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsO
WorkingSetCommon::toStatusString(objToIndex.value()),
state == PlanExecutor::IS_EOF);
+    // Need the index build to hang before the progress meter is marked as finished so we can
+    // reliably check that the index build has actually started in js tests.
+    while (MONGO_FAIL_POINT(hangAfterStartingIndexBuild)) {
+        log() << "Hanging index build due to 'hangAfterStartingIndexBuild' failpoint";
+        sleepmillis(1000);
+    }
+
progress->finished();
Status ret = doneInserting(dupsOut);
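
On the server side, the new hangAfterStartingIndexBuild failpoint makes insertAllDocumentsInCollection spin just before progress->finished(), logging the "Hanging index build ..." message about once a second until the failpoint is switched off. Below is a hedged shell-side sketch of how a test could confirm that the hang is actually in effect; it assumes a secondDB connection as above, and the helper name assertIndexBuildHanging is made up for the example.

// Illustrative sketch, not part of the patch.
function assertIndexBuildHanging(db) {
    // While the failpoint is on, the build hangs *before* progress->finished(),
    // so currentOp still reports the "Index Build" progress meter.
    var ops = db.currentOp().inprog.filter(function(op) {
        return op.msg && op.msg.includes('Index Build');
    });
    assert.eq(1, ops.length, 'expected one in-progress index build: ' + tojson(ops));

    // The hang is also visible in the server log (message added in index_create.cpp above).
    var res = db.adminCommand({getLog: 'global'});
    assert.commandWorked(res);
    assert(res.log.some(function(line) {
        return line.includes("Hanging index build due to 'hangAfterStartingIndexBuild' failpoint");
    }), 'expected the hangAfterStartingIndexBuild log line');
}
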