author     Vincent Do <do.vincent@live.com>                2016-11-07 15:08:46 -0500
committer  Max Hirschhorn <max.hirschhorn@mongodb.com>     2016-11-07 15:08:46 -0500
commit     415100327abaf0e2265f86d30640bc3a1ae2a372 (patch)
tree       0289d15a8d05cbaed8783b8b761efb628a923aa3
parent     e0e6faf67235d5eb9a758d7b0dd140bd939682c3 (diff)
download   mongo-415100327abaf0e2265f86d30640bc3a1ae2a372.tar.gz
SERVER-23510 Fix index_retry and index_no_retry to not be timing based
(cherry picked from commit a97efaa18399fa43bb2a66be204c9f433e71f50b)

Additional changes on top of a97efaa18399fa43bb2a66be204c9f433e71f50b:
- Included the changes from ff285b342fd98416a458a070f05e62c274028c89 to fix invalid write concerns.
- Fixed the tests to get the pid using serverStatus().
- Worked around MongoRunner.EXIT_TEST not being defined.
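For context, a minimal sketch (mongo shell JavaScript) of the two test-side workarounds listed above, mirroring what the updated tests in this diff do: the server pid is read from serverStatus() rather than from MongoRunner bookkeeping, and the expected exit code is declared locally because MongoRunner.EXIT_TEST is not defined on this branch. Variable names follow the tests; 'conn' is assumed to be a connection returned by MongoRunner.runMongod().

    // Read the server pid from the node itself instead of relying on
    // MongoRunner bookkeeping.
    var testDB = conn.getDB('test');
    var pid = testDB.serverStatus().pid;

    // MongoRunner.EXIT_TEST is not defined on this branch, so declare the
    // expected exit code locally; 101 corresponds to the EXIT_TEST code that
    // quickExit() is called with in index_create.cpp below.
    var EXIT_TEST = 101;

    // After the 'crashAfterStartingIndexBuild' failpoint terminates the
    // mongod, wait on the process and verify how it exited.
    assert.eq(waitProgram(pid), EXIT_TEST,
              "mongod should have exited via the failpoint");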
-rw-r--r--  jstests/noPassthrough/index_no_retry.js             66
-rw-r--r--  jstests/noPassthrough/index_retry.js                67
-rw-r--r--  jstests/noPassthroughWithMongod/index_no_retry.js   94
-rw-r--r--  jstests/noPassthroughWithMongod/index_retry.js      95
-rw-r--r--  src/mongo/db/catalog/index_create.cpp               17
5 files changed, 149 insertions, 190 deletions
diff --git a/jstests/noPassthrough/index_no_retry.js b/jstests/noPassthrough/index_no_retry.js
new file mode 100644
index 00000000000..e3c00182a17
--- /dev/null
+++ b/jstests/noPassthrough/index_no_retry.js
@@ -0,0 +1,66 @@
+// Check index rebuild is disabled with --noIndexBuildRetry when MongoDB is killed.
+//
+// This test requires persistence because it assumes data/indices will survive a restart.
+// This test requires journaling because the information that an index build was started
+// must be made durable when the process aborts.
+// @tags: [requires_persistence, requires_journaling]
+(function() {
+ 'use strict';
+ var baseName = 'index_no_retry';
+ var dbpath = MongoRunner.dataPath + baseName;
+
+ var conn = MongoRunner.runMongod({dbpath: dbpath});
+ assert.neq(null, conn, 'failed to start mongod');
+
+ var test = conn.getDB("test");
+ var pid = test.serverStatus().pid;
+
+ var name = 'jstests_slownightly_' + baseName;
+ var t = test.getCollection(name);
+ t.drop();
+
+ var bulk = t.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; ++i) {
+ bulk.insert({a: i});
+ }
+
+ // Make sure the documents are journaled
+ assert.writeOK(bulk.execute({j: true}));
+
+ assert.eq(100, t.count(), 'unexpected number of documents after bulk insert.');
+
+    function abortDuringIndexBuild() {
+ var createIdx = startParallelShell(function() {
+ var coll = db.getSiblingDB('test').getCollection('jstests_slownightly_index_no_retry');
+
+ // Fail point will handle journal flushing and killing the mongod
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: 'crashAfterStartingIndexBuild', mode: 'alwaysOn'}));
+ coll.createIndex({a: 1}, {background: true});
+ }, conn.port);
+
+ var exitCode = createIdx({checkExitSuccess: false});
+ assert.neq(0, exitCode, "expected shell to exit abnormally due to mongod being terminated");
+ }
+
+ abortDuringIndexBuild();
+
+ var EXIT_TEST = 101;
+ assert.eq(waitProgram(pid),
+ EXIT_TEST,
+ "mongod should have crashed due to the 'crashAfterStartingIndexBuild' " +
+ "failpoint being set.");
+
+ conn = MongoRunner.runMongod({dbpath: dbpath, noIndexBuildRetry: '', restart: true});
+ test = conn.getDB("test");
+ t = test.getCollection(name);
+
+ assert.throws(function() {
+ t.find({}, {_id: 0, a: 1}).hint({a: 1}).next();
+ }, null, 'index {a: 1} was rebuilt in spite of --noIndexBuildRetry');
+
+ var indexes = t.getIndexes();
+ assert.eq(1, indexes.length, 'unfinished indexes in listIndexes result: ' + tojson(indexes));
+
+ MongoRunner.stopMongod(conn.port);
+}());
diff --git a/jstests/noPassthrough/index_retry.js b/jstests/noPassthrough/index_retry.js
new file mode 100644
index 00000000000..a221f199471
--- /dev/null
+++ b/jstests/noPassthrough/index_retry.js
@@ -0,0 +1,67 @@
+// Check index rebuild when MongoDB is killed.
+//
+// This test requires persistence because it assumes data/indices will survive a restart.
+// This test requires journaling because the information that an index build was started
+// must be made durable when the process aborts.
+// @tags: [requires_persistence, requires_journaling]
+(function() {
+ 'use strict';
+ var baseName = 'index_retry';
+ var dbpath = MongoRunner.dataPath + baseName;
+
+ var conn = MongoRunner.runMongod({dbpath: dbpath});
+ assert.neq(null, conn, 'failed to start mongod');
+
+ var test = conn.getDB("test");
+ var pid = test.serverStatus().pid;
+
+ var name = 'jstests_slownightly_' + baseName;
+ var t = test.getCollection(name);
+ t.drop();
+
+ var bulk = t.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; ++i) {
+ bulk.insert({a: i});
+ }
+
+ // Make sure the documents are journaled
+ assert.writeOK(bulk.execute({j: true}));
+
+ assert.eq(100, t.count(), 'unexpected number of documents after bulk insert.');
+
+ function abortDuringIndexBuild() {
+ var createIdx = startParallelShell(function() {
+ var coll = db.getSiblingDB('test').getCollection('jstests_slownightly_index_retry');
+
+ // Fail point will handle journal flushing and killing the mongod
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: 'crashAfterStartingIndexBuild', mode: 'alwaysOn'}));
+ coll.createIndex({a: 1}, {background: true});
+ }, conn.port);
+
+ var exitCode = createIdx({checkExitSuccess: false});
+ assert.neq(0, exitCode, "expected shell to exit abnormally due to mongod being terminated");
+ }
+
+ abortDuringIndexBuild();
+
+ var EXIT_TEST = 101;
+ assert.eq(waitProgram(pid),
+ EXIT_TEST,
+ "mongod should have crashed due to the 'crashAfterStartingIndexBuild' " +
+ "failpoint being set.");
+
+ conn = MongoRunner.runMongod({dbpath: dbpath, restart: true});
+ test = conn.getDB("test");
+ t = test.getCollection(name);
+
+ assert.eq(100,
+ t.find({}, {_id: 0, a: 1}).hint({a: 1}).itcount(),
+ 'index {a: 1} was expected to be rebuilt on startup');
+ var indexes = t.getIndexes();
+ assert.eq(2,
+ indexes.length,
+ 'unexpected number of indexes in listIndexes result: ' + tojson(indexes));
+
+ MongoRunner.stopMongod(conn.port);
+}());
diff --git a/jstests/noPassthroughWithMongod/index_no_retry.js b/jstests/noPassthroughWithMongod/index_no_retry.js
deleted file mode 100644
index ff09b70d039..00000000000
--- a/jstests/noPassthroughWithMongod/index_no_retry.js
+++ /dev/null
@@ -1,94 +0,0 @@
-// Check index rebuild is disabled with --noIndexBuildRetry when MongoDB is killed.
-//
-// This test requires persistence beacuase it assumes data/indices will survive a restart.
-// @tags: [requires_persistence]
-(function() {
- 'use strict';
- var baseName = 'index_retry';
- var dbpath = MongoRunner.dataPath + baseName;
-
- var conn = MongoRunner.runMongod({dbpath: dbpath, journal: ''});
-
- var test = conn.getDB("test");
-
- var name = 'jstests_slownightly_' + baseName;
- var t = test.getCollection(name);
- t.drop();
-
- // Insert a large number of documents, enough to ensure that an index build on these documents
- // can be interrupted before complete.
- var bulk = t.initializeUnorderedBulkOp();
- for (var i = 0; i < 5e5; ++i) {
- bulk.insert({a: i});
- if (i % 10000 == 0) {
- print("i: " + i);
- }
- }
- assert.writeOK(bulk.execute());
-
- function debug(x) {
- printjson(x);
- }
-
- /**
- * @return if there's a current running index build
- */
- function indexBuildInProgress() {
- var inprog = test.currentOp().inprog;
- debug(inprog);
- var indexBuildOpId = -1;
- inprog.forEach(function(op) {
- // Identify the index build as a createIndexes command.
- // It is assumed that no other clients are concurrently
- // accessing the 'test' database.
- if ((op.op == 'query' || op.op == 'command') && 'createIndexes' in op.query) {
- debug(op.opid);
- var idxSpec = op.query.indexes[0];
- // SERVER-4295 Make sure the index details are there
- // we can't assert these things, since there is a race in reporting
- // but we won't count if they aren't
- if ("a_1" == idxSpec.name && 1 == idxSpec.key.a && idxSpec.background &&
- op.progress && (op.progress.done / op.progress.total) > 0.20) {
- indexBuildOpId = op.opid;
- }
- }
- });
- return indexBuildOpId != -1;
- }
-
- function abortDuringIndexBuild(options) {
- var createIdx = startParallelShell(
- 'db.' + name + '.createIndex({ a: 1 }, { background: true });', conn.port);
-
- // Wait for the index build to start.
- var times = 0;
- assert.soon(function() {
- return indexBuildInProgress() && times++ >= 2;
- });
-
- print("killing the mongod");
- MongoRunner.stopMongod(conn.port, /* signal */ 9);
-
- var exitCode = createIdx({checkExitSuccess: false});
- assert.neq(0, exitCode, "expected shell to exit abnormally due to mongod being terminated");
- }
-
- abortDuringIndexBuild();
-
- conn =
- MongoRunner.runMongod({dbpath: dbpath, journal: '', noIndexBuildRetry: '', restart: true});
- test = conn.getDB("test");
- t = test.getCollection(name);
-
- assert.throws(function() {
- t.find({a: 42}).hint({a: 1}).next();
- }, null, 'index {a: 1} was rebuilt in spite of --noIndexBuildRetry');
-
- var indexes = t.getIndexes();
- assert.eq(1, indexes.length, 'unfinished indexes in listIndexes result: ' + tojson(indexes));
-
- print("Index rebuilding disabled successfully");
-
- MongoRunner.stopMongod(conn.port);
- print("SUCCESS!");
-}());
diff --git a/jstests/noPassthroughWithMongod/index_retry.js b/jstests/noPassthroughWithMongod/index_retry.js
deleted file mode 100644
index cb33de4b95b..00000000000
--- a/jstests/noPassthroughWithMongod/index_retry.js
+++ /dev/null
@@ -1,95 +0,0 @@
-// Check index rebuild when MongoDB is killed.
-//
-// This test requires persistence beacuase it assumes data/indices will survive a restart.
-// @tags: [requires_persistence]
-(function() {
- 'use strict';
- var baseName = 'index_retry';
- var dbpath = MongoRunner.dataPath + baseName;
-
- var conn = MongoRunner.runMongod({dbpath: dbpath, journal: ''});
-
- var test = conn.getDB("test");
-
- var name = 'jstests_slownightly_' + baseName;
- var t = test.getCollection(name);
- t.drop();
-
- // Insert a large number of documents, enough to ensure that an index build on these documents
- // can be interrupted before complete.
- var bulk = t.initializeUnorderedBulkOp();
- for (var i = 0; i < 5e5; ++i) {
- bulk.insert({a: i});
- if (i % 10000 == 0) {
- print("i: " + i);
- }
- }
- assert.writeOK(bulk.execute());
-
- function debug(x) {
- printjson(x);
- }
-
- /**
- * @return if there's a current running index build
- */
- function indexBuildInProgress() {
- var inprog = test.currentOp().inprog;
- debug(inprog);
- var indexBuildOpId = -1;
- inprog.forEach(function(op) {
- // Identify the index build as a createIndexes command.
- // It is assumed that no other clients are concurrently
- // accessing the 'test' database.
- if ((op.op == 'query' || op.op == 'command') && 'createIndexes' in op.query) {
- debug(op.opid);
- var idxSpec = op.query.indexes[0];
- // SERVER-4295 Make sure the index details are there
- // we can't assert these things, since there is a race in reporting
- // but we won't count if they aren't
- if ("a_1" == idxSpec.name && 1 == idxSpec.key.a && idxSpec.background &&
- op.progress && (op.progress.done / op.progress.total) > 0.20) {
- indexBuildOpId = op.opid;
- }
- }
- });
- return indexBuildOpId != -1;
- }
-
- function abortDuringIndexBuild(options) {
- var createIdx = startParallelShell(
- 'db.' + name + '.createIndex({ a: 1 }, { background: true });', conn.port);
-
- // Wait for the index build to start.
- var times = 0;
- assert.soon(function() {
- return indexBuildInProgress() && times++ >= 2;
- });
-
- print("killing the mongod");
- MongoRunner.stopMongod(conn.port, /* signal */ 9);
-
- var exitCode = createIdx({checkExitSuccess: false});
- assert.neq(0, exitCode, "expected shell to exit abnormally due to mongod being terminated");
- }
-
- abortDuringIndexBuild();
-
- conn = MongoRunner.runMongod({dbpath: dbpath, journal: '', restart: true});
- test = conn.getDB("test");
- t = test.getCollection(name);
-
- assert.eq({a: 42},
- t.find({a: 42}, {_id: 0}).hint({a: 1}).next(),
- 'index {a: 1} was rebuilt on startup');
-
- var indexes = t.getIndexes();
- assert.eq(2,
- indexes.length,
- 'unexpected number of indexes in listIndexes result: ' + tojson(indexes));
-
- print("Index built");
-
- MongoRunner.stopMongod(conn.port);
- print("SUCCESS!");
-}());
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index 0814277262f..b18e90ac880 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -34,7 +34,6 @@
#include "mongo/db/catalog/index_create.h"
-
#include "mongo/base/error_codes.h"
#include "mongo/client/dbclientinterface.h"
#include "mongo/db/audit.h"
@@ -49,9 +48,12 @@
#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/operation_context.h"
#include "mongo/stdx/mutex.h"
+#include "mongo/util/fail_point.h"
+#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
#include "mongo/util/processinfo.h"
#include "mongo/util/progress_meter.h"
+#include "mongo/util/quick_exit.h"
namespace mongo {
@@ -59,6 +61,8 @@ using std::unique_ptr;
using std::string;
using std::endl;
+MONGO_FP_DECLARE(crashAfterStartingIndexBuild);
+
/**
* On rollback sets MultiIndexBlock::_needToCleanup to true.
*/
@@ -213,6 +217,17 @@ Status MultiIndexBlock::init(const std::vector<BSONObj>& indexSpecs) {
_backgroundOperation.reset(new BackgroundOperation(ns));
wunit.commit();
+
+ if (MONGO_FAIL_POINT(crashAfterStartingIndexBuild)) {
+ log() << "Index build interrupted due to 'crashAfterStartingIndexBuild' failpoint. Exiting "
+ "after waiting for changes to become durable.";
+ Locker::LockSnapshot lockInfo;
+ _txn->lockState()->saveLockStateAndUnlock(&lockInfo);
+ if (_txn->recoveryUnit()->waitUntilDurable()) {
+ quickExit(EXIT_TEST);
+ }
+ }
+
return Status::OK();
}