author     Vincent Do <do.vincent@live.com>             2016-11-07 15:20:31 -0500
committer  Max Hirschhorn <max.hirschhorn@mongodb.com>  2016-11-07 15:20:31 -0500
commit     8abafecf2812be6df7df798a71572a34b1a50054 (patch)
tree       c4ac691da9ce54a83f55584122eb5f9fb403a573
parent     08352afcca24bfc145240a0fac9d28b978ab77f3 (diff)
SERVER-23510 Fix index_retry and index_no_retry to not be timing based
(cherry picked from commit a97efaa18399fa43bb2a66be204c9f433e71f50b)

Additional changes on top of a97efaa18399fa43bb2a66be204c9f433e71f50b:
- Included the changes from ff285b342fd98416a458a070f05e62c274028c89 to fix
  invalid write concerns.
- Fixed the tests to get the pid using serverStatus().
- Worked around MongoRunner.EXIT_TEST not being defined.
- Removed checks that the parallel shell exited with a non-zero return code.
- Limited the scope of a WriteUnitOfWork to avoid an invariant failure.
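The old tests guessed at timing: they polled currentOp() until an index build looked far enough along, then SIGKILLed the server. The rewrite instead has mongod crash itself through a new 'crashAfterStartingIndexBuild' fail point once the build has been durably recorded, which is deterministic. A minimal sketch of the pattern the new tests use below (the dbpath and collection names here are illustrative; EXIT_TEST is hard-coded to 101 because MongoRunner.EXIT_TEST is not defined on this branch):

    // Sketch: fail-point-driven crash instead of a timing-based SIGKILL.
    var conn = MongoRunner.runMongod({dbpath: MongoRunner.dataPath + 'fp_demo', journal: ''});
    var pid = conn.getDB('test').serverStatus().pid;  // pid via serverStatus(), per this commit

    var createIdx = startParallelShell(function() {
        // Once armed, mongod exits on its own after the index build is
        // durably recorded, so no progress polling is needed.
        assert.commandWorked(db.adminCommand(
            {configureFailPoint: 'crashAfterStartingIndexBuild', mode: 'alwaysOn'}));
        db.getSiblingDB('test').demo.createIndex({a: 1}, {background: true});
    }, conn.port);
    createIdx();

    var EXIT_TEST = 101;  // workaround: MongoRunner.EXIT_TEST is undefined here
    assert.eq(waitProgram(pid), EXIT_TEST, 'mongod should have exited via the fail point');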
-rw-r--r--  jstests/noPassthrough/index_no_retry.js             70
-rw-r--r--  jstests/noPassthrough/index_retry.js                66
-rw-r--r--  jstests/noPassthroughWithMongod/index_no_retry.js  104
-rw-r--r--  jstests/noPassthroughWithMongod/index_retry.js     103
-rw-r--r--  src/mongo/db/catalog/index_create.cpp              135
5 files changed, 213 insertions, 265 deletions
diff --git a/jstests/noPassthrough/index_no_retry.js b/jstests/noPassthrough/index_no_retry.js
new file mode 100644
index 00000000000..2b20bb8573b
--- /dev/null
+++ b/jstests/noPassthrough/index_no_retry.js
@@ -0,0 +1,70 @@
+// Check index rebuild is disabled with --noIndexBuildRetry when MongoDB is killed.
+//
+// This test requires persistence because it assumes data/indices will survive a restart.
+// This test requires journaling because the information that an index build was started
+// must be made durable when the process aborts.
+// @tags: [requires_persistence, requires_journaling]
+(function() {
+    'use strict';
+    var baseName = 'index_no_retry';
+    var dbpath = MongoRunner.dataPath + baseName;
+
+    var conn = MongoRunner.runMongod({dbpath: dbpath, journal: ''});
+    assert.neq(null, conn, 'failed to start mongod');
+
+    var test = conn.getDB("test");
+    var pid = test.serverStatus().pid;
+
+    var name = 'jstests_slownightly_' + baseName;
+    var t = test.getCollection(name);
+    t.drop();
+
+    var bulk = t.initializeUnorderedBulkOp();
+    for (var i = 0; i < 100; ++i) {
+        bulk.insert({a: i});
+    }
+
+    // Make sure the documents are journaled
+    assert.writeOK(bulk.execute({j: true}));
+
+    assert.eq(100, t.count(), 'unexpected number of documents after bulk insert.');
+
+    function abortDuringIndexBuild() {
+        var createIdx = startParallelShell(function() {
+            var coll = db.getSiblingDB('test').getCollection('jstests_slownightly_index_no_retry');
+
+            // Fail point will handle journal flushing and killing the mongod
+            assert.commandWorked(db.adminCommand(
+                {configureFailPoint: 'crashAfterStartingIndexBuild', mode: 'alwaysOn'}));
+            coll.createIndex({a: 1}, {background: true});
+        }, conn.port);
+
+        createIdx();
+    }
+
+    abortDuringIndexBuild();
+
+    var EXIT_TEST = 101;
+    assert.eq(waitProgram(pid),
+              EXIT_TEST,
+              "mongod should have crashed due to the 'crashAfterStartingIndexBuild' " +
+                  "failpoint being set.");
+
+    conn = MongoRunner.runMongod({
+        dbpath: dbpath,
+        journal: '',
+        noIndexBuildRetry: '',
+        restart: true
+    });
+    test = conn.getDB("test");
+    t = test.getCollection(name);
+
+    assert.throws(function() {
+        t.find({}, {_id: 0, a: 1}).hint({a: 1}).next();
+    }, null, 'index {a: 1} was rebuilt in spite of --noIndexBuildRetry');
+
+    var indexes = t.getIndexes();
+    assert.eq(1, indexes.length, 'unfinished indexes in listIndexes result: ' + tojson(indexes));
+
+    MongoRunner.stopMongod(conn.port);
+}());
diff --git a/jstests/noPassthrough/index_retry.js b/jstests/noPassthrough/index_retry.js
new file mode 100644
index 00000000000..7f03628f604
--- /dev/null
+++ b/jstests/noPassthrough/index_retry.js
@@ -0,0 +1,66 @@
+// Check index rebuild when MongoDB is killed.
+//
+// This test requires persistence because it assumes data/indices will survive a restart.
+// This test requires journaling because the information that an index build was started
+// must be made durable when the process aborts.
+// @tags: [requires_persistence, requires_journaling]
+(function() {
+    'use strict';
+    var baseName = 'index_retry';
+    var dbpath = MongoRunner.dataPath + baseName;
+
+    var conn = MongoRunner.runMongod({dbpath: dbpath, journal: ''});
+    assert.neq(null, conn, 'failed to start mongod');
+
+    var test = conn.getDB("test");
+    var pid = test.serverStatus().pid;
+
+    var name = 'jstests_slownightly_' + baseName;
+    var t = test.getCollection(name);
+    t.drop();
+
+    var bulk = t.initializeUnorderedBulkOp();
+    for (var i = 0; i < 100; ++i) {
+        bulk.insert({a: i});
+    }
+
+    // Make sure the documents are journaled
+    assert.writeOK(bulk.execute({j: true}));
+
+    assert.eq(100, t.count(), 'unexpected number of documents after bulk insert.');
+
+    function abortDuringIndexBuild() {
+        var createIdx = startParallelShell(function() {
+            var coll = db.getSiblingDB('test').getCollection('jstests_slownightly_index_retry');
+
+            // Fail point will handle journal flushing and killing the mongod
+            assert.commandWorked(db.adminCommand(
+                {configureFailPoint: 'crashAfterStartingIndexBuild', mode: 'alwaysOn'}));
+            coll.createIndex({a: 1}, {background: true});
+        }, conn.port);
+
+        createIdx();
+    }
+
+    abortDuringIndexBuild();
+
+    var EXIT_TEST = 101;
+    assert.eq(waitProgram(pid),
+              EXIT_TEST,
+              "mongod should have crashed due to the 'crashAfterStartingIndexBuild' " +
+                  "failpoint being set.");
+
+    conn = MongoRunner.runMongod({dbpath: dbpath, journal: '', restart: true});
+    test = conn.getDB("test");
+    t = test.getCollection(name);
+
+    assert.eq(100,
+              t.find({}, {_id: 0, a: 1}).hint({a: 1}).itcount(),
+              'index {a: 1} was expected to be rebuilt on startup');
+    var indexes = t.getIndexes();
+    assert.eq(2,
+              indexes.length,
+              'unexpected number of indexes in listIndexes result: ' + tojson(indexes));
+
+    MongoRunner.stopMongod(conn.port);
+}());
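The two new tests share the crash phase and differ only in the restart flags and assertions. The verification hinges on a property of hint(): hinting an index that does not exist fails the query outright (a "bad hint" error), so assert.throws() proves the index was not rebuilt, while an itcount() over a hinted scan proves it was. In short, with t as in the tests above:

    // Restarted WITHOUT --noIndexBuildRetry (index_retry.js): the interrupted
    // build is retried on startup and the index becomes usable.
    assert.eq(100, t.find({}, {_id: 0, a: 1}).hint({a: 1}).itcount());
    assert.eq(2, t.getIndexes().length);  // _id_ plus a_1

    // Restarted WITH --noIndexBuildRetry (index_no_retry.js): the unfinished
    // index is not rebuilt, so hinting it fails and only _id_ is listed.
    assert.throws(function() {
        t.find().hint({a: 1}).next();
    });
    assert.eq(1, t.getIndexes().length);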
diff --git a/jstests/noPassthroughWithMongod/index_no_retry.js b/jstests/noPassthroughWithMongod/index_no_retry.js
deleted file mode 100644
index 5b7aac4350f..00000000000
--- a/jstests/noPassthroughWithMongod/index_no_retry.js
+++ /dev/null
@@ -1,104 +0,0 @@
-// Check index rebuild is disabled with --noIndexBuildRetry when MongoDB is killed
-(function() {
-    'use strict';
-    var baseName = 'index_retry';
-    var dbpath = MongoRunner.dataPath + baseName;
-    var ports = allocatePorts(1);
-    var conn = MongoRunner.runMongod({
-        dbpath: dbpath,
-        port: ports[0],
-        journal: ''});
-
-    var test = conn.getDB("test");
-
-    var name = 'jstests_slownightly_' + baseName;
-    var t = test.getCollection(name);
-    t.drop();
-
-    // Insert a large number of documents, enough to ensure that an index build on these documents
-    // can be interrupted before complete.
-    var bulk = t.initializeUnorderedBulkOp();
-    for (var i = 0; i < 5e5; ++i) {
-        bulk.insert({ a: i });
-        if (i % 10000 == 0) {
-            print("i: " + i);
-        }
-    }
-    assert.writeOK(bulk.execute());
-
-    function debug(x) {
-        printjson(x);
-    }
-
-    /**
-     * @return if there's a current running index build
-     */
-    function indexBuildInProgress() {
-        var inprog = test.currentOp().inprog;
-        debug(inprog);
-        var indexBuildOpId = -1;
-        inprog.forEach(
-            function( op ) {
-                // Identify the index build as a createIndexes command.
-                // It is assumed that no other clients are concurrently
-                // accessing the 'test' database.
-                if ( op.op == 'query' && 'createIndexes' in op.query ) {
-                    debug(op.opid);
-                    var idxSpec = op.query.indexes[0];
-                    // SERVER-4295 Make sure the index details are there
-                    // we can't assert these things, since there is a race in reporting
-                    // but we won't count if they aren't
-                    if ( "a_1" == idxSpec.name &&
-                         1 == idxSpec.key.a &&
-                         idxSpec.background &&
-                         op.progress &&
-                         (op.progress.done / op.progress.total) > 0.20) {
-                        indexBuildOpId = op.opid;
-                    }
-                }
-            }
-        );
-        return indexBuildOpId != -1;
-    }
-
-    function abortDuringIndexBuild(options) {
-        var createIdx = startParallelShell(
-            'db.' + name + '.createIndex({ a: 1 }, { background: true });',
-            ports[0]);
-
-        // Wait for the index build to start.
-        var times = 0;
-        assert.soon(
-            function() {
-                return indexBuildInProgress() && times++ >= 2;
-            }
-        );
-
-        print("killing the mongod");
-        MongoRunner.stopMongod(ports[0], /* signal */ 9);
-        createIdx();
-    }
-
-    abortDuringIndexBuild();
-
-    conn = MongoRunner.runMongod({
-        dbpath: dbpath,
-        port: ports[0],
-        journal: '',
-        noIndexBuildRetry: '',
-        restart: true});
-    test = conn.getDB("test");
-    t = test.getCollection(name);
-
-    assert.throws(function() { t.find({a: 42}).hint({a: 1}).next(); },
-                  null,
-                  'index {a: 1} was rebuilt in spite of --noIndexBuildRetry');
-
-    var indexes = t.getIndexes();
-    assert.eq(1, indexes.length, 'unfinished indexes in listIndexes result: ' + tojson(indexes));
-
-    print("Index rebuilding disabled successfully");
-
-    MongoRunner.stopMongod(ports[0]);
-    print("SUCCESS!");
-}());
diff --git a/jstests/noPassthroughWithMongod/index_retry.js b/jstests/noPassthroughWithMongod/index_retry.js
deleted file mode 100644
index 1fbb645c418..00000000000
--- a/jstests/noPassthroughWithMongod/index_retry.js
+++ /dev/null
@@ -1,103 +0,0 @@
-// Check index rebuild when MongoDB is killed
-(function() {
-    'use strict';
-    var baseName = 'index_retry';
-    var dbpath = MongoRunner.dataPath + baseName;
-    var ports = allocatePorts(1);
-    var conn = MongoRunner.runMongod({
-        dbpath: dbpath,
-        port: ports[0],
-        journal: ''});
-
-    var test = conn.getDB("test");
-
-    var name = 'jstests_slownightly_' + baseName;
-    var t = test.getCollection(name);
-    t.drop();
-
-    // Insert a large number of documents, enough to ensure that an index build on these documents
-    // can be interrupted before complete.
-    var bulk = t.initializeUnorderedBulkOp();
-    for (var i = 0; i < 5e5; ++i) {
-        bulk.insert({ a: i });
-        if (i % 10000 == 0) {
-            print("i: " + i);
-        }
-    }
-    assert.writeOK(bulk.execute());
-
-    function debug(x) {
-        printjson(x);
-    }
-
-    /**
-     * @return if there's a current running index build
-     */
-    function indexBuildInProgress() {
-        var inprog = test.currentOp().inprog;
-        debug(inprog);
-        var indexBuildOpId = -1;
-        inprog.forEach(
-            function( op ) {
-                // Identify the index build as a createIndexes command.
-                // It is assumed that no other clients are concurrently
-                // accessing the 'test' database.
-                if ( op.op == 'query' && 'createIndexes' in op.query ) {
-                    debug(op.opid);
-                    var idxSpec = op.query.indexes[0];
-                    // SERVER-4295 Make sure the index details are there
-                    // we can't assert these things, since there is a race in reporting
-                    // but we won't count if they aren't
-                    if ( "a_1" == idxSpec.name &&
-                         1 == idxSpec.key.a &&
-                         idxSpec.background &&
-                         op.progress &&
-                         (op.progress.done / op.progress.total) > 0.20) {
-                        indexBuildOpId = op.opid;
-                    }
-                }
-            }
-        );
-        return indexBuildOpId != -1;
-    }
-
-    function abortDuringIndexBuild(options) {
-        var createIdx = startParallelShell(
-            'db.' + name + '.createIndex({ a: 1 }, { background: true });',
-            ports[0]);
-
-        // Wait for the index build to start.
-        var times = 0;
-        assert.soon(
-            function() {
-                return indexBuildInProgress() && times++ >= 2;
-            }
-        );
-
-        print("killing the mongod");
-        MongoRunner.stopMongod(ports[0], /* signal */ 9);
-        createIdx();
-    }
-
-    abortDuringIndexBuild();
-
-    conn = MongoRunner.runMongod({
-        dbpath: dbpath,
-        port: ports[0],
-        journal: '',
-        restart: true});
-    test = conn.getDB("test");
-    t = test.getCollection(name);
-
-    assert.eq({a: 42}, t.find({a: 42}, {_id: 0}).hint({a: 1}).next(),
-              'index {a: 1} was rebuilt on startup');
-
-    var indexes = t.getIndexes();
-    assert.eq(2, indexes.length,
-              'unexpected number of indexes in listIndexes result: ' + tojson(indexes));
-
-    print("Index built");
-
-    MongoRunner.stopMongod(ports[0]);
-    print("SUCCESS!");
-}());
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index 3209b6cfbf5..8645db9a528 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -50,9 +50,12 @@
 #include "mongo/db/repl/oplog.h"
 #include "mongo/db/repl/replication_coordinator_global.h"
 #include "mongo/db/operation_context.h"
+#include "mongo/util/fail_point.h"
+#include "mongo/util/fail_point_service.h"
 #include "mongo/util/log.h"
 #include "mongo/util/processinfo.h"
 #include "mongo/util/progress_meter.h"
+#include "mongo/util/quick_exit.h"
 
 namespace mongo {
 
@@ -60,6 +63,8 @@ using boost::scoped_ptr;
 using std::string;
 using std::endl;
 
+MONGO_FP_DECLARE(crashAfterStartingIndexBuild);
+
 /**
  * On rollback sets MultiIndexBlock::_needToCleanup to true.
  */
@@ -140,82 +145,96 @@ void MultiIndexBlock::removeExistingIndexes(std::vector<BSONObj>* specs) const {
 }
 
 Status MultiIndexBlock::init(const std::vector<BSONObj>& indexSpecs) {
-    WriteUnitOfWork wunit(_txn);
+    {
+        WriteUnitOfWork wunit(_txn);
 
-    invariant(_indexes.empty());
-    _txn->recoveryUnit()->registerChange(new CleanupIndexesVectorOnRollback(this));
+        invariant(_indexes.empty());
+        _txn->recoveryUnit()->registerChange(new CleanupIndexesVectorOnRollback(this));
 
-    const string& ns = _collection->ns().ns();
+        const string& ns = _collection->ns().ns();
 
-    Status status = _collection->getIndexCatalog()->checkUnfinished();
-    if (!status.isOK())
-        return status;
+        Status status = _collection->getIndexCatalog()->checkUnfinished();
+        if (!status.isOK())
+            return status;
 
-    for (size_t i = 0; i < indexSpecs.size(); i++) {
-        BSONObj info = indexSpecs[i];
+        for (size_t i = 0; i < indexSpecs.size(); i++) {
+            BSONObj info = indexSpecs[i];
 
-        string pluginName = IndexNames::findPluginName(info["key"].Obj());
-        if (pluginName.size()) {
-            Status s = _collection->getIndexCatalog()->_upgradeDatabaseMinorVersionIfNeeded(
-                _txn, pluginName);
-            if (!s.isOK())
-                return s;
-        }
+            string pluginName = IndexNames::findPluginName(info["key"].Obj());
+            if (pluginName.size()) {
+                Status s = _collection->getIndexCatalog()->_upgradeDatabaseMinorVersionIfNeeded(
+                    _txn, pluginName);
+                if (!s.isOK())
+                    return s;
+            }
 
-        // Any foreground indexes make all indexes be built in the foreground.
-        _buildInBackground = (_buildInBackground && info["background"].trueValue());
-    }
+            // Any foreground indexes make all indexes be built in the foreground.
+            _buildInBackground = (_buildInBackground && info["background"].trueValue());
+        }
 
-    for (size_t i = 0; i < indexSpecs.size(); i++) {
-        BSONObj info = indexSpecs[i];
-        StatusWith<BSONObj> statusWithInfo =
-            _collection->getIndexCatalog()->prepareSpecForCreate(_txn, info);
-        Status status = statusWithInfo.getStatus();
-        if (!status.isOK())
-            return status;
-        info = statusWithInfo.getValue();
+        for (size_t i = 0; i < indexSpecs.size(); i++) {
+            BSONObj info = indexSpecs[i];
+            StatusWith<BSONObj> statusWithInfo =
+                _collection->getIndexCatalog()->prepareSpecForCreate(_txn, info);
+            Status status = statusWithInfo.getStatus();
+            if (!status.isOK())
+                return status;
+            info = statusWithInfo.getValue();
+
+            IndexToBuild index;
+            index.block =
+                boost::make_shared<IndexCatalog::IndexBuildBlock>(_txn, _collection, info);
+            status = index.block->init();
+            if (!status.isOK())
+                return status;
+
+            index.real = index.block->getEntry()->accessMethod();
+            status = index.real->initializeAsEmpty(_txn);
+            if (!status.isOK())
+                return status;
+
+            if (!_buildInBackground) {
+                // Bulk build process requires foreground building as it assumes nothing is changing
+                // under it.
+                index.bulk.reset(index.real->initiateBulk(_txn));
+            }
 
-        IndexToBuild index;
-        index.block = boost::make_shared<IndexCatalog::IndexBuildBlock>(_txn, _collection, info);
-        status = index.block->init();
-        if (!status.isOK())
-            return status;
+            const IndexDescriptor* descriptor = index.block->getEntry()->descriptor();
 
-        index.real = index.block->getEntry()->accessMethod();
-        status = index.real->initializeAsEmpty(_txn);
-        if (!status.isOK())
-            return status;
+            index.options.logIfError = false;  // logging happens elsewhere if needed.
+            index.options.dupsAllowed = !descriptor->unique() || _ignoreUnique ||
+                repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(descriptor);
 
-        if (!_buildInBackground) {
-            // Bulk build process requires foreground building as it assumes nothing is changing
-            // under it.
-            index.bulk.reset(index.real->initiateBulk(_txn));
-        }
+            log() << "build index on: " << ns << " properties: " << descriptor->toString();
+            if (index.bulk)
+                log() << "\t building index using bulk method";
 
-        const IndexDescriptor* descriptor = index.block->getEntry()->descriptor();
+            // TODO SERVER-14888 Suppress this in cases we don't want to audit.
+            audit::logCreateIndex(_txn->getClient(), &info, descriptor->indexName(), ns);
 
-        index.options.logIfError = false;  // logging happens elsewhere if needed.
-        index.options.dupsAllowed = !descriptor->unique() || _ignoreUnique ||
-            repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(descriptor);
+            _indexes.push_back(index);
+        }
 
-        log() << "build index on: " << ns << " properties: " << descriptor->toString();
-        if (index.bulk)
-            log() << "\t building index using bulk method";
+        // this is so that operations examining the list of indexes know there are more keys to look
+        // at when doing things like in place updates, etc...
+        _collection->infoCache()->addedIndex(_txn);
 
-        // TODO SERVER-14888 Suppress this in cases we don't want to audit.
-        audit::logCreateIndex(_txn->getClient(), &info, descriptor->indexName(), ns);
+        if (_buildInBackground)
+            _backgroundOperation.reset(new BackgroundOperation(ns));
 
-        _indexes.push_back(index);
+        wunit.commit();
     }
 
-    // this is so that operations examining the list of indexes know there are more keys to look
-    // at when doing things like in place updates, etc...
-    _collection->infoCache()->addedIndex(_txn);
-
-    if (_buildInBackground)
-        _backgroundOperation.reset(new BackgroundOperation(ns));
+    if (MONGO_FAIL_POINT(crashAfterStartingIndexBuild)) {
+        log() << "Index build interrupted due to 'crashAfterStartingIndexBuild' failpoint. Exiting "
                 "after waiting for changes to become durable.";
+        Locker::LockSnapshot lockInfo;
+        _txn->lockState()->saveLockStateAndUnlock(&lockInfo);
+        if (_txn->recoveryUnit()->awaitCommit()) {
+            quickExit(EXIT_TEST);
+        }
+    }
 
-    wunit.commit();
     return Status::OK();
 }
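Server-side context for the hunk above: MONGO_FP_DECLARE registers the fail point by name and MONGO_FAIL_POINT() tests it. The check deliberately sits outside the new block that scopes the WriteUnitOfWork, so wunit.commit() has already run before the process exits (exiting mid-unit-of-work is the invariant failure the commit message mentions), and awaitCommit() blocks until the journal holds the catalog entry for the in-progress build, so the restarted server can find it. Fail points declared this way are driven from the shell through the generic fail point command, available when mongod runs with test commands enabled (as MongoRunner does); a brief sketch, with modes beyond the 'alwaysOn' used by these tests shown only for reference:

    // Generic fail point control from the mongo shell.
    var admin = db.getSiblingDB('admin');
    assert.commandWorked(admin.runCommand(
        {configureFailPoint: 'crashAfterStartingIndexBuild', mode: 'alwaysOn'}));
    // Other standard modes include 'off' and {times: N}, e.g.:
    //   {configureFailPoint: '<name>', mode: {times: 1}}
    //   {configureFailPoint: '<name>', mode: 'off'}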