diff options
17 files changed, 360 insertions, 78 deletions
diff --git a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_txns_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_txns_passthrough.yml index aa36c2234a3..c883203614a 100644 --- a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_txns_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_txns_passthrough.yml @@ -33,6 +33,8 @@ selector: # TODO(SERVER-45368) re-enable once collection creation is permitted in cross-shard transactions. - jstests/core/txns/create_collection.js - jstests/core/txns/create_collection_parallel.js + - jstests/core/txns/create_indexes.js + - jstests/core/txns/create_indexes_parallel.js exclude_with_any_tags: - assumes_against_mongod_not_mongos diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_causally_consistent_jscore_txns_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_collections_causally_consistent_jscore_txns_passthrough.yml index 79cf9b8dde7..9debf1e661b 100644 --- a/buildscripts/resmokeconfig/suites/sharded_collections_causally_consistent_jscore_txns_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/sharded_collections_causally_consistent_jscore_txns_passthrough.yml @@ -32,6 +32,8 @@ selector: # TODO(SERVER-45368) re-enable once collection creation is permitted in cross-shard transactions. 
- jstests/core/txns/create_collection.js - jstests/core/txns/create_collection_parallel.js + - jstests/core/txns/create_indexes.js + - jstests/core/txns/create_indexes_parallel.js exclude_with_any_tags: - assumes_against_mongod_not_mongos diff --git a/buildscripts/resmokeconfig/suites/sharded_jscore_txns.yml b/buildscripts/resmokeconfig/suites/sharded_jscore_txns.yml index 6e4ba3a3368..1b5722b7eba 100644 --- a/buildscripts/resmokeconfig/suites/sharded_jscore_txns.yml +++ b/buildscripts/resmokeconfig/suites/sharded_jscore_txns.yml @@ -29,6 +29,8 @@ selector: # TODO(SERVER-45368) re-enable once collection creation is permitted in cross-shard transactions. - jstests/core/txns/create_collection.js - jstests/core/txns/create_collection_parallel.js + - jstests/core/txns/create_indexes.js + - jstests/core/txns/create_indexes_parallel.js exclude_with_any_tags: diff --git a/buildscripts/resmokeconfig/suites/sharded_jscore_txns_sharded_collections.yml b/buildscripts/resmokeconfig/suites/sharded_jscore_txns_sharded_collections.yml index 5cf115e842f..d617ebc9504 100644 --- a/buildscripts/resmokeconfig/suites/sharded_jscore_txns_sharded_collections.yml +++ b/buildscripts/resmokeconfig/suites/sharded_jscore_txns_sharded_collections.yml @@ -28,6 +28,8 @@ selector: # TODO(SERVER-45368) re-enable once collection creation is permitted in cross-shard transactions. - jstests/core/txns/create_collection.js - jstests/core/txns/create_collection_parallel.js + - jstests/core/txns/create_indexes.js + - jstests/core/txns/create_indexes_parallel.js exclude_with_any_tags: - assumes_against_mongod_not_mongos diff --git a/jstests/core/txns/create_indexes.js b/jstests/core/txns/create_indexes.js new file mode 100644 index 00000000000..c13d9b94f6f --- /dev/null +++ b/jstests/core/txns/create_indexes.js @@ -0,0 +1,91 @@ +/* Tests simple cases of creating indexes inside a multi-document transaction, both + * committing and aborting. 
+ * + * @tags: [uses_transactions, + * # Creating collections inside multi-document transactions is supported only in v4.4 + * # onwards. + * requires_fcv_44] + */ +(function() { +"use strict"; + +load("jstests/libs/create_index_txn_helpers.js"); + +let doCreateIndexesTest = function(explicitCollectionCreate) { + const session = db.getMongo().startSession({causalConsistency: false}); + const collName = "create_new_indexes"; + const secondCollName = collName + "_second"; + + let sessionDB = session.getDatabase("test"); + let sessionColl = sessionDB[collName]; + let secondSessionColl = sessionDB[secondCollName]; + sessionColl.drop({writeConcern: {w: "majority"}}); + secondSessionColl.drop({writeConcern: {w: "majority"}}); + + jsTest.log("Testing createIndexes in a transaction"); + session.startTransaction({writeConcern: {w: "majority"}}); + createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate); + session.commitTransaction(); + assert.eq(sessionColl.find({}).itcount(), 1); + assert.eq(sessionColl.getIndexes().length, 2); + + sessionColl.drop({writeConcern: {w: "majority"}}); + + jsTest.log("Testing multiple createIndexes in a transaction"); + session.startTransaction({writeConcern: {w: "majority"}}); + createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate); + createIndexAndCRUDInTxn(sessionDB, secondCollName, explicitCollectionCreate); + session.commitTransaction(); + assert.eq(sessionColl.find({}).itcount(), 1); + assert.eq(secondSessionColl.find({}).itcount(), 1); + assert.eq(sessionColl.getIndexes().length, 2); + assert.eq(secondSessionColl.getIndexes().length, 2); + + sessionColl.drop({writeConcern: {w: "majority"}}); + secondSessionColl.drop({writeConcern: {w: "majority"}}); + + jsTest.log("Testing createIndexes in a transaction that aborts"); + session.startTransaction({writeConcern: {w: "majority"}}); + createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate); + 
assert.commandWorked(session.abortTransaction_forTesting()); + + assert.eq(sessionColl.find({}).itcount(), 0); + assert.eq(sessionColl.getIndexes().length, 0); + + sessionColl.drop({writeConcern: {w: "majority"}}); + + jsTest.log( + "Testing createIndexes with conflicting index specs in a transaction that aborts (SHOULD FAIL)"); + session.startTransaction({writeConcern: {w: "majority"}}); + createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate); + assert.commandFailedWithCode( + sessionColl.runCommand({createIndexes: collName, indexes: [conflictingIndexSpecs]}), + ErrorCodes.IndexKeySpecsConflict); + assert.commandFailedWithCode(session.abortTransaction_forTesting(), + ErrorCodes.NoSuchTransaction); + assert.eq(sessionColl.find({}).itcount(), 0); + assert.eq(sessionColl.getIndexes().length, 0); + + sessionColl.drop({writeConcern: {w: "majority"}}); + + jsTest.log( + "Testing createIndexes on a non-empty collection created in the same transaction (SHOULD FAIL)"); + session.startTransaction({writeConcern: {w: "majority"}}); + assert.commandWorked(sessionDB.runCommand({create: collName})); + assert.commandWorked(sessionColl.insert({a: 1})); + + assert.commandFailedWithCode( + sessionColl.runCommand({createIndexes: collName, indexes: [indexSpecs]}), + ErrorCodes.OperationNotSupportedInTransaction); + assert.commandFailedWithCode(session.abortTransaction_forTesting(), + ErrorCodes.NoSuchTransaction); + + assert.eq(sessionColl.find({}).itcount(), 0); + assert.eq(sessionColl.getIndexes().length, 0); + + session.endSession(); +}; + +doCreateIndexesTest(false /*explicitCollectionCreate*/); +doCreateIndexesTest(true /*explicitCollectionCreate*/); +}()); diff --git a/jstests/core/txns/create_indexes_parallel.js b/jstests/core/txns/create_indexes_parallel.js new file mode 100644 index 00000000000..45c3e9c5ee0 --- /dev/null +++ b/jstests/core/txns/create_indexes_parallel.js @@ -0,0 +1,110 @@ +/* Tests parallel transactions with createIndexes. 
+ * + * @tags: [uses_transactions, + * # Creating collections inside multi-document transactions is supported only in v4.4 + * # onwards. + * requires_fcv_44] + */ +(function() { +"use strict"; + +load("jstests/libs/create_index_txn_helpers.js"); + +let doParallelCreateIndexesTest = function(explicitCollectionCreate) { + const dbName = "test"; + const collName = "create_new_collection"; + const distinctCollName = collName + "_second"; + const session = db.getMongo().getDB(dbName).getMongo().startSession({causalConsistency: false}); + const secondSession = + db.getMongo().getDB(dbName).getMongo().startSession({causalConsistency: false}); + + let sessionDB = session.getDatabase("test"); + let secondSessionDB = secondSession.getDatabase("test"); + let sessionColl = sessionDB[collName]; + let secondSessionColl = secondSessionDB[collName]; + sessionColl.drop({writeConcern: {w: "majority"}}); + secondSessionColl.drop({writeConcern: {w: "majority"}}); + let distinctSessionColl = sessionDB[distinctCollName]; + distinctSessionColl.drop({writeConcern: {w: "majority"}}); + + jsTest.log("Testing duplicate sequential createIndexes, both succeed"); + + session.startTransaction({writeConcern: {w: "majority"}}); // txn 1 + secondSession.startTransaction({writeConcern: {w: "majority"}}); // txn 2 + + createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate); + jsTest.log("Committing transaction 1"); + session.commitTransaction(); + assert.eq(sessionColl.find({}).itcount(), 1); + assert.eq(sessionColl.getIndexes().length, 2); + + // Ensuring existing index succeeds. 
+ assert.commandWorked( + secondSessionColl.runCommand({createIndexes: collName, indexes: [indexSpecs]})); + secondSession.commitTransaction(); + assert.eq(sessionColl.find({}).itcount(), 1); + assert.eq(sessionColl.getIndexes().length, 2); + + sessionColl.drop({writeConcern: {w: "majority"}}); + + jsTest.log("Testing conflicting sequential createIndexes, second fails"); + session.startTransaction({writeConcern: {w: "majority"}}); // txn 1 + secondSession.startTransaction({writeConcern: {w: "majority"}}); // txn 2 + + createIndexAndCRUDInTxn(secondSessionDB, collName, explicitCollectionCreate); + jsTest.log("Committing transaction 2"); + secondSession.commitTransaction(); + assert.eq(secondSessionColl.find({}).itcount(), 1); + assert.eq(secondSessionColl.getIndexes().length, 2); + + assert.commandFailedWithCode( + sessionColl.runCommand({createIndexes: collName, indexes: [conflictingIndexSpecs]}), + ErrorCodes.IndexKeySpecsConflict); + assert.commandFailedWithCode(session.abortTransaction_forTesting(), + ErrorCodes.NoSuchTransaction); + + assert.eq(sessionColl.find({}).itcount(), 1); + assert.eq(sessionColl.getIndexes().length, 2); + + sessionColl.drop({writeConcern: {w: "majority"}}); + + jsTest.log( + "Testing duplicate createIndexes in parallel, both attempt to commit, second to commit fails"); + + secondSession.startTransaction({writeConcern: {w: "majority"}}); // txn 2 + createIndexAndCRUDInTxn(secondSessionDB, collName, explicitCollectionCreate); + + session.startTransaction({writeConcern: {w: "majority"}}); // txn 1 + createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate); + + jsTest.log("Committing transaction 2"); + secondSession.commitTransaction(); + jsTest.log("Committing transaction 1 (SHOULD FAIL)"); + // WriteConflict occurs here because in all test cases (i.e., explicitCollectionCreate is true + // versus false), we must create a collection as part of each transaction. 
The conflicting + // collection creation causes the WriteConflict. + assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.WriteConflict); + assert.eq(sessionColl.find({}).itcount(), 1); + assert.eq(sessionColl.getIndexes().length, 2); + + sessionColl.drop({writeConcern: {w: "majority"}}); + secondSessionColl.drop({writeConcern: {w: "majority"}}); + distinctSessionColl.drop({writeConcern: {w: "majority"}}); + + jsTest.log("Testing distinct createIndexes in parallel, both successfully commit."); + session.startTransaction({writeConcern: {w: "majority"}}); // txn 1 + createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate); + + secondSession.startTransaction({writeConcern: {w: "majority"}}); // txn 2 + createIndexAndCRUDInTxn(secondSessionDB, distinctCollName, explicitCollectionCreate); + + session.commitTransaction(); + secondSession.commitTransaction(); + + secondSession.endSession(); + session.endSession(); +}; + +doParallelCreateIndexesTest(false /*explicitCollectionCreate*/); +doParallelCreateIndexesTest(true /*explicitCollectionCreate*/); +}()); diff --git a/jstests/libs/create_index_txn_helpers.js b/jstests/libs/create_index_txn_helpers.js new file mode 100644 index 00000000000..c9b239fa80b --- /dev/null +++ b/jstests/libs/create_index_txn_helpers.js @@ -0,0 +1,23 @@ +/** + * Helper function shared by createIndexes inside txns tests. 
+ */ + +const indexSpecs = { + key: {a: 1}, + name: "a_1" +}; +const conflictingIndexSpecs = { + key: {a: -1}, + name: "a_1" +}; + +const createIndexAndCRUDInTxn = function(sessionDB, collName, explicitCollCreate) { + if (explicitCollCreate) { + assert.commandWorked(sessionDB.runCommand({create: collName})); + } + let sessionColl = sessionDB[collName]; + assert.commandWorked(sessionColl.runCommand({createIndexes: collName, indexes: [indexSpecs]})); + assert.commandWorked(sessionColl.insert({a: 1})); + assert.eq(sessionColl.find({a: 1}).itcount(), 1); + assert.eq(sessionColl.find({}).itcount(), 1); +}; diff --git a/src/mongo/db/catalog/index_build_block.cpp b/src/mongo/db/catalog/index_build_block.cpp index 1efc401f13c..64fb8c10cdd 100644 --- a/src/mongo/db/catalog/index_build_block.cpp +++ b/src/mongo/db/catalog/index_build_block.cpp @@ -160,8 +160,8 @@ void IndexBuildBlock::success(OperationContext* opCtx, Collection* collection) { // Being in a WUOW means all timestamping responsibility can be pushed up to the caller. invariant(opCtx->lockState()->inAWriteUnitOfWork()); - invariant( - UncommittedCollections::get(opCtx).hasExclusiveAccessToCollection(opCtx, collection->ns())); + UncommittedCollections::get(opCtx).invariantHasExclusiveAccessToCollection(opCtx, + collection->ns()); if (_indexBuildInterceptor) { // Skipped records are only checked when we complete an index build as primary. 
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp index 0eaff8e3bf6..8ca12e22356 100644 --- a/src/mongo/db/catalog/index_catalog_impl.cpp +++ b/src/mongo/db/catalog/index_catalog_impl.cpp @@ -419,8 +419,8 @@ IndexCatalogEntry* IndexCatalogImpl::createIndexEntry(OperationContext* opCtx, StatusWith<BSONObj> IndexCatalogImpl::createIndexOnEmptyCollection(OperationContext* opCtx, BSONObj spec) { - invariant(UncommittedCollections::get(opCtx).hasExclusiveAccessToCollection(opCtx, - _collection->ns())); + UncommittedCollections::get(opCtx).invariantHasExclusiveAccessToCollection(opCtx, + _collection->ns()); invariant(_collection->numRecords(opCtx) == 0, str::stream() << "Collection must be empty. Collection: " << _collection->ns() << " UUID: " << _collection->uuid() diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp index b506c2dc9cc..06cf823e09c 100644 --- a/src/mongo/db/catalog/multi_index_block.cpp +++ b/src/mongo/db/catalog/multi_index_block.cpp @@ -117,8 +117,7 @@ void MultiIndexBlock::cleanUpAfterBuild(OperationContext* opCtx, } auto nss = collection->ns(); - invariant(UncommittedCollections::get(opCtx).hasExclusiveAccessToCollection(opCtx, nss), - nss.toString()); + UncommittedCollections::get(opCtx).invariantHasExclusiveAccessToCollection(opCtx, nss); while (true) { try { diff --git a/src/mongo/db/catalog/uncommitted_collections.cpp b/src/mongo/db/catalog/uncommitted_collections.cpp index 9563da3afd5..4d56b2a777b 100644 --- a/src/mongo/db/catalog/uncommitted_collections.cpp +++ b/src/mongo/db/catalog/uncommitted_collections.cpp @@ -127,12 +127,8 @@ void UncommittedCollections::commit(OperationContext* opCtx, map->_nssIndex.erase(nss); } -bool UncommittedCollections::hasExclusiveAccessToCollection(OperationContext* opCtx, - const NamespaceString& nss) const { - if (opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X)) { - return true; - } - +bool 
UncommittedCollections::isUncommittedCollection(OperationContext* opCtx, + const NamespaceString& nss) const { if (_resourcesPtr->_nssIndex.count(nss) == 1) { // If the collection is found in the local catalog, the appropriate locks must have already // been taken. @@ -143,4 +139,11 @@ bool UncommittedCollections::hasExclusiveAccessToCollection(OperationContext* op return false; } +void UncommittedCollections::invariantHasExclusiveAccessToCollection( + OperationContext* opCtx, const NamespaceString& nss) const { + invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X) || + isUncommittedCollection(opCtx, nss), + nss.toString()); +} + } // namespace mongo diff --git a/src/mongo/db/catalog/uncommitted_collections.h b/src/mongo/db/catalog/uncommitted_collections.h index 65b813fb668..8cc6e2ea813 100644 --- a/src/mongo/db/catalog/uncommitted_collections.h +++ b/src/mongo/db/catalog/uncommitted_collections.h @@ -92,7 +92,9 @@ public: Timestamp createTs, UncommittedCollectionsMap* map); - bool hasExclusiveAccessToCollection(OperationContext* opCtx, const NamespaceString& nss) const; + bool isUncommittedCollection(OperationContext* opCtx, const NamespaceString& nss) const; + void invariantHasExclusiveAccessToCollection(OperationContext* opCtx, + const NamespaceString& nss) const; static void clear(UncommittedCollectionsMap* map) { map->_collections.clear(); diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp index f9788719875..b534acba487 100644 --- a/src/mongo/db/commands/create_indexes.cpp +++ b/src/mongo/db/commands/create_indexes.cpp @@ -375,8 +375,9 @@ void checkCollectionShardingState(OperationContext* opCtx, const NamespaceString } /** - * Attempts to create indexes in `specs` on a non-existent collection with namespace `ns`, thereby - * implicitly creating the collection. 
+ * Attempts to create indexes in `specs` on a non-existent collection (or empty collection created + * in the same multi-document transaction) with namespace `ns`. In the former case, the collection + * is implicitly created. * Returns a BSONObj containing fields to be appended to the result of the calling function. * `commitQuorum` is passed only to be appended to the result, for completeness. It is otherwise * unused. @@ -385,7 +386,8 @@ void checkCollectionShardingState(OperationContext* opCtx, const NamespaceString BSONObj runCreateIndexesOnNewCollection(OperationContext* opCtx, const NamespaceString& ns, const std::vector<BSONObj>& specs, - boost::optional<CommitQuorumOptions> commitQuorum) { + boost::optional<CommitQuorumOptions> commitQuorum, + bool createCollImplicitly) { BSONObjBuilder createResult; WriteUnitOfWork wunit(opCtx); @@ -396,39 +398,44 @@ BSONObj runCreateIndexesOnNewCollection(OperationContext* opCtx, "Cannot create indexes on a view", !db || !ViewCatalog::get(db)->lookup(opCtx, ns.ns())); - // We need to create the collection. - BSONObjBuilder builder; - builder.append("create", ns.coll()); - CollectionOptions options; - builder.appendElements(options.toBSON()); - BSONObj idIndexSpec; - - if (MONGO_unlikely(hangBeforeCreateIndexesCollectionCreate.shouldFail())) { - // Simulate a scenario where a conflicting collection creation occurs - // mid-index build. - log() << "Hanging create collection due to failpoint " - "'hangBeforeCreateIndexesCollectionCreate'"; - hangBeforeCreateIndexesCollectionCreate.pauseWhileSet(); - } + if (createCollImplicitly) { + // We need to create the collection. + BSONObjBuilder builder; + builder.append("create", ns.coll()); + CollectionOptions options; + builder.appendElements(options.toBSON()); + BSONObj idIndexSpec; + + if (MONGO_unlikely(hangBeforeCreateIndexesCollectionCreate.shouldFail())) { + // Simulate a scenario where a conflicting collection creation occurs + // mid-index build. 
+ log() << "Hanging create collection due to failpoint " + "'hangBeforeCreateIndexesCollectionCreate'"; + hangBeforeCreateIndexesCollectionCreate.pauseWhileSet(); + } - auto createStatus = - createCollection(opCtx, ns.db().toString(), builder.obj().getOwned(), idIndexSpec); - if (createStatus == ErrorCodes::NamespaceExists) { - // We should retry the createIndexes command so we can perform the checks for index - // and/or collection existence again. - throw WriteConflictException(); - } + auto createStatus = + createCollection(opCtx, ns.db().toString(), builder.obj().getOwned(), idIndexSpec); - uassertStatusOK(createStatus); + if (createStatus == ErrorCodes::NamespaceExists) { + throw WriteConflictException(); + } + + uassertStatusOK(createStatus); + } - // Obtain the newly-created collection object. + // By this point, we have exclusive access to our collection, either because we created the + // collection implicitly as part of createIndexes or because the collection was created earlier + // in the same multi-document transaction. auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, ns); - invariant( - UncommittedCollections::get(opCtx).hasExclusiveAccessToCollection(opCtx, collection->ns())); - /** - * TODO(SERVER-44849) Ensure the collection, which may or may not have been created earlier - * in the same multi-document transaction, is empty. 
- */ + UncommittedCollections::get(opCtx).invariantHasExclusiveAccessToCollection(opCtx, + collection->ns()); + invariant(opCtx->inMultiDocumentTransaction() || createCollImplicitly); + + uassert(ErrorCodes::OperationNotSupportedInTransaction, + str::stream() << "Cannot create new indexes on non-empty collection " << ns + << " in a multi-document transaction.", + collection->numRecords(opCtx) == 0); const int numIndexesBefore = IndexBuildsCoordinator::getNumIndexesTotal(opCtx, collection); auto filteredSpecs = @@ -497,33 +504,39 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx, return true; } - // TODO SERVER-44849 Remove once createIndexes on new indexes is permitted - // inside transactions. - uassert(ErrorCodes::OperationNotSupportedInTransaction, - str::stream() << "Cannot create new indexes on " << ns - << " in a multi-document transaction.", - !opCtx->inMultiDocumentTransaction()); - auto collection = CollectionCatalog::get(opCtx).lookupCollectionByNamespace(opCtx, ns); - if (!collection) { - auto createIndexesResult = - runCreateIndexesOnNewCollection(opCtx, ns, specs, commitQuorum); - // No further sources of WriteConflicts can occur at this point, so it is safe to - // append elements to `result` inside the writeConflictRetry loop. - result.appendBool(kCreateCollectionAutomaticallyFieldName, true); - result.appendElements(createIndexesResult); - return true; + if (collection && + !UncommittedCollections::get(opCtx).isUncommittedCollection(opCtx, ns)) { + // The collection exists and was not created in the same multi-document transaction + // as the createIndexes. + collectionUUID = collection->uuid(); + result.appendBool(kCreateCollectionAutomaticallyFieldName, false); + return false; } - collectionUUID = collection->uuid(); - result.appendBool(kCreateCollectionAutomaticallyFieldName, false); - return false; + bool createCollImplicitly = collection ? 
false : true; + + auto createIndexesResult = runCreateIndexesOnNewCollection( + opCtx, ns, specs, commitQuorum, createCollImplicitly); + // No further sources of WriteConflicts can occur at this point, so it is safe to + // append elements to `result` inside the writeConflictRetry loop. + result.appendBool(kCreateCollectionAutomaticallyFieldName, true); + result.appendElements(createIndexesResult); + return true; }); if (indexExists) { // No need to proceed if the index either already existed or has just been built. return true; } + + // If the index does not exist by this point, the index build must go through the index + // builds coordinator and take an exclusive lock. We should not take exclusive locks inside + // of transactions, so we fail early here if we are inside of a transaction. + uassert(ErrorCodes::OperationNotSupportedInTransaction, + str::stream() << "Cannot create new indexes on existing collection " << ns + << " in a multi-document transaction.", + !opCtx->inMultiDocumentTransaction()); } // Use AutoStatsTracker to update Top. 
diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp index c7bff176eee..974e6d004dd 100644 --- a/src/mongo/db/index_builds_coordinator.cpp +++ b/src/mongo/db/index_builds_coordinator.cpp @@ -84,7 +84,7 @@ constexpr StringData kUniqueFieldName = "unique"_sd; void checkShardKeyRestrictions(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& newIdxKey) { - invariant(UncommittedCollections::get(opCtx).hasExclusiveAccessToCollection(opCtx, nss)); + UncommittedCollections::get(opCtx).invariantHasExclusiveAccessToCollection(opCtx, nss); const auto metadata = CollectionShardingState::get(opCtx, nss)->getCurrentMetadata(); if (!metadata->isSharded()) @@ -974,8 +974,8 @@ void IndexBuildsCoordinator::createIndexesOnEmptyCollection(OperationContext* op invariant(!specs.empty(), str::stream() << collectionUUID); auto nss = collection->ns(); - invariant( - UncommittedCollections::get(opCtx).hasExclusiveAccessToCollection(opCtx, collection->ns())); + UncommittedCollections::get(opCtx).invariantHasExclusiveAccessToCollection(opCtx, + collection->ns()); auto opObserver = opCtx->getServiceContext()->getOpObserver(); @@ -2103,8 +2103,8 @@ std::vector<BSONObj> IndexBuildsCoordinator::prepareSpecListForCreate( Collection* collection, const NamespaceString& nss, const std::vector<BSONObj>& indexSpecs) { - invariant( - UncommittedCollections::get(opCtx).hasExclusiveAccessToCollection(opCtx, collection->ns())); + UncommittedCollections::get(opCtx).invariantHasExclusiveAccessToCollection(opCtx, + collection->ns()); invariant(collection); // During secondary oplog application, the index specs have already been normalized in the diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp index b514d532f5e..060c8f7a799 100644 --- a/src/mongo/db/op_observer_impl.cpp +++ b/src/mongo/db/op_observer_impl.cpp @@ -247,17 +247,28 @@ void OpObserverImpl::onCreateIndex(OperationContext* opCtx, CollectionUUID uuid, 
BSONObj indexDoc, bool fromMigrate) { - BSONObjBuilder builder; - builder.append("createIndexes", nss.coll()); - builder.appendElements(indexDoc); + auto txnParticipant = TransactionParticipant::get(opCtx); + const bool inMultiDocumentTransaction = + txnParticipant && opCtx->writesAreReplicated() && txnParticipant.transactionIsOpen(); - MutableOplogEntry oplogEntry; - oplogEntry.setOpType(repl::OpTypeEnum::kCommand); - oplogEntry.setNss(nss.getCommandNS()); - oplogEntry.setUuid(uuid); - oplogEntry.setObject(builder.done()); - oplogEntry.setFromMigrateIfTrue(fromMigrate); - logOperation(opCtx, &oplogEntry); + if (inMultiDocumentTransaction) { + invariant(serverGlobalParams.featureCompatibility.getVersion() == + ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44); + auto operation = MutableOplogEntry::makeCreateIndexesCommand(nss, uuid, indexDoc); + txnParticipant.addTransactionOperation(opCtx, operation); + } else { + BSONObjBuilder builder; + builder.append("createIndexes", nss.coll()); + builder.appendElements(indexDoc); + + MutableOplogEntry oplogEntry; + oplogEntry.setOpType(repl::OpTypeEnum::kCommand); + oplogEntry.setNss(nss.getCommandNS()); + oplogEntry.setUuid(uuid); + oplogEntry.setObject(builder.done()); + oplogEntry.setFromMigrateIfTrue(fromMigrate); + logOperation(opCtx, &oplogEntry); + } } void OpObserverImpl::onStartIndexBuild(OperationContext* opCtx, diff --git a/src/mongo/db/repl/oplog_entry.cpp b/src/mongo/db/repl/oplog_entry.cpp index e38732f5df0..2fb86786ed3 100644 --- a/src/mongo/db/repl/oplog_entry.cpp +++ b/src/mongo/db/repl/oplog_entry.cpp @@ -215,6 +215,23 @@ ReplOperation MutableOplogEntry::makeCreateCommand(const NamespaceString nss, return op; } +ReplOperation MutableOplogEntry::makeCreateIndexesCommand(const NamespaceString nss, + CollectionUUID uuid, + const BSONObj& indexDoc) { + ReplOperation op; + op.setOpType(OpTypeEnum::kCommand); + op.setNss(nss.getCommandNS()); + op.setUuid(uuid); + + BSONObjBuilder builder; + 
builder.append("createIndexes", nss.coll()); + builder.appendElements(indexDoc); + + op.setObject(builder.obj()); + + return op; +} + ReplOperation MutableOplogEntry::makeDeleteOperation(const NamespaceString& nss, boost::optional<UUID> uuid, const BSONObj& docToDelete) { diff --git a/src/mongo/db/repl/oplog_entry.h b/src/mongo/db/repl/oplog_entry.h index 4181a30c1ad..970627810ae 100644 --- a/src/mongo/db/repl/oplog_entry.h +++ b/src/mongo/db/repl/oplog_entry.h @@ -95,6 +95,10 @@ public: const mongo::CollectionOptions& options, const BSONObj& idIndex); + static ReplOperation makeCreateIndexesCommand(const NamespaceString nss, + CollectionUUID uuid, + const BSONObj& indexDoc); + static BSONObj makeCreateCollCmdObj(const NamespaceString& collectionName, const mongo::CollectionOptions& options, const BSONObj& idIndex); @@ -223,6 +227,7 @@ public: // Make helper functions accessible. using MutableOplogEntry::getOpTime; using MutableOplogEntry::makeCreateCommand; + using MutableOplogEntry::makeCreateIndexesCommand; using MutableOplogEntry::makeDeleteOperation; using MutableOplogEntry::makeInsertOperation; using MutableOplogEntry::makeUpdateOperation; |