37 files changed, 526 insertions, 168 deletions
diff --git a/jstests/replsets/initial_sync_applier_error.js b/jstests/replsets/initial_sync_applier_error.js
index 8c858f606a8..0bd98a82e7f 100644
--- a/jstests/replsets/initial_sync_applier_error.js
+++ b/jstests/replsets/initial_sync_applier_error.js
@@ -4,9 +4,8 @@
  * 2) copies all non-local databases from the source and fetches operations from sync source; and
  * 3) applies operations from the source after op_start1.
  *
- * This test creates, deletes and creates again an index on the source between phases 1 and 2.
- * The index that we create again is not exactly the same as the first index we created, but
- * it will have the same name. The secondary will initially fail to apply the operations in phase 3
+ * This test renames a collection on the source between phases 1 and 2, but renameCollection is not
+ * supported in initial sync. The secondary will initially fail to apply the command in phase 3
  * and subsequently have to retry the initial sync.
  */
@@ -54,23 +53,18 @@
     };

     checkLog(secondary, 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
-    assert.commandWorked(coll.createIndex({content: "text"}, {default_language: "spanish"}));
-    assert.commandWorked(coll.dropIndex("content_text"));
-    assert.commandWorked(coll.createIndex({content: "text"}, {default_language: "english"}));
-
+    var newCollName = name + '_2';
+    assert.commandWorked(coll.renameCollection(newCollName, true));
     assert.commandWorked(secondary.getDB('admin').runCommand(
         {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));

-    checkLog(secondary, 'content_text already exists with different options');
+    checkLog(secondary, 'Applying renameCollection not supported');
     checkLog(secondary, 'initial sync done');

     replSet.awaitReplication();
     replSet.awaitSecondaryNodes();

-    var textIndexes =
-        secondary.getDB('test').getCollection(name).getIndexes().filter(function(index) {
-            return index.name == "content_text";
-        });
-    assert.eq(1, textIndexes.length);
-    assert.eq("english", textIndexes[0].default_language);
+    assert.eq(0, secondary.getDB('test').getCollection(name).count());
+    assert.eq(1, secondary.getDB('test').getCollection(newCollName).count());
+    assert.eq("hi", secondary.getDB('test').getCollection(newCollName).findOne({_id: 0}).content);
 })();
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 60b8ce717a1..757077efb56 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -674,10 +674,7 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
             IndexAccessMethod* iam = ii.accessMethod(descriptor);

             InsertDeleteOptions options;
-            options.logIfError = false;
-            options.dupsAllowed =
-                !(KeyPattern::isIdKeyPattern(descriptor->keyPattern()) || descriptor->unique()) ||
-                repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(descriptor);
+            IndexCatalog::prepareInsertDeleteOptions(txn, descriptor, &options);
             UpdateTicket* updateTicket = new UpdateTicket();
             updateTickets.mutableMap()[descriptor] = updateTicket;
             Status ret = iam->validateUpdate(txn,
@@ -1105,7 +1102,10 @@ public:
         // There's no need to compute the prefixes of the indexed fields that cause the
         // index to be multikey when validating the index keys.
         MultikeyPaths* multikeyPaths = nullptr;
-        iam->getKeys(recordBson, &documentKeySet, multikeyPaths);
+        iam->getKeys(recordBson,
+                     IndexAccessMethod::GetKeysMode::kEnforceConstraints,
+                     &documentKeySet,
+                     multikeyPaths);

         if (!descriptor->isMultikey(_txn) && documentKeySet.size() > 1) {
             string msg = str::stream() << "Index " << descriptor->indexName()
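
The theme of the whole patch is visible in the two hunks above: key generation now takes an explicit mode, and strict paths such as collection validation pass kEnforceConstraints so key-generation errors still surface there, while oplog application relaxes them. A minimal standalone model of the two modes (all names here are illustrative stand-ins, not the MongoDB types):

```cpp
#include <iostream>
#include <set>
#include <stdexcept>
#include <string>

// Hypothetical stand-in for IndexAccessMethod::GetKeysMode.
enum class GetKeysMode { kRelaxConstraints, kEnforceConstraints };

std::set<std::string> getKeys(const std::string& doc, GetKeysMode mode) {
    try {
        if (doc.size() > 8)  // stand-in for a real constraint, e.g. KeyTooLong
            throw std::runtime_error("key too long");
        return {doc};
    } catch (const std::exception&) {
        if (mode == GetKeysMode::kEnforceConstraints)
            throw;    // validation, primary writes: the caller sees the error
        return {};    // oplog application: the document simply gets no index entries
    }
}

int main() {
    // An oplog applier relaxes constraints and indexes nothing for the bad doc...
    std::cout << getKeys("averylongvalue", GetKeysMode::kRelaxConstraints).size() << "\n";  // 0
    // ...while validation enforces them and the error propagates.
    try {
        getKeys("averylongvalue", GetKeysMode::kEnforceConstraints);
    } catch (const std::exception& e) {
        std::cout << e.what() << "\n";
    }
}
```
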
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index f8d3a04f4c1..ffbfea515fd 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -1249,23 +1249,12 @@ const IndexDescriptor* IndexCatalog::refreshEntry(OperationContext* txn,

 // ---------------------------

-namespace {
-bool isDupsAllowed(IndexDescriptor* desc) {
-    bool isUnique = desc->unique() || KeyPattern::isIdKeyPattern(desc->keyPattern());
-    if (!isUnique)
-        return true;
-
-    return repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(desc);
-}
-}
-
 Status IndexCatalog::_indexFilteredRecords(OperationContext* txn,
                                            IndexCatalogEntry* index,
                                            const std::vector<BsonRecord>& bsonRecords,
                                            int64_t* keysInsertedOut) {
     InsertDeleteOptions options;
-    options.logIfError = false;
-    options.dupsAllowed = isDupsAllowed(index->descriptor());
+    prepareInsertDeleteOptions(txn, index->descriptor(), &options);

     for (auto bsonRecord : bsonRecords) {
         int64_t inserted;
@@ -1306,8 +1295,8 @@ Status IndexCatalog::_unindexRecord(OperationContext* txn,
                                     bool logIfError,
                                     int64_t* keysDeletedOut) {
     InsertDeleteOptions options;
+    prepareInsertDeleteOptions(txn, index->descriptor(), &options);
     options.logIfError = logIfError;
-    options.dupsAllowed = isDupsAllowed(index->descriptor());

     // For unindex operations, dupsAllowed=false really means that it is safe to delete anything
     // that matches the key, without checking the RecordID, since dups are impossible. We need
@@ -1376,6 +1365,25 @@ BSONObj IndexCatalog::fixIndexKey(const BSONObj& key) {
     return key;
 }

+void IndexCatalog::prepareInsertDeleteOptions(OperationContext* txn,
+                                              const IndexDescriptor* desc,
+                                              InsertDeleteOptions* options) {
+    auto replCoord = repl::ReplicationCoordinator::get(txn);
+    if (replCoord->shouldRelaxIndexConstraints(NamespaceString(desc->parentNS()))) {
+        options->getKeysMode = IndexAccessMethod::GetKeysMode::kRelaxConstraints;
+    } else {
+        options->getKeysMode = IndexAccessMethod::GetKeysMode::kEnforceConstraints;
+    }
+
+    // Don't allow dups for Id key. Allow dups for non-unique keys or when constraints relaxed.
+    if (KeyPattern::isIdKeyPattern(desc->keyPattern())) {
+        options->dupsAllowed = false;
+    } else {
+        options->dupsAllowed = !desc->unique() ||
+            options->getKeysMode == IndexAccessMethod::GetKeysMode::kRelaxConstraints;
+    }
+}
+
 StatusWith<BSONObj> IndexCatalog::_fixIndexSpec(OperationContext* txn,
                                                 Collection* collection,
                                                 const BSONObj& spec) {
diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h
index bd0c919076a..4016b7860d4 100644
--- a/src/mongo/db/catalog/index_catalog.h
+++ b/src/mongo/db/catalog/index_catalog.h
@@ -48,6 +48,7 @@ class Collection;

 class IndexDescriptor;
 class IndexAccessMethod;
+struct InsertDeleteOptions;

 /**
  * how many: 1 per Collection
@@ -337,6 +338,14 @@ public:

     static BSONObj fixIndexKey(const BSONObj& key);

+    /**
+     * Fills out 'options' in order to indicate whether to allow dups or relax
+     * index constraints, as needed by replication.
+     */
+    static void prepareInsertDeleteOptions(OperationContext* txn,
+                                           const IndexDescriptor* desc,
+                                           InsertDeleteOptions* options);
+
 private:
     static const BSONObj _idObj;  // { _id : 1 }
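
prepareInsertDeleteOptions() centralizes the dupsAllowed policy that was previously duplicated in Collection::updateDocument, _indexFilteredRecords, and MultiIndexBlock::init. The resulting truth table, as a standalone sketch (illustrative types only, not the real classes):

```cpp
#include <cassert>

enum class GetKeysMode { kRelaxConstraints, kEnforceConstraints };

struct InsertDeleteOptions {
    bool logIfError = false;
    bool dupsAllowed = false;
    GetKeysMode getKeysMode = GetKeysMode::kEnforceConstraints;
};

// Mirrors the logic of IndexCatalog::prepareInsertDeleteOptions in the patch.
InsertDeleteOptions prepare(bool isIdIndex, bool unique, bool relaxed) {
    InsertDeleteOptions o;
    o.getKeysMode = relaxed ? GetKeysMode::kRelaxConstraints : GetKeysMode::kEnforceConstraints;
    if (isIdIndex) {
        o.dupsAllowed = false;  // _id uniqueness is never relaxed
    } else {
        o.dupsAllowed = !unique || relaxed;  // non-unique, or unique but replaying the oplog
    }
    return o;
}

int main() {
    assert(!prepare(true, true, true).dupsAllowed);     // _id index: never
    assert(prepare(false, false, false).dupsAllowed);   // non-unique: always
    assert(!prepare(false, true, false).dupsAllowed);   // unique on a writable node: no
    assert(prepare(false, true, true).dupsAllowed);     // unique while applying ops: yes
}
```
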
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index 823dacff550..d71d26b13b7 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -215,9 +215,11 @@ StatusWith<std::vector<BSONObj>> MultiIndexBlock::init(const std::vector<BSONObj

         const IndexDescriptor* descriptor = index.block->getEntry()->descriptor();

-        index.options.logIfError = false;  // logging happens elsewhere if needed.
-        index.options.dupsAllowed = !descriptor->unique() || _ignoreUnique ||
-            repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(descriptor);
+        IndexCatalog::prepareInsertDeleteOptions(_txn, descriptor, &index.options);
+        index.options.dupsAllowed = index.options.dupsAllowed || _ignoreUnique;
+        if (_ignoreUnique) {
+            index.options.getKeysMode = IndexAccessMethod::GetKeysMode::kRelaxConstraints;
+        }

         log() << "build index on: " << ns << " properties: " << descriptor->toString();
         if (index.bulk)
diff --git a/src/mongo/db/commands/feature_compatibility_version.cpp b/src/mongo/db/commands/feature_compatibility_version.cpp
index 32d14e643e1..51e4652c4b1 100644
--- a/src/mongo/db/commands/feature_compatibility_version.cpp
+++ b/src/mongo/db/commands/feature_compatibility_version.cpp
@@ -219,7 +219,7 @@ void FeatureCompatibilityVersion::set(OperationContext* txn, StringData version)
                   repl::ReplicationCoordinator::get(txn->getServiceContext())
                       ->canAcceptWritesFor(nss));

-        IndexBuilder builder(k32IncompatibleIndexSpec);
+        IndexBuilder builder(k32IncompatibleIndexSpec, false);
         auto status = builder.buildInForeground(txn, autoDB.getDb());
         uassertStatusOK(status);

@@ -303,7 +303,7 @@ void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* txn,
             ScopedTransaction transaction(txn, MODE_IX);
             AutoGetOrCreateDb autoDB(txn, nss.db(), MODE_X);

-            IndexBuilder builder(k32IncompatibleIndexSpec);
+            IndexBuilder builder(k32IncompatibleIndexSpec, false);
             auto status = builder.buildInForeground(txn, autoDB.getDb());
             uassertStatusOK(status);
         }
diff --git a/src/mongo/db/exec/working_set_common.cpp b/src/mongo/db/exec/working_set_common.cpp
index 966e32eec61..a9035311bba 100644
--- a/src/mongo/db/exec/working_set_common.cpp
+++ b/src/mongo/db/exec/working_set_common.cpp
@@ -118,7 +118,10 @@ bool WorkingSetCommon::fetch(OperationContext* txn,
             // There's no need to compute the prefixes of the indexed fields that cause the index to
             // be multikey when ensuring the keyData is still valid.
             MultikeyPaths* multikeyPaths = nullptr;
-            member->keyData[i].index->getKeys(member->obj.value(), &keys, multikeyPaths);
+            member->keyData[i].index->getKeys(member->obj.value(),
+                                              IndexAccessMethod::GetKeysMode::kEnforceConstraints,
+                                              &keys,
+                                              multikeyPaths);
             if (!keys.count(member->keyData[i].keyData)) {
                 // document would no longer be at this position in the index.
                 return false;
diff --git a/src/mongo/db/index/2d_access_method.cpp b/src/mongo/db/index/2d_access_method.cpp
index ed63659593a..603df21bbdb 100644
--- a/src/mongo/db/index/2d_access_method.cpp
+++ b/src/mongo/db/index/2d_access_method.cpp
@@ -47,9 +47,9 @@ TwoDAccessMethod::TwoDAccessMethod(IndexCatalogEntry* btreeState, SortedDataInte
 }

 /** Finds the key objects to put in an index */
-void TwoDAccessMethod::getKeys(const BSONObj& obj,
-                               BSONObjSet* keys,
-                               MultikeyPaths* multikeyPaths) const {
+void TwoDAccessMethod::doGetKeys(const BSONObj& obj,
+                                 BSONObjSet* keys,
+                                 MultikeyPaths* multikeyPaths) const {
     ExpressionKeysPrivate::get2DKeys(obj, _params, keys, NULL);
 }
diff --git a/src/mongo/db/index/2d_access_method.h b/src/mongo/db/index/2d_access_method.h
index 5d181904ead..72b0e641e03 100644
--- a/src/mongo/db/index/2d_access_method.h
+++ b/src/mongo/db/index/2d_access_method.h
@@ -60,7 +60,7 @@ private:
      * This function ignores the 'multikeyPaths' pointer because 2d indexes don't support tracking
      * path-level multikey information.
      */
-    void getKeys(const BSONObj& obj, BSONObjSet* keys, MultikeyPaths* multikeyPaths) const final;
+    void doGetKeys(const BSONObj& obj, BSONObjSet* keys, MultikeyPaths* multikeyPaths) const final;

     TwoDIndexingParams _params;
 };
diff --git a/src/mongo/db/index/btree_access_method.cpp b/src/mongo/db/index/btree_access_method.cpp
index d47eca4f3a5..1e1d7f4e090 100644
--- a/src/mongo/db/index/btree_access_method.cpp
+++ b/src/mongo/db/index/btree_access_method.cpp
@@ -61,9 +61,9 @@ BtreeAccessMethod::BtreeAccessMethod(IndexCatalogEntry* btreeState, SortedDataIn
     massert(16745, "Invalid index version for key generation.", _keyGenerator);
 }

-void BtreeAccessMethod::getKeys(const BSONObj& obj,
-                                BSONObjSet* keys,
-                                MultikeyPaths* multikeyPaths) const {
+void BtreeAccessMethod::doGetKeys(const BSONObj& obj,
+                                  BSONObjSet* keys,
+                                  MultikeyPaths* multikeyPaths) const {
     _keyGenerator->getKeys(obj, keys, multikeyPaths);
 }
diff --git a/src/mongo/db/index/btree_access_method.h b/src/mongo/db/index/btree_access_method.h
index ed5389d5f79..caed0eccab5 100644
--- a/src/mongo/db/index/btree_access_method.h
+++ b/src/mongo/db/index/btree_access_method.h
@@ -48,7 +48,7 @@ public:
     BtreeAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);

 private:
-    void getKeys(const BSONObj& obj, BSONObjSet* keys, MultikeyPaths* multikeyPaths) const final;
+    void doGetKeys(const BSONObj& obj, BSONObjSet* keys, MultikeyPaths* multikeyPaths) const final;

     // Our keys differ for V0 and V1.
     std::unique_ptr<BtreeKeyGenerator> _keyGenerator;
diff --git a/src/mongo/db/index/fts_access_method.cpp b/src/mongo/db/index/fts_access_method.cpp
index e54e1760f55..6741b5ef95e 100644
--- a/src/mongo/db/index/fts_access_method.cpp
+++ b/src/mongo/db/index/fts_access_method.cpp
@@ -34,9 +34,9 @@ namespace mongo {
 FTSAccessMethod::FTSAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
     : IndexAccessMethod(btreeState, btree), _ftsSpec(btreeState->descriptor()->infoObj()) {}

-void FTSAccessMethod::getKeys(const BSONObj& obj,
-                              BSONObjSet* keys,
-                              MultikeyPaths* multikeyPaths) const {
+void FTSAccessMethod::doGetKeys(const BSONObj& obj,
+                                BSONObjSet* keys,
+                                MultikeyPaths* multikeyPaths) const {
     ExpressionKeysPrivate::getFTSKeys(obj, _ftsSpec, keys);
 }
diff --git a/src/mongo/db/index/fts_access_method.h b/src/mongo/db/index/fts_access_method.h
index aa3a7dfc23e..8f843e32bc6 100644
--- a/src/mongo/db/index/fts_access_method.h
+++ b/src/mongo/db/index/fts_access_method.h
@@ -51,7 +51,7 @@ private:
     * This function ignores the 'multikeyPaths' pointer because text indexes don't support tracking
     * path-level multikey information.
     */
-    void getKeys(const BSONObj& obj, BSONObjSet* keys, MultikeyPaths* multikeyPaths) const final;
+    void doGetKeys(const BSONObj& obj, BSONObjSet* keys, MultikeyPaths* multikeyPaths) const final;

     fts::FTSSpec _ftsSpec;
 };
diff --git a/src/mongo/db/index/hash_access_method.cpp b/src/mongo/db/index/hash_access_method.cpp
index 34f4323fede..87efa44b6a3 100644
--- a/src/mongo/db/index/hash_access_method.cpp
+++ b/src/mongo/db/index/hash_access_method.cpp
@@ -51,9 +51,9 @@ HashAccessMethod::HashAccessMethod(IndexCatalogEntry* btreeState, SortedDataInte
     _collator = btreeState->getCollator();
 }

-void HashAccessMethod::getKeys(const BSONObj& obj,
-                               BSONObjSet* keys,
-                               MultikeyPaths* multikeyPaths) const {
+void HashAccessMethod::doGetKeys(const BSONObj& obj,
+                                 BSONObjSet* keys,
+                                 MultikeyPaths* multikeyPaths) const {
     ExpressionKeysPrivate::getHashKeys(
         obj, _hashedField, _seed, _hashVersion, _descriptor->isSparse(), _collator, keys);
 }
diff --git a/src/mongo/db/index/hash_access_method.h b/src/mongo/db/index/hash_access_method.h
index e73fc2c623e..fdcf3aa6183 100644
--- a/src/mongo/db/index/hash_access_method.h
+++ b/src/mongo/db/index/hash_access_method.h
@@ -52,7 +52,7 @@ private:
     * This function ignores the 'multikeyPaths' pointer because hashed indexes don't support
     * tracking path-level multikey information.
     */
-    void getKeys(const BSONObj& obj, BSONObjSet* keys, MultikeyPaths* multikeyPaths) const final;
+    void doGetKeys(const BSONObj& obj, BSONObjSet* keys, MultikeyPaths* multikeyPaths) const final;

     // Only one of our fields is hashed. This is the field name for it.
     std::string _hashedField;
diff --git a/src/mongo/db/index/haystack_access_method.cpp b/src/mongo/db/index/haystack_access_method.cpp
index 3322a6f0aa9..877e94c0c7d 100644
--- a/src/mongo/db/index/haystack_access_method.cpp
+++ b/src/mongo/db/index/haystack_access_method.cpp
@@ -62,9 +62,9 @@ HaystackAccessMethod::HaystackAccessMethod(IndexCatalogEntry* btreeState,
     uassert(16774, "no non-geo fields specified", _otherFields.size());
 }

-void HaystackAccessMethod::getKeys(const BSONObj& obj,
-                                   BSONObjSet* keys,
-                                   MultikeyPaths* multikeyPaths) const {
+void HaystackAccessMethod::doGetKeys(const BSONObj& obj,
+                                     BSONObjSet* keys,
+                                     MultikeyPaths* multikeyPaths) const {
     ExpressionKeysPrivate::getHaystackKeys(obj, _geoField, _otherFields, _bucketSize, keys);
 }
diff --git a/src/mongo/db/index/haystack_access_method.h b/src/mongo/db/index/haystack_access_method.h
index 0f5e519e1e5..a6ff68bdd9f 100644
--- a/src/mongo/db/index/haystack_access_method.h
+++ b/src/mongo/db/index/haystack_access_method.h
@@ -75,7 +75,7 @@ private:
     * This function ignores the 'multikeyPaths' pointer because geoHaystack indexes don't support
     * tracking path-level multikey information.
     */
-    void getKeys(const BSONObj& obj, BSONObjSet* keys, MultikeyPaths* multikeyPaths) const final;
+    void doGetKeys(const BSONObj& obj, BSONObjSet* keys, MultikeyPaths* multikeyPaths) const final;

     std::string _geoField;
     std::vector<std::string> _otherFields;
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index a4268c12055..a751492d99c 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -113,10 +113,11 @@ IndexAccessMethod::IndexAccessMethod(IndexCatalogEntry* btreeState, SortedDataIn
 }

 bool IndexAccessMethod::ignoreKeyTooLong(OperationContext* txn) {
-    // Ignore this error if we're on a secondary or if the user requested it
-    const auto canAcceptWritesForNs = repl::ReplicationCoordinator::get(txn)->canAcceptWritesFor(
-        NamespaceString(_btreeState->ns()));
-    return !canAcceptWritesForNs || !failIndexKeyTooLong;
+    // Ignore this error if we cannot write to the collection or if the user requested it
+    const auto shouldRelaxConstraints =
+        repl::ReplicationCoordinator::get(txn)->shouldRelaxIndexConstraints(
+            NamespaceString(_btreeState->ns()));
+    return shouldRelaxConstraints || !failIndexKeyTooLong;
 }

 // Find the keys for obj, put them in the tree pointing to loc
@@ -130,7 +131,7 @@ Status IndexAccessMethod::insert(OperationContext* txn,
     BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
     MultikeyPaths multikeyPaths;
     // Delegate to the subclass.
-    getKeys(obj, &keys, &multikeyPaths);
+    getKeys(obj, options.getKeysMode, &keys, &multikeyPaths);

     Status ret = Status::OK();
     for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
@@ -210,7 +211,7 @@ Status IndexAccessMethod::remove(OperationContext* txn,
     // multikey when removing a document since the index metadata isn't updated when keys are
     // deleted.
     MultikeyPaths* multikeyPaths = nullptr;
-    getKeys(obj, &keys, multikeyPaths);
+    getKeys(obj, options.getKeysMode, &keys, multikeyPaths);

     for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
         removeOneKey(txn, *i, loc, options.dupsAllowed);
@@ -229,7 +230,7 @@ Status IndexAccessMethod::touch(OperationContext* txn, const BSONObj& obj) {
     // There's no need to compute the prefixes of the indexed fields that cause the index to be
    // multikey when paging a document's index entries into memory.
     MultikeyPaths* multikeyPaths = nullptr;
-    getKeys(obj, &keys, multikeyPaths);
+    getKeys(obj, GetKeysMode::kEnforceConstraints, &keys, multikeyPaths);

     std::unique_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(txn));
     for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
@@ -251,7 +252,7 @@ RecordId IndexAccessMethod::findSingle(OperationContext* txn, const BSONObj& req
         // For performance, call get keys only if there is a non-simple collation.
         BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
         MultikeyPaths* multikeyPaths = nullptr;
-        getKeys(requestedKey, &keys, multikeyPaths);
+        getKeys(requestedKey, GetKeysMode::kEnforceConstraints, &keys, multikeyPaths);
         invariant(keys.size() == 1);
         actualKey = *keys.begin();
     } else {
@@ -340,11 +341,11 @@ Status IndexAccessMethod::validateUpdate(OperationContext* txn,
         // index to be multikey when the old version of the document was written since the index
         // metadata isn't updated when keys are deleted.
         MultikeyPaths* multikeyPaths = nullptr;
-        getKeys(from, &ticket->oldKeys, multikeyPaths);
+        getKeys(from, options.getKeysMode, &ticket->oldKeys, multikeyPaths);
     }

     if (!indexFilter || indexFilter->matchesBSON(to)) {
-        getKeys(to, &ticket->newKeys, &ticket->newMultikeyPaths);
+        getKeys(to, options.getKeysMode, &ticket->newKeys, &ticket->newMultikeyPaths);
     }

     ticket->loc = record;
@@ -423,7 +424,8 @@ Status IndexAccessMethod::BulkBuilder::insert(OperationContext* txn,
                                               int64_t* numInserted) {
     BSONObjSet keys = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
     MultikeyPaths multikeyPaths;
-    _real->getKeys(obj, &keys, &multikeyPaths);
+
+    _real->getKeys(obj, options.getKeysMode, &keys, &multikeyPaths);

     _everGeneratedMultipleKeys = _everGeneratedMultipleKeys || (keys.size() > 1);
@@ -536,6 +538,52 @@ Status IndexAccessMethod::commitBulk(OperationContext* txn,
     return Status::OK();
 }

+void IndexAccessMethod::getKeys(const BSONObj& obj,
+                                GetKeysMode mode,
+                                BSONObjSet* keys,
+                                MultikeyPaths* multikeyPaths) const {
+    static stdx::unordered_set<int> whiteList{// Btree
+                                              ErrorCodes::KeyTooLong,
+                                              ErrorCodes::CannotIndexParallelArrays,
+                                              // FTS
+                                              16732,
+                                              16733,
+                                              16675,
+                                              // Hash
+                                              16766,
+                                              // Haystack
+                                              16775,
+                                              16776,
+                                              // 2dsphere geo
+                                              16755,
+                                              16756,
+                                              // 2d geo
+                                              16804,
+                                              13067,
+                                              13068,
+                                              13026,
+                                              13027};
+    try {
+        doGetKeys(obj, keys, multikeyPaths);
+    } catch (const UserException& ex) {
+        if (mode == GetKeysMode::kEnforceConstraints) {
+            throw;
+        }
+
+        // Suppress indexing errors when mode is kRelaxConstraints.
+        keys->clear();
+        if (multikeyPaths) {
+            multikeyPaths->clear();
+        }
+        // Only suppress the errors in the whitelist.
+        if (whiteList.find(ex.getCode()) == whiteList.end()) {
+            throw;
+        }
+        LOG(1) << "Ignoring indexing error for idempotency reasons: " << redact(ex)
+               << " when getting index keys of " << redact(obj);
+    }
+}
+
 }  // namespace mongo

 #include "mongo/db/sorter/sorter.cpp"
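
The new IndexAccessMethod::getKeys() wrapper above is where suppression actually happens: in kRelaxConstraints mode it clears the output sets and swallows only a fixed whitelist of key-generation errors, so unexpected failures still propagate. A reduced standalone model of that control flow (error codes and types here are stand-ins):

```cpp
#include <iostream>
#include <set>
#include <unordered_set>

struct IndexingError { int code; };
enum class GetKeysMode { kRelaxConstraints, kEnforceConstraints };

void doGetKeys(int doc, std::set<int>* keys) {
    if (doc < 0) throw IndexingError{16755};  // stand-in for a geo parsing error
    keys->insert(doc);
}

void getKeys(int doc, GetKeysMode mode, std::set<int>* keys) {
    static const std::unordered_set<int> whiteList{16755, 13068 /* etc. */};
    try {
        doGetKeys(doc, keys);
    } catch (const IndexingError& ex) {
        if (mode == GetKeysMode::kEnforceConstraints) throw;
        keys->clear();  // a half-built output must not leak through
        if (!whiteList.count(ex.code)) throw;  // only known, benign errors are suppressed
        std::cerr << "ignoring indexing error " << ex.code << " for idempotency\n";
    }
}

int main() {
    std::set<int> keys;
    getKeys(-1, GetKeysMode::kRelaxConstraints, &keys);  // logs and leaves keys empty
    std::cout << keys.size() << "\n";                    // prints 0
}
```

Note the ordering, which this sketch copies from the patch: the outputs are cleared before the whitelist check, so even an error that is rethrown leaves no partial keys behind.
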
diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h
index b3647ed3a09..ac38b0a70d3 100644
--- a/src/mongo/db/index/index_access_method.h
+++ b/src/mongo/db/index/index_access_method.h
@@ -252,7 +252,15 @@ public:
                                  std::set<RecordId>* dups);

     /**
+     * Specifies whether getKeys should relax the index constraints or not.
+     */
+    enum class GetKeysMode { kRelaxConstraints, kEnforceConstraints };
+
+    /**
      * Fills 'keys' with the keys that should be generated for 'obj' on this index.
+     * Based on 'mode', it will honor or ignore index constraints, e.g. duplicated key, key too
+     * long, and geo index parsing errors. The ignoring of constraints is for replication due to
+     * idempotency reasons. In those cases, the generated 'keys' will be empty.
      *
      * If the 'multikeyPaths' pointer is non-null, then it must point to an empty vector. If this
      * index type supports tracking path-level multikey information, then this function resizes
@@ -260,9 +268,10 @@ public:
      * element with the prefixes of the indexed field that would cause this index to be multikey as
      * a result of inserting 'keys'.
      */
-    virtual void getKeys(const BSONObj& obj,
-                         BSONObjSet* keys,
-                         MultikeyPaths* multikeyPaths) const = 0;
+    void getKeys(const BSONObj& obj,
+                 GetKeysMode mode,
+                 BSONObjSet* keys,
+                 MultikeyPaths* multikeyPaths) const;

     /**
      * Splits the sets 'left' and 'right' into two vectors, the first containing the elements that
@@ -276,7 +285,22 @@ public:
                              const BSONObjSet& left,
                              const BSONObjSet& right);

 protected:
-    // Determines whether it's OK to ignore ErrorCodes::KeyTooLong for this OperationContext
+    /**
+     * Fills 'keys' with the keys that should be generated for 'obj' on this index.
+     *
+     * If the 'multikeyPaths' pointer is non-null, then it must point to an empty vector. If this
+     * index type supports tracking path-level multikey information, then this function resizes
+     * 'multikeyPaths' to have the same number of elements as the index key pattern and fills each
+     * element with the prefixes of the indexed field that would cause this index to be multikey as
+     * a result of inserting 'keys'.
+     */
+    virtual void doGetKeys(const BSONObj& obj,
+                           BSONObjSet* keys,
+                           MultikeyPaths* multikeyPaths) const = 0;
+
+    /**
+     * Determines whether it's OK to ignore ErrorCodes::KeyTooLong for this OperationContext
+     */
     bool ignoreKeyTooLong(OperationContext* txn);

     IndexCatalogEntry* _btreeState;  // owned by IndexCatalogEntry
@@ -324,13 +348,15 @@ private:
  * Flags we can set for inserts and deletes (and updates, which are kind of both).
  */
 struct InsertDeleteOptions {
-    InsertDeleteOptions() : logIfError(false), dupsAllowed(false) {}
-
     // If there's an error, log() it.
-    bool logIfError;
+    bool logIfError = false;

     // Are duplicate keys allowed in the index?
-    bool dupsAllowed;
+    bool dupsAllowed = false;
+
+    // Should we relax the index constraints?
+    IndexAccessMethod::GetKeysMode getKeysMode =
+        IndexAccessMethod::GetKeysMode::kEnforceConstraints;
 };

 }  // namespace mongo
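
Renaming the virtual hook to doGetKeys() while making getKeys() a non-virtual wrapper is the classic non-virtual interface pattern: each index type keeps its own key generation, but the constraint policy lives in exactly one place and cannot be bypassed by a subclass. A minimal illustrative sketch (whitelist handling omitted, names are not the real classes):

```cpp
#include <set>
#include <string>

enum class GetKeysMode { kRelaxConstraints, kEnforceConstraints };

class IndexAccessMethodBase {
public:
    // Public entry point: non-virtual, applies the mode policy uniformly.
    void getKeys(const std::string& obj, GetKeysMode mode, std::set<std::string>* keys) const {
        try {
            doGetKeys(obj, keys);
        } catch (...) {
            if (mode == GetKeysMode::kEnforceConstraints) throw;
            keys->clear();  // relaxed mode: this sketch suppresses everything
        }
    }
    virtual ~IndexAccessMethodBase() = default;

private:
    // Per-index-type hook: subclasses override this, never the wrapper.
    virtual void doGetKeys(const std::string& obj, std::set<std::string>* keys) const = 0;
};

class BtreeLikeAccessMethod : public IndexAccessMethodBase {
private:
    void doGetKeys(const std::string& obj, std::set<std::string>* keys) const final {
        keys->insert(obj);  // the real code delegates to a key generator
    }
};

int main() {
    BtreeLikeAccessMethod m;
    std::set<std::string> keys;
    m.getKeys("a", GetKeysMode::kEnforceConstraints, &keys);
    return keys.size() == 1 ? 0 : 1;
}
```
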
diff --git a/src/mongo/db/index/s2_access_method.cpp b/src/mongo/db/index/s2_access_method.cpp
index 8ba47c1a6be..995da4dbeb7 100644
--- a/src/mongo/db/index/s2_access_method.cpp
+++ b/src/mongo/db/index/s2_access_method.cpp
@@ -139,9 +139,9 @@ StatusWith<BSONObj> S2AccessMethod::fixSpec(const BSONObj& specObj) {
     return specObj;
 }

-void S2AccessMethod::getKeys(const BSONObj& obj,
-                             BSONObjSet* keys,
-                             MultikeyPaths* multikeyPaths) const {
+void S2AccessMethod::doGetKeys(const BSONObj& obj,
+                               BSONObjSet* keys,
+                               MultikeyPaths* multikeyPaths) const {
     ExpressionKeysPrivate::getS2Keys(obj, _descriptor->keyPattern(), _params, keys, multikeyPaths);
 }
diff --git a/src/mongo/db/index/s2_access_method.h b/src/mongo/db/index/s2_access_method.h
index c0104fe144d..ad0044dc128 100644
--- a/src/mongo/db/index/s2_access_method.h
+++ b/src/mongo/db/index/s2_access_method.h
@@ -63,7 +63,7 @@ private:
     * and fills each element with the prefixes of the indexed field that would cause this index to
     * be multikey as a result of inserting 'keys'.
     */
-    void getKeys(const BSONObj& obj, BSONObjSet* keys, MultikeyPaths* multikeyPaths) const final;
+    void doGetKeys(const BSONObj& obj, BSONObjSet* keys, MultikeyPaths* multikeyPaths) const final;

     S2IndexingParams _params;
diff --git a/src/mongo/db/index_builder.cpp b/src/mongo/db/index_builder.cpp
index eb9baaefacc..d749a330184 100644
--- a/src/mongo/db/index_builder.cpp
+++ b/src/mongo/db/index_builder.cpp
@@ -66,9 +66,10 @@ void _setBgIndexStarting() {
 }
 }  // namespace

-IndexBuilder::IndexBuilder(const BSONObj& index)
+IndexBuilder::IndexBuilder(const BSONObj& index, bool relaxConstraints)
     : BackgroundJob(true /* self-delete */),
       _index(index.getOwned()),
+      _relaxConstraints(relaxConstraints),
       _name(str::stream() << "repl index builder " << _indexBuildCount.addAndFetch(1)) {}

 IndexBuilder::~IndexBuilder() {}
@@ -162,7 +163,9 @@ Status IndexBuilder::_build(OperationContext* txn,
     try {
         status = indexer.init(_index).getStatus();
-        if (status.code() == ErrorCodes::IndexAlreadyExists) {
+        if (status == ErrorCodes::IndexAlreadyExists ||
+            (status == ErrorCodes::IndexOptionsConflict && _relaxConstraints)) {
+            LOG(1) << "Ignoring indexing error: " << redact(status);
             if (allowBackgroundBuilding) {
                 // Must set this in case anyone is waiting for this build.
                 _setBgIndexStarting();
diff --git a/src/mongo/db/index_builder.h b/src/mongo/db/index_builder.h
index 2e87a8198a3..bdca706c108 100644
--- a/src/mongo/db/index_builder.h
+++ b/src/mongo/db/index_builder.h
@@ -59,10 +59,12 @@ class OperationContext;
  * ensured by the replication system, since commands are effectively run single-threaded
  * by the replication applier, and index builds are treated as commands even though they look
  * like inserts on system.indexes.
+ * The argument "relaxConstraints" specifies whether we should honor or ignore index constraints,
+ * The ignoring of constraints is for replication due to idempotency reasons.
  */
 class IndexBuilder : public BackgroundJob {
 public:
-    IndexBuilder(const BSONObj& index);
+    IndexBuilder(const BSONObj& index, bool relaxConstraints);

     virtual ~IndexBuilder();

     virtual void run();
@@ -88,6 +90,7 @@ private:
                   Lock::DBLock* dbLock) const;

     const BSONObj _index;
+    const bool _relaxConstraints;
     std::string _name;  // name of this builder, not related to the index

     static AtomicUInt32 _indexBuildCount;
 };
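
IndexBuilder now receives relaxConstraints at construction and widens the set of initialization errors it tolerates: IndexAlreadyExists was already ignored, and IndexOptionsConflict is additionally ignored when replaying the oplog, which is what makes the create/drop/re-create-with-different-options sequence re-applyable. The filter, modeled standalone:

```cpp
#include <cassert>

enum class Code { OK, IndexAlreadyExists, IndexOptionsConflict, Other };

// Mirrors the status check in IndexBuilder::_build from the patch.
bool shouldIgnoreInitError(Code status, bool relaxConstraints) {
    return status == Code::IndexAlreadyExists ||
        (status == Code::IndexOptionsConflict && relaxConstraints);
}

int main() {
    assert(shouldIgnoreInitError(Code::IndexAlreadyExists, false));    // always benign
    assert(!shouldIgnoreInitError(Code::IndexOptionsConflict, false)); // user error on a primary
    assert(shouldIgnoreInitError(Code::IndexOptionsConflict, true));   // benign during re-apply
    assert(!shouldIgnoreInitError(Code::Other, true));                 // real failures surface
}
```
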
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index a2fd477ab9d..ff45edb0fc1 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -753,17 +753,19 @@ Status applyOperation_inlock(OperationContext* txn,
                 indexSpec = bob.obj();
             }

+            bool relaxIndexConstraints =
+                ReplicationCoordinator::get(txn)->shouldRelaxIndexConstraints(indexNss);
             if (indexSpec["background"].trueValue()) {
                 Lock::TempRelease release(txn->lockState());
                 if (txn->lockState()->isLocked()) {
                     // If TempRelease fails, background index build will deadlock.
                     LOG(3) << "apply op: building background index " << indexSpec
                            << " in the foreground because temp release failed";
-                    IndexBuilder builder(indexSpec);
+                    IndexBuilder builder(indexSpec, relaxIndexConstraints);
                     Status status = builder.buildInForeground(txn, db);
                     uassertStatusOK(status);
                 } else {
-                    IndexBuilder* builder = new IndexBuilder(indexSpec);
+                    IndexBuilder* builder = new IndexBuilder(indexSpec, relaxIndexConstraints);
                     // This spawns a new thread and returns immediately.
                     builder->go();
                     // Wait for thread to start and register itself
@@ -771,7 +773,7 @@ Status applyOperation_inlock(OperationContext* txn,
                 }
                 txn->recoveryUnit()->abandonSnapshot();
             } else {
-                IndexBuilder builder(indexSpec);
+                IndexBuilder builder(indexSpec, relaxIndexConstraints);
                 Status status = builder.buildInForeground(txn, db);
                 uassertStatusOK(status);
             }
diff --git a/src/mongo/db/repl/replication_coordinator.h b/src/mongo/db/repl/replication_coordinator.h
index 019add3e0cf..66fabbc11a0 100644
--- a/src/mongo/db/repl/replication_coordinator.h
+++ b/src/mongo/db/repl/replication_coordinator.h
@@ -275,10 +275,12 @@ public:
                                         bool slaveOk) = 0;

     /**
-     * Returns true if this node should ignore unique index constraints on new documents.
-     * Currently this is needed for nodes in STARTUP2, RECOVERING, and ROLLBACK states.
+     * Returns true if this node should ignore index constraints for idempotency reasons.
+     *
+     * The namespace "ns" is passed in because the "local" database is usually writable
+     * and we need to enforce the constraints for it.
     */
-    virtual bool shouldIgnoreUniqueIndex(const IndexDescriptor* idx) = 0;
+    virtual bool shouldRelaxIndexConstraints(const NamespaceString& ns) = 0;

     /**
      * Updates our internal tracking of the last OpTime applied for the given slave
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 7402541964c..301d454226b 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -1877,33 +1877,8 @@ bool ReplicationCoordinatorImpl::isInPrimaryOrSecondaryState() const {
     return _canServeNonLocalReads.loadRelaxed();
 }

-bool ReplicationCoordinatorImpl::shouldIgnoreUniqueIndex(const IndexDescriptor* idx) {
-    if (!idx->unique()) {
-        return false;
-    }
-    // Never ignore _id index
-    if (idx->isIdIndex()) {
-        return false;
-    }
-    if (nsToDatabaseSubstring(idx->parentNS()) == kLocalDB) {
-        // always enforce on local
-        return false;
-    }
-    stdx::lock_guard<stdx::mutex> lock(_mutex);
-    if (getReplicationMode() != modeReplSet) {
-        return false;
-    }
-    // see SERVER-6671
-    MemberState ms = _getMemberState_inlock();
-    switch (ms.s) {
-        case MemberState::RS_SECONDARY:
-        case MemberState::RS_RECOVERING:
-        case MemberState::RS_ROLLBACK:
-        case MemberState::RS_STARTUP2:
-            return true;
-        default:
-            return false;
-    }
+bool ReplicationCoordinatorImpl::shouldRelaxIndexConstraints(const NamespaceString& ns) {
+    return !canAcceptWritesFor(ns);
 }

 OID ReplicationCoordinatorImpl::getElectionId() {
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index e518850b506..d2d95ea44aa 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -152,7 +152,7 @@ public:
                                         const NamespaceString& ns,
                                         bool slaveOk);

-    virtual bool shouldIgnoreUniqueIndex(const IndexDescriptor* idx);
+    virtual bool shouldRelaxIndexConstraints(const NamespaceString& ns);

     virtual Status setLastOptimeForSlave(const OID& rid, const Timestamp& ts);
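
The replacement predicate collapses the old member-state switch: a node relaxes constraints exactly when it cannot accept writes for the namespace, which covers SECONDARY, RECOVERING, ROLLBACK and STARTUP2 in one stroke while keeping the always-writable "local" database strict. A standalone model of the equivalence (simplified, not the real coordinator):

```cpp
#include <cassert>
#include <string>

enum class MemberState { Primary, Secondary, Recovering, Rollback, Startup2 };

bool canAcceptWritesFor(MemberState s, const std::string& db) {
    return db == "local" || s == MemberState::Primary;  // "local" is writable on any node
}

// The patched predicate: relax iff the node could not have produced this write itself.
bool shouldRelaxIndexConstraints(MemberState s, const std::string& db) {
    return !canAcceptWritesFor(s, db);
}

int main() {
    assert(!shouldRelaxIndexConstraints(MemberState::Primary, "test"));    // enforce on primaries
    assert(shouldRelaxIndexConstraints(MemberState::Secondary, "test"));   // relax while applying
    assert(shouldRelaxIndexConstraints(MemberState::Startup2, "test"));    // ...and in initial sync
    assert(!shouldRelaxIndexConstraints(MemberState::Secondary, "local")); // local stays strict
}
```
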
diff --git a/src/mongo/db/repl/replication_coordinator_mock.cpp b/src/mongo/db/repl/replication_coordinator_mock.cpp
index daa9c379545..4b032ba582b 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_mock.cpp
@@ -120,8 +120,13 @@ bool ReplicationCoordinatorMock::isMasterForReportingPurposes() {
 }

 bool ReplicationCoordinatorMock::canAcceptWritesForDatabase(StringData dbName) {
-    // TODO
-    return true;
+    // Return true if we allow writes explicitly even when not in primary state, as in sharding
+    // unit tests, so that the op observers can fire but the tests don't have to set all the states
+    // as if it's in primary.
+    if (_alwaysAllowWrites) {
+        return true;
+    }
+    return dbName == "local" || _memberState.primary();
 }

 bool ReplicationCoordinatorMock::canAcceptWritesFor(const NamespaceString& ns) {
@@ -136,9 +141,8 @@ Status ReplicationCoordinatorMock::checkCanServeReadsFor(OperationContext* txn,
     return Status::OK();
 }

-bool ReplicationCoordinatorMock::shouldIgnoreUniqueIndex(const IndexDescriptor* idx) {
-    // TODO
-    return false;
+bool ReplicationCoordinatorMock::shouldRelaxIndexConstraints(const NamespaceString& ns) {
+    return !canAcceptWritesFor(ns);
 }

 Status ReplicationCoordinatorMock::setLastOptimeForSlave(const OID& rid, const Timestamp& ts) {
@@ -462,5 +466,9 @@ Status ReplicationCoordinatorMock::stepUpIfEligible() {
     return Status::OK();
 }

+void ReplicationCoordinatorMock::alwaysAllowWrites(bool allowWrites) {
+    _alwaysAllowWrites = allowWrites;
+}
+
 }  // namespace repl
 }  // namespace mongo
diff --git a/src/mongo/db/repl/replication_coordinator_mock.h b/src/mongo/db/repl/replication_coordinator_mock.h
index e554fee534b..0a61d010a1a 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.h
+++ b/src/mongo/db/repl/replication_coordinator_mock.h
@@ -100,7 +100,7 @@ public:
                                         const NamespaceString& ns,
                                         bool slaveOk);

-    virtual bool shouldIgnoreUniqueIndex(const IndexDescriptor* idx);
+    virtual bool shouldRelaxIndexConstraints(const NamespaceString& ns);

     virtual Status setLastOptimeForSlave(const OID& rid, const Timestamp& ts);
@@ -264,6 +264,11 @@ public:
     */
     void setGetConfigReturnValue(ReplicaSetConfig returnValue);

+    /**
+     * Always allow writes even if this node is not master. Used by sharding unit tests.
+     */
+    void alwaysAllowWrites(bool allowWrites);
+
 private:
     AtomicUInt64 _snapshotNameGenerator;
     const ReplSettings _settings;
@@ -271,6 +276,7 @@ private:
     OpTime _myLastDurableOpTime;
     OpTime _myLastAppliedOpTime;
     ReplicaSetConfig _getConfigReturnValue;
+    bool _alwaysAllowWrites = false;
 };

 }  // namespace repl
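
Because the mock now answers canAcceptWritesForDatabase() from real member state instead of returning true unconditionally, existing unit tests that write while not PRIMARY would start failing; alwaysAllowWrites() is the escape hatch the sharding fixtures use (see the fixture changes further down). The mock's decision, reduced to a standalone sketch:

```cpp
#include <cassert>
#include <string>

class ReplCoordinatorMockModel {
public:
    bool canAcceptWritesForDatabase(const std::string& dbName) const {
        if (_alwaysAllowWrites) return true;  // opt-out for tests that never step up
        return dbName == "local" || _primary;
    }
    void alwaysAllowWrites(bool allow) { _alwaysAllowWrites = allow; }
    void setPrimary(bool primary) { _primary = primary; }

private:
    bool _alwaysAllowWrites = false;
    bool _primary = false;
};

int main() {
    ReplCoordinatorMockModel mock;
    assert(!mock.canAcceptWritesForDatabase("test"));  // not primary: refuse
    assert(mock.canAcceptWritesForDatabase("local"));  // local is always writable
    mock.alwaysAllowWrites(true);
    assert(mock.canAcceptWritesForDatabase("test"));   // fixture override
}
```
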
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index f0b63a15aee..107b026831c 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -1214,12 +1214,9 @@ Status multiInitialSyncApply_noAbort(OperationContext* txn,
                 // subsequently got deleted and no longer exists on the Sync Target at all
             }
         } catch (const DBException& e) {
-            // SERVER-24927 and SERVER-24997 If we have a NamespaceNotFound or a
-            // CannotIndexParallelArrays exception, then this document will be
+            // SERVER-24927 If we have a NamespaceNotFound exception, then this document will be
             // dropped before initial sync ends anyways and we should ignore it.
-            if ((e.getCode() == ErrorCodes::NamespaceNotFound ||
-                 e.getCode() == ErrorCodes::CannotIndexParallelArrays) &&
-                entry.isCrudOpType()) {
+            if (e.getCode() == ErrorCodes::NamespaceNotFound && entry.isCrudOpType()) {
                 continue;
             }
diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp
index 891a389a3d9..1ce29f5d51f 100644
--- a/src/mongo/db/repl/sync_tail_test.cpp
+++ b/src/mongo/db/repl/sync_tail_test.cpp
@@ -33,6 +33,7 @@
 #include <utility>
 #include <vector>

+#include "mongo/db/catalog/collection.h"
 #include "mongo/db/catalog/collection_options.h"
 #include "mongo/db/catalog/database.h"
 #include "mongo/db/catalog/database_holder.h"
@@ -43,6 +44,7 @@
 #include "mongo/db/curop.h"
 #include "mongo/db/db_raii.h"
 #include "mongo/db/jsobj.h"
+#include "mongo/db/query/internal_plans.h"
 #include "mongo/db/repl/bgsync.h"
 #include "mongo/db/repl/oplog.h"
 #include "mongo/db/repl/oplog_interface_local.h"
@@ -56,6 +58,7 @@
 #include "mongo/stdx/mutex.h"
 #include "mongo/unittest/unittest.h"
 #include "mongo/util/concurrency/old_thread_pool.h"
+#include "mongo/util/md5.hpp"
 #include "mongo/util/string_map.h"

 namespace {
@@ -78,7 +81,6 @@ protected:
         return Status::OK();
     }

-private:
     void setUp() override;
     void tearDown() override;
 };
@@ -108,6 +110,7 @@ void SyncTailTest::setUp() {
     ServiceContextMongoDTest::setUp();
     ReplSettings replSettings;
     replSettings.setOplogSizeBytes(5 * 1024 * 1024);
+    replSettings.setReplSetString("repl");

     auto serviceContext = getServiceContext();
     ReplicationCoordinator::set(serviceContext,
@@ -188,20 +191,30 @@ void createCollection(OperationContext* txn,
 }

 /**
- * Creates a create collection oplog entry with given optime.
+ * Creates a command oplog entry with given optime and namespace.
 */
-OplogEntry makeCreateCollectionOplogEntry(OpTime opTime,
-                                          const NamespaceString& nss = NamespaceString("test.t")) {
+OplogEntry makeCommandOplogEntry(OpTime opTime,
+                                 const NamespaceString& nss,
+                                 const BSONObj& command) {
     BSONObjBuilder bob;
     bob.appendElements(opTime.toBSON());
     bob.append("h", 1LL);
+    bob.append("v", 2);
     bob.append("op", "c");
     bob.append("ns", nss.getCommandNS());
-    bob.append("o", BSON("create" << nss.coll()));
+    bob.append("o", command);
     return OplogEntry(bob.obj());
 }

 /**
+ * Creates a create collection oplog entry with given optime.
+ */
+OplogEntry makeCreateCollectionOplogEntry(OpTime opTime,
+                                          const NamespaceString& nss = NamespaceString("test.t")) {
+    return makeCommandOplogEntry(opTime, nss, BSON("create" << nss.coll()));
+}
+
+/**
 * Creates an insert oplog entry with given optime and namespace.
 */
 OplogEntry makeInsertDocumentOplogEntry(OpTime opTime,
@@ -921,30 +934,262 @@ TEST_F(SyncTailTest, MultiInitialSyncApplyPassesThroughShouldSyncTailRetryError)
     ASSERT_EQUALS(fetchCount.load(), 1U);
 }

-TEST_F(SyncTailTest, MultiInitialSyncApplyFailsOnRenameCollection) {
+class IdempotencyTest : public SyncTailTest {
+protected:
+    OplogEntry createCollection();
+    OplogEntry insert(const BSONObj& obj);
+    OplogEntry update(int _id, const BSONObj& obj);
+    OplogEntry buildIndex(const BSONObj& indexSpec, const BSONObj& options = BSONObj());
+    OplogEntry dropIndex(const std::string& indexName);
+    OpTime nextOpTime() {
+        static long long lastSecond = 1;
+        return OpTime(Timestamp(Seconds(lastSecond++), 0), 1LL);
+    }
+    Status runOp(const OplogEntry& entry);
+    Status runOps(std::initializer_list<OplogEntry> ops);
+    // Validate data and indexes. Return the MD5 hash of the documents ordered by _id.
+    std::string validate();
+
+    NamespaceString nss{"test.foo"};
+    NamespaceString nssIndex{"test.system.indexes"};
+};
+
+Status IdempotencyTest::runOp(const OplogEntry& op) {
+    return runOps({op});
+}
+
+Status IdempotencyTest::runOps(std::initializer_list<OplogEntry> ops) {
     SyncTail syncTail(nullptr, SyncTail::MultiSyncApplyFunc(), nullptr);
+    MultiApplier::OperationPtrs opsPtrs;
+    for (auto& op : ops) {
+        opsPtrs.push_back(&op);
+    }
+    AtomicUInt32 fetchCount(0);
+    return multiInitialSyncApply_noAbort(_txn.get(), &opsPtrs, &syncTail, &fetchCount);
+}

+OplogEntry IdempotencyTest::createCollection() {
+    return makeCreateCollectionOplogEntry(nextOpTime(), nss);
+}
+
+OplogEntry IdempotencyTest::insert(const BSONObj& obj) {
+    return makeInsertDocumentOplogEntry(nextOpTime(), nss, obj);
+}
+
+OplogEntry IdempotencyTest::update(int id, const BSONObj& obj) {
+    return makeUpdateDocumentOplogEntry(nextOpTime(), nss, BSON("_id" << id), obj);
+}
+
+OplogEntry IdempotencyTest::buildIndex(const BSONObj& indexSpec, const BSONObj& options) {
     BSONObjBuilder bob;
-    bob.appendElements(OpTime(Timestamp(1, 0), 1LL).toBSON());
-    bob.append("h", 1LL);
-    bob.append("op", "c");
-    bob.append("ns", "test.$cmd");
-    bob.append("o",
-               BSON("renameCollection"
-                    << "test.foo"
-                    << "to"
-                    << "test.bar"
-                    << "stayTemp"
-                    << false
-                    << "dropTarget"
-                    << false));
-    auto op = OplogEntry(bob.obj());
+    bob.append("v", 2);
+    bob.append("key", indexSpec);
+    bob.append("name", std::string(indexSpec.firstElementFieldName()) + "_index");
+    bob.append("ns", nss.ns());
+    bob.appendElementsUnique(options);
+    return makeInsertDocumentOplogEntry(nextOpTime(), nssIndex, bob.obj());
+}

-    MultiApplier::OperationPtrs ops = {&op};
-    AtomicUInt32 fetchCount(0);
-    ASSERT_EQUALS(ErrorCodes::OplogOperationUnsupported,
-                  multiInitialSyncApply_noAbort(_txn.get(), &ops, &syncTail, &fetchCount));
-    ASSERT_EQUALS(fetchCount.load(), 0U);
+OplogEntry IdempotencyTest::dropIndex(const std::string& indexName) {
+    auto cmd = BSON("deleteIndexes" << nss.coll() << "index" << indexName);
+    return makeCommandOplogEntry(nextOpTime(), nss, cmd);
+}
+
+std::string IdempotencyTest::validate() {
+    auto collection = AutoGetCollectionForRead(_txn.get(), nss).getCollection();
+    ValidateResults validateResults;
+    BSONObjBuilder bob;
+
+    Lock::DBLock lk(_txn->lockState(), nss.db(), MODE_IS);
+    Lock::CollectionLock lock(_txn->lockState(), nss.ns(), MODE_IS);
+    ASSERT_OK(collection->validate(_txn.get(), kValidateFull, &validateResults, &bob));
+    ASSERT_TRUE(validateResults.valid);
+
+    IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex(_txn.get());
+    ASSERT_TRUE(desc);
+    auto exec = InternalPlanner::indexScan(_txn.get(),
+                                           collection,
+                                           desc,
+                                           BSONObj(),
+                                           BSONObj(),
+                                           BoundInclusion::kIncludeStartKeyOnly,
+                                           PlanExecutor::YIELD_MANUAL,
+                                           InternalPlanner::FORWARD,
+                                           InternalPlanner::IXSCAN_FETCH);
+    ASSERT(NULL != exec.get());
+    md5_state_t st;
+    md5_init(&st);
+
+    PlanExecutor::ExecState state;
+    BSONObj c;
+    while (PlanExecutor::ADVANCED == (state = exec->getNext(&c, NULL))) {
+        md5_append(&st, (const md5_byte_t*)c.objdata(), c.objsize());
+    }
+    ASSERT_EQUALS(PlanExecutor::IS_EOF, state);
+    md5digest d;
+    md5_finish(&st, d);
+    return digestToString(d);
+}
2]}}")); + auto indexOp = buildIndex(fromjson("{loc: '2dsphere'}"), BSON("2dsphereIndexVersion" << 3)); + + auto ops = {insertOp, updateOp, indexOp}; + + ASSERT_OK(runOps(ops)); + auto hash = validate(); + ASSERT_OK(runOps(ops)); + ASSERT_EQUALS(hash, validate()); + + getGlobalReplicationCoordinator()->setFollowerMode(MemberState::RS_PRIMARY); + auto status = runOps(ops); + ASSERT_EQ(status.code(), 16755); +} + +TEST_F(IdempotencyTest, Geo2dsphereIndexFailedOnIndexing) { + getGlobalReplicationCoordinator()->setFollowerMode(MemberState::RS_RECOVERING); + ASSERT_OK(runOp(createCollection())); + auto indexOp = buildIndex(fromjson("{loc: '2dsphere'}"), BSON("2dsphereIndexVersion" << 3)); + auto dropIndexOp = dropIndex("loc_index"); + auto insertOp = insert(fromjson("{_id: 1, loc: 'hi'}")); + + auto ops = {indexOp, dropIndexOp, insertOp}; + + ASSERT_OK(runOps(ops)); + auto hash = validate(); + ASSERT_OK(runOps(ops)); + ASSERT_EQUALS(hash, validate()); + + getGlobalReplicationCoordinator()->setFollowerMode(MemberState::RS_PRIMARY); + auto status = runOps(ops); + ASSERT_EQ(status.code(), 16755); +} + +TEST_F(IdempotencyTest, Geo2dIndex) { + getGlobalReplicationCoordinator()->setFollowerMode(MemberState::RS_RECOVERING); + ASSERT_OK(runOp(createCollection())); + auto insertOp = insert(fromjson("{_id: 1, loc: [1]}")); + auto updateOp = update(1, fromjson("{$set: {loc: [1, 2]}}")); + auto indexOp = buildIndex(fromjson("{loc: '2d'}")); + + auto ops = {insertOp, updateOp, indexOp}; + + ASSERT_OK(runOps(ops)); + auto hash = validate(); + ASSERT_OK(runOps(ops)); + ASSERT_EQUALS(hash, validate()); + + getGlobalReplicationCoordinator()->setFollowerMode(MemberState::RS_PRIMARY); + auto status = runOps(ops); + ASSERT_EQ(status.code(), 13068); +} + +TEST_F(IdempotencyTest, UniqueKeyIndex) { + getGlobalReplicationCoordinator()->setFollowerMode(MemberState::RS_RECOVERING); + ASSERT_OK(runOp(createCollection())); + auto insertOp = insert(fromjson("{_id: 1, x: 5}")); + auto updateOp = update(1, fromjson("{$set: {x: 6}}")); + auto insertOp2 = insert(fromjson("{_id: 2, x: 5}")); + auto indexOp = buildIndex(fromjson("{x: 1}"), fromjson("{unique: true}")); + + auto ops = {insertOp, updateOp, insertOp2, indexOp}; + + ASSERT_OK(runOps(ops)); + auto hash = validate(); + ASSERT_OK(runOps(ops)); + ASSERT_EQUALS(hash, validate()); + + getGlobalReplicationCoordinator()->setFollowerMode(MemberState::RS_PRIMARY); + auto status = runOps(ops); + ASSERT_EQ(status.code(), ErrorCodes::DuplicateKey); +} + +TEST_F(IdempotencyTest, ParallelArrayError) { + getGlobalReplicationCoordinator()->setFollowerMode(MemberState::RS_RECOVERING); + + ASSERT_OK(runOp(createCollection())); + ASSERT_OK(runOp(insert(fromjson("{_id: 1}")))); + + auto updateOp1 = update(1, fromjson("{$set: {x: [1, 2]}}")); + auto updateOp2 = update(1, fromjson("{$set: {x: 1}}")); + auto updateOp3 = update(1, fromjson("{$set: {y: [3, 4]}}")); + auto indexOp = buildIndex(fromjson("{x: 1, y: 1}")); + + auto ops = {updateOp1, updateOp2, updateOp3, indexOp}; + + ASSERT_OK(runOps(ops)); + auto hash = validate(); + ASSERT_OK(runOps(ops)); + ASSERT_EQUALS(hash, validate()); + + getGlobalReplicationCoordinator()->setFollowerMode(MemberState::RS_PRIMARY); + auto status = runOps(ops); + ASSERT_EQ(status.code(), ErrorCodes::CannotIndexParallelArrays); +} + +TEST_F(IdempotencyTest, IndexKeyTooLongError) { + getGlobalReplicationCoordinator()->setFollowerMode(MemberState::RS_RECOVERING); + + ASSERT_OK(runOp(createCollection())); + ASSERT_OK(runOp(insert(fromjson("{_id: 1}")))); + + 
+    // Key size limit is 1024 for ephemeral storage engine, so two 800 byte fields cannot
+    // co-exist.
+    std::string longStr(800, 'a');
+    auto updateOp1 = update(1, BSON("$set" << BSON("x" << longStr)));
+    auto updateOp2 = update(1, fromjson("{$set: {x: 1}}"));
+    auto updateOp3 = update(1, BSON("$set" << BSON("y" << longStr)));
+    auto indexOp = buildIndex(fromjson("{x: 1, y: 1}"));
+
+    auto ops = {updateOp1, updateOp2, updateOp3, indexOp};
+
+    ASSERT_OK(runOps(ops));
+    auto hash = validate();
+    ASSERT_OK(runOps(ops));
+    ASSERT_EQUALS(hash, validate());
+
+    getGlobalReplicationCoordinator()->setFollowerMode(MemberState::RS_PRIMARY);
+    auto status = runOps(ops);
+    ASSERT_EQ(status.code(), ErrorCodes::KeyTooLong);
+}
+
+TEST_F(IdempotencyTest, IndexWithDifferentOptions) {
+    getGlobalReplicationCoordinator()->setFollowerMode(MemberState::RS_RECOVERING);
+
+    ASSERT_OK(runOp(createCollection()));
+    ASSERT_OK(runOp(insert(fromjson("{_id: 1, x: 'hi'}"))));
+
+    auto indexOp1 = buildIndex(fromjson("{x: 'text'}"), fromjson("{default_language: 'spanish'}"));
+    auto dropIndexOp = dropIndex("x_index");
+    auto indexOp2 = buildIndex(fromjson("{x: 'text'}"), fromjson("{default_language: 'english'}"));
+
+    auto ops = {indexOp1, dropIndexOp, indexOp2};
+
+    ASSERT_OK(runOps(ops));
+    auto hash = validate();
+    ASSERT_OK(runOps(ops));
+    ASSERT_EQUALS(hash, validate());
+
+    getGlobalReplicationCoordinator()->setFollowerMode(MemberState::RS_PRIMARY);
+    auto status = runOps(ops);
+    ASSERT_EQ(status.code(), ErrorCodes::IndexOptionsConflict);
+}
+
+TEST_F(IdempotencyTest, ResyncOnRenameCollection) {
+    getGlobalReplicationCoordinator()->setFollowerMode(MemberState::RS_RECOVERING);
+
+    auto cmd = BSON("renameCollection" << nss.ns() << "to"
+                                       << "test.bar"
+                                       << "stayTemp"
+                                       << false
+                                       << "dropTarget"
+                                       << false);
+    auto op = makeCommandOplogEntry(nextOpTime(), nss, cmd);
+    ASSERT_EQUALS(runOp(op), ErrorCodes::OplogOperationUnsupported);
 }

 }  // namespace
diff --git a/src/mongo/db/s/sharding_state_test.cpp b/src/mongo/db/s/sharding_state_test.cpp
index c20c1ac4a41..e96e29bef33 100644
--- a/src/mongo/db/s/sharding_state_test.cpp
+++ b/src/mongo/db/s/sharding_state_test.cpp
@@ -34,6 +34,7 @@
 #include "mongo/db/dbdirectclient.h"
 #include "mongo/db/jsobj.h"
 #include "mongo/db/namespace_string.h"
+#include "mongo/db/repl/replication_coordinator_mock.h"
 #include "mongo/db/s/sharding_state.h"
 #include "mongo/db/s/type_shard_identity.h"
 #include "mongo/db/server_options.h"
@@ -447,6 +448,8 @@ TEST_F(ShardingStateTest,

 TEST_F(ShardingStateTest,
        InitializeShardingAwarenessIfNeededNotReadOnlyAndShardServerAndInvalidShardIdentity) {
+    replicationCoordinator()->setFollowerMode(repl::MemberState::RS_PRIMARY);
+
     // Insert the shardIdentity doc to disk before setting the clusterRole, since if the clusterRole
     // is ShardServer, the OpObserver for inserts will prevent the insert from occurring, since the
     // shardIdentity doc is invalid.
@@ -471,6 +474,8 @@ TEST_F(ShardingStateTest,

 TEST_F(ShardingStateTest,
        InitializeShardingAwarenessIfNeededNotReadOnlyAndShardServerAndValidShardIdentity) {
+    replicationCoordinator()->setFollowerMode(repl::MemberState::RS_PRIMARY);
+
     // Insert the shardIdentity doc to disk before setting the clusterRole, since if the clusterRole
     // is ShardServer, the OpObserver for inserts will trigger sharding initialization from the
     // inserted doc.
diff --git a/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp b/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
index 7f4e743b205..99fdcf0ecab 100644
--- a/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
@@ -32,6 +32,10 @@
 #include <vector>

 #include "mongo/bson/json.h"
+#include "mongo/db/catalog/collection.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/curop.h"
+#include "mongo/db/db_raii.h"
 #include "mongo/db/namespace_string.h"
 #include "mongo/db/operation_context.h"
 #include "mongo/db/repl/replication_coordinator_mock.h"
@@ -216,11 +220,26 @@ TEST_F(ConfigInitializationTest, ReRunsIfDocRolledBackThenReElected) {
     });

     operationContext()->setReplicatedWrites(false);
     replicationCoordinator()->setFollowerMode(repl::MemberState::RS_ROLLBACK);
-    ASSERT_OK(
-        catalogClient()->removeConfigDocuments(operationContext(),
-                                               VersionType::ConfigNS,
-                                               BSONObj(),
-                                               ShardingCatalogClient::kMajorityWriteConcern));
+    auto txn = operationContext();
+    auto nss = NamespaceString(VersionType::ConfigNS);
+    MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+        ScopedTransaction transaction(txn, MODE_IX);
+        AutoGetCollection autoColl(txn, nss, MODE_IX);
+        auto coll = autoColl.getCollection();
+        ASSERT_TRUE(coll);
+        auto cursor = coll->getCursor(txn);
+        std::vector<RecordId> recordIds;
+        while (auto recordId = cursor->next()) {
+            recordIds.push_back(recordId->id);
+        }
+        mongo::WriteUnitOfWork wuow(txn);
+        for (auto recordId : recordIds) {
+            coll->deleteDocument(txn, recordId, nullptr);
+        }
+        wuow.commit();
+        ASSERT_EQUALS(0UL, coll->numRecords(txn));
+    }
+    MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "removeConfigDocuments", nss.ns());

     // Verify the document was actually removed.
diff --git a/src/mongo/s/client/shard_local_test.cpp b/src/mongo/s/client/shard_local_test.cpp
index 3bd026db9ed..25d4f197a2a 100644
--- a/src/mongo/s/client/shard_local_test.cpp
+++ b/src/mongo/s/client/shard_local_test.cpp
@@ -86,6 +86,7 @@ void ShardLocalTest::setUp() {
     _shardLocal = stdx::make_unique<ShardLocal>(ShardId("config"));
     const repl::ReplSettings replSettings = {};
     repl::setGlobalReplicationCoordinator(new repl::ReplicationCoordinatorMock(replSettings));
+    repl::getGlobalReplicationCoordinator()->setFollowerMode(repl::MemberState::RS_PRIMARY);
 }

 void ShardLocalTest::tearDown() {
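
The rollback test above now deletes the config document through the collection layer rather than the catalog client, and therefore has to handle write conflicts itself; MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END wrap the body in a retry loop. The pattern those macros implement, sketched generically (simplified; the real loop also aborts the storage transaction and backs off between attempts):

```cpp
#include <iostream>

struct WriteConflictException {};

// Generic shape of a write-conflict retry loop: re-run the whole unit of
// work until it commits without a conflict.
template <typename Body>
void writeConflictRetry(const char* opName, Body body) {
    for (int attempt = 0;; ++attempt) {
        try {
            body();
            return;
        } catch (const WriteConflictException&) {
            std::cerr << opName << ": write conflict, retrying (attempt " << attempt << ")\n";
        }
    }
}

int main() {
    int conflictsLeft = 2;
    writeConflictRetry("removeConfigDocuments", [&] {
        if (conflictsLeft-- > 0) throw WriteConflictException{};  // simulate contention
        std::cout << "committed\n";
    });
}
```
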
diff --git a/src/mongo/s/config_server_test_fixture.cpp b/src/mongo/s/config_server_test_fixture.cpp
index 4261d037073..95189db6ae5 100644
--- a/src/mongo/s/config_server_test_fixture.cpp
+++ b/src/mongo/s/config_server_test_fixture.cpp
@@ -46,6 +46,7 @@
 #include "mongo/db/repl/oplog.h"
 #include "mongo/db/repl/read_concern_args.h"
 #include "mongo/db/repl/repl_settings.h"
+#include "mongo/db/repl/replication_coordinator_mock.h"
 #include "mongo/executor/network_interface_mock.h"
 #include "mongo/executor/task_executor_pool.h"
 #include "mongo/executor/thread_pool_task_executor_test_fixture.h"
@@ -98,6 +99,9 @@ ConfigServerTestFixture::~ConfigServerTestFixture() = default;

 void ConfigServerTestFixture::setUp() {
     ShardingMongodTestFixture::setUp();
+    // TODO: SERVER-26919 set the flag on the mock repl coordinator just for the window where it
+    // actually needs to bypass the op observer.
+    replicationCoordinator()->alwaysAllowWrites(true);

     // Initialize sharding components as a config server.
     serverGlobalParams.clusterRole = ClusterRole::ConfigServer;
diff --git a/src/mongo/s/sharding_mongod_test_fixture.cpp b/src/mongo/s/sharding_mongod_test_fixture.cpp
index ae91dc4ed5e..3e8c9f013d7 100644
--- a/src/mongo/s/sharding_mongod_test_fixture.cpp
+++ b/src/mongo/s/sharding_mongod_test_fixture.cpp
@@ -120,9 +120,7 @@ void ShardingMongodTestFixture::setUp() {
     replSetConfig.initialize(BSON("_id" << _setName << "protocolVersion" << 1 << "version" << 3
                                         << "members"
                                         << serversBob.arr()));
-    auto replCoordMockPtr = dynamic_cast<ReplicationCoordinatorMock*>(replicationCoordinator());
-    invariant(replCoordMockPtr);
-    replCoordMockPtr->setGetConfigReturnValue(replSetConfig);
+    replCoordPtr->setGetConfigReturnValue(replSetConfig);

     repl::ReplicationCoordinator::set(serviceContext, std::move(replCoordPtr));

@@ -131,7 +129,7 @@ void ShardingMongodTestFixture::setUp() {
     repl::createOplog(_opCtx.get());
 }

-std::unique_ptr<ReplicationCoordinator> ShardingMongodTestFixture::makeReplicationCoordinator(
+std::unique_ptr<ReplicationCoordinatorMock> ShardingMongodTestFixture::makeReplicationCoordinator(
     ReplSettings replSettings) {
     return stdx::make_unique<repl::ReplicationCoordinatorMock>(replSettings);
 }
@@ -372,7 +370,7 @@ executor::TaskExecutor* ShardingMongodTestFixture::executor() const {
     return Grid::get(operationContext())->getExecutorPool()->getFixedExecutor();
 }

-repl::ReplicationCoordinator* ShardingMongodTestFixture::replicationCoordinator() const {
+repl::ReplicationCoordinatorMock* ShardingMongodTestFixture::replicationCoordinator() const {
     invariant(_replCoord);
     return _replCoord;
 }
diff --git a/src/mongo/s/sharding_mongod_test_fixture.h b/src/mongo/s/sharding_mongod_test_fixture.h
index e3ffa13d582..62435c97e79 100644
--- a/src/mongo/s/sharding_mongod_test_fixture.h
+++ b/src/mongo/s/sharding_mongod_test_fixture.h
@@ -58,7 +58,7 @@ class TaskExecutorPool;
 }  // namespace executor

 namespace repl {
-class ReplicationCoordinator;
+class ReplicationCoordinatorMock;
 class ReplSettings;
 }  // namespace repl

@@ -117,7 +117,7 @@ public:
     executor::TaskExecutor* executor() const;
     executor::NetworkInterfaceMock* network() const;

-    repl::ReplicationCoordinator* replicationCoordinator() const;
+    repl::ReplicationCoordinatorMock* replicationCoordinator() const;

     /**
      * Returns the stored raw pointer to the DistLockCatalog, if it has been initialized.
@@ -182,7 +182,7 @@ protected:
     /**
     * Base class returns ReplicationCoordinatorMock.
     */
-    virtual std::unique_ptr<repl::ReplicationCoordinator> makeReplicationCoordinator(
+    virtual std::unique_ptr<repl::ReplicationCoordinatorMock> makeReplicationCoordinator(
        repl::ReplSettings replSettings);

     /**
@@ -264,7 +264,7 @@ private:
     // store a raw pointer to it here.
     DistLockManager* _distLockManager = nullptr;

-    repl::ReplicationCoordinator* _replCoord = nullptr;
+    repl::ReplicationCoordinatorMock* _replCoord = nullptr;

     // Allows for processing tasks through the NetworkInterfaceMock/ThreadPoolMock subsystem.
     std::unique_ptr<executor::NetworkTestEnv> _networkTestEnv;
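
Having the fixture own and expose ReplicationCoordinatorMock* (instead of the base-class pointer plus a dynamic_cast at each use site) is a small but recurring test-fixture idiom: the fixture creates the concrete type, so it can return it concretely, and mock-only knobs like alwaysAllowWrites() become directly reachable. A standalone sketch of the idiom (illustrative types only):

```cpp
#include <cassert>
#include <memory>

struct ReplCoordinator { virtual ~ReplCoordinator() = default; };
struct ReplCoordinatorMock : ReplCoordinator {
    void alwaysAllowWrites(bool) {}  // mock-only knob, invisible through the base class
};

class Fixture {
public:
    void setUp() {
        auto mock = std::make_unique<ReplCoordinatorMock>();
        _mock = mock.get();        // keep the concrete pointer...
        _owned = std::move(mock);  // ...while ownership moves where it must
    }
    ReplCoordinatorMock* replicationCoordinator() const { return _mock; }  // no dynamic_cast

private:
    std::unique_ptr<ReplCoordinator> _owned;
    ReplCoordinatorMock* _mock = nullptr;
};

int main() {
    Fixture f;
    f.setUp();
    f.replicationCoordinator()->alwaysAllowWrites(true);  // mock API directly available
    assert(f.replicationCoordinator() != nullptr);
}
```
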