/**
 *    Copyright (C) 2018-present MongoDB, Inc.
 *
 *    This program is free software: you can redistribute it and/or modify
 *    it under the terms of the Server Side Public License, version 1,
 *    as published by MongoDB, Inc.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    Server Side Public License for more details.
 *
 *    You should have received a copy of the Server Side Public License
 *    along with this program. If not, see
 *    <http://www.mongodb.com/licensing/server-side-public-license>.
 *
 *    As a special exception, the copyright holders give permission to link the
 *    code of portions of this program with the OpenSSL library under certain
 *    conditions as described in each individual source file and distribute
 *    linked combinations including the program with the OpenSSL library. You
 *    must comply with the Server Side Public License in all respects for
 *    all of the code used other than as permitted herein. If you modify file(s)
 *    with this exception, you may extend this exception to your version of the
 *    file(s), but you are not obligated to do so. If you do not wish to do so,
 *    delete this exception statement from your version. If you delete this
 *    exception statement from all source files in the program, then also delete
 *    it in the license file.
 */

#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex

#include "mongo/platform/basic.h"

#include "mongo/db/index/btree_access_method.h"

#include <utility>
#include <vector>

#include "mongo/base/error_codes.h"
#include "mongo/base/status.h"
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/catalog/index_consistency.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/curop.h"
#include "mongo/db/index/index_build_interceptor.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/keypattern.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/repl/timestamp_block.h"
#include "mongo/db/storage/durable_catalog.h"
#include "mongo/db/storage/execution_context.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/logv2/log.h"
#include "mongo/util/progress_meter.h"
#include "mongo/util/scopeguard.h"
#include "mongo/util/stacktrace.h"

namespace mongo {

using std::pair;
using std::set;

using IndexVersion = IndexDescriptor::IndexVersion;

namespace {

// Reserved RecordId against which multikey metadata keys are indexed.
static const RecordId kMultikeyMetadataKeyId =
    RecordId{RecordId::ReservedId::kWildcardMultikeyMetadataId};

/**
 * Returns true if at least one prefix of any of the indexed fields causes the index to be
 * multikey, and returns false otherwise. This function returns false if the 'multikeyPaths'
 * vector is empty.
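 *
 * For example, under an index on {"a.b": 1}, a document in which 'a' is an array yields
 * multikeyPaths of {{0}} (path component 0 of "a.b" is multikey), so this returns true; if
 * neither 'a' nor 'a.b' is ever an array, the components set stays empty and this returns
 * false.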
 */
bool isMultikeyFromPaths(const MultikeyPaths& multikeyPaths) {
    return std::any_of(multikeyPaths.cbegin(),
                       multikeyPaths.cend(),
                       [](const MultikeyComponents& components) { return !components.empty(); });
}

}  // namespace

struct BtreeExternalSortComparison {
    typedef std::pair<KeyString::Value, mongo::NullValue> Data;
    int operator()(const Data& l, const Data& r) const {
        return l.first.compare(r.first);
    }
};

AbstractIndexAccessMethod::AbstractIndexAccessMethod(IndexCatalogEntry* btreeState,
                                                     std::unique_ptr<SortedDataInterface> btree)
    : _indexCatalogEntry(btreeState),
      _descriptor(btreeState->descriptor()),
      _newInterface(std::move(btree)) {
    verify(IndexDescriptor::isIndexVersionSupported(_descriptor->version()));
}

bool AbstractIndexAccessMethod::isFatalError(OperationContext* opCtx,
                                             Status status,
                                             KeyString::Value key) {
    // If the status is Status::OK() return false immediately.
    if (status.isOK()) {
        return false;
    }

    // A document might be indexed multiple times during a background index build if it moves ahead
    // of the cursor (e.g. via an update). We test this scenario and swallow the error accordingly.
    if (status == ErrorCodes::DuplicateKeyValue && !_indexCatalogEntry->isReady(opCtx)) {
        LOGV2_DEBUG(20681,
                    3,
                    "KeyString {key} already in index during background indexing (ok)",
                    "key"_attr = key);
        return false;
    }
    return true;
}

// Find the keys for obj, put them in the tree pointing to loc.
Status AbstractIndexAccessMethod::insert(OperationContext* opCtx,
                                         const BSONObj& obj,
                                         const RecordId& loc,
                                         const InsertDeleteOptions& options,
                                         InsertResult* result) {
    invariant(options.fromIndexBuilder || !_indexCatalogEntry->isHybridBuilding());

    auto& executionCtx = StorageExecutionContext::get(opCtx);
    auto keys = executionCtx.keys();
    auto multikeyMetadataKeys = executionCtx.multikeyMetadataKeys();
    auto multikeyPaths = executionCtx.multikeyPaths();

    getKeys(executionCtx.pooledBufferBuilder(),
            obj,
            options.getKeysMode,
            GetKeysContext::kAddingKeys,
            keys.get(),
            multikeyMetadataKeys.get(),
            multikeyPaths.get(),
            loc,
            kNoopOnSuppressedErrorFn);

    return insertKeys(opCtx, *keys, *multikeyMetadataKeys, *multikeyPaths, loc, options, result);
}

Status AbstractIndexAccessMethod::insertKeys(OperationContext* opCtx,
                                             const KeyStringSet& keys,
                                             const KeyStringSet& multikeyMetadataKeys,
                                             const MultikeyPaths& multikeyPaths,
                                             const RecordId& loc,
                                             const InsertDeleteOptions& options,
                                             InsertResult* result) {
    // Add all new data keys, and all new multikey metadata keys, into the index. When iterating
    // over the data keys, each of them should point to the doc's RecordId. When iterating over
    // the multikey metadata keys, they should point to the reserved 'kMultikeyMetadataKeyId'.
    for (const auto keyVec : {&keys, &multikeyMetadataKeys}) {
        for (const auto& keyString : *keyVec) {
            bool unique = _descriptor->unique();
            Status status = _newInterface->insert(opCtx, keyString, !unique /* dupsAllowed */);

            // When duplicates are encountered and allowed, retry with dupsAllowed. Add the
            // key to the output vector so callers know which duplicate keys were inserted.
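            // (A non-unique index always tolerates duplicate keys, since entries with distinct
            // RecordIds remain distinct, which is why the first attempt above passes
            // dupsAllowed = !unique.)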
            if (ErrorCodes::DuplicateKey == status.code() && options.dupsAllowed) {
                invariant(unique);

                status = _newInterface->insert(opCtx, keyString, true /* dupsAllowed */);
                if (status.isOK() && result) {
                    auto key =
                        KeyString::toBson(keyString, getSortedDataInterface()->getOrdering());
                    result->dupsInserted.push_back(key);
                }
            }

            if (isFatalError(opCtx, status, keyString)) {
                return status;
            }
        }
    }

    if (result) {
        result->numInserted += keys.size() + multikeyMetadataKeys.size();
    }

    if (shouldMarkIndexAsMultikey(keys.size(), multikeyMetadataKeys, multikeyPaths)) {
        _indexCatalogEntry->setMultikey(opCtx, multikeyPaths);
    }
    return Status::OK();
}

void AbstractIndexAccessMethod::removeOneKey(OperationContext* opCtx,
                                             const KeyString::Value& keyString,
                                             const RecordId& loc,
                                             bool dupsAllowed) {
    try {
        _newInterface->unindex(opCtx, keyString, dupsAllowed);
    } catch (AssertionException& e) {
        LOGV2(20682,
              "Assertion failure: _unindex failed on: {descriptor_parentNS} for index: "
              "{descriptor_indexName}",
              "descriptor_parentNS"_attr = _descriptor->parentNS(),
              "descriptor_indexName"_attr = _descriptor->indexName());
        LOGV2(20683,
              "Assertion failure: _unindex failed: {e} KeyString:{keyString} dl:{loc}",
              "e"_attr = redact(e),
              "keyString"_attr = keyString,
              "loc"_attr = loc);
        printStackTrace();
    }
}

std::unique_ptr<SortedDataInterface::Cursor> AbstractIndexAccessMethod::newCursor(
    OperationContext* opCtx, bool isForward) const {
    return _newInterface->newCursor(opCtx, isForward);
}

std::unique_ptr<SortedDataInterface::Cursor> AbstractIndexAccessMethod::newCursor(
    OperationContext* opCtx) const {
    return newCursor(opCtx, true);
}

Status AbstractIndexAccessMethod::removeKeys(OperationContext* opCtx,
                                             const KeyStringSet& keys,
                                             const RecordId& loc,
                                             const InsertDeleteOptions& options,
                                             int64_t* numDeleted) {
    for (const auto& key : keys) {
        removeOneKey(opCtx, key, loc, options.dupsAllowed);
    }

    *numDeleted = keys.size();
    return Status::OK();
}

Status AbstractIndexAccessMethod::initializeAsEmpty(OperationContext* opCtx) {
    return _newInterface->initAsEmpty(opCtx);
}

RecordId AbstractIndexAccessMethod::findSingle(OperationContext* opCtx,
                                               const BSONObj& requestedKey) const {
    // Generate the key for this index.
    KeyString::Value actualKey = [&]() {
        if (_indexCatalogEntry->getCollator()) {
            // For performance, call get keys only if there is a non-simple collation.
            auto& executionCtx = StorageExecutionContext::get(opCtx);
            auto keys = executionCtx.keys();
            KeyStringSet* multikeyMetadataKeys = nullptr;
            MultikeyPaths* multikeyPaths = nullptr;

            getKeys(executionCtx.pooledBufferBuilder(),
                    requestedKey,
                    GetKeysMode::kEnforceConstraints,
                    GetKeysContext::kAddingKeys,
                    keys.get(),
                    multikeyMetadataKeys,
                    multikeyPaths,
                    boost::none,  // loc
                    kNoopOnSuppressedErrorFn);
            invariant(keys->size() == 1);
            return *keys->begin();
        } else {
            KeyString::HeapBuilder requestedKeyString(
                getSortedDataInterface()->getKeyStringVersion(),
                BSONObj::stripFieldNames(requestedKey),
                getSortedDataInterface()->getOrdering());
            return requestedKeyString.release();
        }
    }();

    std::unique_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(opCtx));
    const auto requestedInfo = kDebugBuild ? SortedDataInterface::Cursor::kKeyAndLoc
                                           : SortedDataInterface::Cursor::kWantLoc;
    if (auto kv = cursor->seekExact(actualKey, requestedInfo)) {
        // StorageEngine should guarantee these.
        dassert(!kv->loc.isNull());
        dassert(kv->key.woCompare(KeyString::toBson(actualKey.getBuffer(),
                                                    actualKey.getSize(),
                                                    getSortedDataInterface()->getOrdering(),
                                                    actualKey.getTypeBits()),
                                  /*order*/ BSONObj(),
                                  /*considerFieldNames*/ false) == 0);

        return kv->loc;
    }

    return RecordId();
}

void AbstractIndexAccessMethod::validate(OperationContext* opCtx,
                                         int64_t* numKeys,
                                         ValidateResults* fullResults) const {
    long long keys = 0;
    _newInterface->fullValidate(opCtx, &keys, fullResults);
    *numKeys = keys;
}

bool AbstractIndexAccessMethod::appendCustomStats(OperationContext* opCtx,
                                                  BSONObjBuilder* output,
                                                  double scale) const {
    return _newInterface->appendCustomStats(opCtx, output, scale);
}

long long AbstractIndexAccessMethod::getSpaceUsedBytes(OperationContext* opCtx) const {
    return _newInterface->getSpaceUsedBytes(opCtx);
}

pair<KeyStringSet, KeyStringSet> AbstractIndexAccessMethod::setDifference(
    const KeyStringSet& left, const KeyStringSet& right) {
    // Two iterators to traverse the two sets in sorted order.
    auto leftIt = left.begin();
    auto rightIt = right.begin();
    KeyStringSet::sequence_type onlyLeft;
    KeyStringSet::sequence_type onlyRight;

    while (leftIt != left.end() && rightIt != right.end()) {
        // Use compareWithTypeBits instead of the regular compare as we want just a difference in
        // typeinfo to also result in an index change.
        const int cmp = leftIt->compareWithTypeBits(*rightIt);
        if (cmp == 0) {
            ++leftIt;
            ++rightIt;
        } else if (cmp > 0) {
            onlyRight.push_back(*rightIt);
            ++rightIt;
        } else {
            onlyLeft.push_back(*leftIt);
            ++leftIt;
        }
    }

    // Add the rest of 'left' to 'onlyLeft', and the rest of 'right' to 'onlyRight', if any.
    onlyLeft.insert(onlyLeft.end(), leftIt, left.end());
    onlyRight.insert(onlyRight.end(), rightIt, right.end());

    KeyStringSet outLeft;
    KeyStringSet outRight;

    // The above algorithm guarantees that the elements are sorted and unique, so we can let the
    // container know so we get O(1) complexity adopting it.
    outLeft.adopt_sequence(boost::container::ordered_unique_range_t(), std::move(onlyLeft));
    outRight.adopt_sequence(boost::container::ordered_unique_range_t(), std::move(onlyRight));

    return {{std::move(outLeft)}, {std::move(outRight)}};
}

void AbstractIndexAccessMethod::prepareUpdate(OperationContext* opCtx,
                                              IndexCatalogEntry* index,
                                              const BSONObj& from,
                                              const BSONObj& to,
                                              const RecordId& record,
                                              const InsertDeleteOptions& options,
                                              UpdateTicket* ticket) const {
    auto& executionCtx = StorageExecutionContext::get(opCtx);
    const MatchExpression* indexFilter = index->getFilterExpression();
    if (!indexFilter || indexFilter->matchesBSON(from)) {
        // Override key constraints when generating keys for removal. This only applies to
        // documents that do not match the partial filter expression.
        const auto getKeysMode = index->isHybridBuilding()
            ? IndexAccessMethod::GetKeysMode::kRelaxConstraintsUnfiltered
            : options.getKeysMode;

        // There's no need to compute the prefixes of the indexed fields that possibly caused the
        // index to be multikey when the old version of the document was written since the index
        // metadata isn't updated when keys are deleted.
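        // For example, updating {a: [1, 2]} to {a: [2, 3]} under an index on {a: 1} generates
        // 'oldKeys' = {1, 2} and 'newKeys' = {2, 3}; setDifference() below then computes
        // removed = {1} and added = {3}, so update() only touches the keys that changed.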
        getKeys(executionCtx.pooledBufferBuilder(),
                from,
                getKeysMode,
                GetKeysContext::kRemovingKeys,
                &ticket->oldKeys,
                nullptr,
                nullptr,
                record,
                kNoopOnSuppressedErrorFn);
    }

    if (!indexFilter || indexFilter->matchesBSON(to)) {
        getKeys(executionCtx.pooledBufferBuilder(),
                to,
                options.getKeysMode,
                GetKeysContext::kAddingKeys,
                &ticket->newKeys,
                &ticket->newMultikeyMetadataKeys,
                &ticket->newMultikeyPaths,
                record,
                kNoopOnSuppressedErrorFn);
    }

    ticket->loc = record;
    ticket->dupsAllowed = options.dupsAllowed;

    std::tie(ticket->removed, ticket->added) = setDifference(ticket->oldKeys, ticket->newKeys);

    ticket->_isValid = true;
}

Status AbstractIndexAccessMethod::update(OperationContext* opCtx,
                                         const UpdateTicket& ticket,
                                         int64_t* numInserted,
                                         int64_t* numDeleted) {
    invariant(!_indexCatalogEntry->isHybridBuilding());
    invariant(ticket.newKeys.size() ==
              ticket.oldKeys.size() + ticket.added.size() - ticket.removed.size());
    invariant(numInserted);
    invariant(numDeleted);

    *numInserted = 0;
    *numDeleted = 0;

    if (!ticket._isValid) {
        return Status(ErrorCodes::InternalError, "Invalid UpdateTicket in update");
    }

    for (const auto& remKey : ticket.removed) {
        _newInterface->unindex(opCtx, remKey, ticket.dupsAllowed);
    }

    // Add all new data keys, and all new multikey metadata keys, into the index. When iterating
    // over the data keys, each of them should point to the doc's RecordId. When iterating over
    // the multikey metadata keys, they should point to the reserved 'kMultikeyMetadataKeyId'.
    for (const auto keySet : {&ticket.added, &ticket.newMultikeyMetadataKeys}) {
        for (const auto& keyString : *keySet) {
            Status status = _newInterface->insert(opCtx, keyString, ticket.dupsAllowed);
            if (isFatalError(opCtx, status, keyString)) {
                return status;
            }
        }
    }

    if (shouldMarkIndexAsMultikey(
            ticket.newKeys.size(), ticket.newMultikeyMetadataKeys, ticket.newMultikeyPaths)) {
        _indexCatalogEntry->setMultikey(opCtx, ticket.newMultikeyPaths);
    }

    *numDeleted = ticket.removed.size();
    *numInserted = ticket.added.size();

    return Status::OK();
}

Status AbstractIndexAccessMethod::compact(OperationContext* opCtx) {
    return this->_newInterface->compact(opCtx);
}

class AbstractIndexAccessMethod::BulkBuilderImpl : public IndexAccessMethod::BulkBuilder {
public:
    BulkBuilderImpl(IndexCatalogEntry* indexCatalogEntry,
                    const IndexDescriptor* descriptor,
                    size_t maxMemoryUsageBytes);

    Status insert(OperationContext* opCtx,
                  const BSONObj& obj,
                  const RecordId& loc,
                  const InsertDeleteOptions& options) final;

    const MultikeyPaths& getMultikeyPaths() const final;

    bool isMultikey() const final;

    /**
     * Inserts all multikey metadata keys cached during the BulkBuilder's lifetime into the
     * underlying Sorter, finalizes it, and returns an iterator over the sorted dataset.
     */
    Sorter::Iterator* done() final;

    int64_t getKeysInserted() const final;

private:
    std::unique_ptr<Sorter> _sorter;
    IndexCatalogEntry* _indexCatalogEntry;
    int64_t _keysInserted = 0;

    // Set to true if any document added to the BulkBuilder causes the index to become multikey.
    bool _isMultiKey = false;

    // Holds the path components that cause this index to be multikey. The '_indexMultikeyPaths'
    // vector remains empty if this index doesn't support path-level multikey tracking.
    MultikeyPaths _indexMultikeyPaths;

    // Caches the set of all multikey metadata keys generated during the bulk build process.
    // These are inserted into the sorter after all normal data keys have been added, just
    // before the bulk build is committed.
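    // (For wildcard indexes, these metadata keys are the entries stored against the reserved
    // RecordId 'kMultikeyMetadataKeyId' declared above.)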
    KeyStringSet _multikeyMetadataKeys;
};

std::unique_ptr<IndexAccessMethod::BulkBuilder> AbstractIndexAccessMethod::initiateBulk(
    size_t maxMemoryUsageBytes) {
    return std::make_unique<BulkBuilderImpl>(_indexCatalogEntry, _descriptor, maxMemoryUsageBytes);
}

AbstractIndexAccessMethod::BulkBuilderImpl::BulkBuilderImpl(IndexCatalogEntry* index,
                                                            const IndexDescriptor* descriptor,
                                                            size_t maxMemoryUsageBytes)
    : _sorter(Sorter::make(
          SortOptions()
              .TempDir(storageGlobalParams.dbpath + "/_tmp")
              .ExtSortAllowed()
              .MaxMemoryUsageBytes(maxMemoryUsageBytes),
          BtreeExternalSortComparison(),
          std::pair<KeyString::Value::SorterDeserializeSettings,
                    mongo::NullValue::SorterDeserializeSettings>(
              {index->accessMethod()->getSortedDataInterface()->getKeyStringVersion()}, {}))),
      _indexCatalogEntry(index) {}

Status AbstractIndexAccessMethod::BulkBuilderImpl::insert(OperationContext* opCtx,
                                                          const BSONObj& obj,
                                                          const RecordId& loc,
                                                          const InsertDeleteOptions& options) {
    auto& executionCtx = StorageExecutionContext::get(opCtx);

    auto keys = executionCtx.keys();
    auto multikeyPaths = executionCtx.multikeyPaths();

    try {
        _indexCatalogEntry->accessMethod()->getKeys(
            executionCtx.pooledBufferBuilder(),
            obj,
            options.getKeysMode,
            GetKeysContext::kAddingKeys,
            keys.get(),
            &_multikeyMetadataKeys,
            multikeyPaths.get(),
            loc,
            [&](Status status, const BSONObj&, boost::optional<RecordId>) {
                // If a key generation error was suppressed, record the document as "skipped" so
                // the index builder can retry at a point when data is consistent.
                auto interceptor = _indexCatalogEntry->indexBuildInterceptor();
                if (interceptor && interceptor->getSkippedRecordTracker()) {
                    LOGV2_DEBUG(20684,
                                1,
                                "Recording suppressed key generation error to retry later: "
                                "{status} on {loc}: {obj}",
                                "status"_attr = status,
                                "loc"_attr = loc,
                                "obj"_attr = redact(obj));
                    interceptor->getSkippedRecordTracker()->record(opCtx, loc);
                }
            });
    } catch (...) {
        return exceptionToStatus();
    }

    if (!multikeyPaths->empty()) {
        if (_indexMultikeyPaths.empty()) {
            _indexMultikeyPaths = *multikeyPaths;
        } else {
            invariant(_indexMultikeyPaths.size() == multikeyPaths->size());
            for (size_t i = 0; i < multikeyPaths->size(); ++i) {
                _indexMultikeyPaths[i].insert(boost::container::ordered_unique_range_t(),
                                              (*multikeyPaths)[i].begin(),
                                              (*multikeyPaths)[i].end());
            }
        }
    }

    for (const auto& keyString : *keys) {
        _sorter->add(keyString, mongo::NullValue());
        ++_keysInserted;
    }

    _isMultiKey = _isMultiKey ||
        _indexCatalogEntry->accessMethod()->shouldMarkIndexAsMultikey(
            keys->size(), _multikeyMetadataKeys, *multikeyPaths);

    return Status::OK();
}

const MultikeyPaths& AbstractIndexAccessMethod::BulkBuilderImpl::getMultikeyPaths() const {
    return _indexMultikeyPaths;
}

bool AbstractIndexAccessMethod::BulkBuilderImpl::isMultikey() const {
    return _isMultiKey;
}

IndexAccessMethod::BulkBuilder::Sorter::Iterator*
AbstractIndexAccessMethod::BulkBuilderImpl::done() {
    for (const auto& keyString : _multikeyMetadataKeys) {
        _sorter->add(keyString, mongo::NullValue());
        ++_keysInserted;
    }
    return _sorter->done();
}

int64_t AbstractIndexAccessMethod::BulkBuilderImpl::getKeysInserted() const {
    return _keysInserted;
}

Status AbstractIndexAccessMethod::commitBulk(OperationContext* opCtx,
                                             BulkBuilder* bulk,
                                             bool dupsAllowed,
                                             set<RecordId>* dupRecords,
                                             std::vector<BSONObj>* dupKeysInserted) {
    // Cannot simultaneously report uninserted duplicates 'dupRecords' and inserted duplicates
    // 'dupKeysInserted'.
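    // The sorter returns keys in ascending KeyString order, so the duplicate check in the loop
    // below only needs to compare each key against its immediate predecessor
    // (compareWithoutRecordId() == 0).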
    invariant(!(dupRecords && dupKeysInserted));

    Timer timer;

    std::unique_ptr<BulkBuilder::Sorter::Iterator> it(bulk->done());

    static const char* message = "Index Build: inserting keys from external sorter into index";
    ProgressMeterHolder pm;
    {
        stdx::unique_lock<Client> lk(*opCtx->getClient());
        pm.set(CurOp::get(opCtx)->setProgress_inlock(
            message, bulk->getKeysInserted(), 3 /* secondsBetween */));
    }

    auto builder = std::unique_ptr<SortedDataBuilderInterface>(
        _newInterface->getBulkBuilder(opCtx, dupsAllowed));

    KeyString::Value previousKey;

    while (it->more()) {
        opCtx->checkForInterrupt();

        WriteUnitOfWork wunit(opCtx);

        // Get the next datum and add it to the builder.
        BulkBuilder::Sorter::Data data = it->next();

        // Assert that keys are retrieved from the sorter in non-decreasing order. This check can
        // be expensive, so it only runs in debug builds, or when the comparison result is needed
        // anyway for the unique-index duplicate check below.
        int cmpData;
        if (kDebugBuild || _descriptor->unique()) {
            cmpData = data.first.compareWithoutRecordId(previousKey);
            if (cmpData < 0) {
                LOGV2_FATAL_NOTRACE(
                    31171,
                    "expected the next key{data_first} to be greater than or equal to the "
                    "previous key{previousKey}",
                    "data_first"_attr = data.first.toString(),
                    "previousKey"_attr = previousKey.toString());
            }
        }

        // Before attempting to insert, perform a duplicate key check.
        bool isDup = false;
        if (_descriptor->unique()) {
            isDup = cmpData == 0;
            if (isDup && !dupsAllowed) {
                if (dupRecords) {
                    RecordId recordId = KeyString::decodeRecordIdAtEnd(data.first.getBuffer(),
                                                                       data.first.getSize());
                    dupRecords->insert(recordId);
                    continue;
                }
                auto dupKey =
                    KeyString::toBson(data.first, getSortedDataInterface()->getOrdering());
                return buildDupKeyErrorStatus(dupKey.getOwned(),
                                              _descriptor->parentNS(),
                                              _descriptor->indexName(),
                                              _descriptor->keyPattern(),
                                              _descriptor->collation());
            }
        }

        Status status = builder->addKey(data.first);
        if (!status.isOK()) {
            // Duplicates are checked before inserting.
            invariant(status.code() != ErrorCodes::DuplicateKey);
            return status;
        }

        previousKey = data.first;

        if (isDup && dupsAllowed && dupKeysInserted) {
            auto dupKey = KeyString::toBson(data.first, getSortedDataInterface()->getOrdering());
            dupKeysInserted->push_back(dupKey.getOwned());
        }

        // If we're here either it's a dup and we're cool with it or the addKey went just fine.
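        // Record progress and commit this key's write unit before drawing the next key from the
        // sorter.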
        pm.hit();
        wunit.commit();
    }

    pm.finished();

    LOGV2(20685,
          "index build: inserted {bulk_getKeysInserted} keys from external sorter into index in "
          "{timer_seconds} seconds",
          "bulk_getKeysInserted"_attr = bulk->getKeysInserted(),
          "timer_seconds"_attr = timer.seconds());

    WriteUnitOfWork wunit(opCtx);
    builder->commit(true);
    wunit.commit();
    return Status::OK();
}

void AbstractIndexAccessMethod::setIndexIsMultikey(OperationContext* opCtx, MultikeyPaths paths) {
    _indexCatalogEntry->setMultikey(opCtx, paths);
}

IndexAccessMethod::OnSuppressedErrorFn IndexAccessMethod::kNoopOnSuppressedErrorFn =
    [](Status status, const BSONObj& obj, boost::optional<RecordId> loc) {
        LOGV2_DEBUG(
            20686,
            1,
            "Suppressed key generation error: {status} when getting index keys for {loc}: {obj}",
            "status"_attr = redact(status),
            "loc"_attr = loc,
            "obj"_attr = redact(obj));
    };

void AbstractIndexAccessMethod::getKeys(SharedBufferFragmentBuilder& pooledBufferBuilder,
                                        const BSONObj& obj,
                                        GetKeysMode mode,
                                        GetKeysContext context,
                                        KeyStringSet* keys,
                                        KeyStringSet* multikeyMetadataKeys,
                                        MultikeyPaths* multikeyPaths,
                                        boost::optional<RecordId> id,
                                        OnSuppressedErrorFn onSuppressedError) const {
    static stdx::unordered_set<int> whiteList{ErrorCodes::CannotBuildIndexKeys,
                                              // Btree
                                              ErrorCodes::CannotIndexParallelArrays,
                                              // FTS
                                              16732,
                                              16733,
                                              16675,
                                              17261,
                                              17262,
                                              // Hash
                                              16766,
                                              // Ambiguous array field name
                                              16746,
                                              // Haystack
                                              16775,
                                              16776,
                                              // 2dsphere geo
                                              16755,
                                              16756,
                                              // 2d geo
                                              16804,
                                              13067,
                                              13068,
                                              13026,
                                              13027};

    try {
        doGetKeys(pooledBufferBuilder,
                  obj,
                  context,
                  keys,
                  multikeyMetadataKeys,
                  multikeyPaths,
                  id);
    } catch (const AssertionException& ex) {
        // Suppress all indexing errors when mode is kRelaxConstraints.
        if (mode == GetKeysMode::kEnforceConstraints) {
            throw;
        }

        keys->clear();
        if (multikeyPaths) {
            multikeyPaths->clear();
        }

        // Only suppress the errors in the whitelist.
        if (whiteList.find(ex.code()) == whiteList.end()) {
            throw;
        }

        // If the document matches the partial filter expression, it should have been indexed, so
        // the key generation error is real; do not suppress it.
        const MatchExpression* filter = _indexCatalogEntry->getFilterExpression();
        if (mode == GetKeysMode::kRelaxConstraintsUnfiltered && filter &&
            filter->matchesBSON(obj)) {
            throw;
        }

        onSuppressedError(ex.toStatus(), obj, id);
    }
}

bool AbstractIndexAccessMethod::shouldMarkIndexAsMultikey(
    size_t numberOfKeys,
    const KeyStringSet& multikeyMetadataKeys,
    const MultikeyPaths& multikeyPaths) const {
    return numberOfKeys > 1 || isMultikeyFromPaths(multikeyPaths);
}

SortedDataInterface* AbstractIndexAccessMethod::getSortedDataInterface() const {
    return _newInterface.get();
}

/**
 * Generates a new file name on each call using a static, atomic and monotonically increasing
 * number.
 *
 * Each user of the Sorter must implement this function to ensure that all temporary files that
 * the Sorter instances produce are uniquely identified using a unique file name extension with a
 * separate atomic variable. This is necessary because the sorter.cpp code is separately included
 * in multiple places, rather than compiled in one place and linked, and so cannot provide a
 * globally unique ID.
 */
std::string nextFileName() {
    static AtomicWord<unsigned> indexAccessMethodFileCounter;
    return "extsort-index." + std::to_string(indexAccessMethodFileCounter.fetchAndAdd(1));
}

}  // namespace mongo

#include "mongo/db/sorter/sorter.cpp"
MONGO_CREATE_SORTER(mongo::KeyString::Value, mongo::NullValue, mongo::BtreeExternalSortComparison);