Diffstat (limited to 'src')
-rw-r--r-- | src/mongo/base/error_codes.err                          |   2
-rw-r--r-- | src/mongo/db/catalog/collection_impl.cpp                |  63
-rw-r--r-- | src/mongo/db/catalog/collection_impl.h                  |  12
-rw-r--r-- | src/mongo/db/catalog/index_catalog_impl.cpp             |   8
-rw-r--r-- | src/mongo/db/commands/do_txn_cmd.cpp                    |   3
-rw-r--r-- | src/mongo/db/index/index_descriptor.cpp                 |   3
-rw-r--r-- | src/mongo/db/repl/apply_ops.cpp                         |   9
-rw-r--r-- | src/mongo/db/repl/dbcheck.h                             |   3
-rw-r--r-- | src/mongo/db/repl/do_txn.cpp                            |   9
-rw-r--r-- | src/mongo/db/repl/sync_tail.cpp                         |   8
-rw-r--r-- | src/mongo/db/service_context.cpp                        |   7
-rw-r--r-- | src/mongo/db/service_context.h                          |   5
-rw-r--r-- | src/mongo/db/session.cpp                                |   7
-rw-r--r-- | src/mongo/db/storage/record_store.h                     |   7
-rw-r--r-- | src/mongo/db/storage/record_store_test_harness.cpp      |  15
-rw-r--r-- | src/mongo/db/storage/record_store_test_updaterecord.cpp |  36
-rw-r--r-- | src/mongo/db/storage/storage_engine.h                   |   7
-rw-r--r-- | src/mongo/dbtests/multikey_paths_test.cpp               |  26
-rw-r--r-- | src/mongo/dbtests/oplogstarttests.cpp                   | 284
19 files changed, 20 insertions, 494 deletions
diff --git a/src/mongo/base/error_codes.err b/src/mongo/base/error_codes.err
index 00ee3fa697c..74620aa4fe7 100644
--- a/src/mongo/base/error_codes.err
+++ b/src/mongo/base/error_codes.err
@@ -154,7 +154,7 @@ error_code("OplogOutOfOrder", 152)
 error_code("ChunkTooBig", 153)
 error_code("InconsistentShardIdentity", 154)
 error_code("CannotApplyOplogWhilePrimary", 155)
-error_code("NeedsDocumentMove", 156)
+error_code("OBSOLETE_NeedsDocumentMove", 156)
 error_code("CanRepairToDowngrade", 157)
 error_code("MustUpgrade", 158)
 error_code("DurationOverflow", 159)
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index f74fbabba89..67e689bf569 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -667,13 +667,7 @@ RecordId CollectionImpl::updateDocument(OperationContext* opCtx,
     Status updateStatus = _recordStore->updateRecord(
         opCtx, oldLocation, newDoc.objdata(), newDoc.objsize(), _enforceQuota(enforceQuota), this);
 
-    if (updateStatus == ErrorCodes::NeedsDocumentMove) {
-        return uassertStatusOK(_updateDocumentWithMove(
-            opCtx, oldLocation, oldDoc, newDoc, enforceQuota, opDebug, args, sid));
-    }
-    uassertStatusOK(updateStatus);
-
-    // Object did not move. We update each index with each respective UpdateTicket.
+    // Update each index with each respective UpdateTicket.
     if (indexesAffected) {
         IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(opCtx, true);
         while (ii.more()) {
@@ -699,61 +693,6 @@ RecordId CollectionImpl::updateDocument(OperationContext* opCtx,
     return {oldLocation};
 }
 
-StatusWith<RecordId> CollectionImpl::_updateDocumentWithMove(OperationContext* opCtx,
-                                                             const RecordId& oldLocation,
-                                                             const Snapshotted<BSONObj>& oldDoc,
-                                                             const BSONObj& newDoc,
-                                                             bool enforceQuota,
-                                                             OpDebug* opDebug,
-                                                             OplogUpdateEntryArgs* args,
-                                                             const SnapshotId& sid) {
-    invariant(isMMAPV1());
-    // Insert new record.
-    StatusWith<RecordId> newLocation = _recordStore->insertRecord(
-        opCtx, newDoc.objdata(), newDoc.objsize(), Timestamp(), _enforceQuota(enforceQuota));
-    if (!newLocation.isOK()) {
-        return newLocation;
-    }
-
-    invariant(newLocation.getValue() != oldLocation);
-
-    _cursorManager.invalidateDocument(opCtx, oldLocation, INVALIDATION_DELETION);
-
-    args->preImageDoc = oldDoc.value().getOwned();
-
-    // Remove indexes for old record.
-    int64_t keysDeleted;
-    _indexCatalog.unindexRecord(opCtx, oldDoc.value(), oldLocation, true, &keysDeleted);
-
-    // Remove old record.
-    _recordStore->deleteRecord(opCtx, oldLocation);
-
-    std::vector<BsonRecord> bsonRecords;
-    BsonRecord bsonRecord = {newLocation.getValue(), Timestamp(), &newDoc};
-    bsonRecords.push_back(bsonRecord);
-
-    // Add indexes for new record.
-    int64_t keysInserted;
-    Status status = _indexCatalog.indexRecords(opCtx, bsonRecords, &keysInserted);
-    if (!status.isOK()) {
-        return StatusWith<RecordId>(status);
-    }
-
-    invariant(sid == opCtx->recoveryUnit()->getSnapshotId());
-    args->updatedDoc = newDoc;
-
-    getGlobalServiceContext()->getOpObserver()->onUpdate(opCtx, *args);
-
-    moveCounter.increment();
-    if (opDebug) {
-        opDebug->additiveMetrics.incrementNmoved(1);
-        opDebug->additiveMetrics.incrementKeysInserted(keysInserted);
-        opDebug->additiveMetrics.incrementKeysDeleted(keysDeleted);
-    }
-
-    return newLocation;
-}
-
 Status CollectionImpl::recordStoreGoingToUpdateInPlace(OperationContext* opCtx,
                                                        const RecordId& loc) {
     // Broadcast the mutation so that query results stay correct.
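With NeedsDocumentMove retired, RecordStore::updateRecord() either succeeds in place or fails outright, so a RecordId can never change across an update. A condensed sketch of the caller-side contract this leaves behind, reusing the updateRecord() signature visible in the hunks above (an illustration, not code from this patch):

    // A non-OK status is now always a genuine error; there is no special
    // NeedsDocumentMove value asking the caller to insert a new record and
    // delete the old one.
    Status status = rs->updateRecord(
        opCtx, loc, newDoc.objdata(), newDoc.objsize(), /*enforceQuota=*/false, nullptr);
    uassertStatusOK(status);  // on success, 'loc' still addresses the record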
diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h
index f426354c713..1530760be64 100644
--- a/src/mongo/db/catalog/collection_impl.h
+++ b/src/mongo/db/catalog/collection_impl.h
@@ -401,18 +401,6 @@ private:
                                OpDebug* opDebug);
 
-    /**
-     * Perform update when document move will be required.
-     */
-    StatusWith<RecordId> _updateDocumentWithMove(OperationContext* opCtx,
-                                                 const RecordId& oldLocation,
-                                                 const Snapshotted<BSONObj>& oldDoc,
-                                                 const BSONObj& newDoc,
-                                                 bool enforceQuota,
-                                                 OpDebug* opDebug,
-                                                 OplogUpdateEntryArgs* args,
-                                                 const SnapshotId& sid);
-
     bool _enforceQuota(bool userEnforeQuota) const;
 
     int _magic;
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index f6ff27d92f6..22dad6daeac 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -571,14 +571,6 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec)
         }
     }
 
-    // SERVER-16893 Forbid use of v0 indexes with non-mmapv1 engines
-    if (indexVersion == IndexVersion::kV0 &&
-        !opCtx->getServiceContext()->getStorageEngine()->isMmapV1()) {
-        return Status(ErrorCodes::CannotCreateIndex,
-                      str::stream() << "use of v0 indexes is only allowed with the "
-                                    << "mmapv1 storage engine");
-    }
-
     if (!IndexDescriptor::isIndexVersionSupported(indexVersion)) {
         return Status(ErrorCodes::CannotCreateIndex,
                       str::stream() << "this version of mongod cannot build new indexes "
diff --git a/src/mongo/db/commands/do_txn_cmd.cpp b/src/mongo/db/commands/do_txn_cmd.cpp
index cacc9aae349..e71036dab00 100644
--- a/src/mongo/db/commands/do_txn_cmd.cpp
+++ b/src/mongo/db/commands/do_txn_cmd.cpp
@@ -134,9 +134,6 @@ public:
              const std::string& dbname,
              const BSONObj& cmdObj,
              BSONObjBuilder& result) override {
-        uassert(ErrorCodes::CommandNotSupported,
-                "This storage engine does not support transactions.",
-                !opCtx->getServiceContext()->getStorageEngine()->isMmapV1());
 
         validateDoTxnCommand(cmdObj);
diff --git a/src/mongo/db/index/index_descriptor.cpp b/src/mongo/db/index/index_descriptor.cpp
index 71fe92d0e47..76cccb142ae 100644
--- a/src/mongo/db/index/index_descriptor.cpp
+++ b/src/mongo/db/index/index_descriptor.cpp
@@ -99,6 +99,7 @@ constexpr StringData IndexDescriptor::kWeightsFieldName;
 bool IndexDescriptor::isIndexVersionSupported(IndexVersion indexVersion) {
     switch (indexVersion) {
         case IndexVersion::kV0:
+            return false;
         case IndexVersion::kV1:
         case IndexVersion::kV2:
             return true;
@@ -107,7 +108,7 @@ bool IndexDescriptor::isIndexVersionSupported(IndexVersion indexVersion) {
 
 std::set<IndexVersion> IndexDescriptor::getSupportedIndexVersions() {
-    return {IndexVersion::kV0, IndexVersion::kV1, IndexVersion::kV2};
+    return {IndexVersion::kV1, IndexVersion::kV2};
 }
 
 Status IndexDescriptor::isIndexVersionAllowedForCreation(
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index e5cec9c985b..0b4262b3e85 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -295,15 +295,6 @@ Status _applyOps(OperationContext* opCtx,
         (*numApplied)++;
 
         if (MONGO_FAIL_POINT(applyOpsPauseBetweenOperations)) {
-            // While holding a database lock under MMAPv1, we would be implicitly holding the
-            // flush lock here. This would prevent other threads from acquiring the global
-            // lock or any database locks. We release all locks temporarily while the fail
-            // point is enabled to allow other threads to make progress.
-            boost::optional<Lock::TempRelease> release;
-            auto storageEngine = opCtx->getServiceContext()->getStorageEngine();
-            if (storageEngine->isMmapV1() && !opCtx->lockState()->isW()) {
-                release.emplace(opCtx->lockState());
-            }
             MONGO_FAIL_POINT_PAUSE_WHILE_SET(applyOpsPauseBetweenOperations);
         }
     }
diff --git a/src/mongo/db/repl/dbcheck.h b/src/mongo/db/repl/dbcheck.h
index a8d1339d3a7..e7fc1921a4e 100644
--- a/src/mongo/db/repl/dbcheck.h
+++ b/src/mongo/db/repl/dbcheck.h
@@ -176,8 +176,7 @@ private:
 
 /**
  * Get the given collection in MODE_S, except that if the collection is missing it will report that
- * to the health log, and it takes an IX lock on "local" as a workaround to SERVER-28544 and to
- * ensure correct flush lock acquisition for MMAPV1.
+ * to the health log, and it takes an IX lock on "local" as a workaround to SERVER-28544.
  */
 class AutoGetCollectionForDbCheck {
 public:
diff --git a/src/mongo/db/repl/do_txn.cpp b/src/mongo/db/repl/do_txn.cpp
index 33d306d5fce..afaa4ccb212 100644
--- a/src/mongo/db/repl/do_txn.cpp
+++ b/src/mongo/db/repl/do_txn.cpp
@@ -183,15 +183,6 @@ Status _doTxn(OperationContext* opCtx,
         (*numApplied)++;
 
         if (MONGO_FAIL_POINT(doTxnPauseBetweenOperations)) {
-            // While holding a database lock under MMAPv1, we would be implicitly holding the
-            // flush lock here. This would prevent other threads from acquiring the global
-            // lock or any database locks. We release all locks temporarily while the fail
-            // point is enabled to allow other threads to make progress.
-            boost::optional<Lock::TempRelease> release;
-            auto storageEngine = opCtx->getServiceContext()->getStorageEngine();
-            if (storageEngine->isMmapV1() && !opCtx->lockState()->isW()) {
-                release.emplace(opCtx->lockState());
-            }
             MONGO_FAIL_POINT_PAUSE_WHILE_SET(doTxnPauseBetweenOperations);
         }
     }
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 94fbca8fa6f..ad8f5ee1848 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -730,14 +730,6 @@ private:
 };
 
 void SyncTail::oplogApplication(OplogBuffer* oplogBuffer, ReplicationCoordinator* replCoord) {
-    if (isMMAPV1()) {
-        // Overwrite prefetch index mode if ReplSettings has a mode set.
-        auto&& replSettings = replCoord->getSettings();
-        if (replSettings.isPrefetchIndexModeSet()) {
-            replCoord->setIndexPrefetchConfig(replSettings.getPrefetchIndexMode());
-        }
-    }
-
     // We don't start data replication for arbiters at all and it's not allowed to reconfig
     // arbiterOnly field for any member.
     invariant(!replCoord->getMemberState().arbiter());
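For readability, here is the post-patch shape of isIndexVersionSupported() reassembled from the two index_descriptor.cpp hunks above (the trailing MONGO_UNREACHABLE is an assumption; the hunk ends before the function's final lines):

    bool IndexDescriptor::isIndexVersionSupported(IndexVersion indexVersion) {
        switch (indexVersion) {
            case IndexVersion::kV0:
                return false;  // v0 was only ever buildable under MMAPv1
            case IndexVersion::kV1:
            case IndexVersion::kV2:
                return true;
        }
        MONGO_UNREACHABLE;
    }

This generic check is what lets index_catalog_impl.cpp drop its storage-engine probe: a {v: 0} spec now fails _isSpecOk() with CannotCreateIndex on every engine instead of being special-cased for non-mmapv1 engines.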
diff --git a/src/mongo/db/service_context.cpp b/src/mongo/db/service_context.cpp
index a3749d29ce0..bac2809f798 100644
--- a/src/mongo/db/service_context.cpp
+++ b/src/mongo/db/service_context.cpp
@@ -85,13 +85,6 @@ bool supportsDocLocking() {
     return _supportsDocLocking;
 }
 
-bool isMMAPV1() {
-    StorageEngine* globalStorageEngine = getGlobalServiceContext()->getStorageEngine();
-
-    invariant(globalStorageEngine);
-    return globalStorageEngine->isMmapV1();
-}
-
 ServiceContext::ServiceContext()
     : _tickSource(stdx::make_unique<SystemTickSource>()),
       _fastClockSource(stdx::make_unique<SystemClockSource>()),
diff --git a/src/mongo/db/service_context.h b/src/mongo/db/service_context.h
index bcd4f67eee3..14d95b14ca3 100644
--- a/src/mongo/db/service_context.h
+++ b/src/mongo/db/service_context.h
@@ -616,9 +616,4 @@ void setGlobalServiceContext(ServiceContext::UniqueServiceContext&& serviceContext);
  */
 bool supportsDocLocking();
 
-/**
- * Returns true if the storage engine in use is MMAPV1.
- */
-bool isMMAPV1();
-
 }  // namespace mongo
diff --git a/src/mongo/db/session.cpp b/src/mongo/db/session.cpp
index ac81a40ea12..5ddcdff9acf 100644
--- a/src/mongo/db/session.cpp
+++ b/src/mongo/db/session.cpp
@@ -714,7 +714,6 @@ void Session::stashTransactionResources(OperationContext* opCtx) {
     // effectively owns the Session. That is, a user might lock the Client to ensure it doesn't go
     // away, and then lock the Session owned by that client. We rely on the fact that we are not
     // using the DefaultLockerImpl to avoid deadlock.
-    invariant(!isMMAPV1());
     stdx::lock_guard<Client> lk(*opCtx->getClient());
     stdx::unique_lock<stdx::mutex> lg(_mutex);
 
@@ -752,12 +751,6 @@ void Session::unstashTransactionResources(OperationContext* opCtx, const std::string& cmdName) {
     invariant(opCtx->getTxnNumber());
 
-    // If the storage engine is mmapv1, it is not safe to lock both the Client and the Session
-    // mutex. This is fine because mmapv1 does not support transactions.
-    if (isMMAPV1()) {
-        return;
-    }
-
     {
         // We must lock the Client to change the Locker on the OperationContext and the Session
         // mutex to access Session state. We must lock the Client before the Session mutex, since
diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h
index 0c76bf0e05b..e09ef89980b 100644
--- a/src/mongo/db/storage/record_store.h
+++ b/src/mongo/db/storage/record_store.h
@@ -426,13 +426,6 @@ public:
      * @param notifier - Only used by record stores which do not support doc-locking. Called only
      *                   in the case of an in-place update. Called just before the in-place write
      *                   occurs.
-     * @return Status - If a document move is required (MMAPv1 only) then a status of
-     *                  ErrorCodes::NeedsDocumentMove will be returned. On receipt of this status
-     *                  no update will be performed. It is the caller's responsibility to:
-     *                  1. Remove the existing document and associated index keys.
-     *                  2. Insert a new document and index keys.
-     *
-     * For capped record stores, the record size will never change.
      */
     virtual Status updateRecord(OperationContext* opCtx,
                                 const RecordId& oldLocation,
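With document moves gone, the notifier argument documented above has exactly one remaining duty: the in-place callback. A minimal sketch of a counting notifier in the spirit of the tests that follow (the spy class and its names are hypothetical; recordStoreGoingToUpdateInPlace() is the real interface method, as implemented by CollectionImpl in the first file of this patch):

    // Hypothetical test double: counts in-place notifications, which are now
    // the only notifications a RecordStore can deliver.
    class InPlaceUpdateSpy : public UpdateNotifier {
    public:
        Status recordStoreGoingToUpdateInPlace(OperationContext* opCtx,
                                               const RecordId& loc) override {
            _numInPlaceCallbacks++;
            return Status::OK();
        }
        int numInPlaceCallbacks() const {
            return _numInPlaceCallbacks;
        }

    private:
        int _numInPlaceCallbacks = 0;
    };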
diff --git a/src/mongo/db/storage/record_store_test_harness.cpp b/src/mongo/db/storage/record_store_test_harness.cpp
index 52dcaec6766..2287e03052c 100644
--- a/src/mongo/db/storage/record_store_test_harness.cpp
+++ b/src/mongo/db/storage/record_store_test_harness.cpp
@@ -266,20 +266,7 @@ TEST(RecordStoreTestHarness, Update1) {
             WriteUnitOfWork uow(opCtx.get());
             Status status =
                 rs->updateRecord(opCtx.get(), loc, s2.c_str(), s2.size() + 1, false, NULL);
-
-            if (ErrorCodes::NeedsDocumentMove == status) {
-                // NeedsDocumentMove should only be possible under MMAPv1. We don't have the means
-                // to check storageEngine here but asserting 'supportsDocLocking()' is false
-                // provides an equivalent check as only MMAPv1 will/should return false.
-                ASSERT_FALSE(harnessHelper->supportsDocLocking());
-                StatusWith<RecordId> newLocation =
-                    rs->insertRecord(opCtx.get(), s2.c_str(), s2.size() + 1, Timestamp(), false);
-                ASSERT_OK(newLocation.getStatus());
-                rs->deleteRecord(opCtx.get(), loc);
-                loc = newLocation.getValue();
-            } else {
-                ASSERT_OK(status);
-            }
+            ASSERT_OK(status);
             uow.commit();
         }
diff --git a/src/mongo/db/storage/record_store_test_updaterecord.cpp b/src/mongo/db/storage/record_store_test_updaterecord.cpp
index 32c2545f65e..f9c9e33c375 100644
--- a/src/mongo/db/storage/record_store_test_updaterecord.cpp
+++ b/src/mongo/db/storage/record_store_test_updaterecord.cpp
@@ -82,16 +82,7 @@ TEST(RecordStoreTestHarness, UpdateRecord) {
             WriteUnitOfWork uow(opCtx.get());
             Status res =
                 rs->updateRecord(opCtx.get(), loc, data.c_str(), data.size() + 1, false, NULL);
-
-            if (ErrorCodes::NeedsDocumentMove == res) {
-                StatusWith<RecordId> newLocation = rs->insertRecord(
-                    opCtx.get(), data.c_str(), data.size() + 1, Timestamp(), false);
-                ASSERT_OK(newLocation.getStatus());
-                rs->deleteRecord(opCtx.get(), loc);
-                loc = newLocation.getValue();
-            } else {
-                ASSERT_OK(res);
-            }
+            ASSERT_OK(res);
             uow.commit();
         }
@@ -150,16 +141,7 @@ TEST(RecordStoreTestHarness, UpdateMultipleRecords) {
             WriteUnitOfWork uow(opCtx.get());
             Status res =
                 rs->updateRecord(opCtx.get(), locs[i], data.c_str(), data.size() + 1, false, NULL);
-
-            if (ErrorCodes::NeedsDocumentMove == res) {
-                StatusWith<RecordId> newLocation = rs->insertRecord(
-                    opCtx.get(), data.c_str(), data.size() + 1, Timestamp(), false);
-                ASSERT_OK(newLocation.getStatus());
-                rs->deleteRecord(opCtx.get(), locs[i]);
-                locs[i] = newLocation.getValue();
-            } else {
-                ASSERT_OK(res);
-            }
+            ASSERT_OK(res);
             uow.commit();
         }
@@ -217,18 +199,8 @@ TEST(RecordStoreTestHarness, UpdateRecordWithMoveNotifier) {
             WriteUnitOfWork uow(opCtx.get());
             Status res = rs->updateRecord(
                 opCtx.get(), loc, newData.c_str(), newData.size() + 1, false, &umn);
-
-            if (ErrorCodes::NeedsDocumentMove == res) {
-                StatusWith<RecordId> newLocation = rs->insertRecord(
-                    opCtx.get(), newData.c_str(), newData.size() + 1, Timestamp(), false);
-                ASSERT_OK(newLocation.getStatus());
-                rs->deleteRecord(opCtx.get(), loc);
-                loc = newLocation.getValue();
-                ASSERT_EQUALS(0, umn.numInPlaceCallbacks());
-            } else {
-                ASSERT_OK(res);
-                ASSERT_GTE(1, umn.numInPlaceCallbacks());
-            }
+            ASSERT_OK(res);
+            ASSERT_GTE(1, umn.numInPlaceCallbacks());
             uow.commit();
         }
diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h
index 41c937019cd..5635070a7d9 100644
--- a/src/mongo/db/storage/storage_engine.h
+++ b/src/mongo/db/storage/storage_engine.h
@@ -208,13 +208,6 @@ public:
     virtual bool isEphemeral() const = 0;
 
     /**
-     * Only MMAPv1 should override this and return true to trigger MMAPv1-specific behavior.
-     */
-    virtual bool isMmapV1() const {
-        return false;
-    }
-
-    /**
      * Populates and tears down in-memory data structures, respectively. Only required for storage
     * engines that support recoverToStableTimestamp().
     *
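All three test rewrites above encode the same strengthened invariant: an update can never relocate a record, so a RecordId obtained before the write stays valid after it. Condensed from the post-patch test bodies (illustrative; assumes the record store and 'loc' were set up earlier in the test, as in the hunks above):

    WriteUnitOfWork uow(opCtx.get());
    ASSERT_OK(rs->updateRecord(opCtx.get(), loc, s2.c_str(), s2.size() + 1, false, NULL));
    uow.commit();
    // 'loc' still addresses the (updated) record; the insert/delete fallback
    // that handled ErrorCodes::NeedsDocumentMove is gone.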
diff --git a/src/mongo/dbtests/multikey_paths_test.cpp b/src/mongo/dbtests/multikey_paths_test.cpp
index bc47129d279..d3ee08fcc8a 100644
--- a/src/mongo/dbtests/multikey_paths_test.cpp
+++ b/src/mongo/dbtests/multikey_paths_test.cpp
@@ -92,18 +92,14 @@ public:
         const IndexCatalogEntry* ice = indexCatalog->getEntry(desc);
         auto actualMultikeyPaths = ice->getMultikeyPaths(_opCtx.get());
-        if (storageEngineSupportsPathLevelMultikeyTracking()) {
-            ASSERT_FALSE(actualMultikeyPaths.empty());
-            const bool match = (expectedMultikeyPaths == actualMultikeyPaths);
-            if (!match) {
-                FAIL(str::stream() << "Expected: " << dumpMultikeyPaths(expectedMultikeyPaths)
-                                   << ", Actual: "
-                                   << dumpMultikeyPaths(actualMultikeyPaths));
-            }
-            ASSERT_TRUE(match);
-        } else {
-            ASSERT_TRUE(actualMultikeyPaths.empty());
+        ASSERT_FALSE(actualMultikeyPaths.empty());
+        const bool match = (expectedMultikeyPaths == actualMultikeyPaths);
+        if (!match) {
+            FAIL(str::stream() << "Expected: " << dumpMultikeyPaths(expectedMultikeyPaths)
+                               << ", Actual: "
+                               << dumpMultikeyPaths(actualMultikeyPaths));
         }
+        ASSERT_TRUE(match);
     }
 
 protected:
@@ -111,14 +107,6 @@ protected:
     const NamespaceString _nss;
 
 private:
-    bool storageEngineSupportsPathLevelMultikeyTracking() {
-        // Path-level multikey tracking is supported for all storage engines that use the
-        // KVCatalog. MMAPv1 is the only storage engine that does not.
-        //
-        // TODO SERVER-22727: Store path-level multikey information in MMAPv1 index catalog.
-        return !getGlobalServiceContext()->getStorageEngine()->isMmapV1();
-    }
-
     std::string dumpMultikeyPaths(const MultikeyPaths& multikeyPaths) {
         std::stringstream ss;
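For context on what the now-unconditional assertions compare: MultikeyPaths holds one set of path-component indexes per indexed field, and every remaining storage engine persists it through the KVCatalog. A hedged illustration with made-up index and document (the assertion helper in the hunk above is not named there, so the call below is hypothetical):

    // For an index on {a: 1, "b.c": 1}, after inserting {a: [1, 2], b: {c: 3}}:
    // component 0 of path "a" is multikey; no component of "b.c" is.
    MultikeyPaths expectedMultikeyPaths{{0U}, {}};
    assertMultikeyPaths(BSON("a" << 1 << "b.c" << 1), expectedMultikeyPaths);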
diff --git a/src/mongo/dbtests/oplogstarttests.cpp b/src/mongo/dbtests/oplogstarttests.cpp
index 243ec79c89a..46843286ff5 100644
--- a/src/mongo/dbtests/oplogstarttests.cpp
+++ b/src/mongo/dbtests/oplogstarttests.cpp
@@ -46,11 +46,6 @@ static const NamespaceString nss("unittests.oplogstarttests");
 class Base {
 public:
     Base() : _lk(&_opCtx), _context(&_opCtx, nss.ns()), _client(&_opCtx) {
-        // Replication is not supported by mobile SE.
-        if (mongo::storageGlobalParams.engine == "mobile") {
-            return;
-        }
         Collection* c = _context.db()->getCollection(&_opCtx, nss);
         if (!c) {
             WriteUnitOfWork wuow(&_opCtx);
@@ -61,10 +56,6 @@ public:
     }
 
     ~Base() {
-        // Replication is not supported by mobile SE.
-        if (mongo::storageGlobalParams.engine == "mobile") {
-            return;
-        }
         client()->dropCollection(nss.ns());
 
         // The OplogStart stage is not allowed to outlive it's RecoveryUnit.
@@ -125,10 +116,6 @@ private:
 class OplogStartIsOldest : public Base {
 public:
     void run() {
-        // Replication is not supported by mobile SE.
-        if (mongo::storageGlobalParams.engine == "mobile") {
-            return;
-        }
         for (int i = 0; i < 10; ++i) {
             client()->insert(nss.ns(), BSON("_id" << i << "ts" << Timestamp(1000, i)));
         }
@@ -153,10 +140,6 @@ public:
 class OplogStartIsNewest : public Base {
 public:
     void run() {
-        // Replication is not supported by mobile SE.
-        if (mongo::storageGlobalParams.engine == "mobile") {
-            return;
-        }
         for (int i = 0; i < 10; ++i) {
             client()->insert(nss.ns(), BSON("_id" << i << "ts" << Timestamp(1000, i)));
         }
@@ -177,278 +160,17 @@ public:
     }
 };
 
-/**
- * Find the starting oplog record by hopping to the
- * beginning of the extent.
- */
-class OplogStartIsNewestExtentHop : public Base {
-public:
-    void run() {
-        // Replication is not supported by mobile SE.
-        if (mongo::storageGlobalParams.engine == "mobile") {
-            return;
-        }
-        for (int i = 0; i < 10; ++i) {
-            client()->insert(nss.ns(), BSON("_id" << i << "ts" << Timestamp(1000, i)));
-        }
-
-        setupFromQuery(BSON("ts" << BSON("$gte" << Timestamp(1000, 1))));
-
-        WorkingSetID id = WorkingSet::INVALID_ID;
-        // ensure that we go into extent hopping mode immediately
-        _stage->setBackwardsScanTime(0);
-
-        // We immediately switch to extent hopping mode, and
-        // should find the beginning of the extent
-        ASSERT_EQUALS(_stage->work(&id), PlanStage::ADVANCED);
-        ASSERT(_stage->isExtentHopping());
-
-        assertWorkingSetMemberHasId(id, 0);
-    }
-};
-
-class SizedExtentHopBase : public Base {
+class All : public Suite {
 public:
-    SizedExtentHopBase() {
-        // Replication is not supported by mobile SE.
-        if (mongo::storageGlobalParams.engine == "mobile") {
-            return;
-        }
-        client()->dropCollection(nss.ns());
-    }
-    virtual ~SizedExtentHopBase() {
-        // Replication is not supported by mobile SE.
-        if (mongo::storageGlobalParams.engine == "mobile") {
-            return;
-        }
-        client()->dropCollection(nss.ns());
-    }
+    All() : Suite("oplogstart") {}
 
-    void run() {
+    void setupTests() {
+        // Replication is not supported by mobile SE.
         if (mongo::storageGlobalParams.engine == "mobile") {
             return;
         }
-        buildCollection();
-
-        WorkingSetID id = WorkingSet::INVALID_ID;
-        setupFromQuery(BSON("ts" << BSON("$gte" << Timestamp(1000, tsGte()))));
-
-        // ensure that we go into extent hopping mode immediately
-        _stage->setBackwardsScanTime(0);
-
-        // hop back extent by extent
-        for (int i = 0; i < numHops(); i++) {
-            ASSERT_EQUALS(_stage->work(&id), PlanStage::NEED_TIME);
-            ASSERT(_stage->isExtentHopping());
-        }
-        // find the right loc without hopping again
-        ASSERT_EQUALS(_stage->work(&id), finalState());
-
-        int startDocId = tsGte() - 1;
-        if (startDocId >= 0) {
-            assertWorkingSetMemberHasId(id, startDocId);
-        }
-    }
-
-protected:
-    void buildCollection() {
-        BSONObj info;
-        // Create a collection with specified extent sizes
-        BSONObj command =
-            BSON("create" << nss.coll() << "capped" << true << "$nExtents" << extentSizes());
-        ASSERT(client()->runCommand(nss.db().toString(), command, info));
-
-        // Populate documents.
-        for (int i = 0; i < numDocs(); ++i) {
-            client()->insert(
-                nss.ns(),
-                BSON("_id" << i << "ts" << Timestamp(1000, i + 1) << "payload" << payload8k()));
-        }
-    }
-
-    static string payload8k() {
-        return string(8 * 1024, 'a');
-    }
-    /** An extent of this size is too small to contain one document containing payload8k(). */
-    static int tooSmall() {
-        return 1 * 1024;
-    }
-    /** An extent of this size fits one document. */
-    static int fitsOne() {
-        return 10 * 1024;
-    }
-    /** An extent of this size fits many documents. */
-    static int fitsMany() {
-        return 50 * 1024;
-    }
-
-    // to be defined by subclasses
-    virtual BSONArray extentSizes() const = 0;
-    virtual int numDocs() const = 0;
-    virtual int numHops() const = 0;
-    virtual PlanStage::StageState finalState() const {
-        return PlanStage::ADVANCED;
-    }
-    virtual int tsGte() const {
-        return 1;
-    }
-};
-
-/**
- * Test hopping over a single empty extent.
- *
- * Collection structure:
- *
- * [--- extent 0 --] [ ext 1 ] [--- extent 2 ---]
- * [ {_id: 0} ]      [<empty>] [ {_id: 1} ]
- */
-class OplogStartOneEmptyExtent : public SizedExtentHopBase {
-    virtual int numDocs() const {
-        return 2;
-    }
-    virtual int numHops() const {
-        return 1;
-    }
-    virtual BSONArray extentSizes() const {
-        return BSON_ARRAY(fitsOne() << tooSmall() << fitsOne());
-    }
-};
-
-/**
- * Test hopping over two consecutive empty extents.
- *
- * Collection structure:
- *
- * [--- extent 0 --] [ ext 1 ] [ ext 2 ] [--- extent 3 ---]
- * [ {_id: 0} ]      [<empty>] [<empty>] [ {_id: 1} ]
- */
-class OplogStartTwoEmptyExtents : public SizedExtentHopBase {
-    virtual int numDocs() const {
-        return 2;
-    }
-    virtual int numHops() const {
-        return 1;
-    }
-    virtual BSONArray extentSizes() const {
-        return BSON_ARRAY(fitsOne() << tooSmall() << tooSmall() << fitsOne());
-    }
-};
-
-/**
- * Two extents, each filled with several documents. This
- * should require us to make just a single extent hop.
- */
-class OplogStartTwoFullExtents : public SizedExtentHopBase {
-    virtual int numDocs() const {
-        return 10;
-    }
-    virtual int numHops() const {
-        return 1;
-    }
-    virtual BSONArray extentSizes() const {
-        return BSON_ARRAY(fitsMany() << fitsMany());
-    }
-};
-
-/**
- * Four extents in total. Three are populated with multiple
- * documents, but one of the middle extents is empty. This
- * should require two extent hops.
- */
-class OplogStartThreeFullOneEmpty : public SizedExtentHopBase {
-    virtual int numDocs() const {
-        return 14;
-    }
-    virtual int numHops() const {
-        return 2;
-    }
-    virtual BSONArray extentSizes() const {
-        return BSON_ARRAY(fitsMany() << fitsMany() << tooSmall() << fitsMany());
-    }
-};
-
-/**
- * Test that extent hopping mode works properly in the
- * special case of one extent.
- */
-class OplogStartOneFullExtent : public SizedExtentHopBase {
-    virtual int numDocs() const {
-        return 4;
-    }
-    virtual int numHops() const {
-        return 0;
-    }
-    virtual BSONArray extentSizes() const {
-        return BSON_ARRAY(fitsMany());
-    }
-};
-
-/**
- * Collection structure:
- *
- * [ ext 0 ] [--- extent 1 --] [--- extent 2 ---]
- * [<empty>] [ {_id: 0} ]      [ {_id: 1} ]
- */
-class OplogStartFirstExtentEmpty : public SizedExtentHopBase {
-    virtual int numDocs() const {
-        return 2;
-    }
-    virtual int numHops() const {
-        return 1;
-    }
-    virtual BSONArray extentSizes() const {
-        return BSON_ARRAY(tooSmall() << fitsOne() << fitsOne());
-    }
-};
-
-/**
- * Find that we need to start from the very beginning of
- * the collection (the EOF case), after extent hopping
- * to the beginning.
- *
- * This requires two hops: one between the two extents,
- * and one to hop back to the "null extent" which precedes
- * the first extent.
- */
-class OplogStartEOF : public SizedExtentHopBase {
-    virtual int numDocs() const {
-        return 2;
-    }
-    virtual int numHops() const {
-        return 2;
-    }
-    virtual BSONArray extentSizes() const {
-        return BSON_ARRAY(fitsOne() << fitsOne());
-    }
-    virtual PlanStage::StageState finalState() const {
-        return PlanStage::IS_EOF;
-    }
-    virtual int tsGte() const {
-        return 0;
-    }
-};
-
-class All : public Suite {
-public:
-    All() : Suite("oplogstart") {}
-
-    void setupTests() {
         add<OplogStartIsOldest>();
         add<OplogStartIsNewest>();
-
-        // These tests rely on extent allocation details specific to mmapv1.
-        // TODO figure out a way to generically test this.
-        if (getGlobalServiceContext()->getStorageEngine()->isMmapV1()) {
-            add<OplogStartIsNewestExtentHop>();
-            add<OplogStartOneEmptyExtent>();
-            add<OplogStartTwoEmptyExtents>();
-            add<OplogStartTwoFullExtents>();
-            add<OplogStartThreeFullOneEmpty>();
-            add<OplogStartOneFullExtent>();
-            add<OplogStartFirstExtentEmpty>();
-            add<OplogStartEOF>();
-        }
     }
 };