author     wolfkdy <kdy71107216@aliyun.com>          2017-03-04 16:10:58 +0800
committer  Eric Milkie <milkie@10gen.com>            2018-12-07 08:26:28 -0500
commit     0802b5f4033e481cc3c2830d6f14a3fdd5b3ffcd (patch)
tree       f78de9f429aa8c42ca43a34d9667958282212988
parent     6a6cd97acc29a0d7e5b001588ca71960a8a7f247 (diff)
download   mongo-0802b5f4033e481cc3c2830d6f14a3fdd5b3ffcd.tar.gz
SERVER-22766 wiredtiger engine support update oplogsize online
(cherry picked from commit 93beb0234eba9dc58ab6070ad472022f96e019e6)
15 files changed, 237 insertions, 90 deletions
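
For context: this commit adds an admin-only replSetResizeOplog command that resizes the WiredTiger oplog without restarting mongod. A minimal mongo shell sketch of how the command added here would be invoked, assuming a mongod built with this patch; note that in this version the size argument is validated in bytes (the floor is the 990LL * 1024 * 1024 check in resize_oplog.cpp below), and the exact sizes shown are illustrative:

// Run against the admin database of a replica-set member.
var admin = db.getSiblingDB("admin");

// Resize the oplog to 2GB (in bytes). Requires the replSetResizeOplog action,
// which this commit adds to the hostManager built-in role's cluster actions.
var res = admin.runCommand({replSetResizeOplog: 1, size: 2 * 1024 * 1024 * 1024});
printjson(res);  // {ok: 1} on success; InvalidOptions if size is not a number or below the floor

// The new cap should be reflected in the capped collection's stats.
var stats = db.getSiblingDB("local").oplog.rs.stats();
print("maxSize: " + stats.maxSize);
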
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index e1798803a1b..a37baba961d 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -399,6 +399,7 @@
         replSetSyncFrom: {skip: isUnrelated},
         replSetTest: {skip: isUnrelated},
         replSetUpdatePosition: {skip: isUnrelated},
+        replSetResizeOplog: {skip: isUnrelated},
         resetError: {skip: isUnrelated},
         resync: {skip: isUnrelated},
         revokePrivilegesFromRole: {
diff --git a/src/mongo/db/auth/action_types.txt b/src/mongo/db/auth/action_types.txt
index 3c560ccf9b8..2f049dd91c4 100644
--- a/src/mongo/db/auth/action_types.txt
+++ b/src/mongo/db/auth/action_types.txt
@@ -86,6 +86,7 @@
 "replSetGetStatus",
 "replSetHeartbeat",
 "replSetReconfig", # Not used for permissions checks, but to id the event in logs.
+"replSetResizeOplog",
 "replSetStateChange",
 "resync",
 "revokeRole",
diff --git a/src/mongo/db/auth/role_graph_builtin_roles.cpp b/src/mongo/db/auth/role_graph_builtin_roles.cpp
index 3e8dd382a50..6273f4505f5 100644
--- a/src/mongo/db/auth/role_graph_builtin_roles.cpp
+++ b/src/mongo/db/auth/role_graph_builtin_roles.cpp
@@ -217,6 +217,7 @@ MONGO_INITIALIZER(AuthorizationBuiltinRoles)(InitializerContext* context) {
         << ActionType::fsync
         << ActionType::invalidateUserCache  // userAdminAnyDatabase gets this also
         << ActionType::killop
+        << ActionType::replSetResizeOplog
         << ActionType::resync;  // clusterManager gets this also
 
     // hostManager role actions that target the database resource
diff --git a/src/mongo/db/catalog/collection_catalog_entry.h b/src/mongo/db/catalog/collection_catalog_entry.h
index 38e5b12d234..6e3b2a11284 100644
--- a/src/mongo/db/catalog/collection_catalog_entry.h
+++ b/src/mongo/db/catalog/collection_catalog_entry.h
@@ -137,6 +137,11 @@ public:
                                  StringData validationLevel,
                                  StringData validationAction) = 0;
 
+    /**
+     * Updates size of a capped Collection.
+     */
+    virtual void updateCappedSize(OperationContext* opCtx, long long size) = 0;
+
 private:
     NamespaceString _ns;
 };
diff --git a/src/mongo/db/commands/SConscript b/src/mongo/db/commands/SConscript
index e8d1f572ccb..6a394e54e97 100644
--- a/src/mongo/db/commands/SConscript
+++ b/src/mongo/db/commands/SConscript
@@ -126,6 +126,7 @@ env.Library(
         "plan_cache_commands.cpp",
         "rename_collection_cmd.cpp",
         "repair_cursor.cpp",
+        "resize_oplog.cpp",
         "set_feature_compatibility_version_command.cpp",
         "snapshot_management.cpp",
         "test_commands.cpp",
diff --git a/src/mongo/db/commands/resize_oplog.cpp b/src/mongo/db/commands/resize_oplog.cpp
new file mode 100644
index 00000000000..e77f140bb04
--- /dev/null
+++ b/src/mongo/db/commands/resize_oplog.cpp
@@ -0,0 +1,129 @@
+/**
+* Copyright (C) 2017 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*
+* As a special exception, the copyright holders give permission to link the
+* code of portions of this program with the OpenSSL library under certain
+* conditions as described in each individual source file and distribute
+* linked combinations including the program with the OpenSSL library. You
+* must comply with the GNU Affero General Public License in all respects for
+* all of the code used other than as permitted herein. If you modify file(s)
+* with this exception, you may extend this exception to your version of the
+* file(s), but you are not obligated to do so. If you do not wish to do so,
+* delete this exception statement from your version. If you delete this
+* exception statement from all source files in the program, then also delete
+* it in the license file.
+*/
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
+
+#include "mongo/platform/basic.h"
+
+#include <string>
+
+#include "mongo/db/auth/authorization_manager.h"
+#include "mongo/db/auth/authorization_manager_global.h"
+#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_catalog_entry.h"
+#include "mongo/db/catalog/database_holder.h"
+#include "mongo/db/commands.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/jsobj.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/util/log.h"
+#include "mongo/util/scopeguard.h"
+
+namespace mongo {
+
+using std::string;
+using std::stringstream;
+
+class CmdReplSetResizeOplog : public Command {
+public:
+    CmdReplSetResizeOplog() : Command("replSetResizeOplog") {}
+
+    virtual bool slaveOk() const final {
+        return true;
+    }
+
+    bool adminOnly() const final {
+        return true;
+    }
+
+    virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
+        return false;
+    }
+
+    virtual void help(stringstream& help) const {
+        help << "resize oplog size";
+    }
+
+    Status checkAuthForCommand(Client* client,
+                               const std::string& dbname,
+                               const BSONObj& cmdObj) final {
+        AuthorizationSession* authzSession = AuthorizationSession::get(client);
+        if (authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
+                                                           ActionType::replSetResizeOplog)) {
+            return Status::OK();
+        }
+        return Status(ErrorCodes::Unauthorized, "Unauthorized");
+    }
+
+    bool run(OperationContext* opCtx,
+             const string& dbname,
+             BSONObj& jsobj,
+             int,
+             string& errmsg,
+             BSONObjBuilder& result) {
+        const NamespaceString nss("local", "oplog.rs");
+        Lock::GlobalWrite global(opCtx->lockState());
+        Database* database = dbHolder().get(opCtx, nss.db());
+        if (!database) {
+            return appendCommandStatus(
+                result, Status(ErrorCodes::NamespaceNotFound, "database local does not exist"));
+        }
+        Collection* coll = database->getCollection(nss);
+        if (!coll) {
+            return appendCommandStatus(
+                result, Status(ErrorCodes::NamespaceNotFound, "oplog does not exist"));
+        }
+        if (!coll->isCapped()) {
+            return appendCommandStatus(result,
+                                       Status(ErrorCodes::IllegalOperation, "oplog isn't capped"));
+        }
+        if (!jsobj["size"].isNumber()) {
+            return appendCommandStatus(
+                result,
+                Status(ErrorCodes::InvalidOptions, "invalid size field, size should be a number"));
+        }
+
+        long long size = jsobj["size"].numberLong();
+        if (size < 990LL * 1024 * 1024) {
+            return appendCommandStatus(
+                result, Status(ErrorCodes::InvalidOptions, "oplog size should be 990MB at least"));
+        }
+        WriteUnitOfWork wunit(opCtx);
+        Status status = coll->getRecordStore()->updateCappedSize(opCtx, size);
+        if (!status.isOK()) {
+            return appendCommandStatus(result, status);
+        }
+        CollectionCatalogEntry* entry = coll->getCatalogEntry();
+        entry->updateCappedSize(opCtx, size);
+        wunit.commit();
+        LOG(0) << "replSetResizeOplog success, currentSize:" << size;
+        return appendCommandStatus(result, Status::OK());
+    }
+} cmdReplSetResizeOplog;
+}
diff --git a/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp b/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
index ed050d2e71f..52634c7be63 100644
--- a/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
@@ -252,6 +252,12 @@ void KVCollectionCatalogEntry::updateValidator(OperationContext* txn,
     _catalog->putMetaData(txn, ns().toString(), md);
 }
 
+void KVCollectionCatalogEntry::updateCappedSize(OperationContext* opCtx, long long size) {
+    MetaData md = _getMetaData(opCtx);
+    md.options.cappedSize = size;
+    _catalog->putMetaData(opCtx, ns().toString(), md);
+}
+
 BSONCollectionCatalogEntry::MetaData KVCollectionCatalogEntry::_getMetaData(
     OperationContext* txn) const {
     return _catalog->getMetaData(txn, ns().toString());
diff --git a/src/mongo/db/storage/kv/kv_collection_catalog_entry.h b/src/mongo/db/storage/kv/kv_collection_catalog_entry.h
index 108e41f95d8..3a6895aa52e 100644
--- a/src/mongo/db/storage/kv/kv_collection_catalog_entry.h
+++ b/src/mongo/db/storage/kv/kv_collection_catalog_entry.h
@@ -80,6 +80,8 @@ public:
                          StringData validationLevel,
                          StringData validationAction) final;
 
+    void updateCappedSize(OperationContext*, long long int) final;
+
     RecordStore* getRecordStore() {
         return _recordStore.get();
     }
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
index abf43946697..7d146f8d82d 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
@@ -453,4 +453,9 @@ void NamespaceDetailsCollectionCatalogEntry::setNamespacesRecordId(OperationCont
         _namespacesRecordId = newId;
     }
 }
+
+void NamespaceDetailsCollectionCatalogEntry::updateCappedSize(OperationContext* opCtx,
+                                                              long long size) {
+    invariant(false);
+}
 }
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h
index 8d57825d0c3..69744c91483 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h
@@ -104,6 +104,8 @@ public:
                          StringData validationLevel,
                          StringData validationAction) final;
 
+    void updateCappedSize(OperationContext* opCtx, long long size) final;
+
     // not part of interface, but available to my storage engine
     int _findIndexNumber(OperationContext* txn, StringData indexName) const;
diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h
index 64ccf71c39f..977db3441fa 100644
--- a/src/mongo/db/storage/record_store.h
+++ b/src/mongo/db/storage/record_store.h
@@ -622,6 +622,14 @@ public:
                                   long long numRecords,
                                   long long dataSize) = 0;
 
+    /**
+     * used to support online change oplog size.
+     */
+    virtual Status updateCappedSize(OperationContext* opCtx, long long cappedSize) {
+        return Status(ErrorCodes::CommandNotSupported,
+                      "this storage engine does not support updateCappedSize");
+    }
+
 protected:
     std::string _ns;
 };
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index 7761857bf90..3abdc3b5726 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -157,11 +157,11 @@ WiredTigerRecordStore::OplogStones::OplogStones(OperationContext* txn, WiredTige
     const unsigned long long kMaxStonesToKeep = 100ULL;
 
     unsigned long long numStones = maxSize / BSONObjMaxInternalSize;
-    _numStonesToKeep = std::min(kMaxStonesToKeep, std::max(kMinStonesToKeep, numStones));
-    _minBytesPerStone = maxSize / _numStonesToKeep;
+    size_t numStonesToKeep = std::min(kMaxStonesToKeep, std::max(kMinStonesToKeep, numStones));
+    _minBytesPerStone = maxSize / numStonesToKeep;
     invariant(_minBytesPerStone > 0);
 
-    _calculateStones(txn);
+    _calculateStones(txn, numStonesToKeep);
     _pokeReclaimThreadIfNeeded();  // Reclaim stones if over the limit.
 }
 
@@ -181,8 +181,14 @@ void WiredTigerRecordStore::OplogStones::kill() {
 void WiredTigerRecordStore::OplogStones::awaitHasExcessStonesOrDead() {
     // Wait until kill() is called or there are too many oplog stones.
     stdx::unique_lock<stdx::mutex> lock(_oplogReclaimMutex);
-    while (!_isDead && !hasExcessStones()) {
-        MONGO_IDLE_THREAD_BLOCK;
+    while (!_isDead) {
+        {
+            MONGO_IDLE_THREAD_BLOCK;
+            stdx::lock_guard<stdx::mutex> lk(_mutex);
+            if (hasExcessStones_inlock()) {
+                break;
+            }
+        }
         _oplogReclaimCv.wait(lock);
     }
 }
@@ -191,7 +197,7 @@ boost::optional<WiredTigerRecordStore::OplogStones::Stone>
 WiredTigerRecordStore::OplogStones::peekOldestStoneIfNeeded() const {
     stdx::lock_guard<stdx::mutex> lk(_mutex);
 
-    if (!hasExcessStones()) {
+    if (!hasExcessStones_inlock()) {
         return {};
     }
 
@@ -223,6 +229,7 @@ void WiredTigerRecordStore::OplogStones::createNewStoneIfNeeded(RecordId lastRec
         return;
     }
 
+    LOG(2) << "create new oplogStone, current stones:" << _stones.size();
     OplogStones::Stone stone = {_currentRecords.swap(0), _currentBytes.swap(0), lastRecord};
     _stones.push_back(stone);
 
@@ -278,19 +285,10 @@ void WiredTigerRecordStore::OplogStones::setMinBytesPerStone(int64_t size) {
     _minBytesPerStone = size;
 }
 
-void WiredTigerRecordStore::OplogStones::setNumStonesToKeep(size_t numStones) {
-    invariant(numStones > 0);
-
-    stdx::lock_guard<stdx::mutex> lk(_mutex);
-
-    // Only allow changing the number of stones to keep if no data has been inserted.
-    invariant(_stones.size() == 0 && _currentRecords.load() == 0);
-    _numStonesToKeep = numStones;
-}
-
-void WiredTigerRecordStore::OplogStones::_calculateStones(OperationContext* txn) {
-    long long numRecords = _rs->numRecords(txn);
-    long long dataSize = _rs->dataSize(txn);
+void WiredTigerRecordStore::OplogStones::_calculateStones(OperationContext* opCtx,
+                                                          size_t numStonesToKeep) {
+    long long numRecords = _rs->numRecords(opCtx);
+    long long dataSize = _rs->dataSize(opCtx);
 
     log() << "The size storer reports that the oplog contains " << numRecords
           << " records totaling to " << dataSize << " bytes";
@@ -303,8 +301,8 @@ void WiredTigerRecordStore::OplogStones::_calculateStones(OperationContext* txn)
     // oplog to determine where to put down stones.
     if (numRecords <= 0 || dataSize <= 0 ||
         uint64_t(numRecords) <
-            kMinSampleRatioForRandCursor * kRandomSamplesPerStone * _numStonesToKeep) {
-        _calculateStonesByScanning(txn);
+            kMinSampleRatioForRandCursor * kRandomSamplesPerStone * numStonesToKeep) {
+        _calculateStonesByScanning(opCtx);
         return;
     }
 
@@ -314,7 +312,7 @@ void WiredTigerRecordStore::OplogStones::_calculateStones(OperationContext* txn)
     double estRecordsPerStone = std::ceil(_minBytesPerStone / avgRecordSize);
     double estBytesPerStone = estRecordsPerStone * avgRecordSize;
 
-    _calculateStonesBySampling(txn, int64_t(estRecordsPerStone), int64_t(estBytesPerStone));
+    _calculateStonesBySampling(opCtx, int64_t(estRecordsPerStone), int64_t(estBytesPerStone));
 }
 
 void WiredTigerRecordStore::OplogStones::_calculateStonesByScanning(OperationContext* txn) {
@@ -426,7 +424,7 @@ void WiredTigerRecordStore::OplogStones::_calculateStonesBySampling(OperationCon
 }
 
 void WiredTigerRecordStore::OplogStones::_pokeReclaimThreadIfNeeded() {
-    if (hasExcessStones()) {
+    if (hasExcessStones_inlock()) {
         _oplogReclaimCv.notify_one();
     }
 }
@@ -610,6 +608,18 @@ private:
     const RecordId _readUntilForOplog;
 };
 
+void WiredTigerRecordStore::OplogStones::adjust(int64_t maxSize) {
+    stdx::lock_guard<stdx::mutex> lk(_mutex);
+    const unsigned long long kMinStonesToKeep = 10ULL;
+    const unsigned long long kMaxStonesToKeep = 100ULL;
+
+    unsigned long long numStones = maxSize / BSONObjMaxInternalSize;
+    size_t numStonesToKeep = std::min(kMaxStonesToKeep, std::max(kMinStonesToKeep, numStones));
+    _minBytesPerStone = maxSize / numStonesToKeep;
+    invariant(_minBytesPerStone > 0);
+    _pokeReclaimThreadIfNeeded();
+}
+
 StatusWith<std::string> WiredTigerRecordStore::parseOptionsField(const BSONObj options) {
     StringBuilder ss;
     BSONForEach(elem, options) {
@@ -1940,4 +1950,15 @@ void WiredTigerRecordStore::temp_cappedTruncateAfter(OperationContext* txn,
     }
 }
 
+Status WiredTigerRecordStore::updateCappedSize(OperationContext* opCtx, long long cappedSize) {
+    if (_cappedMaxSize == cappedSize) {
+        return Status::OK();
+    }
+    _cappedMaxSize = cappedSize;
+    if (_oplogStones) {
+        _oplogStones->adjust(cappedSize);
+    }
+    return Status::OK();
+}
+
 }  // namespace mongo
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index e680b046686..f1c4aebf7fa 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -205,6 +205,8 @@ public:
 
     void waitForAllEarlierOplogWritesToBeVisible(OperationContext* txn) const override;
 
+    Status updateCappedSize(OperationContext* opCtx, long long cappedSize) final;
+
     bool isOplog() const {
         return _isOplog;
     }
@@ -294,7 +296,7 @@ private:
     const bool _isEphemeral;
     // True if the namespace of this record store starts with "local.oplog.", and false otherwise.
     const bool _isOplog;
-    const int64_t _cappedMaxSize;
+    int64_t _cappedMaxSize;
     const int64_t _cappedMaxSizeSlack;  // when to start applying backpressure
     const int64_t _cappedMaxDocs;
     RecordId _cappedFirstRecord;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
index daa8998af2a..ac00b9ba584 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h
@@ -57,8 +57,14 @@ public:
 
     void kill();
 
-    bool hasExcessStones() const {
-        return _stones.size() > _numStonesToKeep;
+    bool hasExcessStones_inlock() const {
+        int64_t total_bytes = 0;
+        for (std::deque<OplogStones::Stone>::const_iterator it = _stones.begin();
+             it != _stones.end();
+             ++it) {
+            total_bytes += it->bytes;
+        }
+        return total_bytes > _rs->cappedMaxSize();
     }
 
     void awaitHasExcessStonesOrDead();
@@ -81,6 +87,9 @@ public:
                                  int64_t bytesRemoved,
                                  RecordId firstRemovedId);
 
+    // Resize oplog size
+    void adjust(int64_t maxSize);
+
     // The start point of where to truncate next. Used by the background reclaim thread to
     // efficiently truncate records with WiredTiger by skipping over tombstones, etc.
     RecordId firstRecord;
@@ -104,15 +113,13 @@ public:
 
     void setMinBytesPerStone(int64_t size);
 
-    void setNumStonesToKeep(size_t numStones);
-
 private:
     class InsertChange;
     class TruncateChange;
 
-    void _calculateStones(OperationContext* txn);
-    void _calculateStonesByScanning(OperationContext* txn);
-    void _calculateStonesBySampling(OperationContext* txn,
+    void _calculateStones(OperationContext* opCtx, size_t size);
+    void _calculateStonesByScanning(OperationContext* opCtx);
+    void _calculateStonesBySampling(OperationContext* opCtx,
                                     int64_t estRecordsPerStone,
                                     int64_t estBytesPerStone);
 
@@ -129,12 +136,8 @@ private:
     // database, and false otherwise.
     bool _isDead = false;
 
-    // Maximum number of stones to keep in the deque before the background reclaim thread should
-    // truncate the oldest ones. Does not include the stone currently being filled. This value
-    // should not be changed after initialization.
-    size_t _numStonesToKeep;
-
     // Minimum number of bytes the stone being filled should contain before it gets added to the
-    // deque of oplog stones. This value should not be changed after initialization.
+    // deque of oplog stones.
     int64_t _minBytesPerStone;
 
     AtomicInt64 _currentRecords;  // Number of records in the stone being filled.
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
index a720028bc9c..0232eee3cf7 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
@@ -1381,7 +1381,7 @@ TEST(WiredTigerRecordStoreTest, OplogStones_CappedTruncateAfter) {
     }
 }
 
-// Verify that oplog stones are reclaimed when the number of stones to keep is exceeded.
+// Verify that oplog stones are reclaimed when cappedMaxSize is exceeded.
 TEST(WiredTigerRecordStoreTest, OplogStones_ReclaimStones) {
     WiredTigerHarnessHelper harnessHelper;
 
@@ -1392,8 +1392,12 @@ TEST(WiredTigerRecordStoreTest, OplogStones_ReclaimStones) {
     WiredTigerRecordStore* wtrs = static_cast<WiredTigerRecordStore*>(rs.get());
     WiredTigerRecordStore::OplogStones* oplogStones = wtrs->oplogStones();
 
+    {
+        ServiceContext::UniqueOperationContext opCtx(harnessHelper.newOperationContext());
+        ASSERT_OK(wtrs->updateCappedSize(opCtx.get(), 230U));
+    }
+
     oplogStones->setMinBytesPerStone(100);
-    oplogStones->setNumStonesToKeep(2U);
 
     {
         ServiceContext::UniqueOperationContext opCtx(harnessHelper.newOperationContext());
@@ -1409,7 +1413,7 @@ TEST(WiredTigerRecordStoreTest, OplogStones_ReclaimStones) {
         ASSERT_EQ(0, oplogStones->currentBytes());
     }
 
-    // Truncate a stone when number of stones to keep is exceeded.
+    // Truncate a stone when cappedMaxSize is exceeded.
     {
         ServiceContext::UniqueOperationContext opCtx(harnessHelper.newOperationContext());
 
@@ -1442,71 +1446,27 @@ TEST(WiredTigerRecordStoreTest, OplogStones_ReclaimStones) {
 
         wtrs->reclaimOplog(opCtx.get());
 
-        ASSERT_EQ(3, rs->numRecords(opCtx.get()));
-        ASSERT_EQ(320, rs->dataSize(opCtx.get()));
-        ASSERT_EQ(2U, oplogStones->numStones());
+        ASSERT_EQ(2, rs->numRecords(opCtx.get()));
+        ASSERT_EQ(190, rs->dataSize(opCtx.get()));
+        ASSERT_EQ(1U, oplogStones->numStones());
         ASSERT_EQ(1, oplogStones->currentRecords());
         ASSERT_EQ(50, oplogStones->currentBytes());
     }
 
-    // No-op if the number of oplog stones is less than or equal to the number of stones to keep.
+    // No-op if dataSize <= cappedMaxSize.
     {
         ServiceContext::UniqueOperationContext opCtx(harnessHelper.newOperationContext());
 
         wtrs->reclaimOplog(opCtx.get());
 
-        ASSERT_EQ(3, rs->numRecords(opCtx.get()));
-        ASSERT_EQ(320, rs->dataSize(opCtx.get()));
-        ASSERT_EQ(2U, oplogStones->numStones());
+        ASSERT_EQ(2, rs->numRecords(opCtx.get()));
+        ASSERT_EQ(190, rs->dataSize(opCtx.get()));
+        ASSERT_EQ(1U, oplogStones->numStones());
         ASSERT_EQ(1, oplogStones->currentRecords());
         ASSERT_EQ(50, oplogStones->currentBytes());
     }
 }
 
-// Verify that oplog stones are not reclaimed even if the size of the record store exceeds
-// 'cappedMaxSize'.
-TEST(WiredTigerRecordStoreTest, OplogStones_ExceedCappedMaxSize) {
-    WiredTigerHarnessHelper harnessHelper;
-
-    const int64_t cappedMaxSize = 256;
-    unique_ptr<RecordStore> rs(
-        harnessHelper.newCappedRecordStore("local.oplog.stones", cappedMaxSize, -1));
-
-    WiredTigerRecordStore* wtrs = static_cast<WiredTigerRecordStore*>(rs.get());
-    WiredTigerRecordStore::OplogStones* oplogStones = wtrs->oplogStones();
-
-    oplogStones->setMinBytesPerStone(100);
-    oplogStones->setNumStonesToKeep(10U);
-
-    {
-        ServiceContext::UniqueOperationContext opCtx(harnessHelper.newOperationContext());
-
-        ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 1), 100), RecordId(1, 1));
-        ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 2), 110), RecordId(1, 2));
-        ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 3), 120), RecordId(1, 3));
-
-        ASSERT_EQ(3, rs->numRecords(opCtx.get()));
-        ASSERT_EQ(330, rs->dataSize(opCtx.get()));
-        ASSERT_EQ(3U, oplogStones->numStones());
-        ASSERT_EQ(0, oplogStones->currentRecords());
-        ASSERT_EQ(0, oplogStones->currentBytes());
-    }
-
-    // Shouldn't truncate a stone when the number of oplog stones is less than the number of stones
-    // to keep, even though the size of the record store exceeds 'cappedMaxSize'.
-    {
-        ServiceContext::UniqueOperationContext opCtx(harnessHelper.newOperationContext());
-
-        wtrs->reclaimOplog(opCtx.get());
-
-        ASSERT_EQ(3, rs->numRecords(opCtx.get()));
-        ASSERT_EQ(330, rs->dataSize(opCtx.get()));
-        ASSERT_EQ(3U, oplogStones->numStones());
-        ASSERT_EQ(0, oplogStones->currentRecords());
-        ASSERT_EQ(0, oplogStones->currentBytes());
-    }
-}
-
 // Verify that an oplog stone isn't created if it would cause the logical representation of the
 // records to not be in increasing order.
 TEST(WiredTigerRecordStoreTest, OplogStones_AscendingOrder) {
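
A hypothetical jstest-style sketch (not part of this commit) exercising the command-level validation paths introduced in resize_oplog.cpp above; it assumes a replica-set mongod running this patch, so local.oplog.rs exists and is capped:

(function() {
    "use strict";
    var admin = db.getSiblingDB("admin");

    // A non-numeric size is rejected with InvalidOptions.
    assert.commandFailed(admin.runCommand({replSetResizeOplog: 1, size: "2GB"}));

    // Sizes below the 990MB floor are rejected with InvalidOptions.
    assert.commandFailed(admin.runCommand({replSetResizeOplog: 1, size: 1024 * 1024}));

    // A size at the floor (990MB, expressed in bytes in this version) is accepted.
    assert.commandWorked(admin.runCommand({replSetResizeOplog: 1, size: 990 * 1024 * 1024}));
})();

Note the design choice visible in the diff: shrinking the oplog does not delete data synchronously. updateCappedSize() only updates _cappedMaxSize and calls OplogStones::adjust(), which pokes the background reclaim thread; that thread then truncates the oldest stones once hasExcessStones_inlock() sees the total stone bytes exceed the new cap.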