diff options
-rw-r--r--	src/mongo/db/repl/initial_syncer_test.cpp	64
1 file changed, 63 insertions(+), 1 deletion(-)
diff --git a/src/mongo/db/repl/initial_syncer_test.cpp b/src/mongo/db/repl/initial_syncer_test.cpp
index cb1dccafaa7..a50879bc432 100644
--- a/src/mongo/db/repl/initial_syncer_test.cpp
+++ b/src/mongo/db/repl/initial_syncer_test.cpp
@@ -61,6 +61,7 @@
 #include "mongo/util/scopeguard.h"
 
 #include "mongo/unittest/barrier.h"
+#include "mongo/unittest/death_test.h"
 #include "mongo/unittest/unittest.h"
 
 namespace mongo {
@@ -332,7 +333,7 @@ protected:
         options.getMyLastOptime = [this]() { return _myLastOpTime; };
         options.setMyLastOptime = [this](const OpTime& opTime) { _setMyLastOptime(opTime); };
         options.resetOptimes = [this]() { _setMyLastOptime(OpTime()); };
-        options.getSlaveDelay = [this]() { return Seconds(0); };
+        options.getSlaveDelay = []() { return Seconds(0); };
         options.syncSourceSelector = this;
         _options = options;
 
@@ -3588,4 +3589,65 @@ TEST_F(InitialSyncerTest, GetInitialSyncProgressReturnsCorrectProgress) {
         << attempt1;
 }
 
+DEATH_TEST_F(InitialSyncerTest,
+             GetInitialSyncProgressThrowsExceptionIfClonerStatsExceedBsonLimit,
+             "terminate() called") {
+    auto initialSyncer = &getInitialSyncer();
+    auto opCtx = makeOpCtx();
+
+    _syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 27017));
+    ASSERT_OK(initialSyncer->startup(opCtx.get(), 2U));
+
+    const std::size_t numCollections = 200000U;
+
+    auto net = getNet();
+    int baseRollbackId = 1;
+    {
+        executor::NetworkInterfaceMock::InNetworkGuard guard(net);
+
+        // Base rollback ID.
+        net->scheduleSuccessfulResponse(makeRollbackCheckerResponse(baseRollbackId));
+        net->runReadyNetworkOperations();
+
+        // Last oplog entry.
+        processSuccessfulLastOplogEntryFetcherResponse({makeOplogEntry(1)});
+
+        // Ignore oplog tailing query.
+        auto noi = net->getNextReadyRequest();
+        auto request = noi->getRequest();
+        assertRemoteCommandNameEquals("find", request);
+        ASSERT_TRUE(request.cmdObj.getBoolField("oplogReplay"));
+        net->blackHole(noi);
+
+        // listDatabases
+        NamespaceString nss("a.a");
+        request = net->scheduleSuccessfulResponse(makeListDatabasesResponse({nss.db().toString()}));
+        assertRemoteCommandNameEquals("listDatabases", request);
+        net->runReadyNetworkOperations();
+
+        // listCollections for "a"
+        std::vector<BSONObj> collectionInfos;
+        for (std::size_t i = 0; i < numCollections; ++i) {
+            const std::string collName = str::stream() << "coll-" << i;
+            collectionInfos.push_back(BSON("name" << collName << "options" << BSONObj()));
+        }
+        request = net->scheduleSuccessfulResponse(
+            makeCursorResponse(0LL, NamespaceString(nss.getCommandNS()), collectionInfos));
+        assertRemoteCommandNameEquals("listCollections", request);
+        net->runReadyNetworkOperations();
+    }
+
+    // This should throw because we are unable to fit all the cloner stats into a BSON document.
+    ASSERT_THROWS(initialSyncer->getInitialSyncProgress(), DBException);
+
+    // Initial sync will attempt to log stats again at shutdown in a callback, where it will
+    // terminate because of the unhandled exception.
+    ASSERT_OK(initialSyncer->shutdown());
+
+    // Deliver cancellation signal to callbacks.
+    executor::NetworkInterfaceMock::InNetworkGuard(net)->runReadyNetworkOperations();
+
+    initialSyncer->join();
+}
+
 } // namespace