author    Matthew Russotto <matthew.russotto@mongodb.com>    2019-12-02 18:50:44 +0000
committer evergreen <evergreen@mongodb.com>    2019-12-02 18:50:44 +0000
commit    4a57893567fc15eda83950bd429abadafbd8c190 (patch)
tree      7ed4960f8388cd398c95daad260a8aee2c149833 /src/mongo/db/repl
parent    27cf911c3f653f3506c9b34866b76444b323fa35 (diff)
download  mongo-4a57893567fc15eda83950bd429abadafbd8c190.tar.gz
SERVER-44060 Make stats contain un-started collections and databases.
Diffstat (limited to 'src/mongo/db/repl')
-rw-r--r--  src/mongo/db/repl/all_database_cloner.cpp      |  14
-rw-r--r--  src/mongo/db/repl/all_database_cloner.h        |   1
-rw-r--r--  src/mongo/db/repl/all_database_cloner_test.cpp |  32
-rw-r--r--  src/mongo/db/repl/collection_cloner.cpp        |  10
-rw-r--r--  src/mongo/db/repl/collection_cloner.h          |  10
-rw-r--r--  src/mongo/db/repl/database_cloner.cpp          |  17
-rw-r--r--  src/mongo/db/repl/database_cloner.h            |   7
-rw-r--r--  src/mongo/db/repl/database_cloner_test.cpp     | 109
-rw-r--r--  src/mongo/db/repl/initial_syncer_test.cpp      | 151
9 files changed, 266 insertions, 85 deletions
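
The change below replaces the flat databaseCount counter with a databaseStats vector that is pre-populated with one entry per database before any cloning starts, then updated in place (indexed by databasesCloned) as each clone finishes, so progress reporting can show databases that have not started yet. A minimal standalone sketch of that pattern, using hypothetical DbStats/AllDbStats stand-ins for the real DatabaseCloner::Stats and AllDatabaseCloner::Stats types:

    #include <cstddef>
    #include <string>
    #include <utility>
    #include <vector>

    // Hypothetical, simplified stand-ins for the real Stats types, which
    // also carry timing data and per-collection breakdowns.
    struct DbStats {
        std::string dbname;
    };

    struct AllDbStats {
        std::size_t databasesCloned = 0;
        std::vector<DbStats> databaseStats;
    };

    // Pre-populate one slot per database so un-started databases are visible.
    void prePopulate(AllDbStats& stats, const std::vector<std::string>& databases) {
        stats.databaseStats.reserve(databases.size());
        for (const auto& dbName : databases) {
            stats.databaseStats.emplace_back();
            stats.databaseStats.back().dbname = dbName;
        }
    }

    // As each database finishes, overwrite its slot instead of appending.
    void recordFinished(AllDbStats& stats, DbStats finished) {
        stats.databaseStats[stats.databasesCloned] = std::move(finished);
        ++stats.databasesCloned;
    }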
diff --git a/src/mongo/db/repl/all_database_cloner.cpp b/src/mongo/db/repl/all_database_cloner.cpp
index 04cf7c879d0..e5ef48a13be 100644
--- a/src/mongo/db/repl/all_database_cloner.cpp
+++ b/src/mongo/db/repl/all_database_cloner.cpp
@@ -94,8 +94,12 @@ BaseCloner::AfterStageBehavior AllDatabaseCloner::listDatabasesStage() {
void AllDatabaseCloner::postStage() {
{
stdx::lock_guard<Latch> lk(_mutex);
- _stats.databaseCount = _databases.size();
_stats.databasesCloned = 0;
+ _stats.databaseStats.reserve(_databases.size());
+ for (const auto& dbName : _databases) {
+ _stats.databaseStats.emplace_back();
+ _stats.databaseStats.back().dbname = dbName;
+ }
}
for (const auto& dbName : _databases) {
{
@@ -138,7 +142,7 @@ void AllDatabaseCloner::postStage() {
}
{
stdx::lock_guard<Latch> lk(_mutex);
- _stats.databaseStats.emplace_back(_currentDatabaseCloner->getStats());
+ _stats.databaseStats[_stats.databasesCloned] = _currentDatabaseCloner->getStats();
_currentDatabaseCloner = nullptr;
_stats.databasesCloned++;
}
@@ -149,7 +153,7 @@ AllDatabaseCloner::Stats AllDatabaseCloner::getStats() const {
stdx::lock_guard<Latch> lk(_mutex);
AllDatabaseCloner::Stats stats = _stats;
if (_currentDatabaseCloner) {
- stats.databaseStats.emplace_back(_currentDatabaseCloner->getStats());
+ stats.databaseStats[_stats.databasesCloned] = _currentDatabaseCloner->getStats();
}
return stats;
}
@@ -159,8 +163,7 @@ std::string AllDatabaseCloner::toString() const {
return str::stream() << "initial sync --"
<< " active:" << isActive(lk) << " status:" << getStatus(lk).toString()
<< " source:" << getSource()
- << " db cloners completed:" << _stats.databasesCloned
- << " db count:" << _stats.databaseCount;
+ << " db cloners completed:" << _stats.databasesCloned;
}
std::string AllDatabaseCloner::Stats::toString() const {
@@ -175,7 +178,6 @@ BSONObj AllDatabaseCloner::Stats::toBSON() const {
void AllDatabaseCloner::Stats::append(BSONObjBuilder* builder) const {
builder->appendNumber("databasesCloned", databasesCloned);
- builder->appendNumber("databaseCount", databaseCount);
for (auto&& db : databaseStats) {
BSONObjBuilder dbBuilder(builder->subobjStart(db.dbname));
db.append(&dbBuilder);
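
Because every database already owns a slot, getStats() above can copy _stats and overlay the live stats of the in-flight DatabaseCloner at index databasesCloned without mutating the shared state; previously the in-flight stats were appended to the copy. A sketch of that read path, reusing the hypothetical DbStats/AllDbStats stand-ins from the sketch after the diffstat (the real code performs the copy while holding a Latch):

    // Copy-and-overlay snapshot: the in-flight database already has a
    // pre-populated slot, so the overlay is an in-place assignment.
    AllDbStats snapshot(const AllDbStats& shared, const DbStats* inFlight) {
        AllDbStats copy = shared;  // done under the mutex in the real code
        if (inFlight) {
            copy.databaseStats[copy.databasesCloned] = *inFlight;
        }
        return copy;
    }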
diff --git a/src/mongo/db/repl/all_database_cloner.h b/src/mongo/db/repl/all_database_cloner.h
index 02a5ebda648..ddf6896680f 100644
--- a/src/mongo/db/repl/all_database_cloner.h
+++ b/src/mongo/db/repl/all_database_cloner.h
@@ -41,7 +41,6 @@ class AllDatabaseCloner final : public BaseCloner {
public:
struct Stats {
size_t databasesCloned{0};
- size_t databaseCount{0};
std::vector<DatabaseCloner::Stats> databaseStats;
std::string toString() const;
diff --git a/src/mongo/db/repl/all_database_cloner_test.cpp b/src/mongo/db/repl/all_database_cloner_test.cpp
index 96992cb97ec..6bde3f55c19 100644
--- a/src/mongo/db/repl/all_database_cloner_test.cpp
+++ b/src/mongo/db/repl/all_database_cloner_test.cpp
@@ -424,6 +424,7 @@ TEST_F(AllDatabaseClonerTest, DatabaseStats) {
0,
fromjson("{cloner: 'DatabaseCloner', stage: 'listCollections', database: 'admin'}"));
+ _clock.advance(Minutes(1));
// Run the cloner in a separate thread.
stdx::thread clonerThread([&] {
Client::initThread("ClonerRunner");
@@ -440,8 +441,18 @@ TEST_F(AllDatabaseClonerTest, DatabaseStats) {
ASSERT_EQUALS("a", databases[2]);
auto stats = cloner->getStats();
- ASSERT_EQUALS(3, stats.databaseCount);
ASSERT_EQUALS(0, stats.databasesCloned);
+ ASSERT_EQUALS(3, stats.databaseStats.size());
+ ASSERT_EQUALS("admin", stats.databaseStats[0].dbname);
+ ASSERT_EQUALS("aab", stats.databaseStats[1].dbname);
+ ASSERT_EQUALS("a", stats.databaseStats[2].dbname);
+ ASSERT_EQUALS(_clock.now(), stats.databaseStats[0].start);
+ ASSERT_EQUALS(Date_t(), stats.databaseStats[0].end);
+ ASSERT_EQUALS(Date_t(), stats.databaseStats[1].start);
+ ASSERT_EQUALS(Date_t(), stats.databaseStats[1].end);
+ ASSERT_EQUALS(Date_t(), stats.databaseStats[2].start);
+ ASSERT_EQUALS(Date_t(), stats.databaseStats[2].end);
+ _clock.advance(Minutes(1));
// Allow the cloner to move to the next DB.
timesEntered = dbClonerBeforeFailPoint->setMode(
@@ -457,9 +468,17 @@ TEST_F(AllDatabaseClonerTest, DatabaseStats) {
dbClonerBeforeFailPoint->waitForTimesEntered(timesEntered + 1);
stats = cloner->getStats();
- ASSERT_EQUALS(3, stats.databaseCount);
ASSERT_EQUALS(1, stats.databasesCloned);
+ ASSERT_EQUALS(3, stats.databaseStats.size());
ASSERT_EQUALS("admin", stats.databaseStats[0].dbname);
+ ASSERT_EQUALS("aab", stats.databaseStats[1].dbname);
+ ASSERT_EQUALS("a", stats.databaseStats[2].dbname);
+ ASSERT_EQUALS(_clock.now(), stats.databaseStats[0].end);
+ ASSERT_EQUALS(_clock.now(), stats.databaseStats[1].start);
+ ASSERT_EQUALS(Date_t(), stats.databaseStats[1].end);
+ ASSERT_EQUALS(Date_t(), stats.databaseStats[2].start);
+ ASSERT_EQUALS(Date_t(), stats.databaseStats[2].end);
+ _clock.advance(Minutes(1));
ASSERT(isAdminDbValidFnCalled);
// Allow the cloner to move to the last DB.
@@ -476,10 +495,15 @@ TEST_F(AllDatabaseClonerTest, DatabaseStats) {
dbClonerBeforeFailPoint->waitForTimesEntered(timesEntered + 1);
stats = cloner->getStats();
- ASSERT_EQUALS(3, stats.databaseCount);
ASSERT_EQUALS(2, stats.databasesCloned);
+ ASSERT_EQUALS(3, stats.databaseStats.size());
ASSERT_EQUALS("admin", stats.databaseStats[0].dbname);
ASSERT_EQUALS("aab", stats.databaseStats[1].dbname);
+ ASSERT_EQUALS("a", stats.databaseStats[2].dbname);
+ ASSERT_EQUALS(_clock.now(), stats.databaseStats[1].end);
+ ASSERT_EQUALS(_clock.now(), stats.databaseStats[2].start);
+ ASSERT_EQUALS(Date_t(), stats.databaseStats[2].end);
+ _clock.advance(Minutes(1));
// Allow the cloner to finish
dbClonerBeforeFailPoint->setMode(FailPoint::off, 0);
@@ -487,11 +511,11 @@ TEST_F(AllDatabaseClonerTest, DatabaseStats) {
clonerThread.join();
stats = cloner->getStats();
- ASSERT_EQUALS(3, stats.databaseCount);
ASSERT_EQUALS(3, stats.databasesCloned);
ASSERT_EQUALS("admin", stats.databaseStats[0].dbname);
ASSERT_EQUALS("aab", stats.databaseStats[1].dbname);
ASSERT_EQUALS("a", stats.databaseStats[2].dbname);
+ ASSERT_EQUALS(_clock.now(), stats.databaseStats[2].end);
}
TEST_F(AllDatabaseClonerTest, FailsOnListCollectionsOnOnlyDatabase) {
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index 383cab78daf..0109d36cdbf 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -100,6 +100,16 @@ BaseCloner::ClonerStages CollectionCloner::getStages() {
}
+void CollectionCloner::preStage() {
+ stdx::lock_guard<Latch> lk(_mutex);
+ _stats.start = getClock()->now();
+}
+
+void CollectionCloner::postStage() {
+ stdx::lock_guard<Latch> lk(_mutex);
+ _stats.end = getClock()->now();
+}
+
// Collection cloner stages exit normally if the collection is not found.
BaseCloner::AfterStageBehavior CollectionCloner::CollectionClonerStage::run() {
try {
diff --git a/src/mongo/db/repl/collection_cloner.h b/src/mongo/db/repl/collection_cloner.h
index dd994edbec8..25f84da9a81 100644
--- a/src/mongo/db/repl/collection_cloner.h
+++ b/src/mongo/db/repl/collection_cloner.h
@@ -125,6 +125,16 @@ private:
}
/**
+ * The preStage sets the start time in _stats.
+ */
+ void preStage() final;
+
+ /**
+ * The postStage sets the end time in _stats.
+ */
+ void postStage() final;
+
+ /**
* Stage function that counts the number of documents in the collection on the source in order
* to generate progress information.
*/
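
The new start/end timestamps come from the cloner's injected clock (getClock()->now()), which is the seam that lets the tests substitute a ClockSourceMock and advance it deterministically between assertions. A minimal standalone sketch of that pattern, assuming only the standard library (std::function stands in for MongoDB's ClockSource*):

    #include <chrono>
    #include <functional>

    using TimePoint = std::chrono::system_clock::time_point;
    using ClockFn = std::function<TimePoint()>;

    struct TimedStats {
        TimePoint start{};  // default value marks "not started"
        TimePoint end{};    // default value marks "not finished"
    };

    struct TimedCloner {
        ClockFn clock;
        TimedStats stats;

        void preStage() { stats.start = clock(); }  // stamped before the first stage
        void postStage() { stats.end = clock(); }   // stamped after the last stage
    };

    // Test-side fake, mirroring ClockSourceMock: advance it by hand so
    // stamps taken at different pauses are guaranteed to differ.
    struct FakeClock {
        TimePoint now{};
        void advance(std::chrono::minutes d) { now += d; }
    };

Wiring is one lambda, e.g. TimedCloner cloner{[&] { return fake.now; }}; in the tests below, _clock.advance(Minutes(1)) between failpoint pauses plays the role of FakeClock::advance here.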
diff --git a/src/mongo/db/repl/database_cloner.cpp b/src/mongo/db/repl/database_cloner.cpp
index 80b7a2de249..c4a51ddc1e7 100644
--- a/src/mongo/db/repl/database_cloner.cpp
+++ b/src/mongo/db/repl/database_cloner.cpp
@@ -64,6 +64,11 @@ CollectionOptions DatabaseCloner::parseCollectionOptions(const BSONObj& obj) {
return uassertStatusOK(CollectionOptions::parse(obj, CollectionOptions::parseForStorage));
}
+void DatabaseCloner::preStage() {
+ stdx::lock_guard<Latch> lk(_mutex);
+ _stats.start = getClock()->now();
+}
+
BaseCloner::AfterStageBehavior DatabaseCloner::listCollectionsStage() {
BSONObj res;
auto collectionInfos =
@@ -112,7 +117,11 @@ void DatabaseCloner::postStage() {
{
stdx::lock_guard<Latch> lk(_mutex);
_stats.collections = _collections.size();
- _stats.start = getClock()->now();
+ _stats.collectionStats.reserve(_collections.size());
+ for (const auto& coll : _collections) {
+ _stats.collectionStats.emplace_back();
+ _stats.collectionStats.back().ns = coll.first.ns();
+ }
}
for (const auto& coll : _collections) {
auto& sourceNss = coll.first;
@@ -143,7 +152,7 @@ void DatabaseCloner::postStage() {
}
{
stdx::lock_guard<Latch> lk(_mutex);
- _stats.collectionStats.emplace_back(_currentCollectionCloner->getStats());
+ _stats.collectionStats[_stats.clonedCollections] = _currentCollectionCloner->getStats();
_currentCollectionCloner = nullptr;
// Abort the database cloner if the collection clone failed.
if (!collStatus.isOK())
@@ -151,13 +160,15 @@ void DatabaseCloner::postStage() {
_stats.clonedCollections++;
}
}
+ stdx::lock_guard<Latch> lk(_mutex);
+ _stats.end = getClock()->now();
}
DatabaseCloner::Stats DatabaseCloner::getStats() const {
stdx::lock_guard<Latch> lk(_mutex);
DatabaseCloner::Stats stats = _stats;
if (_currentCollectionCloner) {
- stats.collectionStats.emplace_back(_currentCollectionCloner->getStats());
+ stats.collectionStats[_stats.clonedCollections] = _currentCollectionCloner->getStats();
}
return stats;
}
diff --git a/src/mongo/db/repl/database_cloner.h b/src/mongo/db/repl/database_cloner.h
index 0b3b6f70f38..c31f4b97349 100644
--- a/src/mongo/db/repl/database_cloner.h
+++ b/src/mongo/db/repl/database_cloner.h
@@ -82,8 +82,13 @@ private:
AfterStageBehavior listCollectionsStage();
/**
+ * The preStage sets the start time in _stats.
+ */
+ void preStage() final;
+
+ /**
* The postStage creates and runs the individual CollectionCloners on each collection found on
- * the sync source.
+ * the sync source, and sets the end time in _stats when done.
*/
void postStage() final;
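
preStage() and postStage() are hooks that BaseCloner invokes around the declared stages, which is how DatabaseCloner can stamp _stats.start before listCollections runs and _stats.end once the per-collection clones driven by its postStage() have finished. A simplified sketch of that ordering (the real BaseCloner run loop also handles retries, failpoints, and abort checks, all omitted here):

    // Template-method sketch: subclasses override the hooks, the base
    // class fixes the order in which they run.
    struct ClonerLifecycle {
        virtual ~ClonerLifecycle() = default;
        virtual void preStage() {}     // DatabaseCloner: _stats.start = now()
        virtual void runStages() = 0;  // DatabaseCloner: listCollectionsStage()
        virtual void postStage() {}    // DatabaseCloner: run CollectionCloners,
                                       // then _stats.end = now()
        void run() {
            preStage();
            runStages();
            postStage();
        }
    };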
diff --git a/src/mongo/db/repl/database_cloner_test.cpp b/src/mongo/db/repl/database_cloner_test.cpp
index b32bc4eb522..a730ea86e5f 100644
--- a/src/mongo/db/repl/database_cloner_test.cpp
+++ b/src/mongo/db/repl/database_cloner_test.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/service_context_test_fixture.h"
#include "mongo/dbtests/mock/mock_dbclient_connection.h"
#include "mongo/unittest/unittest.h"
+#include "mongo/util/clock_source_mock.h"
#include "mongo/util/concurrency/thread_pool.h"
namespace mongo {
@@ -77,7 +78,8 @@ protected:
_source,
_mockClient.get(),
&_storageInterface,
- _dbWorkThreadPool.get());
+ _dbWorkThreadPool.get(),
+ &_clock);
}
BSONObj createListCollectionsResponse(const std::vector<BSONObj>& collections) {
@@ -106,6 +108,7 @@ protected:
std::map<NamespaceString, CollectionCloneInfo> _collections;
static std::string _dbName;
+ ClockSourceMock _clock;
};
/* static */
@@ -388,5 +391,109 @@ TEST_F(DatabaseClonerTest, CreateCollections) {
ASSERT(stats.commitCalled);
}
+TEST_F(DatabaseClonerTest, DatabaseAndCollectionStats) {
+ auto uuid1 = UUID::gen();
+ auto uuid2 = UUID::gen();
+ const BSONObj idIndexSpec = BSON("v" << 1 << "key" << BSON("_id" << 1) << "name"
+ << "_id_");
+ const BSONObj extraIndexSpec = BSON("v" << 1 << "key" << BSON("x" << 1) << "name"
+ << "_extra_");
+ const std::vector<BSONObj> sourceInfos = {BSON("name"
+ << "a"
+ << "type"
+ << "collection"
+ << "options" << BSONObj() << "info"
+ << BSON("readOnly" << false << "uuid" << uuid1)),
+ BSON(
+ "name"
+ << "b"
+ << "type"
+ << "collection"
+ << "options" << BSONObj() << "info"
+ << BSON("readOnly" << false << "uuid" << uuid2))};
+ _mockServer->setCommandReply("listCollections",
+ createListCollectionsResponse({sourceInfos[0], sourceInfos[1]}));
+ _mockServer->setCommandReply("count", {createCountResponse(0), createCountResponse(0)});
+ _mockServer->setCommandReply(
+ "listIndexes",
+ {createCursorResponse(_dbName + ".a", BSON_ARRAY(idIndexSpec << extraIndexSpec)),
+ createCursorResponse(_dbName + ".b", BSON_ARRAY(idIndexSpec))});
+ auto cloner = makeDatabaseCloner();
+
+ auto collClonerBeforeFailPoint = globalFailPointRegistry().find("hangBeforeClonerStage");
+ auto collClonerAfterFailPoint = globalFailPointRegistry().find("hangAfterClonerStage");
+ auto timesEntered = collClonerBeforeFailPoint->setMode(
+ FailPoint::alwaysOn,
+ 0,
+ fromjson("{cloner: 'CollectionCloner', stage: 'count', nss: '" + _dbName + ".a'}"));
+ collClonerAfterFailPoint->setMode(
+ FailPoint::alwaysOn,
+ 0,
+ fromjson("{cloner: 'CollectionCloner', stage: 'count', nss: '" + _dbName + ".a'}"));
+
+ // Run the cloner in a separate thread.
+ stdx::thread clonerThread([&] {
+ Client::initThread("ClonerRunner");
+ ASSERT_OK(cloner->run());
+ });
+ // Wait for the failpoint to be reached
+ collClonerBeforeFailPoint->waitForTimesEntered(timesEntered + 1);
+
+ // Collection stats should be set up with namespace.
+ auto stats = cloner->getStats();
+ ASSERT_EQ(_dbName, stats.dbname);
+ ASSERT_EQ(_clock.now(), stats.start);
+ ASSERT_EQ(2, stats.collections);
+ ASSERT_EQ(0, stats.clonedCollections);
+ ASSERT_EQ(2, stats.collectionStats.size());
+ ASSERT_EQ(_dbName + ".a", stats.collectionStats[0].ns);
+ ASSERT_EQ(_dbName + ".b", stats.collectionStats[1].ns);
+ ASSERT_EQ(_clock.now(), stats.collectionStats[0].start);
+ ASSERT_EQ(Date_t(), stats.collectionStats[0].end);
+ ASSERT_EQ(Date_t(), stats.collectionStats[1].start);
+ ASSERT_EQ(0, stats.collectionStats[0].indexes);
+ ASSERT_EQ(0, stats.collectionStats[1].indexes);
+ _clock.advance(Minutes(1));
+
+ // Move to the next collection
+ timesEntered = collClonerBeforeFailPoint->setMode(
+ FailPoint::alwaysOn,
+ 0,
+ fromjson("{cloner: 'CollectionCloner', stage: 'count', nss: '" + _dbName + ".b'}"));
+ collClonerAfterFailPoint->setMode(FailPoint::off);
+
+ // Wait for the failpoint to be reached
+ collClonerBeforeFailPoint->waitForTimesEntered(timesEntered + 1);
+
+ stats = cloner->getStats();
+ ASSERT_EQ(2, stats.collections);
+ ASSERT_EQ(1, stats.clonedCollections);
+ ASSERT_EQ(2, stats.collectionStats.size());
+ ASSERT_EQ(_dbName + ".a", stats.collectionStats[0].ns);
+ ASSERT_EQ(_dbName + ".b", stats.collectionStats[1].ns);
+ ASSERT_EQ(2, stats.collectionStats[0].indexes);
+ ASSERT_EQ(0, stats.collectionStats[1].indexes);
+ ASSERT_EQ(_clock.now(), stats.collectionStats[0].end);
+ ASSERT_EQ(_clock.now(), stats.collectionStats[1].start);
+ ASSERT_EQ(Date_t(), stats.collectionStats[1].end);
+ _clock.advance(Minutes(1));
+
+ // Finish
+ collClonerBeforeFailPoint->setMode(FailPoint::off, 0);
+ clonerThread.join();
+
+ stats = cloner->getStats();
+ ASSERT_EQ(_dbName, stats.dbname);
+ ASSERT_EQ(_clock.now(), stats.end);
+ ASSERT_EQ(2, stats.collections);
+ ASSERT_EQ(2, stats.clonedCollections);
+ ASSERT_EQ(2, stats.collectionStats.size());
+ ASSERT_EQ(_dbName + ".a", stats.collectionStats[0].ns);
+ ASSERT_EQ(_dbName + ".b", stats.collectionStats[1].ns);
+ ASSERT_EQ(2, stats.collectionStats[0].indexes);
+ ASSERT_EQ(1, stats.collectionStats[1].indexes);
+ ASSERT_EQ(_clock.now(), stats.collectionStats[1].end);
+}
+
} // namespace repl
} // namespace mongo
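
The test above (like the AllDatabaseCloner DatabaseStats test) runs the cloner on a second thread and uses the hangBeforeClonerStage/hangAfterClonerStage failpoints as rendezvous points: arm the failpoint, wait for the cloner to enter it, inspect getStats() while the cloner is parked, then re-arm or release. A standalone sketch of that rendezvous reduced to a counter plus condition variable (single-shot, unlike the real FailPoint, which can be re-armed with a new data filter):

    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    class RendezvousPoint {
    public:
        // Called from the cloner thread at the instrumented point.
        void pause() {
            std::unique_lock<std::mutex> lk(_m);
            ++_timesEntered;
            _cv.notify_all();
            _cv.wait(lk, [&] { return !_enabled; });
        }
        // Called from the test thread; mirrors FailPoint::waitForTimesEntered.
        void waitForTimesEntered(std::uint64_t n) {
            std::unique_lock<std::mutex> lk(_m);
            _cv.wait(lk, [&] { return _timesEntered >= n; });
        }
        // Mirrors setMode(FailPoint::off): releases the parked thread.
        void release() {
            std::lock_guard<std::mutex> lk(_m);
            _enabled = false;
            _cv.notify_all();
        }

    private:
        std::mutex _m;
        std::condition_variable _cv;
        std::uint64_t _timesEntered = 0;
        bool _enabled = true;
    };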
diff --git a/src/mongo/db/repl/initial_syncer_test.cpp b/src/mongo/db/repl/initial_syncer_test.cpp
index 69e7f3df5e4..924b3fa57b2 100644
--- a/src/mongo/db/repl/initial_syncer_test.cpp
+++ b/src/mongo/db/repl/initial_syncer_test.cpp
@@ -4188,8 +4188,7 @@ TEST_F(InitialSyncerTest, GetInitialSyncProgressReturnsCorrectProgress) {
ASSERT_EQUALS(progress["initialSyncOplogStart"].timestamp(), Timestamp(1, 1)) << progress;
ASSERT_BSONOBJ_EQ(progress.getObjectField("initialSyncAttempts"), BSONObj());
ASSERT_EQUALS(progress.getIntField("appliedOps"), 0) << progress;
- ASSERT_BSONOBJ_EQ(progress.getObjectField("databases"),
- BSON("databasesCloned" << 0 << "databaseCount" << 0));
+ ASSERT_BSONOBJ_EQ(progress.getObjectField("databases"), BSON("databasesCloned" << 0));
// Inject the listDatabases failure.
_mockServer->setCommandReply(
@@ -4246,8 +4245,7 @@ TEST_F(InitialSyncerTest, GetInitialSyncProgressReturnsCorrectProgress) {
ASSERT_EQUALS(progress["initialSyncStart"].type(), Date) << progress;
ASSERT_EQUALS(progress["initialSyncOplogStart"].timestamp(), Timestamp(1, 1)) << progress;
ASSERT_EQUALS(progress.getIntField("appliedOps"), 0) << progress;
- ASSERT_BSONOBJ_EQ(progress.getObjectField("databases"),
- BSON("databasesCloned" << 0 << "databaseCount" << 0));
+ ASSERT_BSONOBJ_EQ(progress.getObjectField("databases"), BSON("databasesCloned" << 0));
BSONObj attempts = progress["initialSyncAttempts"].Obj();
ASSERT_EQUALS(attempts.nFields(), 1) << attempts;
@@ -4444,73 +4442,86 @@ TEST_F(InitialSyncerTest, GetInitialSyncProgressOmitsClonerStatsIfClonerStatsExc
_syncSourceSelector->setChooseNewSyncSourceResult_forTest(HostAndPort("localhost", 27017));
ASSERT_OK(initialSyncer->startup(opCtx.get(), 2U));
- const std::size_t numCollections = 20000U;
- const std::size_t numDatabases = 10;
+ const std::size_t numCollections = 200000U;
auto net = getNet();
int baseRollbackId = 1;
{
+ auto collectionClonerFailPoint = globalFailPointRegistry().find("hangAfterClonerStage");
+ auto timesEntered = collectionClonerFailPoint->setMode(FailPoint::alwaysOn,
+ 0,
+ BSON("cloner"
+ << "CollectionCloner"
+ << "stage"
+ << "count"));
+ ON_BLOCK_EXIT(
+ [collectionClonerFailPoint]() { collectionClonerFailPoint->setMode(FailPoint::off); });
- executor::NetworkInterfaceMock::InNetworkGuard guard(net);
-
- // Base rollback ID.
- net->scheduleSuccessfulResponse(makeRollbackCheckerResponse(baseRollbackId));
+ {
- // Oplog entry associated with the defaultBeginFetchingTimestamp.
- processSuccessfulLastOplogEntryFetcherResponse({makeOplogEntryObj(1)});
+ executor::NetworkInterfaceMock::InNetworkGuard guard(net);
- // Send an empty optime as the response to the beginFetchingOptime find request, which will
- // cause the beginFetchingTimestamp to be set to the defaultBeginFetchingTimestamp.
- auto request = net->scheduleSuccessfulResponse(
- makeCursorResponse(0LL, NamespaceString::kSessionTransactionsTableNamespace, {}, true));
- assertRemoteCommandNameEquals("find", request);
- net->runReadyNetworkOperations();
+ // Base rollback ID.
+ net->scheduleSuccessfulResponse(makeRollbackCheckerResponse(baseRollbackId));
- // Oplog entry associated with the beginApplyingTimestamp.
- processSuccessfulLastOplogEntryFetcherResponse({makeOplogEntryObj(1)});
+ // Oplog entry associated with the defaultBeginFetchingTimestamp.
+ processSuccessfulLastOplogEntryFetcherResponse({makeOplogEntryObj(1)});
- // Set up the cloner data. This must be done before providing the FCV to avoid races.
- // listDatabases
- std::vector<std::string> dbNames;
- for (std::size_t i = 0; i < numDatabases; i++) {
- char name[2] = {static_cast<char>('a' + i), 0};
- dbNames.push_back(name);
- }
- NamespaceString nss("a.a");
- _mockServer->setCommandReply("listDatabases", makeListDatabasesResponse(dbNames));
-
- // listCollections
- std::vector<BSONObj> collectionInfos;
- for (std::size_t i = 0; i < numCollections; ++i) {
- CollectionOptions options;
- const std::string collName = str::stream() << "coll-" << i;
- options.uuid = UUID::gen();
- collectionInfos.push_back(BSON("name" << collName << "type"
- << "collection"
- << "options" << options.toBSON() << "info"
- << BSON("uuid" << *options.uuid)));
- }
- _mockServer->setCommandReply(
- "listCollections", makeCursorResponse(0LL, nss.getCommandNS(), collectionInfos).data);
+ // Send an empty optime as the response to the beginFetchingOptime find request, which
+ // will cause the beginFetchingTimestamp to be set to the defaultBeginFetchingTimestamp.
+ auto request = net->scheduleSuccessfulResponse(makeCursorResponse(
+ 0LL, NamespaceString::kSessionTransactionsTableNamespace, {}, true));
+ assertRemoteCommandNameEquals("find", request);
+ net->runReadyNetworkOperations();
- // All document counts are 0.
- _mockServer->setCommandReply("count", BSON("n" << 0 << "ok" << 1));
+ // Oplog entry associated with the beginApplyingTimestamp.
+ processSuccessfulLastOplogEntryFetcherResponse({makeOplogEntryObj(1)});
- // listIndexes for all collections.
- _mockServer->setCommandReply(
- "listIndexes",
- makeCursorResponse(
- 0LL,
- NamespaceString(nss.getCommandNS()),
- {BSON("v" << OplogEntry::kOplogVersion << "key" << BSON("_id" << 1) << "name"
- << "_id_"
- << "ns" << nss.ns())})
- .data);
+ // Set up the cloner data. This must be done before providing the FCV to avoid races.
+ // listDatabases
+ NamespaceString nss("a.a");
+ _mockServer->setCommandReply("listDatabases",
+ makeListDatabasesResponse({nss.db().toString()}));
+
+ // listCollections for "a"
+            // The listCollections response has to be broken into multiple batches, or it will
+            // trigger BSONObjTooLarge spuriously; we want that limit hit by the stats document,
+            // not by listCollections itself.
+ std::vector<BSONObj> collectionInfos[4];
+ for (std::size_t i = 0; i < numCollections; ++i) {
+ CollectionOptions options;
+ const std::string collName = str::stream() << "coll-" << i;
+ options.uuid = UUID::gen();
+ collectionInfos[(i * 4) / numCollections].push_back(
+ BSON("name" << collName << "type"
+ << "collection"
+ << "options" << options.toBSON() << "info"
+ << BSON("uuid" << *options.uuid)));
+ }
+ const bool notFirstBatch = false;
+ _mockServer->setCommandReply(
+ "listCollections",
+ {makeCursorResponse(1LL, nss.getCommandNS(), collectionInfos[0]).data,
+ makeCursorResponse(1LL, nss.getCommandNS(), collectionInfos[1], notFirstBatch)
+ .data,
+ makeCursorResponse(1LL, nss.getCommandNS(), collectionInfos[2], notFirstBatch)
+ .data,
+ makeCursorResponse(0LL, nss.getCommandNS(), collectionInfos[3], notFirstBatch)
+ .data});
+
+ // All document counts are 0.
+ _mockServer->setCommandReply("count", BSON("n" << 0 << "ok" << 1));
+
+ // listIndexes for all collections.
+ _mockServer->setCommandReply(
+ "listIndexes",
+ makeCursorResponse(
+ 0LL,
+ NamespaceString(nss.getCommandNS()),
+ {BSON("v" << OplogEntry::kOplogVersion << "key" << BSON("_id" << 1) << "name"
+ << "_id_"
+ << "ns" << nss.ns())})
+ .data);
- {
- // Ensure second lastOplogFetch doesn't happen until we're ready for it.
- FailPointEnableBlock clonerFailpoint("hangAfterClonerStage",
- kListDatabasesFailPointData);
// Feature Compatibility Version.
processSuccessfulFCVFetcherResponseLastStable();
@@ -4521,18 +4532,20 @@ TEST_F(InitialSyncerTest, GetInitialSyncProgressOmitsClonerStatsIfClonerStatsExc
ASSERT_TRUE(request.cmdObj.getBoolField("oplogReplay"));
net->runReadyNetworkOperations();
}
- }
- getInitialSyncer().waitForCloner_forTest();
- // This returns a valid document because we omit the cloner stats when they do not fit in a
- // BSON document.
- auto progress = initialSyncer->getInitialSyncProgress();
- ASSERT_EQUALS(progress["initialSyncStart"].type(), Date) << progress;
- ASSERT_FALSE(progress.hasField("databases")) << progress;
+        // Wait until we reach the CollectionCloner, at which point stats should be populated.
+ collectionClonerFailPoint->waitForTimesEntered(timesEntered + 1);
- // Initial sync will attempt to log stats again at shutdown in a callback, where it should not
- // terminate because we now return a valid stats document.
- ASSERT_OK(initialSyncer->shutdown());
+ // This returns a valid document because we omit the cloner stats when they do not fit in a
+ // BSON document.
+ auto progress = initialSyncer->getInitialSyncProgress();
+ ASSERT_EQUALS(progress["initialSyncStart"].type(), Date) << progress;
+ ASSERT_FALSE(progress.hasField("databases")) << progress;
+
+ // Initial sync will attempt to log stats again at shutdown in a callback, where it should
+ // not terminate because we now return a valid stats document.
+ ASSERT_OK(initialSyncer->shutdown());
+ }
// Deliver cancellation signal to callbacks.
executor::NetworkInterfaceMock::InNetworkGuard(net)->runReadyNetworkOperations();
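
The bucketing expression collectionInfos[(i * 4) / numCollections] above splits the 200000 mock collection infos into four roughly equal cursor batches (one first batch plus three continuation batches), so each individual listCollections reply stays under the BSON size limit while the aggregate cloner stats document still exceeds it, which is the condition the test needs. A standalone sketch of the same split, with std::string standing in for the BSONObj infos:

    #include <cstddef>
    #include <string>
    #include <vector>

    // Distribute items[i] to bucket (i * kBatches) / items.size(); the
    // buckets come out contiguous and within one element of equal size.
    std::vector<std::vector<std::string>> splitIntoBatches(
        const std::vector<std::string>& items, std::size_t kBatches) {
        std::vector<std::vector<std::string>> batches(kBatches);
        for (std::size_t i = 0; i < items.size(); ++i) {
            batches[(i * kBatches) / items.size()].push_back(items[i]);
        }
        return batches;
    }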