summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKaloian Manassiev <kaloian.manassiev@mongodb.com>2015-07-08 11:08:09 -0400
committerKaloian Manassiev <kaloian.manassiev@mongodb.com>2015-07-09 14:56:45 -0400
commit9ebff75f250b2a90093c3c22f22cd0780a0b3a18 (patch)
tree5e7042e4e1313ca8987128c141267842df79aca1
parent65419d7fd381e8fb62beea5c165ef4caa1c5f8cf (diff)
downloadmongo-9ebff75f250b2a90093c3c22f22cd0780a0b3a18.tar.gz
SERVER-19111 Move change log checking methods to the common fixture
This change moves the change log record contents checking methods to the common catalog manager replica set test fixture.
-rw-r--r--src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp3
-rw-r--r--src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp2
-rw-r--r--src/mongo/s/catalog/replset/catalog_manager_replica_set_log_change_test.cpp164
-rw-r--r--src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp51
-rw-r--r--src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp104
-rw-r--r--src/mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.cpp64
-rw-r--r--src/mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h18
7 files changed, 186 insertions, 220 deletions
diff --git a/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp b/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp
index 0017a2a5213..d16c9efed68 100644
--- a/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp
+++ b/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp
@@ -570,8 +570,7 @@ StatusWith<DatabaseType> CatalogManagerLegacy::getDatabase(const std::string& db
BSONObj dbObj = conn->findOne(DatabaseType::ConfigNS, BSON(DatabaseType::name(dbName)));
if (dbObj.isEmpty()) {
conn.done();
- return Status(ErrorCodes::DatabaseNotFound,
- stream() << "database " << dbName << " not found");
+ return {ErrorCodes::DatabaseNotFound, stream() << "database " << dbName << " not found"};
}
conn.done();
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
index a6cae6a1703..1d48acfeee1 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
@@ -279,7 +279,7 @@ StatusWith<DatabaseType> CatalogManagerReplicaSet::getDatabase(const std::string
const auto& docs = findStatus.getValue();
if (docs.empty()) {
- return {ErrorCodes::NamespaceNotFound, stream() << "database " << dbName << " not found"};
+ return {ErrorCodes::DatabaseNotFound, stream() << "database " << dbName << " not found"};
}
invariant(docs.size() == 1);
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_log_change_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_log_change_test.cpp
index d9e9122b59a..e32a34b7964 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_log_change_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_log_change_test.cpp
@@ -38,7 +38,6 @@
#include "mongo/executor/task_executor.h"
#include "mongo/s/catalog/replset/catalog_manager_replica_set.h"
#include "mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h"
-#include "mongo/s/catalog/type_changelog.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h"
@@ -58,160 +57,79 @@ using unittest::assertGet;
static const stdx::chrono::seconds kFutureTimeout{5};
-class LogChangeTest : public CatalogManagerReplSetTestFixture {
-public:
- void expectChangeLogCreate(const BSONObj& response) {
- onCommand([&response](const RemoteCommandRequest& request) {
- ASSERT_EQUALS("config", request.dbname);
- BSONObj expectedCreateCmd = BSON("create" << ChangeLogType::ConfigNS << "capped" << true
- << "size" << 1024 * 1024 * 10);
- ASSERT_EQUALS(expectedCreateCmd, request.cmdObj);
-
- return response;
- });
- }
-
- void expectChangeLogInsert(const ChangeLogType& expectedChangeLog) {
- onCommand([&expectedChangeLog](const RemoteCommandRequest& request) {
- ASSERT_EQUALS("config", request.dbname);
-
- BatchedInsertRequest actualBatchedInsert;
- std::string errmsg;
- ASSERT_TRUE(actualBatchedInsert.parseBSON(request.dbname, request.cmdObj, &errmsg));
- ASSERT_EQUALS(ChangeLogType::ConfigNS, actualBatchedInsert.getNS().ns());
- auto inserts = actualBatchedInsert.getDocuments();
- ASSERT_EQUALS(1U, inserts.size());
- BSONObj insert = inserts.front();
-
- auto changeLogResult = ChangeLogType::fromBSON(insert);
- ASSERT_OK(changeLogResult.getStatus());
- ChangeLogType& actualChangeLog = changeLogResult.getValue();
-
- ASSERT_EQUALS(expectedChangeLog.getClientAddr(), actualChangeLog.getClientAddr());
- ASSERT_EQUALS(expectedChangeLog.getDetails(), actualChangeLog.getDetails());
- ASSERT_EQUALS(expectedChangeLog.getNS(), actualChangeLog.getNS());
- ASSERT_EQUALS(expectedChangeLog.getServer(), actualChangeLog.getServer());
- ASSERT_EQUALS(expectedChangeLog.getTime(), actualChangeLog.getTime());
- ASSERT_EQUALS(expectedChangeLog.getWhat(), actualChangeLog.getWhat());
-
- // Handle changeId specially because there's no way to know what OID was generated
- std::string changeId = actualChangeLog.getChangeId();
- size_t firstDash = changeId.find("-");
- size_t lastDash = changeId.rfind("-");
- std::string serverPiece = changeId.substr(0, firstDash);
- std::string timePiece = changeId.substr(firstDash + 1, lastDash - firstDash - 1);
- std::string oidPiece = changeId.substr(lastDash + 1);
-
- ASSERT_EQUALS(serverPiece, expectedChangeLog.getServer());
- ASSERT_EQUALS(timePiece, expectedChangeLog.getTime().toString());
-
- OID generatedOID;
- // Just make sure this doesn't throws and assume the OID is valid
- generatedOID.init(oidPiece);
-
- BatchedCommandResponse response;
- response.setOk(true);
-
- return response.toBSON();
- });
- }
-};
-
-TEST_F(LogChangeTest, LogChangeNoRetryAfterSuccessfulCreate) {
+using LogChangeTest = CatalogManagerReplSetTestFixture;
+
+TEST_F(LogChangeTest, NoRetryAfterSuccessfulCreate) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
- ChangeLogType expectedChangeLog;
- expectedChangeLog.setServer(network()->getHostName());
- expectedChangeLog.setClientAddr("client");
- expectedChangeLog.setTime(network()->now());
- expectedChangeLog.setNS("foo.bar");
- expectedChangeLog.setWhat("moved a chunk");
- expectedChangeLog.setDetails(BSON("min" << 3 << "max" << 4));
-
- auto future = launchAsync([this, &expectedChangeLog] {
- catalogManager()->logChange(expectedChangeLog.getClientAddr(),
- expectedChangeLog.getWhat(),
- expectedChangeLog.getNS(),
- expectedChangeLog.getDetails());
+ auto future = launchAsync([this] {
+ catalogManager()->logChange(
+ "client", "moved a chunk", "foo.bar", BSON("min" << 3 << "max" << 4));
});
expectChangeLogCreate(BSON("ok" << 1));
- expectChangeLogInsert(expectedChangeLog);
+ expectChangeLogInsert(
+ "client", network()->now(), "moved a chunk", "foo.bar", BSON("min" << 3 << "max" << 4));
// Now wait for the logChange call to return
future.timed_get(kFutureTimeout);
// Now log another change and confirm that we don't re-attempt to create the collection
- future = launchAsync([this, &expectedChangeLog] {
- catalogManager()->logChange(expectedChangeLog.getClientAddr(),
- expectedChangeLog.getWhat(),
- expectedChangeLog.getNS(),
- expectedChangeLog.getDetails());
+ future = launchAsync([this] {
+ catalogManager()->logChange(
+ "client", "moved a second chunk", "foo.bar", BSON("min" << 4 << "max" << 5));
});
- expectChangeLogInsert(expectedChangeLog);
+ expectChangeLogInsert("client",
+ network()->now(),
+ "moved a second chunk",
+ "foo.bar",
+ BSON("min" << 4 << "max" << 5));
// Now wait for the logChange call to return
future.timed_get(kFutureTimeout);
}
-TEST_F(LogChangeTest, LogActionNoRetryCreateIfAlreadyExists) {
+TEST_F(LogChangeTest, NoRetryCreateIfAlreadyExists) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
- ChangeLogType expectedChangeLog;
- expectedChangeLog.setServer(network()->getHostName());
- expectedChangeLog.setClientAddr("client");
- expectedChangeLog.setTime(network()->now());
- expectedChangeLog.setNS("foo.bar");
- expectedChangeLog.setWhat("moved a chunk");
- expectedChangeLog.setDetails(BSON("min" << 3 << "max" << 4));
-
- auto future = launchAsync([this, &expectedChangeLog] {
- catalogManager()->logChange(expectedChangeLog.getClientAddr(),
- expectedChangeLog.getWhat(),
- expectedChangeLog.getNS(),
- expectedChangeLog.getDetails());
+ auto future = launchAsync([this] {
+ catalogManager()->logChange(
+ "client", "moved a chunk", "foo.bar", BSON("min" << 3 << "max" << 4));
});
BSONObjBuilder createResponseBuilder;
Command::appendCommandStatus(createResponseBuilder,
Status(ErrorCodes::NamespaceExists, "coll already exists"));
expectChangeLogCreate(createResponseBuilder.obj());
- expectChangeLogInsert(expectedChangeLog);
+ expectChangeLogInsert(
+ "client", network()->now(), "moved a chunk", "foo.bar", BSON("min" << 3 << "max" << 4));
// Now wait for the logAction call to return
future.timed_get(kFutureTimeout);
// Now log another change and confirm that we don't re-attempt to create the collection
- future = launchAsync([this, &expectedChangeLog] {
- catalogManager()->logChange(expectedChangeLog.getClientAddr(),
- expectedChangeLog.getWhat(),
- expectedChangeLog.getNS(),
- expectedChangeLog.getDetails());
+ future = launchAsync([this] {
+ catalogManager()->logChange(
+ "client", "moved a second chunk", "foo.bar", BSON("min" << 4 << "max" << 5));
});
- expectChangeLogInsert(expectedChangeLog);
+ expectChangeLogInsert("client",
+ network()->now(),
+ "moved a second chunk",
+ "foo.bar",
+ BSON("min" << 4 << "max" << 5));
// Now wait for the logChange call to return
future.timed_get(kFutureTimeout);
}
-TEST_F(LogChangeTest, LogActionCreateFailure) {
+TEST_F(LogChangeTest, CreateFailure) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
- ChangeLogType expectedChangeLog;
- expectedChangeLog.setServer(network()->getHostName());
- expectedChangeLog.setClientAddr("client");
- expectedChangeLog.setTime(network()->now());
- expectedChangeLog.setNS("foo.bar");
- expectedChangeLog.setWhat("moved a chunk");
- expectedChangeLog.setDetails(BSON("min" << 3 << "max" << 4));
-
- auto future = launchAsync([this, &expectedChangeLog] {
- catalogManager()->logChange(expectedChangeLog.getClientAddr(),
- expectedChangeLog.getWhat(),
- expectedChangeLog.getNS(),
- expectedChangeLog.getDetails());
+ auto future = launchAsync([this] {
+ catalogManager()->logChange(
+ "client", "moved a chunk", "foo.bar", BSON("min" << 3 << "max" << 4));
});
BSONObjBuilder createResponseBuilder;
@@ -223,15 +141,17 @@ TEST_F(LogChangeTest, LogActionCreateFailure) {
future.timed_get(kFutureTimeout);
// Now log another change and confirm that we *do* attempt to create the collection
- future = launchAsync([this, &expectedChangeLog] {
- catalogManager()->logChange(expectedChangeLog.getClientAddr(),
- expectedChangeLog.getWhat(),
- expectedChangeLog.getNS(),
- expectedChangeLog.getDetails());
+ future = launchAsync([this] {
+ catalogManager()->logChange(
+ "client", "moved a second chunk", "foo.bar", BSON("min" << 4 << "max" << 5));
});
expectChangeLogCreate(BSON("ok" << 1));
- expectChangeLogInsert(expectedChangeLog);
+ expectChangeLogInsert("client",
+ network()->now(),
+ "moved a second chunk",
+ "foo.bar",
+ BSON("min" << 4 << "max" << 5));
// Now wait for the logChange call to return
future.timed_get(kFutureTimeout);
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp
index 8b81f667055..cc1d5783480 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp
@@ -39,7 +39,6 @@
#include "mongo/executor/task_executor.h"
#include "mongo/s/catalog/replset/catalog_manager_replica_set.h"
#include "mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h"
-#include "mongo/s/catalog/type_changelog.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog/type_shard.h"
@@ -85,45 +84,6 @@ public:
});
}
- void expectLogChange(const string& clientAddress,
- const string& what,
- const string& ns,
- const BSONObj& detail) {
- onCommand([&](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(configHost, request.target);
- ASSERT_EQUALS("config", request.dbname);
- BSONObj expectedCreateCmd = BSON("create" << ChangeLogType::ConfigNS << "capped" << true
- << "size" << 1024 * 1024 * 10);
- ASSERT_EQUALS(expectedCreateCmd, request.cmdObj);
-
- return BSON("ok" << 1);
- });
-
- onCommand([&](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(configHost, request.target);
- ASSERT_EQUALS("config", request.dbname);
-
- BatchedInsertRequest actualBatchedInsert;
- std::string errmsg;
- ASSERT_TRUE(actualBatchedInsert.parseBSON(request.dbname, request.cmdObj, &errmsg));
- ASSERT_EQUALS(ChangeLogType::ConfigNS, actualBatchedInsert.getNS().ns());
- auto inserts = actualBatchedInsert.getDocuments();
- ASSERT_EQUALS(1U, inserts.size());
- BSONObj insert = inserts.front();
-
- auto actualChangeLog = assertGet(ChangeLogType::fromBSON(insert));
- ASSERT_EQUALS(clientAddress, actualChangeLog.getClientAddr());
- ASSERT_EQUALS(what, actualChangeLog.getWhat());
- ASSERT_EQUALS(ns, actualChangeLog.getNS());
- ASSERT_EQUALS(detail, actualChangeLog.getDetails());
-
- BatchedCommandResponse response;
- response.setOk(true);
-
- return response.toBSON();
- });
- }
-
void setUp() {
CatalogManagerReplSetTestFixture::setUp();
configTargeter()->setFindHostReturnValue(configHost);
@@ -235,7 +195,12 @@ TEST_F(RemoveShardTest, RemoveShardStartDraining) {
return vector<BSONObj>{remainingShard.toBSON()};
});
- expectLogChange(clientHost.toString(), "removeShard.start", "", BSON("shard" << shardName));
+ expectChangeLogCreate(BSON("ok" << 1));
+ expectChangeLogInsert(clientHost.toString(),
+ network()->now(),
+ "removeShard.start",
+ "",
+ BSON("shard" << shardName));
future.timed_get(kFutureTimeout);
}
@@ -375,7 +340,9 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
return vector<BSONObj>{remainingShard.toBSON()};
});
- expectLogChange(clientHost.toString(), "removeShard", "", BSON("shard" << shardName));
+ expectChangeLogCreate(BSON("ok" << 1));
+ expectChangeLogInsert(
+ clientHost.toString(), network()->now(), "removeShard", "", BSON("shard" << shardName));
future.timed_get(kFutureTimeout);
}
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
index 3186e5ecf52..019bb6a1dbf 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
@@ -67,7 +67,9 @@ using std::vector;
using stdx::chrono::milliseconds;
using unittest::assertGet;
-TEST_F(CatalogManagerReplSetTestFixture, GetCollectionExisting) {
+using CatalogManagerReplSetTest = CatalogManagerReplSetTestFixture;
+
+TEST_F(CatalogManagerReplSetTest, GetCollectionExisting) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
CollectionType expectedColl;
@@ -100,7 +102,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetCollectionExisting) {
ASSERT_EQ(expectedColl.toBSON(), actualColl.toBSON());
}
-TEST_F(CatalogManagerReplSetTestFixture, GetCollectionNotExisting) {
+TEST_F(CatalogManagerReplSetTest, GetCollectionNotExisting) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
@@ -114,7 +116,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetCollectionNotExisting) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, GetDatabaseExisting) {
+TEST_F(CatalogManagerReplSetTest, GetDatabaseExisting) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
DatabaseType expectedDb;
@@ -144,12 +146,12 @@ TEST_F(CatalogManagerReplSetTestFixture, GetDatabaseExisting) {
ASSERT_EQ(expectedDb.toBSON(), actualDb.toBSON());
}
-TEST_F(CatalogManagerReplSetTestFixture, GetDatabaseNotExisting) {
+TEST_F(CatalogManagerReplSetTest, GetDatabaseNotExisting) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
auto dbResult = catalogManager()->getDatabase("NonExistent");
- ASSERT_EQ(dbResult.getStatus(), ErrorCodes::NamespaceNotFound);
+ ASSERT_EQ(dbResult.getStatus(), ErrorCodes::DatabaseNotFound);
});
onFindCommand([](const RemoteCommandRequest& request) { return vector<BSONObj>{}; });
@@ -157,7 +159,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetDatabaseNotExisting) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, UpdateCollection) {
+TEST_F(CatalogManagerReplSetTest, UpdateCollection) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
CollectionType collection;
@@ -200,7 +202,7 @@ TEST_F(CatalogManagerReplSetTestFixture, UpdateCollection) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, UpdateCollectionNotMaster) {
+TEST_F(CatalogManagerReplSetTest, UpdateCollectionNotMaster) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
CollectionType collection;
@@ -230,7 +232,7 @@ TEST_F(CatalogManagerReplSetTestFixture, UpdateCollectionNotMaster) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, UpdateCollectionNotMasterFromTargeter) {
+TEST_F(CatalogManagerReplSetTest, UpdateCollectionNotMasterFromTargeter) {
configTargeter()->setFindHostReturnValue(Status(ErrorCodes::NotMaster, "not master"));
CollectionType collection;
@@ -249,7 +251,7 @@ TEST_F(CatalogManagerReplSetTestFixture, UpdateCollectionNotMasterFromTargeter)
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, UpdateCollectionNotMasterRetrySuccess) {
+TEST_F(CatalogManagerReplSetTest, UpdateCollectionNotMasterRetrySuccess) {
HostAndPort host1("TestHost1");
HostAndPort host2("TestHost2");
configTargeter()->setFindHostReturnValue(host1);
@@ -308,7 +310,7 @@ TEST_F(CatalogManagerReplSetTestFixture, UpdateCollectionNotMasterRetrySuccess)
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, GetAllShardsValid) {
+TEST_F(CatalogManagerReplSetTest, GetAllShardsValid) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
ShardType s1;
@@ -357,7 +359,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetAllShardsValid) {
}
}
-TEST_F(CatalogManagerReplSetTestFixture, GetAllShardsWithInvalidShard) {
+TEST_F(CatalogManagerReplSetTest, GetAllShardsWithInvalidShard) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
@@ -383,7 +385,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetAllShardsWithInvalidShard) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, GetChunksForNSWithSortAndLimit) {
+TEST_F(CatalogManagerReplSetTest, GetChunksForNSWithSortAndLimit) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
OID oid = OID::gen();
@@ -440,7 +442,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetChunksForNSWithSortAndLimit) {
ASSERT_EQ(chunkB.toBSON(), chunks[1].toBSON());
}
-TEST_F(CatalogManagerReplSetTestFixture, GetChunksForNSNoSortNoLimit) {
+TEST_F(CatalogManagerReplSetTest, GetChunksForNSNoSortNoLimit) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
ChunkVersion queryChunkVersion({1, 2, OID::gen()});
@@ -476,7 +478,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetChunksForNSNoSortNoLimit) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, GetChunksForNSInvalidChunk) {
+TEST_F(CatalogManagerReplSetTest, GetChunksForNSInvalidChunk) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
ChunkVersion queryChunkVersion({1, 2, OID::gen()});
@@ -517,7 +519,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetChunksForNSInvalidChunk) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, RunUserManagementReadCommand) {
+TEST_F(CatalogManagerReplSetTest, RunUserManagementReadCommand) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
@@ -543,7 +545,7 @@ TEST_F(CatalogManagerReplSetTestFixture, RunUserManagementReadCommand) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, RunUserManagementReadCommandUnsatisfiedReadPref) {
+TEST_F(CatalogManagerReplSetTest, RunUserManagementReadCommandUnsatisfiedReadPref) {
configTargeter()->setFindHostReturnValue(
Status(ErrorCodes::FailedToSatisfyReadPreference, "no nodes up"));
@@ -555,7 +557,7 @@ TEST_F(CatalogManagerReplSetTestFixture, RunUserManagementReadCommandUnsatisfied
ASSERT_EQUALS(ErrorCodes::FailedToSatisfyReadPreference, commandStatus);
}
-TEST_F(CatalogManagerReplSetTestFixture, RunUserManagementWriteCommandDistLockHeld) {
+TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandDistLockHeld) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
distLock()->expectLock(
@@ -579,7 +581,7 @@ TEST_F(CatalogManagerReplSetTestFixture, RunUserManagementWriteCommandDistLockHe
ASSERT_EQUALS(ErrorCodes::LockBusy, Command::getStatusFromCommandResult(response));
}
-TEST_F(CatalogManagerReplSetTestFixture, RunUserManagementWriteCommandSuccess) {
+TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandSuccess) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
distLock()->expectLock(
@@ -621,7 +623,7 @@ TEST_F(CatalogManagerReplSetTestFixture, RunUserManagementWriteCommandSuccess) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, RunUserManagementWriteCommandNotMaster) {
+TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandNotMaster) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
distLock()->expectLock(
@@ -660,7 +662,7 @@ TEST_F(CatalogManagerReplSetTestFixture, RunUserManagementWriteCommandNotMaster)
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, RunUserManagementWriteCommandNotMasterRetrySuccess) {
+TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandNotMasterRetrySuccess) {
HostAndPort host1("TestHost1");
HostAndPort host2("TestHost2");
@@ -716,7 +718,7 @@ TEST_F(CatalogManagerReplSetTestFixture, RunUserManagementWriteCommandNotMasterR
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, GetGlobalSettingsBalancerDoc) {
+TEST_F(CatalogManagerReplSetTest, GetGlobalSettingsBalancerDoc) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
// sample balancer doc
@@ -744,7 +746,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetGlobalSettingsBalancerDoc) {
ASSERT_EQ(actualBalSettings.toBSON(), st1.toBSON());
}
-TEST_F(CatalogManagerReplSetTestFixture, GetGlobalSettingsChunkSizeDoc) {
+TEST_F(CatalogManagerReplSetTest, GetGlobalSettingsChunkSizeDoc) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
// sample chunk size doc
@@ -772,7 +774,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetGlobalSettingsChunkSizeDoc) {
ASSERT_EQ(actualBalSettings.toBSON(), st1.toBSON());
}
-TEST_F(CatalogManagerReplSetTestFixture, GetGlobalSettingsInvalidDoc) {
+TEST_F(CatalogManagerReplSetTest, GetGlobalSettingsInvalidDoc) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
@@ -799,7 +801,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetGlobalSettingsInvalidDoc) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, GetGlobalSettingsNonExistent) {
+TEST_F(CatalogManagerReplSetTest, GetGlobalSettingsNonExistent) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
@@ -824,7 +826,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetGlobalSettingsNonExistent) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, GetCollectionsValidResultsNoDb) {
+TEST_F(CatalogManagerReplSetTest, GetCollectionsValidResultsNoDb) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
CollectionType coll1;
@@ -880,7 +882,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetCollectionsValidResultsNoDb) {
ASSERT_EQ(coll3.toBSON(), actualColls[2].toBSON());
}
-TEST_F(CatalogManagerReplSetTestFixture, GetCollectionsValidResultsWithDb) {
+TEST_F(CatalogManagerReplSetTest, GetCollectionsValidResultsWithDb) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
CollectionType coll1;
@@ -929,7 +931,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetCollectionsValidResultsWithDb) {
ASSERT_EQ(coll2.toBSON(), actualColls[1].toBSON());
}
-TEST_F(CatalogManagerReplSetTestFixture, GetCollectionsInvalidCollectionType) {
+TEST_F(CatalogManagerReplSetTest, GetCollectionsInvalidCollectionType) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
@@ -972,7 +974,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetCollectionsInvalidCollectionType) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, GetDatabasesForShardValid) {
+TEST_F(CatalogManagerReplSetTest, GetDatabasesForShardValid) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
DatabaseType dbt1;
@@ -1010,7 +1012,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetDatabasesForShardValid) {
ASSERT_EQ(dbt2.getName(), actualDbNames[1]);
}
-TEST_F(CatalogManagerReplSetTestFixture, GetDatabasesForShardInvalidDoc) {
+TEST_F(CatalogManagerReplSetTest, GetDatabasesForShardInvalidDoc) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
@@ -1035,7 +1037,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetDatabasesForShardInvalidDoc) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, GetTagsForCollection) {
+TEST_F(CatalogManagerReplSetTest, GetTagsForCollection) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
TagsType tagA;
@@ -1077,7 +1079,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetTagsForCollection) {
ASSERT_EQ(tagB.toBSON(), tags[1].toBSON());
}
-TEST_F(CatalogManagerReplSetTestFixture, GetTagsForCollectionNoTags) {
+TEST_F(CatalogManagerReplSetTest, GetTagsForCollectionNoTags) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
@@ -1094,7 +1096,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetTagsForCollectionNoTags) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, GetTagsForCollectionInvalidTag) {
+TEST_F(CatalogManagerReplSetTest, GetTagsForCollectionInvalidTag) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
auto future = launchAsync([this] {
@@ -1124,7 +1126,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetTagsForCollectionInvalidTag) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, GetTagForChunkOneTagFound) {
+TEST_F(CatalogManagerReplSetTest, GetTagForChunkOneTagFound) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
ChunkType chunk;
@@ -1164,7 +1166,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetTagForChunkOneTagFound) {
ASSERT_EQ("tag", tagStr);
}
-TEST_F(CatalogManagerReplSetTestFixture, GetTagForChunkNoTagFound) {
+TEST_F(CatalogManagerReplSetTest, GetTagForChunkNoTagFound) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
ChunkType chunk;
@@ -1198,7 +1200,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetTagForChunkNoTagFound) {
ASSERT_EQ("", tagStr); // empty string returned when tag document not found
}
-TEST_F(CatalogManagerReplSetTestFixture, GetTagForChunkInvalidTagDoc) {
+TEST_F(CatalogManagerReplSetTest, GetTagForChunkInvalidTagDoc) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
ChunkType chunk;
@@ -1236,7 +1238,7 @@ TEST_F(CatalogManagerReplSetTestFixture, GetTagForChunkInvalidTagDoc) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, UpdateDatabase) {
+TEST_F(CatalogManagerReplSetTest, UpdateDatabase) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
DatabaseType dbt;
@@ -1276,7 +1278,7 @@ TEST_F(CatalogManagerReplSetTestFixture, UpdateDatabase) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, UpdateDatabaseHostUnreachable) {
+TEST_F(CatalogManagerReplSetTest, UpdateDatabaseHostUnreachable) {
HostAndPort host1("TestHost1");
configTargeter()->setFindHostReturnValue(host1);
@@ -1305,7 +1307,7 @@ TEST_F(CatalogManagerReplSetTestFixture, UpdateDatabaseHostUnreachable) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, ApplyChunkOpsDeprecated) {
+TEST_F(CatalogManagerReplSetTest, ApplyChunkOpsDeprecated) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
BSONArray updateOps = BSON_ARRAY(BSON("update1"
@@ -1334,7 +1336,7 @@ TEST_F(CatalogManagerReplSetTestFixture, ApplyChunkOpsDeprecated) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, ApplyChunkOpsDeprecatedCommandFailed) {
+TEST_F(CatalogManagerReplSetTest, ApplyChunkOpsDeprecatedCommandFailed) {
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
BSONArray updateOps = BSON_ARRAY(BSON("update1"
@@ -1366,7 +1368,7 @@ TEST_F(CatalogManagerReplSetTestFixture, ApplyChunkOpsDeprecatedCommandFailed) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, createDatabaseSuccess) {
+TEST_F(CatalogManagerReplSetTest, createDatabaseSuccess) {
const string dbname = "databaseToCreate";
const HostAndPort configHost("TestHost1");
configTargeter()->setFindHostReturnValue(configHost);
@@ -1492,7 +1494,7 @@ TEST_F(CatalogManagerReplSetTestFixture, createDatabaseSuccess) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, createDatabaseDistLockHeld) {
+TEST_F(CatalogManagerReplSetTest, createDatabaseDistLockHeld) {
const string dbname = "databaseToCreate";
@@ -1512,7 +1514,7 @@ TEST_F(CatalogManagerReplSetTestFixture, createDatabaseDistLockHeld) {
ASSERT_EQUALS(ErrorCodes::LockBusy, status);
}
-TEST_F(CatalogManagerReplSetTestFixture, createDatabaseDBExists) {
+TEST_F(CatalogManagerReplSetTest, createDatabaseDBExists) {
const string dbname = "databaseToCreate";
@@ -1547,7 +1549,7 @@ TEST_F(CatalogManagerReplSetTestFixture, createDatabaseDBExists) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, createDatabaseDBExistsDifferentCase) {
+TEST_F(CatalogManagerReplSetTest, createDatabaseDBExistsDifferentCase) {
const string dbname = "databaseToCreate";
const string dbnameDiffCase = "databasetocreate";
@@ -1583,7 +1585,7 @@ TEST_F(CatalogManagerReplSetTestFixture, createDatabaseDBExistsDifferentCase) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, createDatabaseNoShards) {
+TEST_F(CatalogManagerReplSetTest, createDatabaseNoShards) {
const string dbname = "databaseToCreate";
@@ -1624,7 +1626,7 @@ TEST_F(CatalogManagerReplSetTestFixture, createDatabaseNoShards) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, createDatabaseDuplicateKeyOnInsert) {
+TEST_F(CatalogManagerReplSetTest, createDatabaseDuplicateKeyOnInsert) {
const string dbname = "databaseToCreate";
const HostAndPort configHost("TestHost1");
configTargeter()->setFindHostReturnValue(configHost);
@@ -1751,7 +1753,7 @@ TEST_F(CatalogManagerReplSetTestFixture, createDatabaseDuplicateKeyOnInsert) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, EnableShardingNoDBExists) {
+TEST_F(CatalogManagerReplSetTest, EnableShardingNoDBExists) {
vector<ShardType> shards;
ShardType shard;
shard.setName("shard0");
@@ -1842,7 +1844,7 @@ TEST_F(CatalogManagerReplSetTestFixture, EnableShardingNoDBExists) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, EnableShardingLockBusy) {
+TEST_F(CatalogManagerReplSetTest, EnableShardingLockBusy) {
configTargeter()->setFindHostReturnValue(HostAndPort("config:123"));
distLock()->expectLock(
@@ -1853,7 +1855,7 @@ TEST_F(CatalogManagerReplSetTestFixture, EnableShardingLockBusy) {
ASSERT_EQ(ErrorCodes::LockBusy, status.code());
}
-TEST_F(CatalogManagerReplSetTestFixture, EnableShardingDBExistsWithDifferentCase) {
+TEST_F(CatalogManagerReplSetTest, EnableShardingDBExistsWithDifferentCase) {
vector<ShardType> shards;
ShardType shard;
shard.setName("shard0");
@@ -1882,7 +1884,7 @@ TEST_F(CatalogManagerReplSetTestFixture, EnableShardingDBExistsWithDifferentCase
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, EnableShardingDBExists) {
+TEST_F(CatalogManagerReplSetTest, EnableShardingDBExists) {
vector<ShardType> shards;
ShardType shard;
shard.setName("shard0");
@@ -1937,7 +1939,7 @@ TEST_F(CatalogManagerReplSetTestFixture, EnableShardingDBExists) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, EnableShardingDBExistsInvalidFormat) {
+TEST_F(CatalogManagerReplSetTest, EnableShardingDBExistsInvalidFormat) {
vector<ShardType> shards;
ShardType shard;
shard.setName("shard0");
@@ -1966,7 +1968,7 @@ TEST_F(CatalogManagerReplSetTestFixture, EnableShardingDBExistsInvalidFormat) {
future.timed_get(kFutureTimeout);
}
-TEST_F(CatalogManagerReplSetTestFixture, EnableShardingNoDBExistsNoShards) {
+TEST_F(CatalogManagerReplSetTest, EnableShardingNoDBExistsNoShards) {
configTargeter()->setFindHostReturnValue(HostAndPort("config:123"));
distLock()->expectLock(
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.cpp
index ef80012e52e..555b49ce931 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.cpp
@@ -44,6 +44,7 @@
#include "mongo/executor/network_interface_mock.h"
#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/s/catalog/replset/catalog_manager_replica_set.h"
+#include "mongo/s/catalog/type_changelog.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
@@ -55,6 +56,7 @@ namespace mongo {
using executor::NetworkInterfaceMock;
using executor::NetworkTestEnv;
+using unittest::assertGet;
using std::vector;
@@ -200,7 +202,7 @@ void CatalogManagerReplSetTestFixture::setupShards(const std::vector<ShardType>&
}
void CatalogManagerReplSetTestFixture::expectInserts(const NamespaceString nss,
- std::vector<BSONObj> expected) {
+ const std::vector<BSONObj>& expected) {
onCommand([&nss, &expected](const RemoteCommandRequest& request) {
ASSERT_EQUALS(nss.db(), request.dbname);
@@ -227,4 +229,64 @@ void CatalogManagerReplSetTestFixture::expectInserts(const NamespaceString nss,
});
}
+void CatalogManagerReplSetTestFixture::expectChangeLogCreate(const BSONObj& response) {
+ onCommand([&response](const RemoteCommandRequest& request) {
+ ASSERT_EQUALS("config", request.dbname);
+ BSONObj expectedCreateCmd = BSON("create" << ChangeLogType::ConfigNS << "capped" << true
+ << "size" << 1024 * 1024 * 10);
+ ASSERT_EQUALS(expectedCreateCmd, request.cmdObj);
+
+ return response;
+ });
+}
+
+void CatalogManagerReplSetTestFixture::expectChangeLogInsert(const std::string& clientAddress,
+ Date_t timestamp,
+ const std::string& what,
+ const std::string& ns,
+ const BSONObj& detail) {
+ onCommand([this, &clientAddress, timestamp, &what, &ns, &detail](
+ const RemoteCommandRequest& request) {
+ ASSERT_EQUALS("config", request.dbname);
+
+ BatchedInsertRequest actualBatchedInsert;
+ std::string errmsg;
+ ASSERT_TRUE(actualBatchedInsert.parseBSON(request.dbname, request.cmdObj, &errmsg));
+ ASSERT_EQUALS(ChangeLogType::ConfigNS, actualBatchedInsert.getNS().ns());
+
+ auto inserts = actualBatchedInsert.getDocuments();
+ ASSERT_EQUALS(1U, inserts.size());
+
+ const ChangeLogType& actualChangeLog = assertGet(ChangeLogType::fromBSON(inserts.front()));
+
+ ASSERT_EQUALS(clientAddress, actualChangeLog.getClientAddr());
+ ASSERT_EQUALS(detail, actualChangeLog.getDetails());
+ ASSERT_EQUALS(ns, actualChangeLog.getNS());
+ ASSERT_EQUALS(shardRegistry()->getNetwork()->getHostName(), actualChangeLog.getServer());
+ ASSERT_EQUALS(timestamp, actualChangeLog.getTime());
+ ASSERT_EQUALS(what, actualChangeLog.getWhat());
+
+ // Handle changeId specially because there's no way to know what OID was generated
+ std::string changeId = actualChangeLog.getChangeId();
+ size_t firstDash = changeId.find("-");
+ size_t lastDash = changeId.rfind("-");
+
+ const std::string serverPiece = changeId.substr(0, firstDash);
+ const std::string timePiece = changeId.substr(firstDash + 1, lastDash - firstDash - 1);
+ const std::string oidPiece = changeId.substr(lastDash + 1);
+
+ ASSERT_EQUALS(shardRegistry()->getNetwork()->getHostName(), serverPiece);
+ ASSERT_EQUALS(timestamp.toString(), timePiece);
+
+ OID generatedOID;
+ // Just make sure this doesn't throw and assume the OID is valid
+ generatedOID.init(oidPiece);
+
+ BatchedCommandResponse response;
+ response.setOk(true);
+
+ return response.toBSON();
+ });
+}
+
} // namespace mongo
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h
index 9c1ed37be78..9a866322431 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h
@@ -102,7 +102,23 @@ protected:
* Wait for a single insert request and ensures that the items being inserted exactly match the
* expected items. Responds with a success status.
*/
- void expectInserts(const NamespaceString nss, std::vector<BSONObj> expected);
+ void expectInserts(const NamespaceString nss, const std::vector<BSONObj>& expected);
+
+ /**
+ * Wait for an operation that creates the sharding change log collection and return the
+ * specified response.
+ */
+ void expectChangeLogCreate(const BSONObj& response);
+
+ /**
+ * Wait for a single insert in the change log collection with the specified contents and return
+ * a successful response.
+ */
+ void expectChangeLogInsert(const std::string& clientAddress,
+ Date_t timestamp,
+ const std::string& what,
+ const std::string& ns,
+ const BSONObj& detail);
void setUp() override;