summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEsha Maharishi <esha.maharishi@mongodb.com>2016-09-13 15:33:30 -0400
committerEsha Maharishi <esha.maharishi@mongodb.com>2016-09-15 15:40:11 -0400
commit5b0901ea6a1b9b90d2cee8263b8c60013d7c6979 (patch)
tree521e4f207f6c844a47c0112f4b142ae3af09f64f
parenteec91f97a655486892f6c75342adcffc68109f0e (diff)
downloadmongo-5b0901ea6a1b9b90d2cee8263b8c60013d7c6979.tar.gz
SERVER-25458 unit tests for ShardingState::initializeShardingAwarenessIfNeeded
-rw-r--r--src/mongo/db/db.cpp8
-rw-r--r--src/mongo/db/s/sharding_state.cpp12
-rw-r--r--src/mongo/db/s/sharding_state.h7
-rw-r--r--src/mongo/db/s/sharding_state_test.cpp291
4 files changed, 302 insertions, 16 deletions
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 35b1250a81a..4722cf846a8 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -736,8 +736,12 @@ ExitCode _initAndListen(int listenPort) {
<< startupWarningsLog;
}
- uassertStatusOK(ShardingState::get(startupOpCtx.get())
- ->initializeShardingAwarenessIfNeeded(startupOpCtx.get()));
+ auto shardingInitialized =
+ uassertStatusOK(ShardingState::get(startupOpCtx.get())
+ ->initializeShardingAwarenessIfNeeded(startupOpCtx.get()));
+ if (shardingInitialized) {
+ reloadShardRegistryUntilSuccess(startupOpCtx.get());
+ }
if (!storageGlobalParams.readOnly) {
logStartup(startupOpCtx.get());
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index 2eb14e6ba21..7bc1e42aac4 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -570,7 +570,7 @@ void ShardingState::_signalInitializationComplete(Status status) {
_initializationFinishedCondition.notify_all();
}
-Status ShardingState::initializeShardingAwarenessIfNeeded(OperationContext* txn) {
+StatusWith<bool> ShardingState::initializeShardingAwarenessIfNeeded(OperationContext* txn) {
// In sharded readOnly mode, we ignore the shardIdentity document on disk and instead *require*
// a shardIdentity document to be passed through --overrideShardIdentity.
if (storageGlobalParams.readOnly) {
@@ -589,7 +589,7 @@ Status ShardingState::initializeShardingAwarenessIfNeeded(OperationContext* txn)
if (!status.isOK()) {
return status;
}
- return reloadShardRegistryUntilSuccess(txn);
+ return true;
} else {
// Error if --overrideShardIdentity is used but *not* started with --shardsvr.
if (!serverGlobalParams.overrideShardIdentity.isEmpty()) {
@@ -600,7 +600,7 @@ Status ShardingState::initializeShardingAwarenessIfNeeded(OperationContext* txn)
"through --overrideShardIdentity: "
<< serverGlobalParams.overrideShardIdentity};
}
- return Status::OK();
+ return false;
}
}
// In sharded *non*-readOnly mode, error if --overrideShardIdentity is provided. Use the
@@ -639,7 +639,7 @@ Status ShardingState::initializeShardingAwarenessIfNeeded(OperationContext* txn)
<< NamespaceString::kConfigCollectionNamespace
<< ". This most likely means this server has not yet been added to a "
"sharded cluster.";
- return Status::OK();
+ return false;
}
auto swShardIdentity = ShardIdentityType::fromBSON(shardIdentityBSON);
if (!swShardIdentity.isOK()) {
@@ -649,7 +649,7 @@ Status ShardingState::initializeShardingAwarenessIfNeeded(OperationContext* txn)
if (!status.isOK()) {
return status;
}
- return reloadShardRegistryUntilSuccess(txn);
+ return true;
} else {
// Warn if a shardIdentity document is found on disk but *not* started with --shardsvr.
if (!shardIdentityBSON.isEmpty()) {
@@ -658,7 +658,7 @@ Status ShardingState::initializeShardingAwarenessIfNeeded(OperationContext* txn)
<< NamespaceString::kConfigCollectionNamespace << ": "
<< shardIdentityBSON;
}
- return Status::OK();
+ return false;
}
}
}
diff --git a/src/mongo/db/s/sharding_state.h b/src/mongo/db/s/sharding_state.h
index d9acc7a7eb9..bd06604b79f 100644
--- a/src/mongo/db/s/sharding_state.h
+++ b/src/mongo/db/s/sharding_state.h
@@ -259,8 +259,13 @@ public:
* on disk, if there is one.
* If started with --shardsvr in queryableBackupMode, initializes sharding awareness from the
* shardIdentity document passed through the --overrideShardIdentity startup parameter.
+ *
+ * If this method returns true, the ShardingState::_globalInit method was called, meaning all
+ * the core classes for sharding were initialized, but no networking calls were made yet (with
+ * the exception of the duplicate ShardRegistry reload in ShardRegistry::startup(); see
+ * SERVER-26123). Outgoing networking calls to cluster members can now be made.
*/
- Status initializeShardingAwarenessIfNeeded(OperationContext* txn);
+ StatusWith<bool> initializeShardingAwarenessIfNeeded(OperationContext* txn);
/**
* Check if a command is one of the whitelisted commands that can be accepted with shardVersion
diff --git a/src/mongo/db/s/sharding_state_test.cpp b/src/mongo/db/s/sharding_state_test.cpp
index 5367ea1933e..1faaa5c839f 100644
--- a/src/mongo/db/s/sharding_state_test.cpp
+++ b/src/mongo/db/s/sharding_state_test.cpp
@@ -31,18 +31,21 @@
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/client/replica_set_monitor.h"
+#include "mongo/db/dbdirectclient.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/type_shard_identity.h"
#include "mongo/db/server_options.h"
#include "mongo/db/service_context_noop.h"
+#include "mongo/db/storage/storage_options.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/sharding_mongod_test_fixture.h"
-#include "mongo/util/clock_source_mock.h"
-#include "mongo/util/tick_source_mock.h"
namespace mongo {
+using executor::RemoteCommandRequest;
+
class ShardingStateTest : public MongodTestFixture {
public:
ShardingState* shardingState() {
@@ -50,14 +53,13 @@ public:
}
protected:
+ // Used to write to local collections in order to set them up before exercising server logic.
+ std::unique_ptr<DBDirectClient> _dbDirectClient;
+
void setUp() override {
+ serverGlobalParams.clusterRole = ClusterRole::None;
MongodTestFixture::setUp();
- auto serviceContext = getServiceContext();
- serviceContext->setFastClockSource(stdx::make_unique<ClockSourceMock>());
- serviceContext->setPreciseClockSource(stdx::make_unique<ClockSourceMock>());
- serviceContext->setTickSource(stdx::make_unique<TickSourceMock>());
-
// When sharding initialization is triggered, initialize sharding state as a shard server.
serverGlobalParams.clusterRole = ClusterRole::ShardServer;
_shardingState.setGlobalInitMethodForTest([&](OperationContext* txn,
@@ -75,11 +77,20 @@ protected:
return Status::OK();
});
+
+ _dbDirectClient = stdx::make_unique<DBDirectClient>(operationContext());
}
void tearDown() override {
+ _dbDirectClient.reset();
+
+ // Some test cases modify the readOnly value, but the teardown calls below depend on
+ // readOnly being false, so we reset the value here rather than in setUp().
+ storageGlobalParams.readOnly = false;
+
// ShardingState initialize can modify ReplicaSetMonitor state.
ReplicaSetMonitor::cleanup();
+
MongodTestFixture::tearDown();
}
@@ -276,4 +287,270 @@ TEST_F(ShardingStateTest, InitializeAgainWithDifferentClusterIdFails) {
ASSERT_EQ("config/a:1,b:2", shardingState()->getConfigServer(operationContext()).toString());
}
+
+// The below tests check for compatible startup parameters for --shardsvr, --overrideShardIdentity,
+// and queryableBackup (readOnly) mode.
+
+// readOnly and --shardsvr
+
+TEST_F(ShardingStateTest,
+ InitializeShardingAwarenessIfNeededReadOnlyAndShardServerAndNoOverrideShardIdentity) {
+ storageGlobalParams.readOnly = true;
+ serverGlobalParams.clusterRole = ClusterRole::ShardServer;
+ serverGlobalParams.overrideShardIdentity = BSONObj();
+
+ auto swShardingInitialized =
+ shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
+ ASSERT_EQUALS(ErrorCodes::InvalidOptions, swShardingInitialized.getStatus().code());
+}
+
+TEST_F(ShardingStateTest,
+ InitializeShardingAwarenessIfNeededReadOnlyAndShardServerAndInvalidOverrideShardIdentity) {
+ storageGlobalParams.readOnly = true;
+ serverGlobalParams.clusterRole = ClusterRole::ShardServer;
+ serverGlobalParams.overrideShardIdentity = BSON("_id"
+ << "shardIdentity"
+ << "configsvrConnectionString"
+ << "invalid");
+ auto swShardingInitialized =
+ shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
+ ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, swShardingInitialized.getStatus().code());
+}
+
+TEST_F(ShardingStateTest,
+ InitializeShardingAwarenessIfNeededReadOnlyAndShardServerAndValidOverrideShardIdentity) {
+ storageGlobalParams.readOnly = true;
+ serverGlobalParams.clusterRole = ClusterRole::ShardServer;
+
+ ShardIdentityType shardIdentity;
+ shardIdentity.setConfigsvrConnString(
+ ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
+ shardIdentity.setShardName("a");
+ shardIdentity.setClusterId(OID::gen());
+ ASSERT_OK(shardIdentity.validate());
+ serverGlobalParams.overrideShardIdentity = shardIdentity.toBSON();
+
+ auto swShardingInitialized =
+ shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
+ ASSERT_OK(swShardingInitialized);
+ ASSERT_TRUE(swShardingInitialized.getValue());
+}
+
+// readOnly and not --shardsvr
+
+TEST_F(ShardingStateTest,
+ InitializeShardingAwarenessIfNeededReadOnlyAndNotShardServerAndNoOverrideShardIdentity) {
+ storageGlobalParams.readOnly = true;
+ serverGlobalParams.clusterRole = ClusterRole::None;
+ serverGlobalParams.overrideShardIdentity = BSONObj();
+
+ auto swShardingInitialized =
+ shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
+ ASSERT_OK(swShardingInitialized);
+ ASSERT_FALSE(swShardingInitialized.getValue());
+}
+
+TEST_F(
+ ShardingStateTest,
+ InitializeShardingAwarenessIfNeededReadOnlyAndNotShardServerAndInvalidOverrideShardIdentity) {
+ storageGlobalParams.readOnly = true;
+ serverGlobalParams.clusterRole = ClusterRole::None;
+ serverGlobalParams.overrideShardIdentity = BSON("_id"
+ << "shardIdentity"
+ << "configsvrConnectionString"
+ << "invalid");
+
+ auto swShardingInitialized =
+ shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
+ ASSERT_EQUALS(ErrorCodes::InvalidOptions, swShardingInitialized.getStatus().code());
+}
+
+TEST_F(ShardingStateTest,
+ InitializeShardingAwarenessIfNeededReadOnlyAndNotShardServerAndValidOverrideShardIdentity) {
+ storageGlobalParams.readOnly = true;
+ serverGlobalParams.clusterRole = ClusterRole::None;
+
+ ShardIdentityType shardIdentity;
+ shardIdentity.setConfigsvrConnString(
+ ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
+ shardIdentity.setShardName("a");
+ shardIdentity.setClusterId(OID::gen());
+ ASSERT_OK(shardIdentity.validate());
+ serverGlobalParams.overrideShardIdentity = shardIdentity.toBSON();
+
+ auto swShardingInitialized =
+ shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
+ ASSERT_EQUALS(ErrorCodes::InvalidOptions, swShardingInitialized.getStatus().code());
+}
+
+// not readOnly and --overrideShardIdentity
+
+TEST_F(ShardingStateTest,
+ InitializeShardingAwarenessIfNeededNotReadOnlyAndInvalidOverrideShardIdentity) {
+ storageGlobalParams.readOnly = false;
+ serverGlobalParams.overrideShardIdentity = BSON("_id"
+ << "shardIdentity"
+ << "configsvrConnectionString"
+ << "invalid");
+
+ // Should error regardless of cluster role.
+
+ serverGlobalParams.clusterRole = ClusterRole::ShardServer;
+ auto swShardingInitialized =
+ shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
+ ASSERT_EQUALS(ErrorCodes::InvalidOptions, swShardingInitialized.getStatus().code());
+
+ serverGlobalParams.clusterRole = ClusterRole::None;
+ swShardingInitialized =
+ shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
+ ASSERT_EQUALS(ErrorCodes::InvalidOptions, swShardingInitialized.getStatus().code());
+}
+
+TEST_F(ShardingStateTest,
+ InitializeShardingAwarenessIfNeededNotReadOnlyAndValidOverrideShardIdentity) {
+ storageGlobalParams.readOnly = false;
+
+ ShardIdentityType shardIdentity;
+ shardIdentity.setConfigsvrConnString(
+ ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
+ shardIdentity.setShardName("a");
+ shardIdentity.setClusterId(OID::gen());
+ ASSERT_OK(shardIdentity.validate());
+ serverGlobalParams.overrideShardIdentity = shardIdentity.toBSON();
+
+ // Should error regardless of cluster role.
+
+ serverGlobalParams.clusterRole = ClusterRole::ShardServer;
+ auto swShardingInitialized =
+ shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
+ ASSERT_EQUALS(ErrorCodes::InvalidOptions, swShardingInitialized.getStatus().code());
+
+ serverGlobalParams.clusterRole = ClusterRole::None;
+ swShardingInitialized =
+ shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
+ ASSERT_EQUALS(ErrorCodes::InvalidOptions, swShardingInitialized.getStatus().code());
+}
+
+// not readOnly and --shardsvr
+
+TEST_F(ShardingStateTest,
+ InitializeShardingAwarenessIfNeededNotReadOnlyAndShardServerAndNoShardIdentity) {
+ storageGlobalParams.readOnly = false;
+ serverGlobalParams.clusterRole = ClusterRole::ShardServer;
+ serverGlobalParams.overrideShardIdentity = BSONObj();
+ auto swShardingInitialized =
+ shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
+ ASSERT_OK(swShardingInitialized);
+ ASSERT_FALSE(swShardingInitialized.getValue());
+}
+
+TEST_F(ShardingStateTest,
+ InitializeShardingAwarenessIfNeededNotReadOnlyAndShardServerAndInvalidShardIdentity) {
+
+ // Insert the shardIdentity doc to disk before setting the clusterRole, since if the clusterRole
+ // is ShardServer, the OpObserver for inserts will prevent the insert from occurring, since the
+ // shardIdentity doc is invalid.
+ serverGlobalParams.clusterRole = ClusterRole::None;
+ BSONObj invalidShardIdentity = BSON("_id"
+ << "shardIdentity"
+ << "configsvrConnectionString"
+ << "invalid");
+ _dbDirectClient->insert(NamespaceString::kConfigCollectionNamespace.toString(),
+ invalidShardIdentity);
+
+ storageGlobalParams.readOnly = false;
+ serverGlobalParams.clusterRole = ClusterRole::ShardServer;
+ serverGlobalParams.overrideShardIdentity = BSONObj();
+
+ auto swShardingInitialized =
+ shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
+ ASSERT_EQUALS(ErrorCodes::UnsupportedFormat, swShardingInitialized.getStatus().code());
+}
+
+
+TEST_F(ShardingStateTest,
+ InitializeShardingAwarenessIfNeededNotReadOnlyAndShardServerAndValidShardIdentity) {
+
+ // Insert the shardIdentity doc to disk before setting the clusterRole, since if the clusterRole
+ // is ShardServer, the OpObserver for inserts will trigger sharding initialization from the
+ // inserted doc.
+ serverGlobalParams.clusterRole = ClusterRole::None;
+
+ ShardIdentityType shardIdentity;
+ shardIdentity.setConfigsvrConnString(
+ ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
+ shardIdentity.setShardName("a");
+ shardIdentity.setClusterId(OID::gen());
+ ASSERT_OK(shardIdentity.validate());
+ BSONObj validShardIdentity = shardIdentity.toBSON();
+
+ _dbDirectClient->insert(NamespaceString::kConfigCollectionNamespace.toString(),
+ validShardIdentity);
+
+ storageGlobalParams.readOnly = false;
+ serverGlobalParams.clusterRole = ClusterRole::ShardServer;
+ serverGlobalParams.overrideShardIdentity = BSONObj();
+
+ auto swShardingInitialized =
+ shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
+ ASSERT_OK(swShardingInitialized);
+ ASSERT_TRUE(swShardingInitialized.getValue());
+}
+
+// not readOnly and not --shardsvr
+
+TEST_F(ShardingStateTest,
+ InitializeShardingAwarenessIfNeededNotReadOnlyAndNotShardServerAndNoShardIdentity) {
+ storageGlobalParams.readOnly = false;
+ serverGlobalParams.clusterRole = ClusterRole::None;
+ serverGlobalParams.overrideShardIdentity = BSONObj();
+ auto swShardingInitialized =
+ shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
+ ASSERT_OK(swShardingInitialized);
+ ASSERT_FALSE(swShardingInitialized.getValue());
+}
+
+TEST_F(ShardingStateTest,
+ InitializeShardingAwarenessIfNeededNotReadOnlyAndNotShardServerAndInvalidShardIdentity) {
+ _dbDirectClient->insert(NamespaceString::kConfigCollectionNamespace.toString(),
+ BSON("_id"
+ << "shardIdentity"
+ << "configsvrConnectionString"
+ << "invalid"));
+
+ storageGlobalParams.readOnly = false;
+ serverGlobalParams.clusterRole = ClusterRole::None;
+ serverGlobalParams.overrideShardIdentity = BSONObj();
+
+ // The shardIdentity doc on disk, even if invalid, is ignored if ClusterRole is None.
+ // This is to allow fixing the shardIdentity doc by starting without --shardsvr.
+ auto swShardingInitialized =
+ shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
+ ASSERT_OK(swShardingInitialized);
+ ASSERT_FALSE(swShardingInitialized.getValue());
+}
+
+TEST_F(ShardingStateTest,
+ InitializeShardingAwarenessIfNeededNotReadOnlyAndNotShardServerAndValidShardIdentity) {
+ storageGlobalParams.readOnly = false;
+ serverGlobalParams.clusterRole = ClusterRole::None;
+ serverGlobalParams.overrideShardIdentity = BSONObj();
+
+ ShardIdentityType shardIdentity;
+ shardIdentity.setConfigsvrConnString(
+ ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
+ shardIdentity.setShardName("a");
+ shardIdentity.setClusterId(OID::gen());
+ ASSERT_OK(shardIdentity.validate());
+ BSONObj validShardIdentity = shardIdentity.toBSON();
+
+ _dbDirectClient->insert(NamespaceString::kConfigCollectionNamespace.toString(),
+ validShardIdentity);
+
+ // The shardIdentity doc on disk is ignored if ClusterRole is None.
+ auto swShardingInitialized =
+ shardingState()->initializeShardingAwarenessIfNeeded(operationContext());
+ ASSERT_OK(swShardingInitialized);
+ ASSERT_FALSE(swShardingInitialized.getValue());
+}
} // namespace mongo