author     Shane Harvey <shane.harvey@mongodb.com>    2016-03-25 17:13:17 -0400
committer  Shane Harvey <shane.harvey@mongodb.com>    2016-04-13 14:19:55 -0400
commit     26b55942cc467bca2cc2b935e517b443cf16c550 (patch)
tree       770b6987c9ffa3796135a735deeb9299d842ba5c
parent     0b490582031c2be63239ac0885801739946a2a78 (diff)
download   mongo-26b55942cc467bca2cc2b935e517b443cf16c550.tar.gz
SERVER-6823 Enable access control without downtime.
Add a --tryClusterAuth flag that enables communication between nodes running with and without auth.
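
The rolling-upgrade flow this enables, sketched in the style of the new tests (a non-authoritative outline that mirrors jstests/auth/upgrade_noauth_to_keyfile.js added below; it assumes the shell's ReplSetTest helper and the upgradeSet function from jstests/multiVersion/libs/multi_rs.js):

    load('jstests/multiVersion/libs/multi_rs.js');  // provides ReplSetTest.upgradeSet

    // Start a replica set with auth explicitly disabled.
    var rst = new ReplSetTest({name: 'noauthSet', nodes: 3, nodeOptions: {noauth: ''}});
    rst.startSet();
    rst.initiate();

    // Step 1: rolling restart with a keyFile plus --tryClusterAuth. Each node attempts
    // internal authentication on outgoing connections but proceeds even if it fails, and
    // accepts incoming connections with or without auth, so mixed members keep talking.
    rst.upgradeSet(
        {noauth: undefined, keyFile: 'jstests/libs/key1', clusterAuthMode: 'keyFile', tryClusterAuth: ''});

    // Step 2: rolling restart again with the keyFile alone; access control is now fully
    // enforced, so upgradeSet logs back in as a previously created 'root' user.
    rst.upgradeSet(
        {tryClusterAuth: undefined, keyFile: 'jstests/libs/key1', clusterAuthMode: 'keyFile'},
        'root', 'root');
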
-rw-r--r--  jstests/auth/copyauth.js                                  112
-rw-r--r--  jstests/auth/upgrade_noauth_to_keyfile.js                  56
-rw-r--r--  jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js    37
-rw-r--r--  jstests/ssl/libs/ssl_helpers.js                            53
-rw-r--r--  jstests/ssl/upgrade_allowssl_noauth_to_x509_ssl.js         45
-rw-r--r--  src/mongo/client/authenticate.cpp                          29
-rw-r--r--  src/mongo/client/connection_pool.cpp                        7
-rw-r--r--  src/mongo/db/auth/internal_user_auth.h                      4
-rw-r--r--  src/mongo/db/cloner.cpp                                     3
-rw-r--r--  src/mongo/db/initialize_server_global_state.cpp             7
-rw-r--r--  src/mongo/db/repl/isself.cpp                                6
-rw-r--r--  src/mongo/db/repl/oplogreader.cpp                          12
-rw-r--r--  src/mongo/db/server_options.h                               1
-rw-r--r--  src/mongo/db/server_options_helpers.cpp                    29
-rw-r--r--  src/mongo/s/client/sharding_connection_hook.cpp             2
-rw-r--r--  src/mongo/shell/shardingtest.js                            43
16 files changed, 358 insertions, 88 deletions
diff --git a/jstests/auth/copyauth.js b/jstests/auth/copyauth.js
index 3627038abbd..2ba4466dac6 100644
--- a/jstests/auth/copyauth.js
+++ b/jstests/auth/copyauth.js
@@ -14,6 +14,8 @@ var baseName = "jstests_clone_copyauth";
*
* clusterType - type of cluster to start. Options are "sharded", "repl", or "single".
* startWithAuth - whether to start the cluster with authentication.
+ * startWithTryClusterAuth - whether to start the cluster with --tryClusterAuth (startWithAuth must
+ * also be true).
*
* Member variables:
*
@@ -26,18 +28,22 @@ var baseName = "jstests_clone_copyauth";
*
* stop() - stop and cleanup whatever nodes the helper spawned when it was created.
*/
-function ClusterSpawnHelper(clusterType, startWithAuth) {
+function ClusterSpawnHelper(clusterType, startWithAuth, startWithTryClusterAuth) {
+ var singleNodeConfig = {};
+ if (startWithAuth) {
+ singleNodeConfig.keyFile = "jstests/libs/key1";
+ if (startWithTryClusterAuth) {
+ singleNodeConfig.tryClusterAuth = "";
+ }
+ }
if (clusterType === "sharded") {
var shardingTestConfig = {
name: baseName + "_source",
- mongos: 1,
- shards: 1,
- config: 1
+ keyFile: singleNodeConfig.keyFile,
+ mongos: [singleNodeConfig],
+ shards: [singleNodeConfig],
+ config: [singleNodeConfig]
};
- if (startWithAuth) {
- shardingTestConfig.auth = "";
- shardingTestConfig.keyFile = "jstests/libs/key1";
- }
var shardingTest = new ShardingTest(shardingTestConfig);
this.conn = shardingTest.s;
this.connString = this.conn.host;
@@ -45,12 +51,8 @@ function ClusterSpawnHelper(clusterType, startWithAuth) {
var replSetTestConfig = {
name: baseName + "_source",
nodes: 3,
- nodeOptions: {}
+ nodeOptions: singleNodeConfig
};
- if (startWithAuth) {
- replSetTestConfig.nodeOptions.auth = "";
- replSetTestConfig.nodeOptions.keyFile = "jstests/libs/key1";
- }
var replSetTest = new ReplSetTest(replSetTestConfig);
replSetTest.startSet();
replSetTest.initiate();
@@ -66,10 +68,6 @@ function ClusterSpawnHelper(clusterType, startWithAuth) {
this.conn = replSetTest.getPrimary();
this.connString = replSetTest.getURL();
} else {
- var singleNodeConfig = {};
- if (startWithAuth) {
- singleNodeConfig.auth = "";
- }
this.conn = MongoRunner.runMongod(singleNodeConfig);
this.connString = this.conn.host;
}
@@ -102,8 +100,14 @@ function ClusterSpawnHelper(clusterType, startWithAuth) {
*/
function copydbBetweenClustersTest(configObj) {
// First sanity check the arguments in our configObj
- var requiredKeys =
- ['sourceClusterType', 'isSourceUsingAuth', 'targetClusterType', 'isTargetUsingAuth'];
+ var requiredKeys = [
+ 'sourceClusterType',
+ 'isSourceUsingAuth',
+ 'targetClusterType',
+ 'isTargetUsingAuth',
+ 'isSourceUsingTryClusterAuth',
+ 'isTargetUsingTryClusterAuth'
+ ];
var i;
for (i = 0; i < requiredKeys.length; i++) {
@@ -112,7 +116,9 @@ function copydbBetweenClustersTest(configObj) {
}
// 1. Get a connection to the source database, insert data and setup auth if applicable
- source = new ClusterSpawnHelper(configObj.sourceClusterType, configObj.isSourceUsingAuth);
+ source = new ClusterSpawnHelper(configObj.sourceClusterType,
+ configObj.isSourceUsingAuth,
+ configObj.isSourceUsingTryClusterAuth);
if (configObj.isSourceUsingAuth) {
// Create a super user so we can create a regular user and not be locked out afterwards
@@ -129,9 +135,16 @@ function copydbBetweenClustersTest(configObj) {
source.conn.getDB(baseName).createUser({user: "foo", pwd: "bar", roles: ["dbOwner"]});
source.conn.getDB("admin").logout();
- assert.throws(function() {
+
+ var readWhenLoggedOut = function() {
source.conn.getDB(baseName)[baseName].findOne();
- });
+ };
+ if (configObj.isSourceUsingTryClusterAuth) {
+ // tryClusterAuth does not turn on access control
+ assert.doesNotThrow(readWhenLoggedOut);
+ } else {
+ assert.throws(readWhenLoggedOut);
+ }
} else {
source.conn.getDB(baseName)[baseName].save({i: 1});
assert.eq(1, source.conn.getDB(baseName)[baseName].count());
@@ -139,14 +152,24 @@ function copydbBetweenClustersTest(configObj) {
}
// 2. Get a connection to the target database, and set up auth if necessary
- target = new ClusterSpawnHelper(configObj.targetClusterType, configObj.isTargetUsingAuth);
+ target = new ClusterSpawnHelper(configObj.targetClusterType,
+ configObj.isTargetUsingAuth,
+ configObj.isTargetUsingTryClusterAuth);
if (configObj.isTargetUsingAuth) {
target.conn.getDB("admin")
.createUser({user: "targetSuperUser", pwd: "targetSuperUser", roles: ["root"]});
- assert.throws(function() {
+
+ var readWhenLoggedOut = function() {
target.conn.getDB(baseName)[baseName].findOne();
- });
+ };
+ if (configObj.isTargetUsingTryClusterAuth) {
+ // tryClusterAuth does not turn on access control
+ assert.doesNotThrow(readWhenLoggedOut);
+ } else {
+ assert.throws(readWhenLoggedOut);
+ }
+
target.conn.getDB("admin").auth("targetSuperUser", "targetSuperUser");
}
@@ -176,8 +199,10 @@ function copydbBetweenClustersTest(configObj) {
var sourceClusterTypeValues = ["single", "repl", "sharded"];
var isSourceUsingAuthValues = [true, false];
+ var isSourceUsingTryClusterAuthValues = [true, false];
var targetClusterTypeValues = ["single", "repl", "sharded"];
var isTargetUsingAuthValues = [true, false];
+ var isTargetUsingTryClusterAuthValues = [true, false];
for (var i = 0; i < sourceClusterTypeValues.length; i++) {
for (var j = 0; j < isSourceUsingAuthValues.length; j++) {
for (var k = 0; k < targetClusterTypeValues.length; k++) {
@@ -200,19 +225,36 @@ function copydbBetweenClustersTest(configObj) {
if (sourceClusterTypeValues[i] === "repl" &&
isSourceUsingAuthValues[j] === false &&
targetClusterTypeValues[k] === "sharded" &&
- isTargetUsingAuthValues[l] == true) {
+ isTargetUsingAuthValues[l] === true) {
// SERVER-18103
continue;
}
- var testCase = {
- 'sourceClusterType': sourceClusterTypeValues[i],
- 'isSourceUsingAuth': isSourceUsingAuthValues[j],
- 'targetClusterType': targetClusterTypeValues[k],
- 'isTargetUsingAuth': isTargetUsingAuthValues[l]
- };
- print("Running copydb with auth test:");
- printjson(testCase);
- copydbBetweenClustersTest(testCase);
+
+ for (var m = 0; m < isSourceUsingTryClusterAuthValues.length; m++) {
+ if (isSourceUsingTryClusterAuthValues[m] === true &&
+ isSourceUsingAuthValues[j] === false) {
+ // tryClusterAuth requires auth parameters
+ continue;
+ }
+ for (var n = 0; n < isTargetUsingTryClusterAuthValues.length; n++) {
+ if (isTargetUsingTryClusterAuthValues[n] === true &&
+ isTargetUsingAuthValues[l] === false) {
+ // tryClusterAuth requires auth parameters
+ continue;
+ }
+ var testCase = {
+ 'sourceClusterType': sourceClusterTypeValues[i],
+ 'isSourceUsingAuth': isSourceUsingAuthValues[j],
+ 'targetClusterType': targetClusterTypeValues[k],
+ 'isTargetUsingAuth': isTargetUsingAuthValues[l],
+ 'isSourceUsingTryClusterAuth': isSourceUsingTryClusterAuthValues[m],
+ 'isTargetUsingTryClusterAuth': isTargetUsingTryClusterAuthValues[n]
+ };
+ print("Running copydb with auth test:");
+ printjson(testCase);
+ copydbBetweenClustersTest(testCase);
+ }
+ }
}
}
}
diff --git a/jstests/auth/upgrade_noauth_to_keyfile.js b/jstests/auth/upgrade_noauth_to_keyfile.js
new file mode 100644
index 00000000000..2e22ad797e6
--- /dev/null
+++ b/jstests/auth/upgrade_noauth_to_keyfile.js
@@ -0,0 +1,56 @@
+/**
+ * This test checks the upgrade path from noauth to keyFile.
+ */
+
+load('jstests/multiVersion/libs/multi_rs.js');
+
+(function() {
+ 'use strict';
+ var keyFilePath = 'jstests/libs/key1';
+
+ // Disable auth explicitly
+ var noAuthOptions = {
+ noauth: ''
+ };
+
+ // Undefine the flags we're replacing, otherwise upgradeSet will keep old values.
+ var tryClusterAuthOptions = {
+ noauth: undefined,
+ clusterAuthMode: 'keyFile',
+ keyFile: keyFilePath,
+ tryClusterAuth: ''
+ };
+ var keyFileOptions = {
+ clusterAuthMode: 'keyFile',
+ keyFile: keyFilePath,
+ tryClusterAuth: undefined
+ };
+
+ var rst = new ReplSetTest({name: 'noauthSet', nodes: 3, nodeOptions: noAuthOptions});
+ rst.startSet();
+ rst.initiate();
+
+ var rstConn1 = rst.getPrimary();
+
+ // Create a user to login as when auth is enabled later
+ rstConn1.getDB('admin').createUser({user: 'root', pwd: 'root', roles: ['root']});
+
+ rstConn1.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'});
+ assert.eq(1, rstConn1.getDB('test').a.count(), 'Error interacting with replSet');
+
+ print('=== UPGRADE noauth -> tryClusterAuth/keyFile ===');
+ rst.upgradeSet(tryClusterAuthOptions);
+ var rstConn2 = rst.getPrimary();
+ rstConn2.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'});
+ assert.eq(2, rstConn2.getDB('test').a.count(), 'Error interacting with replSet');
+
+ print('=== UPGRADE tryClusterAuth/keyFile -> keyFile ===');
+ rst.upgradeSet(keyFileOptions, 'root', 'root');
+
+ // upgradeSet leaves its connections logged in as root
+ var rstConn3 = rst.getPrimary();
+ rstConn3.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'});
+ assert.eq(3, rstConn3.getDB('test').a.count(), 'Error interacting with replSet');
+
+ rst.stopSet();
+}());
diff --git a/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js b/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js
new file mode 100644
index 00000000000..f36c545f551
--- /dev/null
+++ b/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js
@@ -0,0 +1,37 @@
+// Tests access control upgrade on a sharded cluster
+// The purpose is to verify connectivity between mongos, the config server, and the shards.
+
+load('jstests/ssl/libs/ssl_helpers.js');
+
+(function() {
+ 'use strict';
+
+ // Disable auth explicitly
+ var noAuthOptions = {
+ noauth: ''
+ };
+ var tryClusterAuthOptions = {
+ clusterAuthMode: 'keyFile',
+ keyFile: KEYFILE,
+ tryClusterAuth: ''
+ };
+ var keyFileOptions = {
+ clusterAuthMode: 'keyFile',
+ keyFile: KEYFILE
+ };
+
+ print('=== Testing no-auth/tryClusterAuth cluster ===');
+ mixedShardTest(noAuthOptions, tryClusterAuthOptions, true);
+ mixedShardTest(tryClusterAuthOptions, noAuthOptions, true);
+
+ print('=== Testing tryClusterAuth/tryClusterAuth cluster ===');
+ mixedShardTest(tryClusterAuthOptions, tryClusterAuthOptions, true);
+
+ print('=== Testing tryClusterAuth/keyFile cluster ===');
+ mixedShardTest(keyFileOptions, tryClusterAuthOptions, true);
+ mixedShardTest(tryClusterAuthOptions, keyFileOptions, true);
+
+ print('=== Testing no-auth/keyFile cluster fails ===');
+ mixedShardTest(noAuthOptions, keyFileOptions, false);
+ mixedShardTest(keyFileOptions, noAuthOptions, false);
+}());
diff --git a/jstests/ssl/libs/ssl_helpers.js b/jstests/ssl/libs/ssl_helpers.js
index 5fab2f1f030..8067076610e 100644
--- a/jstests/ssl/libs/ssl_helpers.js
+++ b/jstests/ssl/libs/ssl_helpers.js
@@ -57,21 +57,70 @@ var replShouldFail = function(name, opt1, opt2) {
};
/**
+ * Test that $lookup works with a sharded source collection. This is tested because of
+ * the connections opened between mongos/shards and between the shards themselves.
+ */
+function testShardedLookup(shardingTest) {
+ var st = shardingTest;
+ assert(st.adminCommand({enableSharding: "lookupTest"}),
+ "error enabling sharding for this configuration");
+ assert(st.adminCommand({shardCollection: "lookupTest.foo", key: {_id: "hashed"}}),
+ "error sharding collection for this configuration");
+
+ var lookupdb = st.getDB("lookupTest");
+
+ // insert a few docs to ensure there are documents on multiple shards.
+ var fooBulk = lookupdb.foo.initializeUnorderedBulkOp();
+ var barBulk = lookupdb.bar.initializeUnorderedBulkOp();
+ var lookupShouldReturn = [];
+ for (var i = 0; i < 64; i++) {
+ fooBulk.insert({_id: i});
+ barBulk.insert({_id: i});
+ lookupShouldReturn.push({_id: i, bar_docs: [{_id: i}]});
+ }
+ assert.writeOK(fooBulk.execute());
+ assert.writeOK(barBulk.execute());
+
+ var docs = lookupdb.foo.aggregate([
+ {$sort: {_id: 1}},
+ {$lookup: {from: "bar", localField: "_id", foreignField: "_id", as: "bar_docs"}}
+ ]).toArray();
+ assert.eq(lookupShouldReturn, docs, "error $lookup failed in this configuration");
+ assert.commandWorked(lookupdb.dropDatabase());
+}
+
+/**
* Takes in two mongod/mongos configuration options and runs a basic
* sharding test to see if they can work together...
*/
function mixedShardTest(options1, options2, shouldSucceed) {
try {
- var st = new ShardingTest(
- {mongos: [options1], config: [options1], shards: [options1, options2]});
+ // Start ShardingTest with enableBalancer because ShardingTest attempts to turn
+ // off the balancer otherwise, which it will not be authorized to do if auth is enabled.
+ // Once SERVER-14017 is fixed the "enableBalancer" line can be removed.
+ var st = new ShardingTest({
+ mongos: [options1],
+ config: [options1],
+ shards: [options1, options2],
+ other: {enableBalancer: true}
+ });
+
+ // Create admin user in case the options include auth
+ st.admin.createUser({user: 'admin', pwd: 'pwd', roles: ['root']});
+ st.admin.auth('admin', 'pwd');
+
st.stopBalancer();
+ // Test that $lookup works because it causes outgoing connections to be opened
+ testShardedLookup(st);
+
// Test mongos talking to config servers
var r = st.adminCommand({enableSharding: "test"});
assert.eq(r, true, "error enabling sharding for this configuration");
st.ensurePrimaryShard("test", "shard0000");
r = st.adminCommand({movePrimary: 'test', to: 'shard0001'});
+ assert.eq(r, true, "error movePrimary failed for this configuration");
var db1 = st.getDB("test");
r = st.adminCommand({shardCollection: "test.col", key: {_id: 1}});
diff --git a/jstests/ssl/upgrade_allowssl_noauth_to_x509_ssl.js b/jstests/ssl/upgrade_allowssl_noauth_to_x509_ssl.js
new file mode 100644
index 00000000000..d4047b67173
--- /dev/null
+++ b/jstests/ssl/upgrade_allowssl_noauth_to_x509_ssl.js
@@ -0,0 +1,45 @@
+/**
+ * This test checks the upgrade path from noauth/allowSSL to x509/requireSSL
+ */
+
+load('jstests/ssl/libs/ssl_helpers.js');
+
+(function() {
+ 'use strict';
+
+ // Disable auth explicitly
+ var noAuthAllowSSL = Object.merge(allowSSL, {noauth: ''});
+
+ // Undefine the flags we're replacing, otherwise upgradeSet will keep old values.
+ var tryX509preferSSL =
+ Object.merge(preferSSL, {noauth: undefined, tryClusterAuth: '', clusterAuthMode: 'x509'});
+ var x509RequireSSL =
+ Object.merge(requireSSL, {tryClusterAuth: undefined, clusterAuthMode: 'x509'});
+
+ var rst = new ReplSetTest({name: 'noauthSet', nodes: 3, nodeOptions: noAuthAllowSSL});
+ rst.startSet();
+ rst.initiate();
+
+ var rstConn1 = rst.getPrimary();
+ // Create a user to login when auth is enabled later
+ rstConn1.getDB('admin').createUser({user: 'root', pwd: 'root', roles: ['root']});
+
+ rstConn1.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'});
+ assert.eq(1, rstConn1.getDB('test').a.count(), 'Error interacting with replSet');
+
+ print('=== UPGRADE no-auth/allowSSL -> try X509/preferSSL ===');
+ rst.upgradeSet(tryX509preferSSL);
+ var rstConn2 = rst.getPrimary();
+ rstConn2.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'});
+ assert.eq(2, rstConn2.getDB('test').a.count(), 'Error interacting with replSet');
+
+ print('=== UPGRADE try X509/preferSSL -> X509/requireSSL ===');
+ rst.upgradeSet(x509RequireSSL, 'root', 'root');
+
+ // upgradeSet leaves its connections logged in as root
+ var rstConn3 = rst.getPrimary();
+ rstConn3.getDB('test').a.insert({a: 1, str: 'TESTTESTTEST'});
+ assert.eq(3, rstConn3.getDB('test').a.count(), 'Error interacting with replSet');
+
+ rst.stopSet();
+}());
diff --git a/src/mongo/client/authenticate.cpp b/src/mongo/client/authenticate.cpp
index 995095c1462..477ae40dbb9 100644
--- a/src/mongo/client/authenticate.cpp
+++ b/src/mongo/client/authenticate.cpp
@@ -37,7 +37,9 @@
#include "mongo/bson/util/bson_extract.h"
#include "mongo/client/sasl_client_authenticate.h"
#include "mongo/config.h"
+#include "mongo/db/server_options.h"
#include "mongo/rpc/get_status_from_command_result.h"
+#include "mongo/util/log.h"
#include "mongo/util/net/ssl_manager.h"
#include "mongo/util/net/ssl_options.h"
#include "mongo/util/password_digest.h"
@@ -239,12 +241,30 @@ void authX509(RunCommandHook runCommand,
// General Auth
//
+bool isFailedAuthOk(const AuthResponse& response) {
+ return (response == ErrorCodes::AuthenticationFailed && serverGlobalParams.tryClusterAuth);
+}
+
void auth(RunCommandHook runCommand,
const BSONObj& params,
StringData hostname,
StringData clientName,
AuthCompletionHandler handler) {
std::string mechanism;
+ auto authCompletionHandler = [handler](AuthResponse response) {
+ if (isFailedAuthOk(response)) {
+ // If auth failed in tryClusterAuth, just pretend it succeeded.
+ log() << "Failed to authenticate in tryClusterAuth, falling back to no "
+ "authentication.";
+
+ // We need to mock a successful AuthResponse.
+ return handler(
+ AuthResponse(RemoteCommandResponse(BSON("ok" << 1), BSONObj(), Milliseconds(0))));
+ }
+
+ // otherwise, call handler
+ return handler(std::move(response));
+ };
auto response = bsonExtractStringField(params, saslCommandMechanismFieldName, &mechanism);
if (!response.isOK())
return handler(std::move(response));
@@ -255,15 +275,15 @@ void auth(RunCommandHook runCommand,
}
if (mechanism == kMechanismMongoCR)
- return authMongoCR(runCommand, params, handler);
+ return authMongoCR(runCommand, params, authCompletionHandler);
#ifdef MONGO_CONFIG_SSL
else if (mechanism == kMechanismMongoX509)
- return authX509(runCommand, params, clientName, handler);
+ return authX509(runCommand, params, clientName, authCompletionHandler);
#endif
else if (saslClientAuthenticate != nullptr)
- return saslClientAuthenticate(runCommand, hostname, params, handler);
+ return saslClientAuthenticate(runCommand, hostname, params, authCompletionHandler);
return handler({ErrorCodes::AuthenticationFailed,
mechanism + " mechanism support not compiled into client library."});
@@ -286,12 +306,13 @@ void asyncAuth(RunCommandHook runCommand,
clientName,
[runCommand, params, hostname, clientName, handler](AuthResponse response) {
// If auth failed, try again with fallback params when appropriate
- if (needsFallback(response))
+ if (needsFallback(response)) {
return auth(runCommand,
std::move(getFallbackAuthParams(params)),
hostname,
clientName,
handler);
+ }
// otherwise, call handler
return handler(std::move(response));
diff --git a/src/mongo/client/connection_pool.cpp b/src/mongo/client/connection_pool.cpp
index 1f23c4d294c..8173cf393e3 100644
--- a/src/mongo/client/connection_pool.cpp
+++ b/src/mongo/client/connection_pool.cpp
@@ -187,11 +187,8 @@ ConnectionPool::ConnectionList::iterator ConnectionPool::acquireConnection(
uassertStatusOK(conn->connect(target));
conn->port().tag |= _messagingPortTags;
- if (getGlobalAuthorizationManager()->isAuthEnabled()) {
- uassert(ErrorCodes::AuthenticationFailed,
- "Missing credentials for authenticating as internal user",
- isInternalAuthSet());
- conn->auth(getInternalUserAuthParamsWithFallback());
+ if (isInternalAuthSet()) {
+ conn->authenticateInternalUser();
}
if (_hook) {
diff --git a/src/mongo/db/auth/internal_user_auth.h b/src/mongo/db/auth/internal_user_auth.h
index 6bec9404985..edb8480232d 100644
--- a/src/mongo/db/auth/internal_user_auth.h
+++ b/src/mongo/db/auth/internal_user_auth.h
@@ -32,7 +32,9 @@ namespace mongo {
class BSONObj;
/**
- * @return true if internal authentication parameters has been set up
+ * @return true if internal authentication parameters have been set up. Note this does not
+ * imply that auth is enabled. For instance, with the --tryClusterAuth flag this will
+ * be set and auth will be disabled.
*/
bool isInternalAuthSet();
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index cce155af8cd..e7789c2ac92 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -470,8 +470,7 @@ Status Cloner::copyDb(OperationContext* txn,
return Status(ErrorCodes::HostUnreachable, errmsg);
}
- if (getGlobalAuthorizationManager()->isAuthEnabled() &&
- !con->authenticateInternalUser()) {
+ if (isInternalAuthSet() && !con->authenticateInternalUser()) {
return Status(ErrorCodes::AuthenticationFailed,
"Unable to authenticate as internal user");
}
diff --git a/src/mongo/db/initialize_server_global_state.cpp b/src/mongo/db/initialize_server_global_state.cpp
index 1345b5b8aca..5fa705d383b 100644
--- a/src/mongo/db/initialize_server_global_state.cpp
+++ b/src/mongo/db/initialize_server_global_state.cpp
@@ -350,9 +350,10 @@ bool initializeServerGlobalState() {
}
}
- // Auto-enable auth except if clusterAuthMode is not set.
- // clusterAuthMode is automatically set if a --keyFile parameter is provided.
- if (clusterAuthMode != ServerGlobalParams::ClusterAuthMode_undefined) {
+ // Auto-enable auth unless we are in mixed auth/no-auth or clusterAuthMode was not provided.
+ // clusterAuthMode defaults to "keyFile" if a --keyFile parameter is provided.
+ if (clusterAuthMode != ServerGlobalParams::ClusterAuthMode_undefined &&
+ !serverGlobalParams.tryClusterAuth) {
getGlobalAuthorizationManager()->setAuthEnabled(true);
}
diff --git a/src/mongo/db/repl/isself.cpp b/src/mongo/db/repl/isself.cpp
index a4fc217481d..87cba6fe03b 100644
--- a/src/mongo/db/repl/isself.cpp
+++ b/src/mongo/db/repl/isself.cpp
@@ -202,10 +202,8 @@ bool isSelf(const HostAndPort& hostAndPort) {
return false;
}
- if (getGlobalAuthorizationManager()->isAuthEnabled() && isInternalAuthSet()) {
- if (!conn.authenticateInternalUser()) {
- return false;
- }
+ if (isInternalAuthSet() && !conn.authenticateInternalUser()) {
+ return false;
}
BSONObj out;
bool ok = conn.simpleCommand("admin", &out, "_isSelf");
diff --git a/src/mongo/db/repl/oplogreader.cpp b/src/mongo/db/repl/oplogreader.cpp
index c1b6e37f82f..28401834b65 100644
--- a/src/mongo/db/repl/oplogreader.cpp
+++ b/src/mongo/db/repl/oplogreader.cpp
@@ -68,12 +68,11 @@ static ServerStatusMetricField<Counter64> displayReadersCreated("repl.network.re
bool replAuthenticate(DBClientBase* conn) {
- if (!getGlobalAuthorizationManager()->isAuthEnabled())
- return true;
-
- if (!isInternalAuthSet())
+ if (isInternalAuthSet())
+ return conn->authenticateInternalUser();
+ if (getGlobalAuthorizationManager()->isAuthEnabled())
return false;
- return conn->authenticateInternalUser();
+ return true;
}
const Seconds OplogReader::kSocketTimeout(30);
@@ -94,8 +93,7 @@ bool OplogReader::connect(const HostAndPort& host) {
_conn = shared_ptr<DBClientConnection>(
new DBClientConnection(false, durationCount<Seconds>(kSocketTimeout)));
string errmsg;
- if (!_conn->connect(host, errmsg) ||
- (getGlobalAuthorizationManager()->isAuthEnabled() && !replAuthenticate(_conn.get()))) {
+ if (!_conn->connect(host, errmsg) || !replAuthenticate(_conn.get())) {
resetConnection();
error() << errmsg << endl;
return false;
diff --git a/src/mongo/db/server_options.h b/src/mongo/db/server_options.h
index 9407d40a5b5..2e0e4acc180 100644
--- a/src/mongo/db/server_options.h
+++ b/src/mongo/db/server_options.h
@@ -110,6 +110,7 @@ struct ServerGlobalParams {
AuthState authState = AuthState::kUndefined;
+ bool tryClusterAuth = false; // --tryClusterAuth, mixed mode for rolling auth upgrade
AtomicInt32 clusterAuthMode; // --clusterAuthMode, the internal cluster auth mode
enum ClusterAuthModes {
diff --git a/src/mongo/db/server_options_helpers.cpp b/src/mongo/db/server_options_helpers.cpp
index 0b93c4bd3f8..98fa47f8400 100644
--- a/src/mongo/db/server_options_helpers.cpp
+++ b/src/mongo/db/server_options_helpers.cpp
@@ -276,6 +276,7 @@ Status addGeneralServerOptions(moe::OptionSection* options) {
.setSources(moe::SourceAllLegacy)
.incompatibleWith("auth")
.incompatibleWith("keyFile")
+ .incompatibleWith("tryClusterAuth")
.incompatibleWith("clusterAuthMode");
options->addOptionChaining(
@@ -295,6 +296,16 @@ Status addGeneralServerOptions(moe::OptionSection* options) {
.setSources(moe::SourceYAMLConfig);
options->addOptionChaining(
+ "security.tryClusterAuth",
+ "tryClusterAuth",
+ moe::Switch,
+ "For rolling access control upgrade. Attempt to authenticate over outgoing "
+ "connections and proceed regardless of success. Accept incoming connections "
+ "with or without authentication.")
+ .setSources(moe::SourceAllLegacy)
+ .incompatibleWith("noauth");
+
+ options->addOptionChaining(
"security.clusterAuthMode",
"clusterAuthMode",
moe::String,
@@ -787,6 +798,10 @@ Status storeServerOptions(const moe::Environment& params, const std::vector<std:
serverGlobalParams.isHttpInterfaceEnabled = params["net.http.enabled"].as<bool>();
}
+ if (params.count("security.tryClusterAuth")) {
+ serverGlobalParams.tryClusterAuth = params["security.tryClusterAuth"].as<bool>();
+ }
+
if (params.count("security.clusterAuthMode")) {
std::string clusterAuthMode = params["security.clusterAuthMode"].as<std::string>();
@@ -952,8 +967,9 @@ Status storeServerOptions(const moe::Environment& params, const std::vector<std:
boost::filesystem::absolute(params["security.keyFile"].as<string>()).generic_string();
}
- if (params.count("security.authorization") &&
- params["security.authorization"].as<std::string>() == "disabled") {
+ if (serverGlobalParams.tryClusterAuth ||
+ (params.count("security.authorization") &&
+ params["security.authorization"].as<std::string>() == "disabled")) {
serverGlobalParams.authState = ServerGlobalParams::AuthState::kDisabled;
} else if (params.count("security.authorization") &&
params["security.authorization"].as<std::string>() == "enabled") {
@@ -994,10 +1010,17 @@ Status storeServerOptions(const moe::Environment& params, const std::vector<std:
}
}
}
+
if (!params.count("security.clusterAuthMode") && params.count("security.keyFile")) {
serverGlobalParams.clusterAuthMode.store(ServerGlobalParams::ClusterAuthMode_keyFile);
}
-
+ int clusterAuthMode = serverGlobalParams.clusterAuthMode.load();
+ if (serverGlobalParams.tryClusterAuth &&
+ (clusterAuthMode != ServerGlobalParams::ClusterAuthMode_keyFile &&
+ clusterAuthMode != ServerGlobalParams::ClusterAuthMode_x509)) {
+ return Status(ErrorCodes::BadValue,
+ "--tryClusterAuth must be used with keyFile or x509 authentication");
+ }
#ifdef MONGO_CONFIG_SSL
ret = storeSSLServerOptions(params);
if (!ret.isOK()) {
diff --git a/src/mongo/s/client/sharding_connection_hook.cpp b/src/mongo/s/client/sharding_connection_hook.cpp
index 270bcd4320a..b838c50e10c 100644
--- a/src/mongo/s/client/sharding_connection_hook.cpp
+++ b/src/mongo/s/client/sharding_connection_hook.cpp
@@ -60,7 +60,7 @@ void ShardingConnectionHook::onCreate(DBClientBase* conn) {
// Authenticate as the first thing we do
// NOTE: Replica set authentication allows authentication against *any* online host
- if (getGlobalAuthorizationManager()->isAuthEnabled()) {
+ if (isInternalAuthSet()) {
LOG(2) << "calling onCreate auth for " << conn->toString();
bool result = conn->authenticateInternalUser();
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index dd476258300..354d6498f45 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -1357,30 +1357,31 @@ var ShardingTest = function(params) {
}
}
- if (!otherParams.manualAddShard) {
- this._shardNames = [];
-
- var testName = this._testName;
- var admin = this.admin;
- var shardNames = this._shardNames;
-
- this._connections.forEach(function(z) {
- var n = z.name;
- if (!n) {
- n = z.host;
- if (!n) {
- n = z;
- }
- }
+ try {
+ if (!otherParams.manualAddShard) {
+ this._shardNames = [];
- print("ShardingTest " + testName + " going to add shard : " + n);
+ var testName = this._testName;
+ var admin = this.admin;
+ var shardNames = this._shardNames;
- var result = admin.runCommand({addshard: n});
- assert.commandWorked(result, "Failed to add shard " + n);
+ this._connections.forEach(function(z) {
+ var n = z.name || z.host || z;
- shardNames.push(result.shardAdded);
- z.shardName = result.shardAdded;
- });
+ print("ShardingTest " + testName + " going to add shard : " + n);
+
+ var result = admin.runCommand({addshard: n});
+ assert.commandWorked(result, "Failed to add shard " + n);
+
+ shardNames.push(result.shardAdded);
+ z.shardName = result.shardAdded;
+ });
+ }
+ } catch (e) {
+ // Clean up the running processes on failure
+ print("Failed to add shards, stopping cluster.");
+ this.stop();
+ throw e;
}
if (jsTestOptions().keyFile) {