diff options
author | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2016-05-31 22:22:31 +0300 |
---|---|---|
committer | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2016-06-02 13:39:33 +0300 |
commit | d900c3b855e66a337ebe6c6f46ab5656d79e1db9 (patch) | |
tree | c15d3cfd1857b49dfe2ab3dce6d01aa477fd4949 /src | |
parent | 2ec8d1e5c03e2000c4cc4eac1d961bbc7b817ab4 (diff) | |
download | mongo-d900c3b855e66a337ebe6c6f46ab5656d79e1db9.tar.gz |
SERVER-23733 Remove the mongos chunkSize startup option
This change removes the chunkSize startup option from mongos in favor of
the 'chunksize' setting stored on the config server.
Diffstat (limited to 'src')
-rw-r--r-- | src/mongo/db/s/sharding_initialization_mongod.cpp | 11 | ||||
-rw-r--r-- | src/mongo/db/s/sharding_initialization_mongod.h | 4 | ||||
-rw-r--r-- | src/mongo/db/s/sharding_state_test.cpp | 2 | ||||
-rw-r--r-- | src/mongo/s/balancer/balancer_configuration.cpp | 21 | ||||
-rw-r--r-- | src/mongo/s/balancer/balancer_configuration.h | 13 | ||||
-rw-r--r-- | src/mongo/s/balancer/balancer_configuration_test.cpp | 10 | ||||
-rw-r--r-- | src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp | 3 | ||||
-rw-r--r-- | src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp | 3 | ||||
-rw-r--r-- | src/mongo/s/mongos_options.cpp | 15 | ||||
-rw-r--r-- | src/mongo/s/mongos_options.h | 4 | ||||
-rw-r--r-- | src/mongo/s/server.cpp | 10 | ||||
-rw-r--r-- | src/mongo/s/sharding_initialization.cpp | 3 | ||||
-rw-r--r-- | src/mongo/s/sharding_initialization.h | 1 | ||||
-rw-r--r-- | src/mongo/s/sharding_test_fixture.cpp | 15 | ||||
-rw-r--r-- | src/mongo/shell/shardingtest.js | 53 | ||||
-rw-r--r-- | src/mongo/shell/utils_auth.js | 4 | ||||
-rw-r--r-- | src/mongo/shell/utils_sh.js | 85 |
17 files changed, 103 insertions, 154 deletions
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp index 08bbce81b8c..4993e118b80 100644 --- a/src/mongo/db/s/sharding_initialization_mongod.cpp +++ b/src/mongo/db/s/sharding_initialization_mongod.cpp @@ -35,7 +35,6 @@ #include "mongo/client/connection_string.h" #include "mongo/client/remote_command_targeter.h" #include "mongo/client/remote_command_targeter_factory_impl.h" -#include "mongo/s/balancer/balancer_configuration.h" #include "mongo/s/client/shard_factory.h" #include "mongo/s/client/shard_local.h" #include "mongo/s/client/shard_remote.h" @@ -75,9 +74,9 @@ Status initializeGlobalShardingStateForMongod(const ConnectionString& configCS) auto shardFactory = stdx::make_unique<ShardFactory>(std::move(buildersMap), std::move(targeterFactory)); - return initializeGlobalShardingState( - configCS, ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, std::move(shardFactory), []() { - return stdx::make_unique<rpc::ShardingEgressMetadataHookForMongod>(); - }); -} + return initializeGlobalShardingState(configCS, std::move(shardFactory), []() { + return stdx::make_unique<rpc::ShardingEgressMetadataHookForMongod>(); + }); } + +} // namespace mongo diff --git a/src/mongo/db/s/sharding_initialization_mongod.h b/src/mongo/db/s/sharding_initialization_mongod.h index 38309c53cfb..34c515ffcbd 100644 --- a/src/mongo/db/s/sharding_initialization_mongod.h +++ b/src/mongo/db/s/sharding_initialization_mongod.h @@ -31,6 +31,7 @@ #include "mongo/base/status.h" namespace mongo { + class ConnectionString; /** @@ -40,4 +41,5 @@ class ConnectionString; * NOTE: This does not initialize ShardingState, which should only be done for shard servers. 
*/ Status initializeGlobalShardingStateForMongod(const ConnectionString& configCS); -} + +} // namespace mongo diff --git a/src/mongo/db/s/sharding_state_test.cpp b/src/mongo/db/s/sharding_state_test.cpp index 94b26b1f0b7..ecd4fe0af3b 100644 --- a/src/mongo/db/s/sharding_state_test.cpp +++ b/src/mongo/db/s/sharding_state_test.cpp @@ -113,7 +113,7 @@ void initGrid(OperationContext* txn, const ConnectionString& configConnString) { stdx::make_unique<CatalogCache>(), std::move(shardRegistry), stdx::make_unique<ClusterCursorManager>(txn->getServiceContext()->getPreciseClockSource()), - stdx::make_unique<BalancerConfiguration>(ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes), + stdx::make_unique<BalancerConfiguration>(), std::move(executorPool), mockNetwork); } diff --git a/src/mongo/s/balancer/balancer_configuration.cpp b/src/mongo/s/balancer/balancer_configuration.cpp index 8c96ffa03f8..5381b73ae24 100644 --- a/src/mongo/s/balancer/balancer_configuration.cpp +++ b/src/mongo/s/balancer/balancer_configuration.cpp @@ -56,13 +56,9 @@ const char BalancerSettingsType::kKey[] = "balancer"; const char ChunkSizeSettingsType::kKey[] = "chunksize"; const uint64_t ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes{64 * 1024 * 1024}; -BalancerConfiguration::BalancerConfiguration(uint64_t defaultMaxChunkSizeBytes) +BalancerConfiguration::BalancerConfiguration() : _balancerSettings(BalancerSettingsType::createDefault()), - _defaultMaxChunkSizeBytes(defaultMaxChunkSizeBytes) { - // The default must always be created with the max chunk size value prevalidated - invariant(ChunkSizeSettingsType::checkMaxChunkSizeValid(_defaultMaxChunkSizeBytes)); - _maxChunkSizeBytes.store(_defaultMaxChunkSizeBytes); -} + _maxChunkSizeBytes(ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes) {} BalancerConfiguration::~BalancerConfiguration() = default; @@ -128,8 +124,7 @@ Status BalancerConfiguration::_refreshBalancerSettings(OperationContext* txn) { } Status 
BalancerConfiguration::_refreshChunkSizeSettings(OperationContext* txn) { - ChunkSizeSettingsType settings = - ChunkSizeSettingsType::createDefault(_defaultMaxChunkSizeBytes); + ChunkSizeSettingsType settings = ChunkSizeSettingsType::createDefault(); auto settingsObjStatus = grid.catalogManager(txn)->getGlobalSettings(txn, ChunkSizeSettingsType::kKey); @@ -266,14 +261,8 @@ bool BalancerSettingsType::isTimeInBalancingWindow(const boost::posix_time::ptim ChunkSizeSettingsType::ChunkSizeSettingsType() = default; -ChunkSizeSettingsType ChunkSizeSettingsType::createDefault(int maxChunkSizeBytes) { - // The default must always be created with the max chunk size value prevalidated - invariant(ChunkSizeSettingsType::checkMaxChunkSizeValid(maxChunkSizeBytes)); - - ChunkSizeSettingsType settings; - settings._maxChunkSizeBytes = maxChunkSizeBytes; - - return settings; +ChunkSizeSettingsType ChunkSizeSettingsType::createDefault() { + return ChunkSizeSettingsType(); } StatusWith<ChunkSizeSettingsType> ChunkSizeSettingsType::fromBSON(const BSONObj& obj) { diff --git a/src/mongo/s/balancer/balancer_configuration.h b/src/mongo/s/balancer/balancer_configuration.h index 62ac468fbdd..767966eeb50 100644 --- a/src/mongo/s/balancer/balancer_configuration.h +++ b/src/mongo/s/balancer/balancer_configuration.h @@ -120,7 +120,7 @@ public: // The key under which this setting is stored on the config server static const char kKey[]; - // Default value used for the max chunk size if one is not specified in the balancer + // Default value to use for the max chunk size if one is not specified in the balancer // configuration static const uint64_t kDefaultMaxChunkSizeBytes; @@ -128,7 +128,7 @@ public: * Constructs a settings object with the default values. To be used when no chunk size settings * have been specified. 
*/ - static ChunkSizeSettingsType createDefault(int maxChunkSizeBytes); + static ChunkSizeSettingsType createDefault(); /** * Interprets the BSON content as chunk size settings and extracts the respective values. @@ -160,11 +160,8 @@ public: /** * Primes the balancer configuration with some default values. The effective settings may change * at a later time after a call to refresh(). - * - * defaultMaxChunkSizeBytes indicates the value to be use for the MaxChunkSize parameter if one - * has not been specified in config.settings. This parameter must have been pre-validated. */ - BalancerConfiguration(uint64_t defaultMaxChunkSizeBytes); + BalancerConfiguration(); ~BalancerConfiguration(); /** @@ -219,10 +216,6 @@ private: mutable stdx::mutex _balancerSettingsMutex; BalancerSettingsType _balancerSettings; - // Default value for use for the max chunk size if the setting is not present in the balancer - // configuration - const uint64_t _defaultMaxChunkSizeBytes; - // Max chunk size after which a chunk would be considered jumbo and won't be moved. This value // is read on the critical path after each write operation, that's why it is cached. 
AtomicUInt64 _maxChunkSizeBytes; diff --git a/src/mongo/s/balancer/balancer_configuration_test.cpp b/src/mongo/s/balancer/balancer_configuration_test.cpp index e31e76499c8..5119b5180b7 100644 --- a/src/mongo/s/balancer/balancer_configuration_test.cpp +++ b/src/mongo/s/balancer/balancer_configuration_test.cpp @@ -95,7 +95,7 @@ protected: TEST_F(BalancerConfigurationTestFixture, NoConfigurationDocuments) { configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1")); - BalancerConfiguration config(1024 * 1024ULL); + BalancerConfiguration config; auto future = launchAsync([&] { ASSERT_OK(config.refreshAndCheck(operationContext())); }); @@ -107,13 +107,13 @@ TEST_F(BalancerConfigurationTestFixture, NoConfigurationDocuments) { ASSERT(config.isBalancerActive()); ASSERT_EQ(MigrationSecondaryThrottleOptions::kDefault, config.getSecondaryThrottle().getSecondaryThrottle()); - ASSERT_EQ(1024 * 1024ULL, config.getMaxChunkSizeBytes()); + ASSERT_EQ(64 * 1024 * 1024ULL, config.getMaxChunkSizeBytes()); } TEST_F(BalancerConfigurationTestFixture, ChunkSizeSettingsDocumentOnly) { configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1")); - BalancerConfiguration config(1024 * 1024ULL); + BalancerConfiguration config; auto future = launchAsync([&] { ASSERT_OK(config.refreshAndCheck(operationContext())); }); @@ -131,7 +131,7 @@ TEST_F(BalancerConfigurationTestFixture, ChunkSizeSettingsDocumentOnly) { TEST_F(BalancerConfigurationTestFixture, BalancerSettingsDocumentOnly) { configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1")); - BalancerConfiguration config(1024 * 1024ULL); + BalancerConfiguration config; auto future = launchAsync([&] { ASSERT_OK(config.refreshAndCheck(operationContext())); }); @@ -144,7 +144,7 @@ TEST_F(BalancerConfigurationTestFixture, BalancerSettingsDocumentOnly) { ASSERT(!config.isBalancerActive()); ASSERT_EQ(MigrationSecondaryThrottleOptions::kDefault, config.getSecondaryThrottle().getSecondaryThrottle()); - ASSERT_EQ(1024 * 1024ULL, 
config.getMaxChunkSizeBytes()); + ASSERT_EQ(64 * 1024 * 1024ULL, config.getMaxChunkSizeBytes()); } TEST(BalancerSettingsType, BalancerDisabled) { diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp index 901f12798ea..a87244bf615 100644 --- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp +++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp @@ -42,7 +42,6 @@ #include "mongo/executor/task_executor.h" #include "mongo/rpc/metadata/repl_set_metadata.h" #include "mongo/rpc/metadata/server_selection_metadata.h" -#include "mongo/s/balancer/balancer_configuration.h" #include "mongo/s/catalog/dist_lock_manager_mock.h" #include "mongo/s/catalog/replset/catalog_manager_replica_set.h" #include "mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h" @@ -684,7 +683,7 @@ TEST_F(ShardCollectionTest, withInitialData) { ASSERT_EQUALS(keyPattern.toBSON(), request.cmdObj["keyPattern"].Obj()); ASSERT_EQUALS(keyPattern.getKeyPattern().globalMin(), request.cmdObj["min"].Obj()); ASSERT_EQUALS(keyPattern.getKeyPattern().globalMax(), request.cmdObj["max"].Obj()); - ASSERT_EQUALS(ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, + ASSERT_EQUALS(64 * 1024 * 1024ULL, static_cast<uint64_t>(request.cmdObj["maxChunkSizeBytes"].numberLong())); ASSERT_EQUALS(0, request.cmdObj["maxSplitPoints"].numberLong()); ASSERT_EQUALS(0, request.cmdObj["maxChunkObjects"].numberLong()); diff --git a/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp b/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp index 1b16e1f35da..493ca3791ec 100644 --- a/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp +++ b/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp @@ -167,8 +167,7 @@ protected: nullptr, std::move(shardRegistry), nullptr, - 
stdx::make_unique<BalancerConfiguration>( - ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes), + stdx::make_unique<BalancerConfiguration>(), nullptr, nullptr); diff --git a/src/mongo/s/mongos_options.cpp b/src/mongo/s/mongos_options.cpp index e01b19e2d5e..bbf38d23bb7 100644 --- a/src/mongo/s/mongos_options.cpp +++ b/src/mongo/s/mongos_options.cpp @@ -206,21 +206,6 @@ Status storeMongosOptions(const moe::Environment& params, const std::vector<std: return ret; } - if (params.count("sharding.chunkSize")) { - const int maxChunkSizeMB = params["sharding.chunkSize"].as<int>(); - if (maxChunkSizeMB <= 0) { - return Status(ErrorCodes::BadValue, "error: need a positive chunksize"); - } - - const uint64_t maxChunkSizeBytes = maxChunkSizeMB * 1024 * 1024; - - if (!ChunkSizeSettingsType::checkMaxChunkSizeValid(maxChunkSizeBytes)) { - return Status(ErrorCodes::BadValue, "MaxChunkSize invalid"); - } - - mongosGlobalParams.maxChunkSizeBytes = maxChunkSizeBytes; - } - if (params.count("net.port")) { int port = params["net.port"].as<int>(); if (port <= 0 || port > 65535) { diff --git a/src/mongo/s/mongos_options.h b/src/mongo/s/mongos_options.h index b14c688e6a9..732bcbda9b1 100644 --- a/src/mongo/s/mongos_options.h +++ b/src/mongo/s/mongos_options.h @@ -31,7 +31,6 @@ #include "mongo/base/status.h" #include "mongo/client/connection_string.h" #include "mongo/db/server_options.h" -#include "mongo/s/balancer/balancer_configuration.h" #include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/option_section.h" @@ -48,9 +47,6 @@ struct MongosGlobalParams { // The config server connection string ConnectionString configdbs; - // The max chunk size after which a chunk will be considered jumbo - uint64_t maxChunkSizeBytes{ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes}; - // Whether auto-splitting is enabled bool shouldAutoSplit{true}; }; diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp index 8dbacbdc624..3b477ebc053 100644 --- 
a/src/mongo/s/server.cpp +++ b/src/mongo/s/server.cpp @@ -61,6 +61,7 @@ #include "mongo/executor/task_executor_pool.h" #include "mongo/platform/process_id.h" #include "mongo/s/balancer/balancer.h" +#include "mongo/s/balancer/balancer_configuration.h" #include "mongo/s/catalog/catalog_manager.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_lockpings.h" @@ -299,11 +300,10 @@ static Status initializeSharding(OperationContext* txn) { auto shardFactory = stdx::make_unique<ShardFactory>(std::move(buildersMap), std::move(targeterFactory)); - Status status = initializeGlobalShardingState( - mongosGlobalParams.configdbs, - mongosGlobalParams.maxChunkSizeBytes, - std::move(shardFactory), - []() { return stdx::make_unique<rpc::ShardingEgressMetadataHookForMongos>(); }); + Status status = + initializeGlobalShardingState(mongosGlobalParams.configdbs, std::move(shardFactory), []() { + return stdx::make_unique<rpc::ShardingEgressMetadataHookForMongos>(); + }); if (!status.isOK()) { return status; diff --git a/src/mongo/s/sharding_initialization.cpp b/src/mongo/s/sharding_initialization.cpp index 1ed199e497b..4e6ff7f6688 100644 --- a/src/mongo/s/sharding_initialization.cpp +++ b/src/mongo/s/sharding_initialization.cpp @@ -129,7 +129,6 @@ std::unique_ptr<TaskExecutorPool> makeTaskExecutorPool( } // namespace Status initializeGlobalShardingState(const ConnectionString& configCS, - uint64_t maxChunkSizeBytes, std::unique_ptr<ShardFactory> shardFactory, rpc::ShardingEgressMetadataHookBuilder hookBuilder) { if (configCS.type() == ConnectionString::INVALID) { @@ -156,7 +155,7 @@ Status initializeGlobalShardingState(const ConnectionString& configCS, stdx::make_unique<CatalogCache>(), std::move(shardRegistry), stdx::make_unique<ClusterCursorManager>(getGlobalServiceContext()->getPreciseClockSource()), - stdx::make_unique<BalancerConfiguration>(maxChunkSizeBytes), + stdx::make_unique<BalancerConfiguration>(), std::move(executorPool), networkPtr); diff --git 
a/src/mongo/s/sharding_initialization.h b/src/mongo/s/sharding_initialization.h index d31aabf261d..962ff9c1789 100644 --- a/src/mongo/s/sharding_initialization.h +++ b/src/mongo/s/sharding_initialization.h @@ -51,7 +51,6 @@ using ShardingEgressMetadataHookBuilder = * CatalogManager, ShardingRegistry, and grid objects. */ Status initializeGlobalShardingState(const ConnectionString& configCS, - uint64_t maxChunkSizeBytes, std::unique_ptr<ShardFactory> shardFactory, rpc::ShardingEgressMetadataHookBuilder hookBuilder); diff --git a/src/mongo/s/sharding_test_fixture.cpp b/src/mongo/s/sharding_test_fixture.cpp index ef4916f0885..3c6660fcc61 100644 --- a/src/mongo/s/sharding_test_fixture.cpp +++ b/src/mongo/s/sharding_test_fixture.cpp @@ -169,14 +169,13 @@ void ShardingTestFixture::setUp() { // For now initialize the global grid object. All sharding objects will be accessible from there // until we get rid of it. - grid.init( - std::move(cm), - stdx::make_unique<CatalogCache>(), - std::move(shardRegistry), - stdx::make_unique<ClusterCursorManager>(_service->getPreciseClockSource()), - stdx::make_unique<BalancerConfiguration>(ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes), - std::move(executorPool), - _mockNetwork); + grid.init(std::move(cm), + stdx::make_unique<CatalogCache>(), + std::move(shardRegistry), + stdx::make_unique<ClusterCursorManager>(_service->getPreciseClockSource()), + stdx::make_unique<BalancerConfiguration>(), + std::move(executorPool), + _mockNetwork); } void ShardingTestFixture::tearDown() { diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js index be105364261..824ff879007 100644 --- a/src/mongo/shell/shardingtest.js +++ b/src/mongo/shell/shardingtest.js @@ -1012,10 +1012,6 @@ var ShardingTest = function(params) { otherParams.useBridge = otherParams.useBridge || false; otherParams.bridgeOptions = otherParams.bridgeOptions || {}; - if (otherParams.chunkSize && numMongos === 0) { - throw Error('Cannot set chunk size without 
any running mongos instances'); - } - var keyFile = otherParams.keyFile || otherParams.extraOptions.keyFile; var hostName = getHostName(); @@ -1209,7 +1205,7 @@ var ShardingTest = function(params) { rstOptions.nodes = nodeOptions; - // Start the config server + // Start the config server's replica set this.configRS = new ReplSetTest(rstOptions); this.configRS.startSet(startOptions); @@ -1220,7 +1216,23 @@ var ShardingTest = function(params) { this.configRS.initiate(config, null, initiateTimeout); // Wait for master to be elected before starting mongos - this.configRS.getPrimary(); + var csrsPrimary = this.configRS.getPrimary(); + + // If chunkSize has been requested for this test, write the configuration + if (otherParams.chunkSize) { + function setChunkSize() { + assert.writeOK(csrsPrimary.getDB('config').settings.update( + {_id: 'chunksize'}, + {$set: {value: otherParams.chunkSize}}, + {upsert: true, writeConcern: {w: 'majority', wtimeout: 30000}})); + } + + if (keyFile) { + authutil.asCluster(csrsPrimary, keyFile, setChunkSize); + } else { + setChunkSize(); + } + } this._configDB = this.configRS.getURL(); this._configServers = this.configRS.nodes; @@ -1230,7 +1242,7 @@ var ShardingTest = function(params) { this["c" + i] = conn; } - printjson("config servers: " + this._configDB); + printjson('Config servers: ' + this._configDB); var configConnection = _connectWithRetry(this._configDB); @@ -1249,10 +1261,6 @@ var ShardingTest = function(params) { keyFile: keyFile, }; - if (otherParams.chunkSize) { - options.chunkSize = otherParams.chunkSize; - } - if (otherParams.mongosOptions && otherParams.mongosOptions.binVersion) { otherParams.mongosOptions.binVersion = MongoRunner.versionIterator(otherParams.mongosOptions.binVersion); @@ -1301,25 +1309,18 @@ var ShardingTest = function(params) { // If auth is enabled for the test, login the mongos connections as system in order to // configure the instances and then log them out again. 
- if (keyFile) { - authutil.assertAuthenticate(this._mongos, 'admin', { - user: '__system', - mechanism: 'MONGODB-CR', - pwd: cat(keyFile).replace(/[\011-\015\040]/g, '') - }); - } - try { + function configureCluster() { // Disable the balancer unless it is explicitly turned on if (!otherParams.enableBalancer) { - this.stopBalancer(); + self.stopBalancer(); } // Lower the mongos replica set monitor's threshold for deeming RS shard hosts as // inaccessible in order to speed up tests, which shutdown entire shards and check for // errors. This attempt is best-effort and failure should not have effect on the actual // test execution, just the execution time. - this._mongos.forEach(function(mongos) { + self._mongos.forEach(function(mongos) { var res = mongos.adminCommand({setParameter: 1, replMonitorMaxFailedChecks: 2}); // For tests, which use x509 certificate for authentication, the command above will not @@ -1328,10 +1329,12 @@ var ShardingTest = function(params) { assert.commandWorked(res); } }); - } finally { - if (keyFile) { - authutil.logout(this._mongos, 'admin'); - } + } + + if (keyFile) { + authutil.asCluster(this._mongos, keyFile, configureCluster); + } else { + configureCluster(); } try { diff --git a/src/mongo/shell/utils_auth.js b/src/mongo/shell/utils_auth.js index 6fd913c963a..b105164ea50 100644 --- a/src/mongo/shell/utils_auth.js +++ b/src/mongo/shell/utils_auth.js @@ -67,7 +67,7 @@ var authutil; */ authutil.asCluster = function(conn, keyfile, action) { var ex; - authutil.assertAuthenticate(conn, 'local', { + authutil.assertAuthenticate(conn, 'admin', { user: '__system', mechanism: 'SCRAM-SHA-1', pwd: cat(keyfile).replace(/[\011-\015\040]/g, '') @@ -77,7 +77,7 @@ var authutil; return action(); } finally { try { - authutil.logout(conn, 'local'); + authutil.logout(conn, 'admin'); } catch (ex) { } } diff --git a/src/mongo/shell/utils_sh.js b/src/mongo/shell/utils_sh.js index 41ee0e066b0..410bdd53051 100644 --- a/src/mongo/shell/utils_sh.js +++ 
b/src/mongo/shell/utils_sh.js @@ -236,61 +236,48 @@ sh.waitForPingChange = function(activePings, timeout, interval) { return remainingPings; }; -sh.waitForBalancerOff = function(timeout, interval) { - var pings = sh._getConfigDB().mongos.find().toArray(); - var activePings = []; - for (var i = 0; i < pings.length; i++) { - if (!pings[i].waiting) - activePings.push(pings[i]); - } - - print("Waiting for active hosts..."); - - activePings = sh.waitForPingChange(activePings, 60 * 1000); - - // After 1min, we assume that all hosts with unchanged pings are either - // offline (this is enough time for a full errored balance round, if a network - // issue, which would reload settings) or balancing, which we wait for next - // Legacy hosts we always have to wait for - - print("Waiting for the balancer lock..."); - - // Wait for the balancer lock to become inactive - // We can guess this is stale after 15 mins, but need to double-check manually - try { - sh.waitForDLock("balancer", false, 15 * 60 * 1000); - } catch (e) { - print( - "Balancer still may be active, you must manually verify this is not the case using the config.changelog collection."); - throw Error(e); - } - - print("Waiting again for active hosts after balancer is off..."); - - // Wait a short time afterwards, to catch the host which was balancing earlier - activePings = sh.waitForPingChange(activePings, 5 * 1000); - - // Warn about all the stale host pings remaining - for (var i = 0; i < activePings.length; i++) { - print("Warning : host " + activePings[i]._id + " seems to have been offline since " + - activePings[i].ping); - } - -}; - sh.waitForBalancer = function(onOrNot, timeout, interval) { - - // If we're waiting for the balancer to turn on or switch state or - // go to a particular state + // If we're waiting for the balancer to turn on or switch state or go to a particular state if (onOrNot) { - // Just wait for the balancer lock to change, can't ensure we'll ever see it - // actually locked + // Just 
wait for the balancer lock to change, can't ensure we'll ever see it actually locked sh.waitForDLock("balancer", undefined, timeout, interval); } else { // Otherwise we need to wait until we're sure balancing stops - sh.waitForBalancerOff(timeout, interval); - } + var activePings = []; + sh._getConfigDB().mongos.find().forEach(function(ping) { + if (!ping.waiting) + activePings.push(ping); + }); + print("Waiting for active hosts..."); + activePings = sh.waitForPingChange(activePings, 60 * 1000); + + // After 1min, we assume that all hosts with unchanged pings are either offline (this is + // enough time for a full errored balance round, if a network issue, which would reload + // settings) or balancing, which we wait for next. Legacy hosts we always have to wait for. + print("Waiting for the balancer lock..."); + + // Wait for the balancer lock to become inactive. We can guess this is stale after 15 mins, + // but need to double-check manually. + try { + sh.waitForDLock("balancer", false, 15 * 60 * 1000); + } catch (e) { + print( + "Balancer still may be active, you must manually verify this is not the case using the config.changelog collection."); + throw Error(e); + } + + print("Waiting again for active hosts after balancer is off..."); + + // Wait a short time afterwards, to catch the host which was balancing earlier + activePings = sh.waitForPingChange(activePings, 5 * 1000); + + // Warn about all the stale host pings remaining + activePings.forEach(function(activePing) { + print("Warning : host " + activePing._id + " seems to have been offline since " + + activePing.ping); + }); + } }; sh.disableBalancing = function(coll) { |