author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2015-11-19 16:58:45 -0500
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2015-11-19 20:20:14 -0500
commit     5b3257d526f8217e303609418da1769275f81d03 (patch)
tree       23df1988812742763033c90d8c80b7523cced557
parent     bbff16c4b196133718b3d3f5cf7ce2095cc6d2b9 (diff)
download   mongo-5b3257d526f8217e303609418da1769275f81d03.tar.gz
SERVER-21527 Cleanup usages of getShard for resolving host to shard
-rw-r--r--  jstests/sharding/sharded_limit_batchsize.js                                 |  7
-rw-r--r--  jstests/sharding/zero_shard_version.js                                      |  6
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp |  2
-rw-r--r--  src/mongo/s/client/shard_registry.cpp                                       |  2
-rw-r--r--  src/mongo/s/client/shard_registry.h                                         |  4
-rw-r--r--  src/mongo/s/client/sharding_network_connection_hook.cpp                     |  4
-rw-r--r--  src/mongo/s/commands/cluster_move_primary_cmd.cpp                           |  2
-rw-r--r--  src/mongo/s/config.cpp                                                      |  6
-rw-r--r--  src/mongo/s/config.h                                                        |  2
-rw-r--r--  src/mongo/s/sharding_initialization.cpp                                     |  4
-rw-r--r--  src/mongo/s/version_manager.cpp                                             | 59
11 files changed, 47 insertions, 51 deletions
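
The change is mechanical but easy to misread in the flattened hunks below: ShardRegistry previously exposed two getShardNoReload overloads (one keyed by ShardId, one by HostAndPort), and several callers resolved a host by passing the string from conn->getServerAddress() to getShard. The host-based overload is renamed to getShardForHostNoReload, and call sites now parse the address into a HostAndPort and fail loudly when the host is unknown. A condensed sketch of the resulting call-site pattern, drawn from the version_manager.cpp hunks below (illustration only, not itself part of the commit):

    // Resolve the connection's host to its owning shard without reloading the
    // registry. uassertStatusOK throws if the address fails to parse; the
    // uassert throws ShardNotFound if the host is not a known shard member.
    const auto shard = grid.shardRegistry()->getShardForHostNoReload(
        uassertStatusOK(HostAndPort::parse(conn->getServerAddress())));
    uassert(ErrorCodes::ShardNotFound,
            str::stream() << conn->getServerAddress() << " is not recognized as a shard",
            shard);
    const ShardId shardId = shard->getId();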
diff --git a/jstests/sharding/sharded_limit_batchsize.js b/jstests/sharding/sharded_limit_batchsize.js
index 051038c4608..57303ed530a 100644
--- a/jstests/sharding/sharded_limit_batchsize.js
+++ b/jstests/sharding/sharded_limit_batchsize.js
@@ -1,6 +1,8 @@
 // Tests for sharded limit + batchSize. Make sure that various combinations
 // of limit and batchSize with sort return the correct results, and do not issue
 // unnecessary getmores (see SERVER-14299).
+(function() {
+'use strict';
 
 /**
  * Test the correctness of queries with sort and batchSize on a sharded cluster,
@@ -49,7 +51,6 @@ var st = new ShardingTest({
     shards: 2,
     other: {shardOptions: {setParameter: "enableTestCommands=1"}}
 });
-st.stopBalancer();
 
 var db = st.s.getDB("test");
 var shardedCol = db.getCollection("sharded_limit_batchsize");
@@ -59,7 +60,7 @@ unshardedCol.drop();
 
 // Enable sharding and pre-split the sharded collection.
 assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
-db.adminCommand({movePrimary: db.getName(), to: "shard0000"});
+st.ensurePrimaryShard(db.getName(), "shard0000");
 db.adminCommand({shardCollection: shardedCol.getFullName(), key: {_id: 1}});
 assert.commandWorked(db.adminCommand({split: shardedCol.getFullName(), middle: {_id: 0}}));
 assert.commandWorked(db.adminCommand({moveChunk: shardedCol.getFullName(),
@@ -114,3 +115,5 @@ jsTest.log("Running limit tests against non-sharded collection.");
 testLimit(unshardedCol, st.shard0);
 
 st.stop();
+
+})();
diff --git a/jstests/sharding/zero_shard_version.js b/jstests/sharding/zero_shard_version.js
index 9ee42fe1ae3..8bfd871450f 100644
--- a/jstests/sharding/zero_shard_version.js
+++ b/jstests/sharding/zero_shard_version.js
@@ -2,13 +2,14 @@
  * Tests the setShardVersion logic on the this shard side, specifically when comparing
  * against a major version of zero or incompatible epochs.
  */
+(function() {
+'use strict';
 
 var st = new ShardingTest({ shards: 2, mongos: 4 });
-st.stopBalancer();
 
 var testDB_s0 = st.s.getDB('test');
 testDB_s0.adminCommand({ enableSharding: 'test' });
-testDB_s0.adminCommand({ movePrimary: 'test', to: 'shard0001' });
+st.ensurePrimaryShard('test', 'shard0001');
 testDB_s0.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
 
 var checkShardMajorVersion = function(conn, expectedVersion) {
@@ -176,3 +177,4 @@ checkShardMajorVersion(st.d1, 0);
 
 st.stop();
 
+})();
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp
index 7a16bb7c7a8..58cf4db7f62 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp
@@ -341,7 +341,7 @@ TEST_F(DropColl2ShardTest, FirstShardDropCmdError) {
 
 TEST_F(DropColl2ShardTest, SecondShardTargeterError) {
     auto shard2Targeter = RemoteCommandTargeterMock::get(
-        shardRegistry()->getShard(operationContext(), shard2().getHost())->getTargeter());
+        shardRegistry()->getShard(operationContext(), shard2().getName())->getTargeter());
     shard2Targeter->setFindHostReturnValue({ErrorCodes::HostUnreachable, "bad test network"});
 
     auto future = launchAsync([this] {
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index e875f01c6fb..8b9602205c2 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -233,7 +233,7 @@ shared_ptr<Shard> ShardRegistry::getShardNoReload(const ShardId& shardId) {
     return _findUsingLookUp(shardId);
 }
 
-shared_ptr<Shard> ShardRegistry::getShardNoReload(const HostAndPort& host) {
+shared_ptr<Shard> ShardRegistry::getShardForHostNoReload(const HostAndPort& host) {
     stdx::lock_guard<stdx::mutex> lk(_mutex);
     return mapFindWithDefault(_hostLookup, host);
 }
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index bf9fadee9b0..e219cd26c7b 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -174,9 +174,9 @@ public:
 
     /**
      * Finds the Shard that the mongod listening at this HostAndPort is a member of. Will not
-     * refresh the shard registry of otherwise perform any network traffic.
+     * refresh the shard registry or otherwise perform any network traffic.
      */
-    std::shared_ptr<Shard> getShardNoReload(const HostAndPort& shardHost);
+    std::shared_ptr<Shard> getShardForHostNoReload(const HostAndPort& shardHost);
 
     /**
      * Returns shared pointer to the shard object representing the config servers.
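
After the two shard_registry hunks above, ShardRegistry exposes exactly one no-reload lookup per key type, so the kind of resolution is visible at every call site. Roughly (signatures as in shard_registry.h above; the comments are mine):

    // Lookup by shard name, e.g. "shard0000"; consults the in-memory map only.
    std::shared_ptr<Shard> getShardNoReload(const ShardId& shardId);

    // Lookup by the host:port of a member mongod; renamed from the old
    // getShardNoReload(const HostAndPort&) overload, and likewise performs
    // no registry refresh or network traffic.
    std::shared_ptr<Shard> getShardForHostNoReload(const HostAndPort& shardHost);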
diff --git a/src/mongo/s/client/sharding_network_connection_hook.cpp b/src/mongo/s/client/sharding_network_connection_hook.cpp
index 09e02572f6d..6652f6a19e1 100644
--- a/src/mongo/s/client/sharding_network_connection_hook.cpp
+++ b/src/mongo/s/client/sharding_network_connection_hook.cpp
@@ -50,7 +50,7 @@ Status ShardingNetworkConnectionHook::validateHost(
 
 Status ShardingNetworkConnectionHook::validateHostImpl(
     const HostAndPort& remoteHost, const executor::RemoteCommandResponse& isMasterReply) {
-    auto shard = grid.shardRegistry()->getShardNoReload(remoteHost);
+    auto shard = grid.shardRegistry()->getShardForHostNoReload(remoteHost);
     if (!shard) {
         return {ErrorCodes::ShardNotFound,
                 str::stream() << "No shard found for host: " << remoteHost.toString()};
@@ -106,7 +106,7 @@ Status ShardingNetworkConnectionHook::validateHostImpl(
 
 StatusWith<boost::optional<executor::RemoteCommandRequest>>
 ShardingNetworkConnectionHook::makeRequest(const HostAndPort& remoteHost) {
-    auto shard = grid.shardRegistry()->getShardNoReload(remoteHost);
+    auto shard = grid.shardRegistry()->getShardForHostNoReload(remoteHost);
     if (!shard) {
         return {ErrorCodes::ShardNotFound,
                 str::stream() << "No shard found for host: " << remoteHost.toString()};
diff --git a/src/mongo/s/commands/cluster_move_primary_cmd.cpp b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
index 86a2f090203..145c982de48 100644
--- a/src/mongo/s/commands/cluster_move_primary_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
@@ -188,7 +188,7 @@ public:
 
         ScopedDbConnection fromconn(fromShard->getConnString());
 
-        config->setPrimary(txn, toShard->getConnString().toString());
+        config->setPrimary(txn, toShard->getId());
         config->reload(txn);
 
         if (shardedColls.empty()) {
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index c4ac9e12fcc..4566aeb0db8 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -428,11 +428,9 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
     return ci.getCM();
 }
 
-void DBConfig::setPrimary(OperationContext* txn, const std::string& s) {
-    const auto shard = grid.shardRegistry()->getShard(txn, s);
-
+void DBConfig::setPrimary(OperationContext* txn, const ShardId& newPrimaryId) {
     stdx::lock_guard<stdx::mutex> lk(_lock);
-    _primaryId = shard->getId();
+    _primaryId = newPrimaryId;
     _save(txn);
 }
 
diff --git a/src/mongo/s/config.h b/src/mongo/s/config.h
index 68a2ca18ae8..4eb80b3001d 100644
--- a/src/mongo/s/config.h
+++ b/src/mongo/s/config.h
@@ -164,7 +164,7 @@ public:
      */
     const ShardId& getShardId(OperationContext* txn, const std::string& ns);
 
-    void setPrimary(OperationContext* txn, const std::string& s);
+    void setPrimary(OperationContext* txn, const ShardId& newPrimaryId);
 
     /**
      * Returns true if it is successful at loading the DBConfig, false if the database is not found,
diff --git a/src/mongo/s/sharding_initialization.cpp b/src/mongo/s/sharding_initialization.cpp
index 4de5ecf99e6..7d5da250958 100644
--- a/src/mongo/s/sharding_initialization.cpp
+++ b/src/mongo/s/sharding_initialization.cpp
@@ -76,7 +76,7 @@ public:
        audit::writeImpersonatedUsersToMetadata(metadataBob);
 
        // Add config server optime to metadata sent to shards.
-       auto shard = grid.shardRegistry()->getShardForHostNoReload(target);
+       auto shard = grid.shardRegistry()->getShardForHostNoReload(target);
        if (!shard) {
            return Status(ErrorCodes::ShardNotFound,
                          str::stream() << "Shard not found for server: " << target.toString());
@@ -97,7 +97,7 @@ public:
        try {
            saveGLEStats(metadataObj, replySource.toString());
 
-           auto shard = grid.shardRegistry()->getShardNoReload(replySource);
+           auto shard = grid.shardRegistry()->getShardForHostNoReload(replySource);
            if (!shard) {
                return Status::OK();
            }
diff --git a/src/mongo/s/version_manager.cpp b/src/mongo/s/version_manager.cpp
index bac9d93a209..4fb45bdbccb 100644
--- a/src/mongo/s/version_manager.cpp
+++ b/src/mongo/s/version_manager.cpp
@@ -109,7 +109,7 @@ private:
  * Sends the setShardVersion command on the specified connection.
  */
 bool setShardVersion(OperationContext* txn,
-                     DBClientBase& conn,
+                     DBClientBase* conn,
                      const string& ns,
                      const ConnectionString& configServer,
                      ChunkVersion version,
@@ -119,7 +119,12 @@ bool setShardVersion(OperationContext* txn,
     ShardId shardId;
     ConnectionString shardCS;
     {
-        const auto shard = grid.shardRegistry()->getShard(txn, conn.getServerAddress());
+        const auto shard = grid.shardRegistry()->getShardForHostNoReload(
+            uassertStatusOK(HostAndPort::parse(conn->getServerAddress())));
+        uassert(ErrorCodes::ShardNotFound,
+                str::stream() << conn->getServerAddress() << " is not recognized as a shard",
+                shard);
+
         shardId = shard->getId();
         shardCS = shard->getConnString();
     }
@@ -133,15 +138,14 @@
     } else {
         SetShardVersionRequest ssv = SetShardVersionRequest::makeForVersioning(
             configServer, shardId, shardCS, NamespaceString(ns), version, authoritative);
-
         cmd = ssv.toBSON();
     }
 
-    LOG(1) << " setShardVersion " << shardId << " " << conn.getServerAddress() << " " << ns
+    LOG(1) << " setShardVersion " << shardId << " " << conn->getServerAddress() << " " << ns
            << " " << cmd
           << (manager ? string(str::stream() << " " << manager->getSequenceNumber()) : "");
 
-    return conn.runCommand("admin", cmd, result, 0);
+    return conn->runCommand("admin", cmd, result, 0);
 }
 
 /**
@@ -185,12 +189,9 @@ DBClientBase* getVersionable(DBClientBase* conn) {
  * mongos-specific behavior on mongod (auditing and replication information in commands)
 */
 bool initShardVersionEmptyNS(OperationContext* txn, DBClientBase* conn_in) {
-    bool ok;
-    BSONObj result;
-    DBClientBase* conn = NULL;
     try {
         // May throw if replica set primary is down
-        conn = getVersionable(conn_in);
+        DBClientBase* const conn = getVersionable(conn_in);
         dassert(conn); // errors thrown above
 
         // Check to see if we've already initialized this connection. This avoids sending
@@ -199,24 +200,20 @@ bool initShardVersionEmptyNS(OperationContext* txn, DBClientBase* conn_in) {
             return false;
         }
 
-        // Check to see if this is actually a shard and not a single config server
-        // NOTE: Config servers are registered only by the name "config" in the shard cache, not
-        // by host, so lookup by host will fail unless the host is also a shard.
-        const auto shard = grid.shardRegistry()->getShard(txn, conn->getServerAddress());
-        if (!shard) {
-            return false;
-        }
+        BSONObj result;
+        const bool ok = setShardVersion(txn,
+                                        conn,
+                                        "",
+                                        grid.shardRegistry()->getConfigServerConnectionString(),
+                                        ChunkVersion(),
+                                        NULL,
+                                        true,
+                                        result);
 
-        LOG(1) << "initializing shard connection to " << shard->toString();
+        LOG(3) << "initial sharding result : " << result;
 
-        ok = setShardVersion(txn,
-                             *conn,
-                             "",
-                             grid.shardRegistry()->getConfigServerConnectionString(),
-                             ChunkVersion(),
-                             NULL,
-                             true,
-                             result);
+        connectionShardStatus.setSequence(conn, "", 0);
+        return ok;
     } catch (const DBException&) {
         // NOTE: Replica sets may fail to initShardVersion because future calls relying on
         // correct versioning must later call checkShardVersion on the primary.
@@ -237,11 +234,6 @@ bool initShardVersionEmptyNS(OperationContext* txn, DBClientBase* conn_in) {
 
         return false;
     }
-
-    LOG(3) << "initial sharding result : " << result;
-
-    connectionShardStatus.setSequence(conn, "", 0);
-    return ok;
 }
 
 /**
@@ -279,7 +271,7 @@ bool checkShardVersion(OperationContext* txn,
         return false;
     }
 
-    DBClientBase* conn = getVersionable(conn_in);
+    DBClientBase* const conn = getVersionable(conn_in);
     verify(conn); // errors thrown above
 
     shared_ptr<DBConfig> conf = status.getValue();
@@ -303,7 +295,8 @@ bool checkShardVersion(OperationContext* txn,
         return false;
    }
 
-    const auto shard = grid.shardRegistry()->getShard(txn, conn->getServerAddress());
+    const auto shard = grid.shardRegistry()->getShardForHostNoReload(
+        uassertStatusOK(HostAndPort::parse(conn->getServerAddress())));
     uassert(ErrorCodes::ShardNotFound,
             str::stream() << conn->getServerAddress() << " is not recognized as a shard",
             shard);
@@ -359,7 +352,7 @@ bool checkShardVersion(OperationContext* txn,
 
     BSONObj result;
     if (setShardVersion(txn,
-                        *conn,
+                        conn,
                         ns,
                         grid.shardRegistry()->getConfigServerConnectionString(),
                         version,
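
Two behavioral notes on the version_manager.cpp hunks above: setShardVersion now takes the connection by pointer and performs the host-to-shard resolution itself, throwing ShardNotFound instead of silently returning false; initShardVersionEmptyNS therefore drops its up-front registry check (including the obsolete note about config servers being registered under the name "config"), marks the connection initialized via connectionShardStatus.setSequence inside the try block, and relies on its existing catch (const DBException&) to absorb a resolution failure. A condensed sketch of the new call, for reference (arguments as in the hunk above; surrounding control flow elided):

    BSONObj result;
    const bool ok = setShardVersion(txn,
                                    conn,  // now a DBClientBase*, not a reference
                                    "",    // empty ns: connection init, no collection version
                                    grid.shardRegistry()->getConfigServerConnectionString(),
                                    ChunkVersion(),  // zero (unsharded) version
                                    NULL,            // no ChunkManager
                                    true,            // authoritative
                                    result);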