path: root/src/mongo/s/version_manager.cpp
author    Greg Studer <greg@10gen.com>  2014-10-02 10:10:48 -0400
committer Greg Studer <greg@10gen.com>  2014-10-13 10:11:51 -0400
commit    42d276dace045fe888cdb45f00dc0bf97fa13bd5 (patch)
tree      6f266b8ac713295cdccd0a0f9015244b177eaa6d /src/mongo/s/version_manager.cpp
parent    a14adbe5793af16b70fb1895ab0a7f65c753bfb8 (diff)
download  mongo-42d276dace045fe888cdb45f00dc0bf97fa13bd5.tar.gz
SERVER-15375 remove initShardVersion
Diffstat (limited to 'src/mongo/s/version_manager.cpp')
-rw-r--r--  src/mongo/s/version_manager.cpp  167
1 file changed, 60 insertions(+), 107 deletions(-)
diff --git a/src/mongo/s/version_manager.cpp b/src/mongo/s/version_manager.cpp
index e0be9a67c5e..c0a60e86cd8 100644
--- a/src/mongo/s/version_manager.cpp
+++ b/src/mongo/s/version_manager.cpp
@@ -47,21 +47,37 @@ namespace mongo {
// Global version manager
VersionManager versionManager;
- // when running in sharded mode, use chunk shard version control
+ /**
+ * Tracking information, per-connection, of the latest chunk manager iteration or sequence
+ * number that was used to send a shard version over this connection.
+ * When the chunk manager is replaced, implying new versions were loaded, the chunk manager
+ * sequence number is iterated by 1 and connections need to re-send shard versions.
+ */
struct ConnectionShardStatus {
- typedef unsigned long long S;
-
ConnectionShardStatus()
: _mutex( "ConnectionShardStatus" ) {
}
- S getSequence( DBClientBase * conn , const string& ns ) {
- scoped_lock lk( _mutex );
- return _map[conn->getConnectionId()][ns];
+ bool getSequence(DBClientBase * conn,
+ const string& ns,
+ unsigned long long* sequence) {
+
+ scoped_lock lk(_mutex);
+
+ SequenceMap::const_iterator seenConnIt = _map.find(conn->getConnectionId());
+ if (seenConnIt == _map.end())
+ return false;
+
+ map<string, unsigned long long>::const_iterator seenNSIt = seenConnIt->second.find(ns);
+ if (seenNSIt == seenConnIt->second.end())
+ return false;
+
+ *sequence = seenNSIt->second;
+ return true;
}
- void setSequence( DBClientBase * conn , const string& ns , const S& s ) {
+ void setSequence( DBClientBase * conn , const string& ns , const unsigned long long& s ) {
scoped_lock lk( _mutex );
_map[conn->getConnectionId()][ns] = s;
}
@@ -75,7 +91,8 @@ namespace mongo {
mongo::mutex _mutex;
// a map from a connection into ChunkManager's sequence number for each namespace
- map<unsigned long long, map<string,unsigned long long> > _map;
+ typedef map<unsigned long long, map<string,unsigned long long> > SequenceMap;
+ SequenceMap _map;
} connectionShardStatus;
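
For context, the bookkeeping introduced above boils down to a mutex-guarded nested map keyed by connection id and namespace, where a failed lookup means "no shard version has ever been sent over this connection." Below is a minimal standalone sketch of that pattern, not part of the patch, using standard-library types and hypothetical names in place of the mongo::mutex / DBClientBase machinery in the real file:

    #include <map>
    #include <mutex>
    #include <string>

    // Sketch of the per-connection shard-version bookkeeping. The real code
    // keys on DBClientBase::getConnectionId() and uses mongo::mutex/scoped_lock.
    class ConnectionSequenceCache {
    public:
        // Returns false if no shard version was ever sent for (connId, ns);
        // otherwise stores the last sequence number sent and returns true.
        bool getSequence(unsigned long long connId,
                         const std::string& ns,
                         unsigned long long* sequence) {
            std::lock_guard<std::mutex> lk(_mutex);
            SequenceMap::const_iterator connIt = _map.find(connId);
            if (connIt == _map.end())
                return false;
            std::map<std::string, unsigned long long>::const_iterator nsIt =
                connIt->second.find(ns);
            if (nsIt == connIt->second.end())
                return false;
            *sequence = nsIt->second;
            return true;
        }

        // Records the chunk manager sequence number that was just sent.
        void setSequence(unsigned long long connId,
                         const std::string& ns,
                         unsigned long long s) {
            std::lock_guard<std::mutex> lk(_mutex);
            _map[connId][ns] = s;
        }

    private:
        typedef std::map<unsigned long long,
                         std::map<std::string, unsigned long long> > SequenceMap;
        std::mutex _mutex;
        SequenceMap _map;
    };
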
@@ -113,73 +130,6 @@ namespace mongo {
return NULL;
}
- extern OID serverID;
-
- bool VersionManager::initShardVersionCB( DBClientBase * conn_in, BSONObj& result ){
-
- bool ok;
- DBClientBase* conn = NULL;
- try {
- // May throw if replica set primary is down
- conn = getVersionable( conn_in );
- dassert( conn ); // errors thrown above
-
- BSONObjBuilder cmdBuilder;
-
- cmdBuilder.append( "setShardVersion" , "" );
- cmdBuilder.appendBool( "init", true );
- cmdBuilder.append( "configdb" , configServer.modelServer() );
- cmdBuilder.appendOID( "serverID" , &serverID );
- cmdBuilder.appendBool( "authoritative" , true );
-
- BSONObj cmd = cmdBuilder.obj();
-
- LOG(1) << "initializing shard connection to " << conn->toString() << endl;
- LOG(2) << "initial sharding settings : " << cmd << endl;
-
- ok = conn->runCommand("admin", cmd, result, 0);
- }
- catch( const DBException& ) {
-
- if ( conn_in->type() != ConnectionString::SET ) {
- throw;
- }
-
- // NOTE: Only old-style cluster operations will talk via DBClientReplicaSets - using
- // checkShardVersion is required (which includes initShardVersion information) if these
- // connections are used.
-
- OCCASIONALLY {
- warning() << "failed to initialize new replica set connection version, "
- << "will initialize on first use" << endl;
- }
-
- return true;
- }
-
- // HACK for backwards compatibility with v1.8.x, v2.0.0 and v2.0.1
- // Result is false, but will still initialize serverID and configdb
- if( ! ok && ! result["errmsg"].eoo() && ( result["errmsg"].String() == "need to specify namespace"/* 2.0.1/2 */ ||
- result["errmsg"].String() == "need to speciy namespace" /* 1.8 */ ))
- {
- ok = true;
- }
-
- // Record the connection wire version if sent in the response, initShardVersion is a
- // handshake for mongos->mongod connections.
- if ( !result["minWireVersion"].eoo() ) {
-
- int minWireVersion = result["minWireVersion"].numberInt();
- int maxWireVersion = result["maxWireVersion"].numberInt();
- conn->setWireVersions( minWireVersion, maxWireVersion );
- }
-
- LOG(3) << "initial sharding result : " << result << endl;
-
- return ok;
-
- }
-
bool VersionManager::forceRemoteCheckShardVersionCB( const string& ns ){
DBConfigPtr conf = grid.getDBConfig( ns );
@@ -211,19 +161,19 @@ namespace mongo {
unsigned long long officialSequenceNumber = 0;
+ ShardPtr primary;
ChunkManagerPtr manager;
- const bool isSharded = conf->isSharded( ns );
- if ( isSharded ) {
- manager = conf->getChunkManagerIfExists( ns , authoritative );
- // It's possible the chunk manager was reset since we checked whether sharded was true,
- // so must check this here.
- if( manager ) officialSequenceNumber = manager->getSequenceNumber();
- }
+ if (authoritative)
+ conf->getChunkManagerIfExists(ns, true);
- // Check this manager against the reference manager
- if( isSharded && manager ){
+ conf->getChunkManagerOrPrimary(ns, manager, primary);
- Shard shard = Shard::make( conn->getServerAddress() );
+ if (manager)
+ officialSequenceNumber = manager->getSequenceNumber();
+
+ // Check this manager against the reference manager
+ if( manager ){
+ Shard shard = Shard::make(conn->getServerAddress());
if( refManager && ! refManager->compatibleWith( manager, shard ) ){
throw SendStaleConfigException( ns, str::stream() << "manager (" << manager->getVersion( shard ).toString() << " : " << manager->getSequenceNumber() << ") "
<< "not compatible with reference manager (" << refManager->getVersion( shard ).toString() << " : " << refManager->getSequenceNumber() << ") "
@@ -232,7 +182,7 @@ namespace mongo {
}
}
else if( refManager ){
- Shard shard = Shard::make( conn->getServerAddress() );
+ Shard shard = Shard::make(conn->getServerAddress());
string msg( str::stream() << "not sharded ("
<< ( (manager.get() == 0) ? string( "<none>" ) :
str::stream() << manager->getSequenceNumber() )
@@ -245,32 +195,34 @@ namespace mongo {
refManager->getVersion( shard ), ChunkVersion( 0, 0, OID() ));
}
- // has the ChunkManager been reloaded since the last time we updated the connection-level version?
- // (ie., last time we issued the setShardVersions below)
- unsigned long long sequenceNumber = connectionShardStatus.getSequence(conn,ns);
- if ( sequenceNumber == officialSequenceNumber ) {
+ // Do not send setShardVersion to collections on the config servers - this causes problems
+ // when config servers are also shards and get SSV with conflicting names.
+ // TODO: Make config servers regular shards
+ if (primary && primary->getName() == "config") {
return false;
}
- ChunkVersion version = ChunkVersion( 0, 0, OID() );
- if ( isSharded && manager ) {
- version = manager->getVersion( Shard::make( conn->getServerAddress() ) );
+ // Has the ChunkManager been reloaded since the last time we updated the shard version over
+ // this connection? If we've never updated the shard version, do so now.
+ unsigned long long sequenceNumber = 0;
+ if (connectionShardStatus.getSequence(conn, ns, &sequenceNumber)) {
+ if (sequenceNumber == officialSequenceNumber) {
+ return false;
+ }
}
- if( ! version.isSet() ){
- LOG(0) << "resetting shard version of " << ns << " on " << conn->getServerAddress() << ", " <<
- ( ! isSharded ? "no longer sharded" :
- ( ! manager ? "no chunk manager found" :
- "version is zero" ) ) << endl;
- }
+ // Now that we're sure we're sending SSV, get the shard we're sending it to
+ Shard shard = Shard::make(conn->getServerAddress());
+
+ ChunkVersion version = ChunkVersion(0, 0, OID());
+ if (manager)
+ version = manager->getVersion(shard);
- LOG(2).stream()
- << " have to set shard version for conn: " << conn->getServerAddress() << " ns:" << ns
- << " my last seq: " << sequenceNumber << " current: " << officialSequenceNumber
- << " version: " << version << " manager: " << manager.get()
- << endl;
+ LOG(1) << "setting shard version of " << version << " for " << ns << " on shard "
+ << shard.toString();
- const string versionableServerAddress(conn->getServerAddress());
+ LOG(3) << "last version sent with chunk manager iteration " << sequenceNumber
+ << ", current chunk manager iteration is " << officialSequenceNumber;
BSONObj result;
if ( setShardVersion( *conn , ns , version , manager , authoritative , result ) ) {
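
The comments added in this hunk capture the core decision: setShardVersion is sent only when no version has ever been sent over this connection, or when the chunk manager has been reloaded (its sequence number advanced) since the last send. A minimal sketch of that check, with hypothetical names standing in for the two-state lookup that connectionShardStatus.getSequence() now performs:

    // Sketch only: should a shard version be (re)sent over this connection?
    // haveLastSent/lastSentSequence mirror the two-state getSequence() lookup;
    // officialSequenceNumber is the current chunk manager iteration (0 when
    // the collection has no chunk manager). Names are hypothetical.
    inline bool needsShardVersionResend(bool haveLastSent,
                                        unsigned long long lastSentSequence,
                                        unsigned long long officialSequenceNumber) {
        if (!haveLastSent)
            return true;  // never sent a version on this connection: send now
        // Resend only if the chunk manager was reloaded since the last send.
        return lastSentSequence != officialSequenceNumber;
    }

The paired setSequence() call kept above records the iteration that was sent, so a later call over the same connection with an unchanged chunk manager can return early without a round trip.
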
@@ -310,7 +262,7 @@ namespace mongo {
const int maxNumTries = 7;
if ( tryNumber < maxNumTries ) {
LOG( tryNumber < ( maxNumTries / 2 ) ? 1 : 0 )
- << "going to retry checkShardVersion host: " << versionableServerAddress << " " << result << endl;
+ << "going to retry checkShardVersion shard: " << shard.toString() << " " << result;
sleepmillis( 10 * tryNumber );
// use the original connection and get a fresh versionable connection
// since conn can be invalidated (or worse, freed) after the failure
@@ -318,7 +270,8 @@ namespace mongo {
return true;
}
- string errmsg = str::stream() << "setShardVersion failed host: " << versionableServerAddress << " " << result;
+ string errmsg = str::stream() << "setShardVersion failed shard: " << shard.toString()
+ << " " << result;
log() << " " << errmsg << endl;
massert( 10429 , errmsg , 0 );
return true;
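
The last two hunks show the failure path: up to maxNumTries (7) attempts, a linear backoff of sleepmillis(10 * tryNumber), and a massert() once retries are exhausted. A minimal sketch of that retry shape, flattened into a loop with hypothetical names (the real code re-enters the check with a fresh versionable connection, since the failed one may have been invalidated):

    #include <chrono>
    #include <thread>

    // Sketch of the linear-backoff retry loop around one setShardVersion
    // attempt. trySendVersion stands in for a single attempt and returns
    // true on success; names are hypothetical, not the MongoDB API.
    template <typename TryFn>
    bool retryWithLinearBackoff(TryFn trySendVersion, int maxNumTries = 7) {
        for (int tryNumber = 1; tryNumber <= maxNumTries; ++tryNumber) {
            if (trySendVersion())
                return true;
            // Back off a little longer on each failed attempt: 10ms, 20ms, ...
            std::this_thread::sleep_for(
                std::chrono::milliseconds(10 * tryNumber));
        }
        return false;  // the real code massert()s with the last error here
    }
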