Diffstat (limited to 'src/mongo/s/metadata_loader.cpp')
-rw-r--r--  src/mongo/s/metadata_loader.cpp  427
1 file changed, 202 insertions(+), 225 deletions(-)
diff --git a/src/mongo/s/metadata_loader.cpp b/src/mongo/s/metadata_loader.cpp
index 74ca1597a7d..1a0758f5734 100644
--- a/src/mongo/s/metadata_loader.cpp
+++ b/src/mongo/s/metadata_loader.cpp
@@ -44,265 +44,242 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::make_pair;
- using std::map;
- using std::pair;
- using std::string;
-
- /**
- * This is an adapter so we can use config diffs - mongos and mongod do them slightly
- * differently.
- *
- * The mongod adapter here tracks only a single shard, and stores ranges by (min, max).
- */
- class SCMConfigDiffTracker : public ConfigDiffTracker<BSONObj, string> {
- public:
- SCMConfigDiffTracker(const string& currShard) : _currShard( currShard ) { }
-
- virtual bool isTracked(const ChunkType& chunk) const {
- return chunk.getShard() == _currShard;
- }
-
- virtual pair<BSONObj, BSONObj> rangeFor(const ChunkType& chunk) const {
- return make_pair(chunk.getMin(), chunk.getMax());
- }
-
- virtual string shardFor( const string& name ) const {
- return name;
- }
-
- virtual string nameFrom( const string& shard ) const {
- return shard;
- }
+using std::unique_ptr;
+using std::endl;
+using std::make_pair;
+using std::map;
+using std::pair;
+using std::string;
- string _currShard;
- };
-
- //
- // MetadataLoader implementation
- //
+/**
+ * This is an adapter so we can use config diffs - mongos and mongod do them slightly
+ * differently.
+ *
+ * The mongod adapter here tracks only a single shard, and stores ranges by (min, max).
+ */
+class SCMConfigDiffTracker : public ConfigDiffTracker<BSONObj, string> {
+public:
+ SCMConfigDiffTracker(const string& currShard) : _currShard(currShard) {}
- MetadataLoader::MetadataLoader() { }
+ virtual bool isTracked(const ChunkType& chunk) const {
+ return chunk.getShard() == _currShard;
+ }
- MetadataLoader::~MetadataLoader() { }
+ virtual pair<BSONObj, BSONObj> rangeFor(const ChunkType& chunk) const {
+ return make_pair(chunk.getMin(), chunk.getMax());
+ }
- Status MetadataLoader::makeCollectionMetadata(CatalogManager* catalogManager,
- const string& ns,
- const string& shard,
- const CollectionMetadata* oldMetadata,
- CollectionMetadata* metadata) const
- {
- Status status = _initCollection(catalogManager, ns, shard, metadata);
- if (!status.isOK() || metadata->getKeyPattern().isEmpty()) {
- return status;
- }
+ virtual string shardFor(const string& name) const {
+ return name;
+ }
- return initChunks(catalogManager, ns, shard, oldMetadata, metadata );
+ virtual string nameFrom(const string& shard) const {
+ return shard;
}
- Status MetadataLoader::_initCollection(CatalogManager* catalogManager,
- const string& ns,
- const string& shard,
- CollectionMetadata* metadata) const {
+ string _currShard;
+};
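
// Illustrative sketch of the adapter idea above (hypothetical Chunk and
// DiffTrackerBase types, not the real ConfigDiffTracker API): the base class
// walks the chunks reported by the config server and keeps only those the
// subclass claims, just as the single-shard adapter narrows the diff to this
// shard's (min, max) ranges.
#include <string>
#include <utility>
#include <vector>

struct Chunk {                    // stand-in for ChunkType
    std::string shard;
    int min, max;                 // stand-in for the BSON shard-key bounds
};

class DiffTrackerBase {           // stand-in for ConfigDiffTracker
public:
    virtual ~DiffTrackerBase() = default;
    virtual bool isTracked(const Chunk& c) const = 0;
    virtual std::pair<int, int> rangeFor(const Chunk& c) const = 0;

    // Collect the ranges of all chunks the subclass tracks.
    std::vector<std::pair<int, int>> collectRanges(const std::vector<Chunk>& chunks) const {
        std::vector<std::pair<int, int>> ranges;
        for (const Chunk& c : chunks) {
            if (isTracked(c)) {
                ranges.push_back(rangeFor(c));
            }
        }
        return ranges;
    }
};

class SingleShardTracker : public DiffTrackerBase {  // mirrors SCMConfigDiffTracker
public:
    explicit SingleShardTracker(std::string shard) : _shard(std::move(shard)) {}

    bool isTracked(const Chunk& c) const override {
        return c.shard == _shard;
    }
    std::pair<int, int> rangeFor(const Chunk& c) const override {
        return {c.min, c.max};
    }

private:
    std::string _shard;
};
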
- auto coll = catalogManager->getCollection(ns);
- if (!coll.isOK()) {
- return coll.getStatus();
- }
+//
+// MetadataLoader implementation
+//
- CollectionType collInfo = coll.getValue();
- if (collInfo.getDropped()) {
- return Status(ErrorCodes::NamespaceNotFound,
- str::stream() << "could not load metadata, collection "
- << ns << " was dropped");
- }
+MetadataLoader::MetadataLoader() {}
- metadata->_keyPattern = collInfo.getKeyPattern().toBSON();
- metadata->fillKeyPatternFields();
- metadata->_shardVersion = ChunkVersion(0, 0, collInfo.getEpoch());
- metadata->_collVersion = ChunkVersion(0, 0, collInfo.getEpoch());
+MetadataLoader::~MetadataLoader() {}
- return Status::OK();
+Status MetadataLoader::makeCollectionMetadata(CatalogManager* catalogManager,
+ const string& ns,
+ const string& shard,
+ const CollectionMetadata* oldMetadata,
+ CollectionMetadata* metadata) const {
+ Status status = _initCollection(catalogManager, ns, shard, metadata);
+ if (!status.isOK() || metadata->getKeyPattern().isEmpty()) {
+ return status;
}
- Status MetadataLoader::initChunks(CatalogManager* catalogManager,
- const string& ns,
- const string& shard,
- const CollectionMetadata* oldMetadata,
- CollectionMetadata* metadata) const
- {
- map<string, ChunkVersion> versionMap;
+ return initChunks(catalogManager, ns, shard, oldMetadata, metadata);
+}
- // Preserve the epoch
- versionMap[shard] = metadata->_shardVersion;
- OID epoch = metadata->getCollVersion().epoch();
- bool fullReload = true;
-
- // Check to see if we should use the old version or not.
- if ( oldMetadata ) {
-
- // If our epochs are compatible, it's useful to use the old metadata for diffs
- if ( oldMetadata->getCollVersion().hasEqualEpoch( epoch ) ) {
-
- fullReload = false;
- invariant( oldMetadata->isValid() );
-
- versionMap[shard] = oldMetadata->_shardVersion;
- metadata->_collVersion = oldMetadata->_collVersion;
+Status MetadataLoader::_initCollection(CatalogManager* catalogManager,
+ const string& ns,
+ const string& shard,
+ CollectionMetadata* metadata) const {
+ auto coll = catalogManager->getCollection(ns);
+ if (!coll.isOK()) {
+ return coll.getStatus();
+ }
- // TODO: This could be made more efficient if copying not required, but
- // not as frequently reloaded as in mongos.
- metadata->_chunksMap = oldMetadata->_chunksMap;
+ CollectionType collInfo = coll.getValue();
+ if (collInfo.getDropped()) {
+ return Status(ErrorCodes::NamespaceNotFound,
+ str::stream() << "could not load metadata, collection " << ns
+ << " was dropped");
+ }
- LOG( 2 ) << "loading new chunks for collection " << ns
- << " using old metadata w/ version " << oldMetadata->getShardVersion()
- << " and " << metadata->_chunksMap.size() << " chunks" << endl;
- }
- else {
- warning() << "reloading collection metadata for " << ns << " with new epoch "
- << epoch.toString() << ", the current epoch is "
- << oldMetadata->getCollVersion().epoch().toString() << endl;
- }
+ metadata->_keyPattern = collInfo.getKeyPattern().toBSON();
+ metadata->fillKeyPatternFields();
+ metadata->_shardVersion = ChunkVersion(0, 0, collInfo.getEpoch());
+ metadata->_collVersion = ChunkVersion(0, 0, collInfo.getEpoch());
+
+ return Status::OK();
+}
+
+Status MetadataLoader::initChunks(CatalogManager* catalogManager,
+ const string& ns,
+ const string& shard,
+ const CollectionMetadata* oldMetadata,
+ CollectionMetadata* metadata) const {
+ map<string, ChunkVersion> versionMap;
+
+ // Preserve the epoch
+ versionMap[shard] = metadata->_shardVersion;
+ OID epoch = metadata->getCollVersion().epoch();
+ bool fullReload = true;
+
+ // Check to see if we should use the old version or not.
+ if (oldMetadata) {
+ // If our epochs are compatible, it's useful to use the old metadata for diffs
+ if (oldMetadata->getCollVersion().hasEqualEpoch(epoch)) {
+ fullReload = false;
+ invariant(oldMetadata->isValid());
+
+ versionMap[shard] = oldMetadata->_shardVersion;
+ metadata->_collVersion = oldMetadata->_collVersion;
+
+ // TODO: This could be made more efficient if copying not required, but
+ // not as frequently reloaded as in mongos.
+ metadata->_chunksMap = oldMetadata->_chunksMap;
+
+ LOG(2) << "loading new chunks for collection " << ns
+ << " using old metadata w/ version " << oldMetadata->getShardVersion() << " and "
+ << metadata->_chunksMap.size() << " chunks" << endl;
+ } else {
+ warning() << "reloading collection metadata for " << ns << " with new epoch "
+ << epoch.toString() << ", the current epoch is "
+ << oldMetadata->getCollVersion().epoch().toString() << endl;
}
+ }
- // Exposes the new metadata's range map and version to the "differ," who
- // would ultimately be responsible of filling them up.
- SCMConfigDiffTracker differ( shard );
- differ.attach( ns, metadata->_chunksMap, metadata->_collVersion, versionMap );
-
- try {
- std::vector<ChunkType> chunks;
- Status status = catalogManager->getChunks(differ.configDiffQuery(), 0, &chunks);
- if (!status.isOK()) {
- if (status == ErrorCodes::HostUnreachable) {
- // Make our metadata invalid
- metadata->_collVersion = ChunkVersion( 0, 0, OID() );
- metadata->_chunksMap.clear();
- }
- return status;
- }
-
- //
- // The diff tracker should always find at least one chunk (the highest chunk we saw
- // last time). If not, something has changed on the config server (potentially between
- // when we read the collection data and when we read the chunks data).
- //
- int diffsApplied = differ.calculateConfigDiff(chunks);
- if ( diffsApplied > 0 ) {
- // Chunks found, return ok
- LOG(2) << "loaded " << diffsApplied << " chunks into new metadata for " << ns
- << " with version " << metadata->_collVersion;
-
- metadata->_shardVersion = versionMap[shard];
- metadata->fillRanges();
-
- invariant( metadata->isValid() );
- return Status::OK();
- }
- else if ( diffsApplied == 0 ) {
-
- // No chunks found, the collection is dropping or we're confused
- // If this is a full reload, assume it is a drop for backwards compatibility
- // TODO: drop the config.collections entry *before* the chunks and eliminate this
- // ambiguity
-
- string errMsg =
- str::stream() << "no chunks found when reloading " << ns
- << ", previous version was "
- << metadata->_collVersion.toString()
- << ( fullReload ? ", this is a drop" : "" );
-
- warning() << errMsg << endl;
-
- metadata->_collVersion = ChunkVersion( 0, 0, OID() );
- metadata->_chunksMap.clear();
+ // Exposes the new metadata's range map and version to the "differ," who
+ // would ultimately be responsible of filling them up.
+ SCMConfigDiffTracker differ(shard);
+ differ.attach(ns, metadata->_chunksMap, metadata->_collVersion, versionMap);
- return fullReload ? Status( ErrorCodes::NamespaceNotFound, errMsg ) :
- Status( ErrorCodes::RemoteChangeDetected, errMsg );
- }
- else {
- // Invalid chunks found, our epoch may have changed because we dropped/recreated
- // the collection.
- string errMsg = str::stream() << "invalid chunks found when reloading " << ns
- << ", previous version was "
- << metadata->_collVersion.toString()
- << ", this should be rare";
- warning() << errMsg;
-
- metadata->_collVersion = ChunkVersion( 0, 0, OID() );
+ try {
+ std::vector<ChunkType> chunks;
+ Status status = catalogManager->getChunks(differ.configDiffQuery(), 0, &chunks);
+ if (!status.isOK()) {
+ if (status == ErrorCodes::HostUnreachable) {
+ // Make our metadata invalid
+ metadata->_collVersion = ChunkVersion(0, 0, OID());
metadata->_chunksMap.clear();
-
- return Status(ErrorCodes::RemoteChangeDetected, errMsg);
}
+ return status;
}
- catch (const DBException& e) {
- // We deliberately do not return connPtr to the pool, since it was involved with the
- // error here.
- return Status(ErrorCodes::HostUnreachable,
- str::stream() << "problem querying chunks metadata" << causedBy(e));
+
+ //
+ // The diff tracker should always find at least one chunk (the highest chunk we saw
+ // last time). If not, something has changed on the config server (potentially between
+ // when we read the collection data and when we read the chunks data).
+ //
+ int diffsApplied = differ.calculateConfigDiff(chunks);
+ if (diffsApplied > 0) {
+ // Chunks found, return ok
+ LOG(2) << "loaded " << diffsApplied << " chunks into new metadata for " << ns
+ << " with version " << metadata->_collVersion;
+
+ metadata->_shardVersion = versionMap[shard];
+ metadata->fillRanges();
+
+ invariant(metadata->isValid());
+ return Status::OK();
+ } else if (diffsApplied == 0) {
+ // No chunks found, the collection is dropping or we're confused
+ // If this is a full reload, assume it is a drop for backwards compatibility
+ // TODO: drop the config.collections entry *before* the chunks and eliminate this
+ // ambiguity
+
+ string errMsg = str::stream()
+ << "no chunks found when reloading " << ns << ", previous version was "
+ << metadata->_collVersion.toString() << (fullReload ? ", this is a drop" : "");
+
+ warning() << errMsg << endl;
+
+ metadata->_collVersion = ChunkVersion(0, 0, OID());
+ metadata->_chunksMap.clear();
+
+ return fullReload ? Status(ErrorCodes::NamespaceNotFound, errMsg)
+ : Status(ErrorCodes::RemoteChangeDetected, errMsg);
+ } else {
+ // Invalid chunks found, our epoch may have changed because we dropped/recreated
+ // the collection.
+ string errMsg = str::stream()
+ << "invalid chunks found when reloading " << ns << ", previous version was "
+ << metadata->_collVersion.toString() << ", this should be rare";
+ warning() << errMsg;
+
+ metadata->_collVersion = ChunkVersion(0, 0, OID());
+ metadata->_chunksMap.clear();
+
+ return Status(ErrorCodes::RemoteChangeDetected, errMsg);
}
+ } catch (const DBException& e) {
+ // We deliberately do not return connPtr to the pool, since it was involved with the
+ // error here.
+ return Status(ErrorCodes::HostUnreachable,
+ str::stream() << "problem querying chunks metadata" << causedBy(e));
}
+}
+
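// Hedged sketch of the branch logic in initChunks above, using plain int/bool
// stand-ins for the ChunkVersion bookkeeping; classifyDiffResult is a
// hypothetical helper, not part of the MongoDB sources. More than zero applied
// diffs means the reload succeeded, zero diffs on a full reload is treated as
// a drop, and anything else signals a remote change such as a dropped and
// recreated collection.
enum class ReloadOutcome { Loaded, NamespaceNotFound, RemoteChangeDetected };

ReloadOutcome classifyDiffResult(int diffsApplied, bool fullReload) {
    if (diffsApplied > 0) {
        return ReloadOutcome::Loaded;  // chunks found, metadata is valid
    }
    if (diffsApplied == 0) {
        // No chunks: a drop if this was a full reload, otherwise the config
        // data changed between reading the collection and reading the chunks.
        return fullReload ? ReloadOutcome::NamespaceNotFound
                          : ReloadOutcome::RemoteChangeDetected;
    }
    // Negative result: invalid chunks, e.g. the epoch changed underneath us.
    return ReloadOutcome::RemoteChangeDetected;
}
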
+Status MetadataLoader::promotePendingChunks(const CollectionMetadata* afterMetadata,
+ CollectionMetadata* remoteMetadata) const {
+ // Ensure pending chunks are applicable
+ bool notApplicable = (NULL == afterMetadata || NULL == remoteMetadata) ||
+ (afterMetadata->getShardVersion() > remoteMetadata->getShardVersion()) ||
+ (afterMetadata->getShardVersion().epoch() != remoteMetadata->getShardVersion().epoch());
+ if (notApplicable)
+ return Status::OK();
- Status MetadataLoader::promotePendingChunks( const CollectionMetadata* afterMetadata,
- CollectionMetadata* remoteMetadata ) const {
-
- // Ensure pending chunks are applicable
- bool notApplicable =
- ( NULL == afterMetadata || NULL == remoteMetadata ) ||
- ( afterMetadata->getShardVersion() > remoteMetadata->getShardVersion() ) ||
- ( afterMetadata->getShardVersion().epoch() !=
- remoteMetadata->getShardVersion().epoch() );
- if ( notApplicable ) return Status::OK();
-
- // The chunks from remoteMetadata are the latest version, and the pending chunks
- // from afterMetadata are the latest version. If no trickery is afoot, pending chunks
- // should match exactly zero or one loaded chunk.
+ // The chunks from remoteMetadata are the latest version, and the pending chunks
+ // from afterMetadata are the latest version. If no trickery is afoot, pending chunks
+ // should match exactly zero or one loaded chunk.
- remoteMetadata->_pendingMap = afterMetadata->_pendingMap;
+ remoteMetadata->_pendingMap = afterMetadata->_pendingMap;
- // Resolve our pending chunks against the chunks we've loaded
- for ( RangeMap::iterator it = remoteMetadata->_pendingMap.begin();
- it != remoteMetadata->_pendingMap.end(); ) {
+ // Resolve our pending chunks against the chunks we've loaded
+ for (RangeMap::iterator it = remoteMetadata->_pendingMap.begin();
+ it != remoteMetadata->_pendingMap.end();) {
+ if (!rangeMapOverlaps(remoteMetadata->_chunksMap, it->first, it->second)) {
+ ++it;
+ continue;
+ }
- if ( !rangeMapOverlaps( remoteMetadata->_chunksMap, it->first, it->second ) ) {
- ++it;
- continue;
- }
+ // Our pending range overlaps at least one chunk
- // Our pending range overlaps at least one chunk
+ if (rangeMapContains(remoteMetadata->_chunksMap, it->first, it->second)) {
+ // Chunk was promoted from pending, successful migration
+ LOG(2) << "verified chunk " << rangeToString(it->first, it->second)
+ << " was migrated earlier to this shard" << endl;
- if ( rangeMapContains( remoteMetadata->_chunksMap, it->first, it->second ) ) {
+ remoteMetadata->_pendingMap.erase(it++);
+ } else {
+ // Something strange happened, maybe manual editing of config?
+ RangeVector overlap;
+ getRangeMapOverlap(remoteMetadata->_chunksMap, it->first, it->second, &overlap);
- // Chunk was promoted from pending, successful migration
- LOG( 2 ) << "verified chunk " << rangeToString( it->first, it->second )
- << " was migrated earlier to this shard" << endl;
+ string errMsg = str::stream()
+ << "the remote metadata changed unexpectedly, pending range "
+ << rangeToString(it->first, it->second)
+ << " does not exactly overlap loaded chunks " << overlapToString(overlap);
- remoteMetadata->_pendingMap.erase( it++ );
- }
- else {
-
- // Something strange happened, maybe manual editing of config?
- RangeVector overlap;
- getRangeMapOverlap( remoteMetadata->_chunksMap,
- it->first,
- it->second,
- &overlap );
-
- string errMsg = str::stream()
- << "the remote metadata changed unexpectedly, pending range "
- << rangeToString( it->first, it->second )
- << " does not exactly overlap loaded chunks "
- << overlapToString( overlap );
-
- return Status( ErrorCodes::RemoteChangeDetected, errMsg );
- }
+ return Status(ErrorCodes::RemoteChangeDetected, errMsg);
}
-
- return Status::OK();
}
+ return Status::OK();
+}
+
-} // namespace mongo
+} // namespace mongo
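
// Standalone sketch of the pending-chunk resolution in promotePendingChunks,
// with simplified integer ranges; rangeOverlaps, rangeContains and
// promotePending are hypothetical stand-ins for the real rangeMapOverlaps /
// rangeMapContains helpers and do not match their signatures. A pending range
// fully covered by a loaded chunk was a completed migration; a partial overlap
// signals an unexpected remote change.
#include <map>

using SimpleRangeMap = std::map<int, int>;  // min -> max, half-open [min, max)

bool rangeOverlaps(const SimpleRangeMap& chunks, int min, int max) {
    for (const auto& chunk : chunks) {
        if (chunk.first < max && min < chunk.second) {
            return true;
        }
    }
    return false;
}

bool rangeContains(const SimpleRangeMap& chunks, int min, int max) {
    auto it = chunks.find(min);
    return it != chunks.end() && it->second == max;
}

// Returns false when a pending range only partially overlaps the loaded
// chunks, which promotePendingChunks reports as RemoteChangeDetected.
bool promotePending(const SimpleRangeMap& loadedChunks, SimpleRangeMap& pending) {
    for (auto it = pending.begin(); it != pending.end();) {
        if (!rangeOverlaps(loadedChunks, it->first, it->second)) {
            ++it;                    // no chunk loaded for this range yet; keep it pending
        } else if (rangeContains(loadedChunks, it->first, it->second)) {
            it = pending.erase(it);  // migration completed; the range is now a real chunk
        } else {
            return false;            // partial overlap: unexpected remote change
        }
    }
    return true;
}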