Diffstat (limited to 'src/mongo/db/s/collection_metadata.cpp')
-rw-r--r--  src/mongo/db/s/collection_metadata.cpp | 225
1 file changed, 75 insertions(+), 150 deletions(-)
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index e3c95a06e93..fbda424de67 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -41,66 +41,81 @@ namespace mongo {
 
-using std::unique_ptr;
-using std::make_pair;
-using std::string;
-using std::vector;
-using str::stream;
-
-CollectionMetadata::CollectionMetadata()
-    : _pendingMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()),
-      _chunksMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()),
-      _rangesMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()) {}
-
-CollectionMetadata::CollectionMetadata(const BSONObj& keyPattern, ChunkVersion collectionVersion)
-    : _collVersion(collectionVersion),
-      _shardVersion(ChunkVersion(0, 0, collectionVersion.epoch())),
-      _keyPattern(keyPattern.getOwned()),
+CollectionMetadata::CollectionMetadata(const BSONObj& keyPattern,
+                                       ChunkVersion collectionVersion,
+                                       ChunkVersion shardVersion,
+                                       RangeMap shardChunksMap)
+    : _keyPattern(keyPattern.getOwned()),
+      _collVersion(collectionVersion),
+      _shardVersion(shardVersion),
+      _chunksMap(std::move(shardChunksMap)),
       _pendingMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()),
-      _chunksMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()),
-      _rangesMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()) {}
+      _rangesMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()) {
 
-CollectionMetadata::~CollectionMetadata() = default;
+    invariant(!_keyPattern.isEmpty());
+    invariant(_collVersion.epoch() == _shardVersion.epoch());
+    invariant(_collVersion.isSet());
+    invariant(_collVersion >= _shardVersion);
 
-unique_ptr<CollectionMetadata> CollectionMetadata::clonePlusChunk(
-    const BSONObj& minKey, const BSONObj& maxKey, const ChunkVersion& chunkVersion) const {
-    invariant(chunkVersion.epoch() == _shardVersion.epoch());
-    invariant(chunkVersion.isSet());
-    invariant(chunkVersion > _shardVersion);
-    invariant(minKey.woCompare(maxKey) < 0);
-    invariant(!rangeMapOverlaps(_chunksMap, minKey, maxKey));
-
-    unique_ptr<CollectionMetadata> metadata(stdx::make_unique<CollectionMetadata>());
-    metadata->_keyPattern = _keyPattern.getOwned();
-    metadata->fillKeyPatternFields();
-    metadata->_pendingMap = _pendingMap;
-    metadata->_chunksMap = _chunksMap;
-    metadata->_chunksMap.insert(
-        make_pair(minKey.getOwned(), CachedChunkInfo(maxKey.getOwned(), chunkVersion)));
-    metadata->_shardVersion = chunkVersion;
-    metadata->_collVersion = chunkVersion > _collVersion ? chunkVersion : _collVersion;
-    metadata->fillRanges();
-
-    invariant(metadata->isValid());
-    return metadata;
+    // Parse the shard keys into the states 'keys' and 'keySet' members.
+    for (const auto& current : _keyPattern) {
+        auto newFieldRef(stdx::make_unique<FieldRef>());
+        newFieldRef->parse(current.fieldNameStringData());
+
+        _keyFields.emplace_back(std::move(newFieldRef));
+    }
+
+    if (_chunksMap.empty()) {
+        invariant(!_shardVersion.isSet());
+        return;
+    }
+
+    invariant(_shardVersion.isSet());
+
+    // Load the chunk information, coallesceing their ranges. The version for this shard would be
+    // the highest version for any of the chunks.
+    BSONObj min, max;
+
+    for (const auto& entry : _chunksMap) {
+        BSONObj currMin = entry.first;
+        BSONObj currMax = entry.second.getMaxKey();
+
+        // Coalesce the chunk's bounds in ranges if they are adjacent chunks
+        if (min.isEmpty()) {
+            min = currMin;
+            max = currMax;
+            continue;
+        }
+
+        if (SimpleBSONObjComparator::kInstance.evaluate(max == currMin)) {
+            max = currMax;
+            continue;
+        }
+
+        _rangesMap.emplace(min, CachedChunkInfo(max, ChunkVersion::IGNORED()));
+
+        min = currMin;
+        max = currMax;
+    }
+
+    invariant(!min.isEmpty());
+    invariant(!max.isEmpty());
+
+    _rangesMap.emplace(min, CachedChunkInfo(max, ChunkVersion::IGNORED()));
 }
 
+CollectionMetadata::~CollectionMetadata() = default;
+
 std::unique_ptr<CollectionMetadata> CollectionMetadata::cloneMinusPending(
     const ChunkType& chunk) const {
     invariant(rangeMapContains(_pendingMap, chunk.getMin(), chunk.getMax()));
 
-    unique_ptr<CollectionMetadata> metadata(stdx::make_unique<CollectionMetadata>());
-    metadata->_keyPattern = _keyPattern.getOwned();
-    metadata->fillKeyPatternFields();
+    auto metadata(stdx::make_unique<CollectionMetadata>(
+        _keyPattern, getCollVersion(), getShardVersion(), getChunks()));
     metadata->_pendingMap = _pendingMap;
-    metadata->_pendingMap.erase(chunk.getMin());
-    metadata->_chunksMap = _chunksMap;
-    metadata->_rangesMap = _rangesMap;
-    metadata->_shardVersion = _shardVersion;
-    metadata->_collVersion = _collVersion;
+    metadata->_pendingMap.erase(chunk.getMin());
 
-    invariant(metadata->isValid());
     return metadata;
 }
 
@@ -108,14 +123,9 @@ std::unique_ptr<CollectionMetadata> CollectionMetadata::clonePlusPending(
     const ChunkType& chunk) const {
     invariant(!rangeMapOverlaps(_chunksMap, chunk.getMin(), chunk.getMax()));
 
-    unique_ptr<CollectionMetadata> metadata(stdx::make_unique<CollectionMetadata>());
-    metadata->_keyPattern = _keyPattern.getOwned();
-    metadata->fillKeyPatternFields();
+    auto metadata(stdx::make_unique<CollectionMetadata>(
+        _keyPattern, getCollVersion(), getShardVersion(), getChunks()));
     metadata->_pendingMap = _pendingMap;
-    metadata->_chunksMap = _chunksMap;
-    metadata->_rangesMap = _rangesMap;
-    metadata->_shardVersion = _shardVersion;
-    metadata->_collVersion = _collVersion;
 
     // If there are any pending chunks on the interval to be added this is ok, since pending chunks
     // aren't officially tracked yet and something may have changed on servers we do not see yet.
@@ -138,25 +148,18 @@ std::unique_ptr<CollectionMetadata> CollectionMetadata::clonePlusPending(
 
     // The pending map entry cannot contain a specific chunk version because we don't know what
    // version would be generated for it at commit time. That's why we insert an IGNORED value.
-    metadata->_pendingMap.insert(
-        make_pair(chunk.getMin(), CachedChunkInfo(chunk.getMax(), ChunkVersion::IGNORED())));
+    metadata->_pendingMap.emplace(chunk.getMin(),
+                                  CachedChunkInfo(chunk.getMax(), ChunkVersion::IGNORED()));
 
-    invariant(metadata->isValid());
     return metadata;
 }
 
 bool CollectionMetadata::keyBelongsToMe(const BSONObj& key) const {
-    // For now, collections don't move. So if the collection is not sharded, assume
-    // the document with the given key can be accessed.
-    if (_keyPattern.isEmpty()) {
-        return true;
-    }
-
-    if (_rangesMap.size() <= 0) {
+    if (_rangesMap.empty()) {
         return false;
     }
 
-    RangeMap::const_iterator it = _rangesMap.upper_bound(key);
+    auto it = _rangesMap.upper_bound(key);
     if (it != _rangesMap.begin())
         it--;
 
@@ -164,21 +167,15 @@ bool CollectionMetadata::keyBelongsToMe(const BSONObj& key) const {
 }
 
 bool CollectionMetadata::keyIsPending(const BSONObj& key) const {
-    // If we aren't sharded, then the key is never pending (though it belongs-to-me)
-    if (_keyPattern.isEmpty()) {
-        return false;
-    }
-
-    if (_pendingMap.size() <= 0) {
+    if (_pendingMap.empty()) {
         return false;
     }
 
-    RangeMap::const_iterator it = _pendingMap.upper_bound(key);
+    auto it = _pendingMap.upper_bound(key);
     if (it != _pendingMap.begin())
         it--;
 
-    bool isPending = rangeContains(it->first, it->second.getMaxKey(), key);
-    return isPending;
+    return rangeContains(it->first, it->second.getMaxKey(), key);
 }
 
 bool CollectionMetadata::getNextChunk(const BSONObj& lookupKey, ChunkType* chunk) const {
@@ -279,15 +276,12 @@ void CollectionMetadata::toBSONPending(BSONArrayBuilder& bb) const {
     }
 }
 
-string CollectionMetadata::toStringBasic() const {
-    return stream() << "collection version: " << _collVersion.toString()
-                    << ", shard version: " << _shardVersion.toString();
+std::string CollectionMetadata::toStringBasic() const {
+    return str::stream() << "collection version: " << _collVersion.toString()
+                         << ", shard version: " << _shardVersion.toString();
 }
 
 bool CollectionMetadata::getNextOrphanRange(const BSONObj& origLookupKey, KeyRange* range) const {
-    if (_keyPattern.isEmpty())
-        return false;
-
     BSONObj lookupKey = origLookupKey;
     BSONObj maxKey = getMaxKey();  // so we don't keep rebuilding
     while (lookupKey.woCompare(maxKey) < 0) {
@@ -382,29 +376,6 @@ BSONObj CollectionMetadata::getMaxKey() const {
     return maxKeyB.obj();
 }
 
-bool CollectionMetadata::isValid() const {
-    if (_shardVersion > _collVersion)
-        return false;
-    if (_collVersion.majorVersion() == 0)
-        return false;
-    if (_collVersion.epoch() != _shardVersion.epoch())
-        return false;
-
-    if (_shardVersion.majorVersion() > 0) {
-        // Must be chunks
-        if (_rangesMap.size() == 0 || _chunksMap.size() == 0)
-            return false;
-    } else {
-        // No chunks
-        if (_shardVersion.minorVersion() > 0)
-            return false;
-        if (_rangesMap.size() > 0 || _chunksMap.size() > 0)
-            return false;
-    }
-
-    return true;
-}
-
 bool CollectionMetadata::isValidKey(const BSONObj& key) const {
     BSONObjIterator it(_keyPattern);
     while (it.more()) {
@@ -415,50 +386,4 @@ bool CollectionMetadata::isValidKey(const BSONObj& key) const {
     return key.nFields() == _keyPattern.nFields();
 }
 
-void CollectionMetadata::fillRanges() {
-    if (_chunksMap.empty())
-        return;
-
-    // Load the chunk information, coallesceing their ranges. The version for this shard would be
-    // the highest version for any of the chunks.
-    BSONObj min, max;
-    for (const auto& entry : _chunksMap) {
-        BSONObj currMin = entry.first;
-        BSONObj currMax = entry.second.getMaxKey();
-
-        // coalesce the chunk's bounds in ranges if they are adjacent chunks
-        if (min.isEmpty()) {
-            min = currMin;
-            max = currMax;
-            continue;
-        }
-        if (SimpleBSONObjComparator::kInstance.evaluate(max == currMin)) {
-            max = currMax;
-            continue;
-        }
-
-        _rangesMap.insert(make_pair(min, CachedChunkInfo(max, ChunkVersion::IGNORED())));
-
-        min = currMin;
-        max = currMax;
-    }
-
-    invariant(!min.isEmpty());
-    invariant(!max.isEmpty());
-
-    _rangesMap.insert(make_pair(min, CachedChunkInfo(max, ChunkVersion::IGNORED())));
-}
-
-void CollectionMetadata::fillKeyPatternFields() {
-    // Parse the shard keys into the states 'keys' and 'keySet' members.
-    BSONObjIterator patternIter = _keyPattern.begin();
-    while (patternIter.more()) {
-        BSONElement current = patternIter.next();
-
-        _keyFields.push_back(stdx::make_unique<FieldRef>());
-        FieldRef* const newFieldRef = _keyFields.back().get();
-        newFieldRef->parse(current.fieldNameStringData());
-    }
-}
-
 }  // namespace mongo
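
Note on the change above: the range-coalescing pass that used to live in fillRanges() now runs inside the new four-argument constructor, walking _chunksMap in shard-key order and merging chunks whose bounds touch. The following is a hypothetical, minimal sketch of that loop, with plain std::map<int, int> intervals standing in for the BSONObj-keyed RangeMap (an assumed simplification, not MongoDB code):

// Hypothetical, simplified sketch of the range-coalescing loop the new constructor
// runs over _chunksMap. NOT MongoDB code: int bounds stand in for the BSONObj
// shard-key bounds, and std::map stands in for RangeMap.
#include <cassert>
#include <map>

// 'chunks' maps each chunk's min key to its max key, ordered by min.
std::map<int, int> coalesceRanges(const std::map<int, int>& chunks) {
    std::map<int, int> ranges;
    bool haveOpenRange = false;
    int min = 0;
    int max = 0;

    for (const auto& [currMin, currMax] : chunks) {
        if (!haveOpenRange) {
            // The first chunk opens the first range.
            min = currMin;
            max = currMax;
            haveOpenRange = true;
            continue;
        }

        if (currMin == max) {
            // Adjacent chunk: extend the open range instead of recording a new one.
            max = currMax;
            continue;
        }

        // Gap between chunks: close the open range and start a new one.
        ranges.emplace(min, max);
        min = currMin;
        max = currMax;
    }

    if (haveOpenRange) {
        ranges.emplace(min, max);
    }

    return ranges;
}

int main() {
    // [0,10) and [10,20) are adjacent and collapse into [0,20); [30,40) stays separate.
    const std::map<int, int> chunks{{0, 10}, {10, 20}, {30, 40}};
    const auto ranges = coalesceRanges(chunks);
    assert(ranges.size() == 2);
    assert(ranges.at(0) == 20);
    assert(ranges.at(30) == 40);
    return 0;
}

The real constructor does not need the empty-map guard at the end because it returns early when _chunksMap is empty, which is why it can assert !min.isEmpty() and emplace the final range unconditionally.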
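
Similarly, keyBelongsToMe() and keyIsPending() both answer point-in-range queries the same way: upper_bound() on the relevant range map, step back one entry, then rangeContains(). A hypothetical sketch of that lookup under the same integer-key simplification (not the actual MongoDB API):

// Hypothetical, simplified sketch of the upper_bound-and-step-back lookup used by
// keyBelongsToMe() and keyIsPending(). NOT the MongoDB API: int keys stand in for
// BSONObj shard keys, and ranges are half-open [min, max) intervals.
#include <cassert>
#include <map>

bool rangeMapCovers(const std::map<int, int>& ranges, int key) {
    if (ranges.empty()) {
        return false;
    }

    // upper_bound finds the first range whose min is strictly greater than the key;
    // stepping back one entry lands on the only range that could contain the key.
    auto it = ranges.upper_bound(key);
    if (it != ranges.begin()) {
        --it;
    }

    return it->first <= key && key < it->second;
}

int main() {
    const std::map<int, int> ranges{{0, 20}, {30, 40}};
    assert(rangeMapCovers(ranges, 5));    // inside [0, 20)
    assert(!rangeMapCovers(ranges, 25));  // falls in the gap between the two ranges
    assert(!rangeMapCovers(ranges, 40));  // the max bound is exclusive
    assert(!rangeMapCovers(ranges, -1));  // below the first range
    return 0;
}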