Diffstat (limited to 'src/mongo/s/collection_metadata.cpp')
-rw-r--r--  src/mongo/s/collection_metadata.cpp | 1120
1 file changed, 543 insertions(+), 577 deletions(-)
diff --git a/src/mongo/s/collection_metadata.cpp b/src/mongo/s/collection_metadata.cpp
index 98f62c90a01..05d448dc239 100644
--- a/src/mongo/s/collection_metadata.cpp
+++ b/src/mongo/s/collection_metadata.cpp
@@ -38,431 +38,398 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::make_pair;
- using std::string;
- using std::vector;
+using std::unique_ptr;
+using std::endl;
+using std::make_pair;
+using std::string;
+using std::vector;
- using mongoutils::str::stream;
+using mongoutils::str::stream;
- CollectionMetadata::CollectionMetadata() { }
+CollectionMetadata::CollectionMetadata() {}
- CollectionMetadata::~CollectionMetadata() { }
+CollectionMetadata::~CollectionMetadata() {}
- CollectionMetadata* CollectionMetadata::cloneMigrate( const ChunkType& chunk,
- const ChunkVersion& newShardVersion,
- string* errMsg ) const {
- // The error message string is optional.
- string dummy;
- if (errMsg == NULL) {
- errMsg = &dummy;
- }
+CollectionMetadata* CollectionMetadata::cloneMigrate(const ChunkType& chunk,
+ const ChunkVersion& newShardVersion,
+ string* errMsg) const {
+ // The error message string is optional.
+ string dummy;
+ if (errMsg == NULL) {
+ errMsg = &dummy;
+ }
- // Check that we have the exact chunk that will be subtracted.
- if ( !rangeMapContains( _chunksMap, chunk.getMin(), chunk.getMax() ) ) {
+ // Check that we have the exact chunk that will be subtracted.
+ if (!rangeMapContains(_chunksMap, chunk.getMin(), chunk.getMax())) {
+ *errMsg = stream() << "cannot remove chunk "
+ << rangeToString(chunk.getMin(), chunk.getMax())
+ << ", this shard does not contain the chunk";
- *errMsg = stream() << "cannot remove chunk "
- << rangeToString( chunk.getMin(), chunk.getMax() )
- << ", this shard does not contain the chunk";
+ if (rangeMapOverlaps(_chunksMap, chunk.getMin(), chunk.getMax())) {
+ RangeVector overlap;
+ getRangeMapOverlap(_chunksMap, chunk.getMin(), chunk.getMax(), &overlap);
- if ( rangeMapOverlaps( _chunksMap, chunk.getMin(), chunk.getMax() ) ) {
+ *errMsg += stream() << " and it overlaps " << overlapToString(overlap);
+ }
- RangeVector overlap;
- getRangeMapOverlap( _chunksMap, chunk.getMin(), chunk.getMax(), &overlap );
+ warning() << *errMsg << endl;
+ return NULL;
+ }
- *errMsg += stream() << " and it overlaps " << overlapToString( overlap );
- }
+ // If left with no chunks, check that the version is zero.
+ if (_chunksMap.size() == 1) {
+ if (newShardVersion.isSet()) {
+ *errMsg = stream() << "cannot set shard version to non-zero value "
+ << newShardVersion.toString() << " when removing last chunk "
+ << rangeToString(chunk.getMin(), chunk.getMax());
warning() << *errMsg << endl;
return NULL;
}
+ }
+ // Can't move version backwards when subtracting chunks. This is what guarantees that
+ // no read or write would be taken once we subtract data from the current shard.
+ else if (newShardVersion <= _shardVersion) {
+ *errMsg = stream() << "cannot remove chunk "
+ << rangeToString(chunk.getMin(), chunk.getMax())
+ << " because the new shard version " << newShardVersion.toString()
+ << " is not greater than the current shard version "
+ << _shardVersion.toString();
+
+ warning() << *errMsg << endl;
+ return NULL;
+ }
- // If left with no chunks, check that the version is zero.
- if (_chunksMap.size() == 1) {
- if (newShardVersion.isSet()) {
-
- *errMsg = stream() << "cannot set shard version to non-zero value "
- << newShardVersion.toString() << " when removing last chunk "
- << rangeToString( chunk.getMin(), chunk.getMax() );
-
- warning() << *errMsg << endl;
- return NULL;
- }
- }
- // Can't move version backwards when subtracting chunks. This is what guarantees that
- // no read or write would be taken once we subtract data from the current shard.
- else if (newShardVersion <= _shardVersion) {
+ unique_ptr<CollectionMetadata> metadata(new CollectionMetadata);
+ metadata->_keyPattern = this->_keyPattern;
+ metadata->_keyPattern.getOwned();
+ metadata->fillKeyPatternFields();
+ metadata->_pendingMap = this->_pendingMap;
+ metadata->_chunksMap = this->_chunksMap;
+ metadata->_chunksMap.erase(chunk.getMin());
+ metadata->_shardVersion = newShardVersion;
+ metadata->_collVersion = newShardVersion > _collVersion ? newShardVersion : this->_collVersion;
+ metadata->fillRanges();
+
+ invariant(metadata->isValid());
+ return metadata.release();
+}
+
+CollectionMetadata* CollectionMetadata::clonePlusChunk(const ChunkType& chunk,
+ const ChunkVersion& newShardVersion,
+ string* errMsg) const {
+ // The error message string is optional.
+ string dummy;
+ if (errMsg == NULL) {
+ errMsg = &dummy;
+ }
- *errMsg = stream() << "cannot remove chunk "
- << rangeToString( chunk.getMin(), chunk.getMax() )
- << " because the new shard version " << newShardVersion.toString()
- << " is not greater than the current shard version "
- << _shardVersion.toString();
+ // It is acceptable to move version backwards (e.g., undoing a migration that went bad
+ // during commit) but only cloning away the last chunk may reset the version to 0.
+ if (!newShardVersion.isSet()) {
+ *errMsg = stream() << "cannot add chunk " << rangeToString(chunk.getMin(), chunk.getMax())
+ << " with zero shard version";
- warning() << *errMsg << endl;
- return NULL;
- }
+ warning() << *errMsg << endl;
+ return NULL;
+ }
- unique_ptr<CollectionMetadata> metadata( new CollectionMetadata );
- metadata->_keyPattern = this->_keyPattern;
- metadata->_keyPattern.getOwned();
- metadata->fillKeyPatternFields();
- metadata->_pendingMap = this->_pendingMap;
- metadata->_chunksMap = this->_chunksMap;
- metadata->_chunksMap.erase( chunk.getMin() );
- metadata->_shardVersion = newShardVersion;
- metadata->_collVersion =
- newShardVersion > _collVersion ? newShardVersion : this->_collVersion;
- metadata->fillRanges();
-
- invariant(metadata->isValid());
- return metadata.release();
- }
-
- CollectionMetadata* CollectionMetadata::clonePlusChunk( const ChunkType& chunk,
- const ChunkVersion& newShardVersion,
- string* errMsg ) const {
- // The error message string is optional.
- string dummy;
- if (errMsg == NULL) {
- errMsg = &dummy;
- }
+ invariant(chunk.getMin().woCompare(chunk.getMax()) < 0);
- // It is acceptable to move version backwards (e.g., undoing a migration that went bad
- // during commit) but only cloning away the last chunk may reset the version to 0.
- if (!newShardVersion.isSet()) {
+ // Check that there isn't any chunk on the interval to be added.
+ if (rangeMapOverlaps(_chunksMap, chunk.getMin(), chunk.getMax())) {
+ RangeVector overlap;
+ getRangeMapOverlap(_chunksMap, chunk.getMin(), chunk.getMax(), &overlap);
- *errMsg = stream() << "cannot add chunk "
- << rangeToString( chunk.getMin(), chunk.getMax() )
- << " with zero shard version";
+ *errMsg = stream() << "cannot add chunk " << rangeToString(chunk.getMin(), chunk.getMax())
+ << " because the chunk overlaps " << overlapToString(overlap);
- warning() << *errMsg << endl;
- return NULL;
- }
+ warning() << *errMsg << endl;
+ return NULL;
+ }
- invariant( chunk.getMin().woCompare(chunk.getMax()) < 0 );
+ unique_ptr<CollectionMetadata> metadata(new CollectionMetadata);
+ metadata->_keyPattern = this->_keyPattern;
+ metadata->_keyPattern.getOwned();
+ metadata->fillKeyPatternFields();
+ metadata->_pendingMap = this->_pendingMap;
+ metadata->_chunksMap = this->_chunksMap;
+ metadata->_chunksMap.insert(make_pair(chunk.getMin().getOwned(), chunk.getMax().getOwned()));
+ metadata->_shardVersion = newShardVersion;
+ metadata->_collVersion = newShardVersion > _collVersion ? newShardVersion : this->_collVersion;
+ metadata->fillRanges();
+
+ invariant(metadata->isValid());
+ return metadata.release();
+}
+
+CollectionMetadata* CollectionMetadata::cloneMinusPending(const ChunkType& pending,
+ string* errMsg) const {
+ // The error message string is optional.
+ string dummy;
+ if (errMsg == NULL) {
+ errMsg = &dummy;
+ }
- // Check that there isn't any chunk on the interval to be added.
- if ( rangeMapOverlaps( _chunksMap, chunk.getMin(), chunk.getMax() ) ) {
+ // Check that we have the exact chunk that will be subtracted.
+ if (!rangeMapContains(_pendingMap, pending.getMin(), pending.getMax())) {
+ *errMsg = stream() << "cannot remove pending chunk "
+ << rangeToString(pending.getMin(), pending.getMax())
+ << ", this shard does not contain the chunk";
+ if (rangeMapOverlaps(_pendingMap, pending.getMin(), pending.getMax())) {
RangeVector overlap;
- getRangeMapOverlap( _chunksMap, chunk.getMin(), chunk.getMax(), &overlap );
-
- *errMsg = stream() << "cannot add chunk "
- << rangeToString( chunk.getMin(), chunk.getMax() )
- << " because the chunk overlaps " << overlapToString( overlap );
+ getRangeMapOverlap(_pendingMap, pending.getMin(), pending.getMax(), &overlap);
- warning() << *errMsg << endl;
- return NULL;
+ *errMsg += stream() << " and it overlaps " << overlapToString(overlap);
}
- unique_ptr<CollectionMetadata> metadata( new CollectionMetadata );
- metadata->_keyPattern = this->_keyPattern;
- metadata->_keyPattern.getOwned();
- metadata->fillKeyPatternFields();
- metadata->_pendingMap = this->_pendingMap;
- metadata->_chunksMap = this->_chunksMap;
- metadata->_chunksMap.insert( make_pair( chunk.getMin().getOwned(),
- chunk.getMax().getOwned() ) );
- metadata->_shardVersion = newShardVersion;
- metadata->_collVersion =
- newShardVersion > _collVersion ? newShardVersion : this->_collVersion;
- metadata->fillRanges();
-
- invariant(metadata->isValid());
- return metadata.release();
- }
-
- CollectionMetadata* CollectionMetadata::cloneMinusPending( const ChunkType& pending,
- string* errMsg ) const {
- // The error message string is optional.
- string dummy;
- if ( errMsg == NULL ) {
- errMsg = &dummy;
- }
-
- // Check that we have the exact chunk that will be subtracted.
- if ( !rangeMapContains( _pendingMap, pending.getMin(), pending.getMax() ) ) {
-
- *errMsg = stream() << "cannot remove pending chunk "
- << rangeToString( pending.getMin(), pending.getMax() )
- << ", this shard does not contain the chunk";
-
- if ( rangeMapOverlaps( _pendingMap, pending.getMin(), pending.getMax() ) ) {
-
- RangeVector overlap;
- getRangeMapOverlap( _pendingMap, pending.getMin(), pending.getMax(), &overlap );
-
- *errMsg += stream() << " and it overlaps " << overlapToString( overlap );
- }
-
- warning() << *errMsg << endl;
- return NULL;
- }
+ warning() << *errMsg << endl;
+ return NULL;
+ }
- unique_ptr<CollectionMetadata> metadata( new CollectionMetadata );
- metadata->_keyPattern = this->_keyPattern;
- metadata->_keyPattern.getOwned();
- metadata->fillKeyPatternFields();
- metadata->_pendingMap = this->_pendingMap;
- metadata->_pendingMap.erase( pending.getMin() );
- metadata->_chunksMap = this->_chunksMap;
- metadata->_rangesMap = this->_rangesMap;
- metadata->_shardVersion = _shardVersion;
- metadata->_collVersion = _collVersion;
-
- invariant(metadata->isValid());
- return metadata.release();
- }
-
- CollectionMetadata* CollectionMetadata::clonePlusPending( const ChunkType& pending,
- string* errMsg ) const {
- // The error message string is optional.
- string dummy;
- if ( errMsg == NULL ) {
- errMsg = &dummy;
- }
+ unique_ptr<CollectionMetadata> metadata(new CollectionMetadata);
+ metadata->_keyPattern = this->_keyPattern;
+ metadata->_keyPattern.getOwned();
+ metadata->fillKeyPatternFields();
+ metadata->_pendingMap = this->_pendingMap;
+ metadata->_pendingMap.erase(pending.getMin());
+ metadata->_chunksMap = this->_chunksMap;
+ metadata->_rangesMap = this->_rangesMap;
+ metadata->_shardVersion = _shardVersion;
+ metadata->_collVersion = _collVersion;
+
+ invariant(metadata->isValid());
+ return metadata.release();
+}
+
+CollectionMetadata* CollectionMetadata::clonePlusPending(const ChunkType& pending,
+ string* errMsg) const {
+ // The error message string is optional.
+ string dummy;
+ if (errMsg == NULL) {
+ errMsg = &dummy;
+ }
- if ( rangeMapOverlaps( _chunksMap, pending.getMin(), pending.getMax() ) ) {
+ if (rangeMapOverlaps(_chunksMap, pending.getMin(), pending.getMax())) {
+ RangeVector overlap;
+ getRangeMapOverlap(_chunksMap, pending.getMin(), pending.getMax(), &overlap);
- RangeVector overlap;
- getRangeMapOverlap( _chunksMap, pending.getMin(), pending.getMax(), &overlap );
+ *errMsg = stream() << "cannot add pending chunk "
+ << rangeToString(pending.getMin(), pending.getMax())
+ << " because the chunk overlaps " << overlapToString(overlap);
- *errMsg = stream() << "cannot add pending chunk "
- << rangeToString( pending.getMin(), pending.getMax() )
- << " because the chunk overlaps " << overlapToString( overlap );
+ warning() << *errMsg << endl;
+ return NULL;
+ }
- warning() << *errMsg << endl;
- return NULL;
+ unique_ptr<CollectionMetadata> metadata(new CollectionMetadata);
+ metadata->_keyPattern = this->_keyPattern;
+ metadata->_keyPattern.getOwned();
+ metadata->fillKeyPatternFields();
+ metadata->_pendingMap = this->_pendingMap;
+ metadata->_chunksMap = this->_chunksMap;
+ metadata->_rangesMap = this->_rangesMap;
+ metadata->_shardVersion = _shardVersion;
+ metadata->_collVersion = _collVersion;
+
+ // If there are any pending chunks on the interval to be added this is ok, since pending
+ // chunks aren't officially tracked yet and something may have changed on servers we do not
+ // see yet.
+ // We remove any chunks we overlap; the remote request starting a chunk migration must have
+ // been authoritative.
+
+ if (rangeMapOverlaps(_pendingMap, pending.getMin(), pending.getMax())) {
+ RangeVector pendingOverlap;
+ getRangeMapOverlap(_pendingMap, pending.getMin(), pending.getMax(), &pendingOverlap);
+
+ warning() << "new pending chunk " << rangeToString(pending.getMin(), pending.getMax())
+ << " overlaps existing pending chunks " << overlapToString(pendingOverlap)
+ << ", a migration may not have completed" << endl;
+
+ for (RangeVector::iterator it = pendingOverlap.begin(); it != pendingOverlap.end(); ++it) {
+ metadata->_pendingMap.erase(it->first);
}
+ }
- unique_ptr<CollectionMetadata> metadata( new CollectionMetadata );
- metadata->_keyPattern = this->_keyPattern;
- metadata->_keyPattern.getOwned();
- metadata->fillKeyPatternFields();
- metadata->_pendingMap = this->_pendingMap;
- metadata->_chunksMap = this->_chunksMap;
- metadata->_rangesMap = this->_rangesMap;
- metadata->_shardVersion = _shardVersion;
- metadata->_collVersion = _collVersion;
-
- // If there are any pending chunks on the interval to be added this is ok, since pending
- // chunks aren't officially tracked yet and something may have changed on servers we do not
- // see yet.
- // We remove any chunks we overlap; the remote request starting a chunk migration must have
- // been authoritative.
-
- if ( rangeMapOverlaps( _pendingMap, pending.getMin(), pending.getMax() ) ) {
-
- RangeVector pendingOverlap;
- getRangeMapOverlap( _pendingMap, pending.getMin(), pending.getMax(), &pendingOverlap );
-
- warning() << "new pending chunk " << rangeToString( pending.getMin(), pending.getMax() )
- << " overlaps existing pending chunks " << overlapToString( pendingOverlap )
- << ", a migration may not have completed" << endl;
-
- for ( RangeVector::iterator it = pendingOverlap.begin(); it != pendingOverlap.end();
- ++it ) {
- metadata->_pendingMap.erase( it->first );
- }
- }
+ metadata->_pendingMap.insert(make_pair(pending.getMin(), pending.getMax()));
- metadata->_pendingMap.insert( make_pair( pending.getMin(), pending.getMax() ) );
+ invariant(metadata->isValid());
+ return metadata.release();
+}
- invariant(metadata->isValid());
- return metadata.release();
+CollectionMetadata* CollectionMetadata::cloneSplit(const ChunkType& chunk,
+ const vector<BSONObj>& splitKeys,
+ const ChunkVersion& newShardVersion,
+ string* errMsg) const {
+ // The error message string is optional.
+ string dummy;
+ if (errMsg == NULL) {
+ errMsg = &dummy;
}
- CollectionMetadata* CollectionMetadata::cloneSplit( const ChunkType& chunk,
- const vector<BSONObj>& splitKeys,
- const ChunkVersion& newShardVersion,
- string* errMsg ) const {
- // The error message string is optional.
- string dummy;
- if (errMsg == NULL) {
- errMsg = &dummy;
- }
+ // The version required in both resulting chunks could be simply an increment in the
+ // minor portion of the current version. However, we are enforcing uniqueness over the
+ // attributes <ns, version> of the configdb collection 'chunks'. So in practice, a
+ // migrate somewhere may force this split to pick up a version that has the major
+ // portion higher than the one that this shard has been using.
+ //
+ // TODO drop the uniqueness constraint and tighten the check below so that only the
+ // minor portion of version changes
+ if (newShardVersion <= _shardVersion) {
+ *errMsg = stream() << "cannot split chunk " << rangeToString(chunk.getMin(), chunk.getMax())
+ << ", new shard version " << newShardVersion.toString()
+ << " is not greater than current version " << _shardVersion.toString();
+
+ warning() << *errMsg << endl;
+ return NULL;
+ }
- // The version required in both resulting chunks could be simply an increment in the
- // minor portion of the current version. However, we are enforcing uniqueness over the
- // attributes <ns, version> of the configdb collection 'chunks'. So in practice, a
- // migrate somewhere may force this split to pick up a version that has the major
- // portion higher than the one that this shard has been using.
- //
- // TODO drop the uniqueness constraint and tighten the check below so that only the
- // minor portion of version changes
- if (newShardVersion <= _shardVersion) {
+ // Check that we have the exact chunk that will be subtracted.
+ if (!rangeMapContains(_chunksMap, chunk.getMin(), chunk.getMax())) {
+ *errMsg = stream() << "cannot split chunk " << rangeToString(chunk.getMin(), chunk.getMax())
+ << ", this shard does not contain the chunk";
- *errMsg = stream() << "cannot split chunk "
- << rangeToString( chunk.getMin(), chunk.getMax() )
- << ", new shard version "
- << newShardVersion.toString()
- << " is not greater than current version "
- << _shardVersion.toString();
+ if (rangeMapOverlaps(_chunksMap, chunk.getMin(), chunk.getMax())) {
+ RangeVector overlap;
+ getRangeMapOverlap(_chunksMap, chunk.getMin(), chunk.getMax(), &overlap);
- warning() << *errMsg << endl;
- return NULL;
+ *errMsg += stream() << " and it overlaps " << overlapToString(overlap);
}
- // Check that we have the exact chunk that will be subtracted.
- if ( !rangeMapContains( _chunksMap, chunk.getMin(), chunk.getMax() ) ) {
+ warning() << *errMsg << endl;
+ return NULL;
+ }
+ // Check that the split key is valid
+ for (vector<BSONObj>::const_iterator it = splitKeys.begin(); it != splitKeys.end(); ++it) {
+ if (!rangeContains(chunk.getMin(), chunk.getMax(), *it)) {
*errMsg = stream() << "cannot split chunk "
- << rangeToString( chunk.getMin(), chunk.getMax() )
- << ", this shard does not contain the chunk";
-
- if ( rangeMapOverlaps( _chunksMap, chunk.getMin(), chunk.getMax() ) ) {
-
- RangeVector overlap;
- getRangeMapOverlap( _chunksMap, chunk.getMin(), chunk.getMax(), &overlap );
-
- *errMsg += stream() << " and it overlaps " << overlapToString( overlap );
- }
+ << rangeToString(chunk.getMin(), chunk.getMax()) << " at key "
+ << *it;
warning() << *errMsg << endl;
return NULL;
}
-
- // Check that the split key is valid
- for ( vector<BSONObj>::const_iterator it = splitKeys.begin(); it != splitKeys.end(); ++it )
- {
- if (!rangeContains(chunk.getMin(), chunk.getMax(), *it)) {
-
- *errMsg = stream() << "cannot split chunk "
- << rangeToString( chunk.getMin(), chunk.getMax() ) << " at key "
- << *it;
-
- warning() << *errMsg << endl;
- return NULL;
- }
- }
-
- unique_ptr<CollectionMetadata> metadata(new CollectionMetadata);
- metadata->_keyPattern = this->_keyPattern;
- metadata->_keyPattern.getOwned();
- metadata->fillKeyPatternFields();
- metadata->_pendingMap = this->_pendingMap;
- metadata->_chunksMap = this->_chunksMap;
- metadata->_shardVersion = newShardVersion; // will increment 2nd, 3rd,... chunks below
-
- BSONObj startKey = chunk.getMin();
- for ( vector<BSONObj>::const_iterator it = splitKeys.begin(); it != splitKeys.end();
- ++it ) {
- BSONObj split = *it;
- invariant(split.woCompare(startKey) > 0);
- metadata->_chunksMap[startKey] = split.getOwned();
- metadata->_chunksMap.insert( make_pair( split.getOwned(), chunk.getMax().getOwned() ));
- metadata->_shardVersion.incMinor();
- startKey = split;
- }
-
- metadata->_collVersion =
- metadata->_shardVersion > _collVersion ? metadata->_shardVersion : _collVersion;
- metadata->fillRanges();
-
- invariant(metadata->isValid());
- return metadata.release();
}
- CollectionMetadata* CollectionMetadata::cloneMerge( const BSONObj& minKey,
- const BSONObj& maxKey,
- const ChunkVersion& newShardVersion,
- string* errMsg ) const {
-
- if (newShardVersion <= _shardVersion) {
-
- *errMsg = stream() << "cannot merge range " << rangeToString( minKey, maxKey )
- << ", new shard version " << newShardVersion.toString()
- << " is not greater than current version "
- << _shardVersion.toString();
-
- warning() << *errMsg << endl;
- return NULL;
- }
+ unique_ptr<CollectionMetadata> metadata(new CollectionMetadata);
+ metadata->_keyPattern = this->_keyPattern;
+ metadata->_keyPattern.getOwned();
+ metadata->fillKeyPatternFields();
+ metadata->_pendingMap = this->_pendingMap;
+ metadata->_chunksMap = this->_chunksMap;
+ metadata->_shardVersion = newShardVersion; // will increment 2nd, 3rd,... chunks below
+
+ BSONObj startKey = chunk.getMin();
+ for (vector<BSONObj>::const_iterator it = splitKeys.begin(); it != splitKeys.end(); ++it) {
+ BSONObj split = *it;
+ invariant(split.woCompare(startKey) > 0);
+ metadata->_chunksMap[startKey] = split.getOwned();
+ metadata->_chunksMap.insert(make_pair(split.getOwned(), chunk.getMax().getOwned()));
+ metadata->_shardVersion.incMinor();
+ startKey = split;
+ }
- RangeVector overlap;
- getRangeMapOverlap( _chunksMap, minKey, maxKey, &overlap );
+ metadata->_collVersion =
+ metadata->_shardVersion > _collVersion ? metadata->_shardVersion : _collVersion;
+ metadata->fillRanges();
+
+ invariant(metadata->isValid());
+ return metadata.release();
+}
+
+CollectionMetadata* CollectionMetadata::cloneMerge(const BSONObj& minKey,
+ const BSONObj& maxKey,
+ const ChunkVersion& newShardVersion,
+ string* errMsg) const {
+ if (newShardVersion <= _shardVersion) {
+ *errMsg = stream() << "cannot merge range " << rangeToString(minKey, maxKey)
+ << ", new shard version " << newShardVersion.toString()
+ << " is not greater than current version " << _shardVersion.toString();
+
+ warning() << *errMsg << endl;
+ return NULL;
+ }
- if ( overlap.empty() || overlap.size() == 1 ) {
+ RangeVector overlap;
+ getRangeMapOverlap(_chunksMap, minKey, maxKey, &overlap);
- *errMsg = stream() << "cannot merge range " << rangeToString( minKey, maxKey )
- << ( overlap.empty() ? ", no chunks found in this range" :
- ", only one chunk found in this range" );
+ if (overlap.empty() || overlap.size() == 1) {
+ *errMsg = stream() << "cannot merge range " << rangeToString(minKey, maxKey)
+ << (overlap.empty() ? ", no chunks found in this range"
+ : ", only one chunk found in this range");
- warning() << *errMsg << endl;
- return NULL;
- }
+ warning() << *errMsg << endl;
+ return NULL;
+ }
- bool validStartEnd = true;
- bool validNoHoles = true;
- if ( overlap.begin()->first.woCompare( minKey ) != 0 ) {
- // First chunk doesn't start with minKey
- validStartEnd = false;
- }
- else if ( overlap.rbegin()->second.woCompare( maxKey ) != 0 ) {
- // Last chunk doesn't end with maxKey
- validStartEnd = false;
- }
- else {
- // Check that there are no holes
- BSONObj prevMaxKey = minKey;
- for ( RangeVector::iterator it = overlap.begin(); it != overlap.end(); ++it ) {
- if ( it->first.woCompare( prevMaxKey ) != 0 ) {
- validNoHoles = false;
- break;
- }
- prevMaxKey = it->second;
+ bool validStartEnd = true;
+ bool validNoHoles = true;
+ if (overlap.begin()->first.woCompare(minKey) != 0) {
+ // First chunk doesn't start with minKey
+ validStartEnd = false;
+ } else if (overlap.rbegin()->second.woCompare(maxKey) != 0) {
+ // Last chunk doesn't end with maxKey
+ validStartEnd = false;
+ } else {
+ // Check that there are no holes
+ BSONObj prevMaxKey = minKey;
+ for (RangeVector::iterator it = overlap.begin(); it != overlap.end(); ++it) {
+ if (it->first.woCompare(prevMaxKey) != 0) {
+ validNoHoles = false;
+ break;
}
+ prevMaxKey = it->second;
}
+ }
- if ( !validStartEnd || !validNoHoles ) {
+ if (!validStartEnd || !validNoHoles) {
+ *errMsg = stream() << "cannot merge range " << rangeToString(minKey, maxKey)
+ << ", overlapping chunks " << overlapToString(overlap)
+ << (!validStartEnd ? " do not have the same min and max key"
+ : " are not all adjacent");
- *errMsg = stream() << "cannot merge range " << rangeToString( minKey, maxKey )
- << ", overlapping chunks " << overlapToString( overlap )
- << ( !validStartEnd ? " do not have the same min and max key" :
- " are not all adjacent" );
+ warning() << *errMsg << endl;
+ return NULL;
+ }
- warning() << *errMsg << endl;
- return NULL;
- }
+ unique_ptr<CollectionMetadata> metadata(new CollectionMetadata);
+ metadata->_keyPattern = this->_keyPattern;
+ metadata->_keyPattern.getOwned();
+ metadata->fillKeyPatternFields();
+ metadata->_pendingMap = this->_pendingMap;
+ metadata->_chunksMap = this->_chunksMap;
+ metadata->_rangesMap = this->_rangesMap;
+ metadata->_shardVersion = newShardVersion;
+ metadata->_collVersion = newShardVersion > _collVersion ? newShardVersion : this->_collVersion;
+
+ for (RangeVector::iterator it = overlap.begin(); it != overlap.end(); ++it) {
+ metadata->_chunksMap.erase(it->first);
+ }
- unique_ptr<CollectionMetadata> metadata( new CollectionMetadata );
- metadata->_keyPattern = this->_keyPattern;
- metadata->_keyPattern.getOwned();
- metadata->fillKeyPatternFields();
- metadata->_pendingMap = this->_pendingMap;
- metadata->_chunksMap = this->_chunksMap;
- metadata->_rangesMap = this->_rangesMap;
- metadata->_shardVersion = newShardVersion;
- metadata->_collVersion =
- newShardVersion > _collVersion ? newShardVersion : this->_collVersion;
-
- for ( RangeVector::iterator it = overlap.begin(); it != overlap.end(); ++it ) {
- metadata->_chunksMap.erase( it->first );
- }
+ metadata->_chunksMap.insert(make_pair(minKey, maxKey));
- metadata->_chunksMap.insert( make_pair( minKey, maxKey ) );
+ invariant(metadata->isValid());
+ return metadata.release();
+}
- invariant(metadata->isValid());
- return metadata.release();
+bool CollectionMetadata::keyBelongsToMe(const BSONObj& key) const {
+ // For now, collections don't move. So if the collection is not sharded, assume
+ // the document with the given key can be accessed.
+ if (_keyPattern.isEmpty()) {
+ return true;
}
- bool CollectionMetadata::keyBelongsToMe( const BSONObj& key ) const {
- // For now, collections don't move. So if the collection is not sharded, assume
- // the document with the given key can be accessed.
- if ( _keyPattern.isEmpty() ) {
- return true;
- }
-
- if ( _rangesMap.size() <= 0 ) {
- return false;
- }
+ if (_rangesMap.size() <= 0) {
+ return false;
+ }
- RangeMap::const_iterator it = _rangesMap.upper_bound( key );
- if ( it != _rangesMap.begin() ) it--;
+ RangeMap::const_iterator it = _rangesMap.upper_bound(key);
+ if (it != _rangesMap.begin())
+ it--;
- bool good = rangeContains( it->first, it->second, key );
+ bool good = rangeContains(it->first, it->second, key);
#if 0
// DISABLED because of SERVER-11175 - huge amount of logging
@@ -477,286 +444,285 @@ namespace mongo {
}
#endif
- return good;
- }
-
- bool CollectionMetadata::keyIsPending( const BSONObj& key ) const {
- // If we aren't sharded, then the key is never pending (though it belongs-to-me)
- if ( _keyPattern.isEmpty() ) {
- return false;
- }
-
- if ( _pendingMap.size() <= 0 ) {
- return false;
- }
-
- RangeMap::const_iterator it = _pendingMap.upper_bound( key );
- if ( it != _pendingMap.begin() ) it--;
+ return good;
+}
- bool isPending = rangeContains( it->first, it->second, key );
- return isPending;
+bool CollectionMetadata::keyIsPending(const BSONObj& key) const {
+ // If we aren't sharded, then the key is never pending (though it belongs-to-me)
+ if (_keyPattern.isEmpty()) {
+ return false;
}
- bool CollectionMetadata::getNextChunk( const BSONObj& lookupKey, ChunkType* chunk ) const {
- RangeMap::const_iterator upperChunkIt = _chunksMap.upper_bound(lookupKey);
- RangeMap::const_iterator lowerChunkIt = upperChunkIt;
+ if (_pendingMap.size() <= 0) {
+ return false;
+ }
- if (upperChunkIt != _chunksMap.begin()) {
- --lowerChunkIt;
- }
- else {
- lowerChunkIt = _chunksMap.end();
- }
+ RangeMap::const_iterator it = _pendingMap.upper_bound(key);
+ if (it != _pendingMap.begin())
+ it--;
- if (lowerChunkIt != _chunksMap.end() && lowerChunkIt->second.woCompare(lookupKey) > 0) {
- chunk->setMin(lowerChunkIt->first);
- chunk->setMax(lowerChunkIt->second);
- return true;
- }
+ bool isPending = rangeContains(it->first, it->second, key);
+ return isPending;
+}
- if (upperChunkIt != _chunksMap.end()) {
- chunk->setMin(upperChunkIt->first);
- chunk->setMax(upperChunkIt->second);
- return true;
- }
+bool CollectionMetadata::getNextChunk(const BSONObj& lookupKey, ChunkType* chunk) const {
+ RangeMap::const_iterator upperChunkIt = _chunksMap.upper_bound(lookupKey);
+ RangeMap::const_iterator lowerChunkIt = upperChunkIt;
- return false;
+ if (upperChunkIt != _chunksMap.begin()) {
+ --lowerChunkIt;
+ } else {
+ lowerChunkIt = _chunksMap.end();
}
- BSONObj CollectionMetadata::toBSON() const {
- BSONObjBuilder bb;
- toBSON( bb );
- return bb.obj();
+ if (lowerChunkIt != _chunksMap.end() && lowerChunkIt->second.woCompare(lookupKey) > 0) {
+ chunk->setMin(lowerChunkIt->first);
+ chunk->setMax(lowerChunkIt->second);
+ return true;
}
- void CollectionMetadata::toBSONChunks( BSONArrayBuilder& bb ) const {
-
- if ( _chunksMap.empty() ) return;
-
- for (RangeMap::const_iterator it = _chunksMap.begin(); it != _chunksMap.end(); ++it ) {
- BSONArrayBuilder chunkBB( bb.subarrayStart() );
- chunkBB.append( it->first );
- chunkBB.append( it->second );
- chunkBB.done();
- }
+ if (upperChunkIt != _chunksMap.end()) {
+ chunk->setMin(upperChunkIt->first);
+ chunk->setMax(upperChunkIt->second);
+ return true;
}
- void CollectionMetadata::toBSONPending( BSONArrayBuilder& bb ) const {
+ return false;
+}
- if ( _pendingMap.empty() ) return;
-
- for (RangeMap::const_iterator it = _pendingMap.begin(); it != _pendingMap.end(); ++it ) {
- BSONArrayBuilder pendingBB( bb.subarrayStart() );
- pendingBB.append( it->first );
- pendingBB.append( it->second );
- pendingBB.done();
- }
- }
+BSONObj CollectionMetadata::toBSON() const {
+ BSONObjBuilder bb;
+ toBSON(bb);
+ return bb.obj();
+}
- void CollectionMetadata::toBSON( BSONObjBuilder& bb ) const {
+void CollectionMetadata::toBSONChunks(BSONArrayBuilder& bb) const {
+ if (_chunksMap.empty())
+ return;
- _collVersion.addToBSON( bb, "collVersion" );
- _shardVersion.addToBSON( bb, "shardVersion" );
- bb.append( "keyPattern", _keyPattern );
+ for (RangeMap::const_iterator it = _chunksMap.begin(); it != _chunksMap.end(); ++it) {
+ BSONArrayBuilder chunkBB(bb.subarrayStart());
+ chunkBB.append(it->first);
+ chunkBB.append(it->second);
+ chunkBB.done();
+ }
+}
- BSONArrayBuilder chunksBB( bb.subarrayStart( "chunks" ) );
- toBSONChunks( chunksBB );
- chunksBB.done();
+void CollectionMetadata::toBSONPending(BSONArrayBuilder& bb) const {
+ if (_pendingMap.empty())
+ return;
- BSONArrayBuilder pendingBB( bb.subarrayStart( "pending" ) );
- toBSONPending( pendingBB );
+ for (RangeMap::const_iterator it = _pendingMap.begin(); it != _pendingMap.end(); ++it) {
+ BSONArrayBuilder pendingBB(bb.subarrayStart());
+ pendingBB.append(it->first);
+ pendingBB.append(it->second);
pendingBB.done();
}
+}
- bool CollectionMetadata::getNextOrphanRange( const BSONObj& origLookupKey,
- KeyRange* range ) const {
+void CollectionMetadata::toBSON(BSONObjBuilder& bb) const {
+ _collVersion.addToBSON(bb, "collVersion");
+ _shardVersion.addToBSON(bb, "shardVersion");
+ bb.append("keyPattern", _keyPattern);
- if ( _keyPattern.isEmpty() ) return false;
+ BSONArrayBuilder chunksBB(bb.subarrayStart("chunks"));
+ toBSONChunks(chunksBB);
+ chunksBB.done();
- BSONObj lookupKey = origLookupKey;
- BSONObj maxKey = getMaxKey(); // so we don't keep rebuilding
- while ( lookupKey.woCompare( maxKey ) < 0 ) {
+ BSONArrayBuilder pendingBB(bb.subarrayStart("pending"));
+ toBSONPending(pendingBB);
+ pendingBB.done();
+}
- RangeMap::const_iterator lowerChunkIt = _chunksMap.end();
- RangeMap::const_iterator upperChunkIt = _chunksMap.end();
+bool CollectionMetadata::getNextOrphanRange(const BSONObj& origLookupKey, KeyRange* range) const {
+ if (_keyPattern.isEmpty())
+ return false;
- if ( !_chunksMap.empty() ) {
- upperChunkIt = _chunksMap.upper_bound( lookupKey );
- lowerChunkIt = upperChunkIt;
- if ( upperChunkIt != _chunksMap.begin() ) --lowerChunkIt;
- else lowerChunkIt = _chunksMap.end();
- }
+ BSONObj lookupKey = origLookupKey;
+ BSONObj maxKey = getMaxKey(); // so we don't keep rebuilding
+ while (lookupKey.woCompare(maxKey) < 0) {
+ RangeMap::const_iterator lowerChunkIt = _chunksMap.end();
+ RangeMap::const_iterator upperChunkIt = _chunksMap.end();
- // If we overlap, continue after the overlap
- // TODO: Could optimize slightly by finding next non-contiguous chunk
- if ( lowerChunkIt != _chunksMap.end()
- && lowerChunkIt->second.woCompare( lookupKey ) > 0 ) {
- lookupKey = lowerChunkIt->second;
- continue;
- }
-
- RangeMap::const_iterator lowerPendingIt = _pendingMap.end();
- RangeMap::const_iterator upperPendingIt = _pendingMap.end();
+ if (!_chunksMap.empty()) {
+ upperChunkIt = _chunksMap.upper_bound(lookupKey);
+ lowerChunkIt = upperChunkIt;
+ if (upperChunkIt != _chunksMap.begin())
+ --lowerChunkIt;
+ else
+ lowerChunkIt = _chunksMap.end();
+ }
- if ( !_pendingMap.empty() ) {
+ // If we overlap, continue after the overlap
+ // TODO: Could optimize slightly by finding next non-contiguous chunk
+ if (lowerChunkIt != _chunksMap.end() && lowerChunkIt->second.woCompare(lookupKey) > 0) {
+ lookupKey = lowerChunkIt->second;
+ continue;
+ }
- upperPendingIt = _pendingMap.upper_bound( lookupKey );
- lowerPendingIt = upperPendingIt;
- if ( upperPendingIt != _pendingMap.begin() ) --lowerPendingIt;
- else lowerPendingIt = _pendingMap.end();
- }
+ RangeMap::const_iterator lowerPendingIt = _pendingMap.end();
+ RangeMap::const_iterator upperPendingIt = _pendingMap.end();
- // If we overlap, continue after the overlap
- // TODO: Could optimize slightly by finding next non-contiguous chunk
- if ( lowerPendingIt != _pendingMap.end()
- && lowerPendingIt->second.woCompare( lookupKey ) > 0 ) {
- lookupKey = lowerPendingIt->second;
- continue;
- }
+ if (!_pendingMap.empty()) {
+ upperPendingIt = _pendingMap.upper_bound(lookupKey);
+ lowerPendingIt = upperPendingIt;
+ if (upperPendingIt != _pendingMap.begin())
+ --lowerPendingIt;
+ else
+ lowerPendingIt = _pendingMap.end();
+ }
- //
- // We know that the lookup key is not covered by a chunk or pending range, and where the
- // previous chunk and pending chunks are. Now we fill in the bounds as the closest
- // bounds of the surrounding ranges in both maps.
- //
+ // If we overlap, continue after the overlap
+ // TODO: Could optimize slightly by finding next non-contiguous chunk
+ if (lowerPendingIt != _pendingMap.end() &&
+ lowerPendingIt->second.woCompare(lookupKey) > 0) {
+ lookupKey = lowerPendingIt->second;
+ continue;
+ }
- range->keyPattern = _keyPattern;
- range->minKey = getMinKey();
- range->maxKey = maxKey;
+ //
+ // We know that the lookup key is not covered by a chunk or pending range, and where the
+ // previous chunk and pending chunks are. Now we fill in the bounds as the closest
+ // bounds of the surrounding ranges in both maps.
+ //
- if ( lowerChunkIt != _chunksMap.end()
- && lowerChunkIt->second.woCompare( range->minKey ) > 0 ) {
- range->minKey = lowerChunkIt->second;
- }
+ range->keyPattern = _keyPattern;
+ range->minKey = getMinKey();
+ range->maxKey = maxKey;
- if ( upperChunkIt != _chunksMap.end()
- && upperChunkIt->first.woCompare( range->maxKey ) < 0 ) {
- range->maxKey = upperChunkIt->first;
- }
+ if (lowerChunkIt != _chunksMap.end() && lowerChunkIt->second.woCompare(range->minKey) > 0) {
+ range->minKey = lowerChunkIt->second;
+ }
- if ( lowerPendingIt != _pendingMap.end()
- && lowerPendingIt->second.woCompare( range->minKey ) > 0 ) {
- range->minKey = lowerPendingIt->second;
- }
+ if (upperChunkIt != _chunksMap.end() && upperChunkIt->first.woCompare(range->maxKey) < 0) {
+ range->maxKey = upperChunkIt->first;
+ }
- if ( upperPendingIt != _pendingMap.end()
- && upperPendingIt->first.woCompare( range->maxKey ) < 0 ) {
- range->maxKey = upperPendingIt->first;
- }
+ if (lowerPendingIt != _pendingMap.end() &&
+ lowerPendingIt->second.woCompare(range->minKey) > 0) {
+ range->minKey = lowerPendingIt->second;
+ }
- return true;
+ if (upperPendingIt != _pendingMap.end() &&
+ upperPendingIt->first.woCompare(range->maxKey) < 0) {
+ range->maxKey = upperPendingIt->first;
}
- return false;
+ return true;
}
- string CollectionMetadata::toString() const {
- StringBuilder ss;
- ss << " CollectionManager version: " << _shardVersion.toString() << " key: " << _keyPattern;
- if (_rangesMap.empty()) {
- return ss.str();
- }
+ return false;
+}
- RangeMap::const_iterator it = _rangesMap.begin();
- ss << it->first << " -> " << it->second;
- while (it != _rangesMap.end()) {
- ss << ", "<< it->first << " -> " << it->second;
- }
+string CollectionMetadata::toString() const {
+ StringBuilder ss;
+ ss << " CollectionManager version: " << _shardVersion.toString() << " key: " << _keyPattern;
+ if (_rangesMap.empty()) {
return ss.str();
}
- BSONObj CollectionMetadata::getMinKey() const {
- BSONObjIterator it( _keyPattern );
- BSONObjBuilder minKeyB;
- while ( it.more() ) minKeyB << it.next().fieldName() << MINKEY;
- return minKeyB.obj();
- }
-
- BSONObj CollectionMetadata::getMaxKey() const {
- BSONObjIterator it( _keyPattern );
- BSONObjBuilder maxKeyB;
- while ( it.more() ) maxKeyB << it.next().fieldName() << MAXKEY;
- return maxKeyB.obj();
+ RangeMap::const_iterator it = _rangesMap.begin();
+ ss << it->first << " -> " << it->second;
+ while (it != _rangesMap.end()) {
+ ss << ", " << it->first << " -> " << it->second;
}
+ return ss.str();
+}
+
+BSONObj CollectionMetadata::getMinKey() const {
+ BSONObjIterator it(_keyPattern);
+ BSONObjBuilder minKeyB;
+ while (it.more())
+ minKeyB << it.next().fieldName() << MINKEY;
+ return minKeyB.obj();
+}
+
+BSONObj CollectionMetadata::getMaxKey() const {
+ BSONObjIterator it(_keyPattern);
+ BSONObjBuilder maxKeyB;
+ while (it.more())
+ maxKeyB << it.next().fieldName() << MAXKEY;
+ return maxKeyB.obj();
+}
+
+bool CollectionMetadata::isValid() const {
+ if (_shardVersion > _collVersion)
+ return false;
+ if (_collVersion.majorVersion() == 0)
+ return false;
+ if (_collVersion.epoch() != _shardVersion.epoch())
+ return false;
- bool CollectionMetadata::isValid() const {
- if (_shardVersion > _collVersion)
+ if (_shardVersion.majorVersion() > 0) {
+ // Must be chunks
+ if (_rangesMap.size() == 0 || _chunksMap.size() == 0)
return false;
- if (_collVersion.majorVersion() == 0)
+ } else {
+ // No chunks
+ if (_shardVersion.minorVersion() > 0)
return false;
- if (_collVersion.epoch() != _shardVersion.epoch())
+ if (_rangesMap.size() > 0 || _chunksMap.size() > 0)
return false;
-
- if (_shardVersion.majorVersion() > 0) {
- // Must be chunks
- if (_rangesMap.size() == 0 || _chunksMap.size() == 0)
- return false;
- }
- else {
- // No chunks
- if (_shardVersion.minorVersion() > 0)
- return false;
- if (_rangesMap.size() > 0 || _chunksMap.size() > 0)
- return false;
- }
-
- return true;
}
- bool CollectionMetadata::isValidKey( const BSONObj& key ) const {
- BSONObjIterator it( _keyPattern );
- while ( it.more() ) {
- BSONElement next = it.next();
- if ( !key.hasField( next.fieldName() ) ) return false;
- }
- return key.nFields() == _keyPattern.nFields();
- }
-
- void CollectionMetadata::fillRanges() {
- if (_chunksMap.empty())
- return;
-
- // Load the chunk information, coalescing their ranges. The version for this shard
- // would be the highest version for any of the chunks.
- RangeMap::const_iterator it = _chunksMap.begin();
- BSONObj min,max;
- while (it != _chunksMap.end()) {
- BSONObj currMin = it->first;
- BSONObj currMax = it->second;
- ++it;
-
- // coalesce the chunk's bounds in ranges if they are adjacent chunks
- if (min.isEmpty()) {
- min = currMin;
- max = currMax;
- continue;
- }
- if (max == currMin) {
- max = currMax;
- continue;
- }
-
- _rangesMap.insert(make_pair(min, max));
+ return true;
+}
+bool CollectionMetadata::isValidKey(const BSONObj& key) const {
+ BSONObjIterator it(_keyPattern);
+ while (it.more()) {
+ BSONElement next = it.next();
+ if (!key.hasField(next.fieldName()))
+ return false;
+ }
+ return key.nFields() == _keyPattern.nFields();
+}
+
+void CollectionMetadata::fillRanges() {
+ if (_chunksMap.empty())
+ return;
+
+ // Load the chunk information, coalescing their ranges. The version for this shard
+ // would be the highest version for any of the chunks.
+ RangeMap::const_iterator it = _chunksMap.begin();
+ BSONObj min, max;
+ while (it != _chunksMap.end()) {
+ BSONObj currMin = it->first;
+ BSONObj currMax = it->second;
+ ++it;
+
+ // coalesce the chunk's bounds in ranges if they are adjacent chunks
+ if (min.isEmpty()) {
min = currMin;
max = currMax;
+ continue;
+ }
+ if (max == currMin) {
+ max = currMax;
+ continue;
}
- dassert(!min.isEmpty());
_rangesMap.insert(make_pair(min, max));
+
+ min = currMin;
+ max = currMax;
}
+ dassert(!min.isEmpty());
- void CollectionMetadata::fillKeyPatternFields() {
- // Parse the shard keys into the state's 'keys' and 'keySet' members.
- BSONObjIterator patternIter = _keyPattern.begin();
- while (patternIter.more()) {
- BSONElement current = patternIter.next();
+ _rangesMap.insert(make_pair(min, max));
+}
- _keyFields.mutableVector().push_back(new FieldRef);
- FieldRef* const newFieldRef = _keyFields.mutableVector().back();
- newFieldRef->parse(current.fieldNameStringData());
- }
+void CollectionMetadata::fillKeyPatternFields() {
+ // Parse the shard keys into the state's 'keys' and 'keySet' members.
+ BSONObjIterator patternIter = _keyPattern.begin();
+ while (patternIter.more()) {
+ BSONElement current = patternIter.next();
+
+ _keyFields.mutableVector().push_back(new FieldRef);
+ FieldRef* const newFieldRef = _keyFields.mutableVector().back();
+ newFieldRef->parse(current.fieldNameStringData());
}
+}
-} // namespace mongo
+} // namespace mongo
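
For orientation, below is a minimal, self-contained C++ sketch of two idioms the reformatted code above relies on: fillRanges()'s coalescing of adjacent [min, max) chunks into ranges, and keyBelongsToMe()'s upper_bound-then-step-back map lookup. The int-keyed RangeMap, the helper names coalesce() and contains(), and the main() driver are illustrative assumptions for the sketch only, not part of this commit or of the MongoDB sources.

// Standalone sketch (not MongoDB code): coalesce adjacent [min, max) chunks,
// then test key ownership the same way the BSONObj-keyed maps above do.
#include <iostream>
#include <map>

using RangeMap = std::map<int, int>;  // min -> max, ordered by min

// Mirror of fillRanges(): merge chunks whose bounds touch (max == next min).
RangeMap coalesce(const RangeMap& chunks) {
    RangeMap ranges;
    bool open = false;
    int min = 0, max = 0;
    for (const auto& [curMin, curMax] : chunks) {
        if (!open) {                 // first chunk opens a range
            min = curMin; max = curMax; open = true;
        } else if (max == curMin) {  // adjacent chunk: extend the open range
            max = curMax;
        } else {                     // gap: close the range, start a new one
            ranges.emplace(min, max);
            min = curMin; max = curMax;
        }
    }
    if (open) ranges.emplace(min, max);
    return ranges;
}

// Mirror of keyBelongsToMe(): find the candidate range at or before 'key'
// and test membership in the half-open interval [min, max).
bool contains(const RangeMap& ranges, int key) {
    if (ranges.empty()) return false;
    auto it = ranges.upper_bound(key);  // first range whose min > key
    if (it != ranges.begin()) --it;     // step back to the candidate range
    return it->first <= key && key < it->second;
}

int main() {
    RangeMap chunks{{0, 10}, {10, 20}, {30, 40}};  // [0,10) and [10,20) touch
    RangeMap ranges = coalesce(chunks);            // -> [0,20), [30,40)
    std::cout << std::boolalpha
              << contains(ranges, 15) << ' '       // true  (inside [0,20))
              << contains(ranges, 25) << '\n';     // false (in the gap)
}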