diff options
author | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2016-04-04 11:55:53 -0400 |
---|---|---|
committer | Kaloian Manassiev <kaloian.manassiev@mongodb.com> | 2016-04-04 11:57:47 -0400 |
commit | 3ea691f6fea329ee449056c23631610a2c9bd069 (patch) | |
tree | 123d56a54d3bc3de384e12c3252f13ecb7af3f2e /src | |
parent | d44068507081433dff3eb9619223dc566c661a68 (diff) | |
download | mongo-3ea691f6fea329ee449056c23631610a2c9bd069.tar.gz |
SERVER-23493 Cleanup CollectionMetadata::clonePlusChunk
The clonePlusChunk method is only used for testing purposes, so there is
no need for it to uassert. Add invariants and remove an unnecessary out
parameter.
Diffstat (limited to 'src')
-rw-r--r-- | src/mongo/db/commands/mr.cpp | 2 | ||||
-rw-r--r-- | src/mongo/db/commands/write_commands/batch_executor.cpp | 3 | ||||
-rw-r--r-- | src/mongo/db/s/collection_metadata.cpp | 52 | ||||
-rw-r--r-- | src/mongo/db/s/collection_metadata.h | 56 | ||||
-rw-r--r-- | src/mongo/db/s/collection_metadata_test.cpp | 85 |
5 files changed, 43 insertions, 155 deletions
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp index c60577e21bb..56aedcd16ae 100644 --- a/src/mongo/db/commands/mr.cpp +++ b/src/mongo/db/commands/mr.cpp @@ -1321,7 +1321,7 @@ public: uassert(16149, "cannot run map reduce without the js engine", globalScriptEngine); - CollectionMetadataPtr collMetadata; + shared_ptr<CollectionMetadata> collMetadata; // Prevent sharding state from changing during the MR. unique_ptr<RangePreserver> rangePreserver; diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp index 58ea6aa6d1e..51b446532a7 100644 --- a/src/mongo/db/commands/write_commands/batch_executor.cpp +++ b/src/mongo/db/commands/write_commands/batch_executor.cpp @@ -382,8 +382,7 @@ static bool checkIndexConstraints(OperationContext* txn, ShardingState* shardingState = ShardingState::get(txn); if (shardingState->enabled()) { - CollectionMetadataPtr metadata = shardingState->getCollectionMetadata(nss.ns()); - + auto metadata = shardingState->getCollectionMetadata(nss.ns()); if (metadata) { ShardKeyPattern shardKeyPattern(metadata->getKeyPattern()); if (!shardKeyPattern.isUniqueIndexCompatible(request.getIndexKeyPattern())) { diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp index d2ce3b91895..a8babeb576e 100644 --- a/src/mongo/db/s/collection_metadata.cpp +++ b/src/mongo/db/s/collection_metadata.cpp @@ -74,52 +74,26 @@ std::unique_ptr<CollectionMetadata> CollectionMetadata::cloneMigrate( return metadata; } -CollectionMetadata* CollectionMetadata::clonePlusChunk(const ChunkType& chunk, - const ChunkVersion& newShardVersion, - string* errMsg) const { - // The error message string is optional. - string dummy; - if (errMsg == NULL) { - errMsg = &dummy; - } - - // It is acceptable to move version backwards (e.g., undoing a migration that went bad - // during commit) but only cloning away the last chunk may reset the version to 0. 
- if (!newShardVersion.isSet()) { - *errMsg = stream() << "cannot add chunk " << rangeToString(chunk.getMin(), chunk.getMax()) - << " with zero shard version"; - - warning() << *errMsg; - return NULL; - } - - invariant(chunk.getMin().woCompare(chunk.getMax()) < 0); - - // Check that there isn't any chunk on the interval to be added. - if (rangeMapOverlaps(_chunksMap, chunk.getMin(), chunk.getMax())) { - RangeVector overlap; - getRangeMapOverlap(_chunksMap, chunk.getMin(), chunk.getMax(), &overlap); - - *errMsg = stream() << "cannot add chunk " << rangeToString(chunk.getMin(), chunk.getMax()) - << " because the chunk overlaps " << overlapToString(overlap); - - warning() << *errMsg; - return NULL; - } +unique_ptr<CollectionMetadata> CollectionMetadata::clonePlusChunk( + const BSONObj& minKey, const BSONObj& maxKey, const ChunkVersion& newShardVersion) const { + invariant(newShardVersion.epoch() == _shardVersion.epoch()); + invariant(newShardVersion.isSet()); + invariant(minKey.woCompare(maxKey) < 0); + invariant(!rangeMapOverlaps(_chunksMap, minKey, maxKey)); - unique_ptr<CollectionMetadata> metadata(new CollectionMetadata); - metadata->_keyPattern = this->_keyPattern; + unique_ptr<CollectionMetadata> metadata(stdx::make_unique<CollectionMetadata>()); + metadata->_keyPattern = _keyPattern; metadata->_keyPattern.getOwned(); metadata->fillKeyPatternFields(); - metadata->_pendingMap = this->_pendingMap; - metadata->_chunksMap = this->_chunksMap; - metadata->_chunksMap.insert(make_pair(chunk.getMin().getOwned(), chunk.getMax().getOwned())); + metadata->_pendingMap = _pendingMap; + metadata->_chunksMap = _chunksMap; + metadata->_chunksMap.insert(make_pair(minKey.getOwned(), maxKey.getOwned())); metadata->_shardVersion = newShardVersion; - metadata->_collVersion = newShardVersion > _collVersion ? newShardVersion : this->_collVersion; + metadata->_collVersion = newShardVersion > _collVersion ? 
newShardVersion : _collVersion; metadata->fillRanges(); invariant(metadata->isValid()); - return metadata.release(); + return metadata; } std::unique_ptr<CollectionMetadata> CollectionMetadata::cloneMinusPending( diff --git a/src/mongo/db/s/collection_metadata.h b/src/mongo/db/s/collection_metadata.h index d11b0ca2fff..38898e15e35 100644 --- a/src/mongo/db/s/collection_metadata.h +++ b/src/mongo/db/s/collection_metadata.h @@ -28,7 +28,6 @@ #pragma once - #include "mongo/base/disallow_copying.h" #include "mongo/base/owned_pointer_vector.h" #include "mongo/db/field_ref_set.h" @@ -40,9 +39,6 @@ namespace mongo { class ChunkType; class MetadataLoader; -class CollectionMetadata; - -typedef std::shared_ptr<const CollectionMetadata> CollectionMetadataPtr; /** * The collection metadata has metadata information about a collection, in particular the @@ -60,12 +56,16 @@ class CollectionMetadata { MONGO_DISALLOW_COPYING(CollectionMetadata); public: + /** + * Use the MetadataLoader to fill the empty metadata from the config server, or use + * clone*() methods to use existing metadatas to build new ones. + * + * Unless you are the MetadataLoader or a test you should probably not be using this + * directly. + */ + CollectionMetadata(); ~CollectionMetadata(); - // - // cloning support - // - /** * Returns a new metadata's instance based on 'this's state by removing a 'pending' chunk. * @@ -120,10 +120,6 @@ public: const ChunkVersion& newShardVersion, std::string* errMsg) const; - // - // verification logic - // - /** * Returns true if the document key 'key' is a valid instance of a shard key for this * metadata. The 'key' must contain exactly the same fields as the shard key pattern. 
@@ -169,10 +165,6 @@ public: */ bool getNextOrphanRange(const BSONObj& lookupKey, KeyRange* orphanRange) const; - // - // accessors - // - ChunkVersion getCollVersion() const { return _collVersion; } @@ -201,10 +193,6 @@ public: return _pendingMap.size(); } - // - // reporting - // - /** * BSON output of the metadata information. */ @@ -231,30 +219,18 @@ public: std::string toString() const; /** - * Use the MetadataLoader to fill the empty metadata from the config server, or use - * clone*() methods to use existing metadatas to build new ones. + * This method is used only for unit-tests and it returns a new metadata's instance based on + * 'this's state by adding 'chunk'. The new metadata can never be zero. * - * Unless you are the MetadataLoader or a test you should probably not be using this - * directly. - */ - CollectionMetadata(); - - /** - * TESTING ONLY - * - * Returns a new metadata's instance based on 'this's state by adding 'chunk'. The new - * metadata can never be zero, though (see cloneMinus). The caller owns the new metadata. - * - * If a new metadata can't be created, returns NULL and fills in 'errMsg', if it was - * provided. + * It will fassert if the chunk bounds are incorrect or overlap an existing chunk. */ - CollectionMetadata* clonePlusChunk(const ChunkType& chunk, - const ChunkVersion& newShardVersion, - std::string* errMsg) const; + std::unique_ptr<CollectionMetadata> clonePlusChunk(const BSONObj& minKey, + const BSONObj& maxKey, + const ChunkVersion& newShardVersion) const; private: - // Effectively, the MetadataLoader is this class's builder. So we open an exception - // and grant it friendship. + // Effectively, the MetadataLoader is this class's builder. So we open an exception and grant it + // friendship. 
friend class MetadataLoader; // a version for this collection that identifies the collection incarnation (ie, a diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp index ffd3904bdfc..0e6bdf57755 100644 --- a/src/mongo/db/s/collection_metadata_test.cpp +++ b/src/mongo/db/s/collection_metadata_test.cpp @@ -128,36 +128,16 @@ TEST_F(NoChunkFixture, getNextFromEmpty) { } TEST_F(NoChunkFixture, FirstChunkClonePlus) { - ChunkType chunk; - chunk.setMin(BSON("a" << 10)); - chunk.setMax(BSON("a" << 20)); - - string errMsg; - const ChunkVersion version(99, 0, OID()); + ChunkVersion version(1, 0, getCollMetadata().getCollVersion().epoch()); unique_ptr<CollectionMetadata> cloned( - getCollMetadata().clonePlusChunk(chunk, version, &errMsg)); + getCollMetadata().clonePlusChunk(BSON("a" << 10), BSON("a" << 20), version)); - ASSERT(errMsg.empty()); ASSERT_EQUALS(1u, cloned->getNumChunks()); ASSERT_EQUALS(cloned->getShardVersion().toLong(), version.toLong()); ASSERT_EQUALS(cloned->getCollVersion().toLong(), version.toLong()); ASSERT(cloned->keyBelongsToMe(BSON("a" << 15))); } -TEST_F(NoChunkFixture, MustHaveVersionForFirstChunk) { - ChunkType chunk; - chunk.setMin(BSON("a" << 10)); - chunk.setMax(BSON("a" << 20)); - - string errMsg; - unique_ptr<CollectionMetadata> cloned( - getCollMetadata() // br - .clonePlusChunk(chunk, ChunkVersion(0, 0, OID()), &errMsg)); - - ASSERT(cloned == NULL); - ASSERT_FALSE(errMsg.empty()); -} - TEST_F(NoChunkFixture, NoPendingChunks) { ASSERT(!getCollMetadata().keyIsPending(BSON("a" << 15))); ASSERT(!getCollMetadata().keyIsPending(BSON("a" << 25))); @@ -253,15 +233,8 @@ TEST_F(NoChunkFixture, PlusChunkWithPending) { ASSERT(cloned->keyIsPending(BSON("a" << 15))); ASSERT(!cloned->keyIsPending(BSON("a" << 25))); - chunk.setMin(BSON("a" << 20)); - chunk.setMax(BSON("a" << 30)); - - std::string errMsg; - cloned.reset(cloned->clonePlusChunk( - chunk, ChunkVersion(1, 0, cloned->getCollVersion().epoch()), 
&errMsg)); - - ASSERT_EQUALS(errMsg, ""); - ASSERT(cloned != NULL); + cloned = cloned->clonePlusChunk( + BSON("a" << 20), BSON("a" << 30), ChunkVersion(1, 0, cloned->getCollVersion().epoch())); ASSERT(cloned->keyIsPending(BSON("a" << 15))); ASSERT(!cloned->keyIsPending(BSON("a" << 25))); @@ -733,16 +706,10 @@ private: }; TEST_F(TwoChunksWithGapCompoundKeyFixture, ClonePlusBasic) { - ChunkType chunk; - chunk.setMin(BSON("a" << 40 << "b" << 0)); - chunk.setMax(BSON("a" << 50 << "b" << 0)); - - string errMsg; ChunkVersion version(1, 0, getCollMetadata().getShardVersion().epoch()); - unique_ptr<CollectionMetadata> cloned( - getCollMetadata().clonePlusChunk(chunk, version, &errMsg)); + unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusChunk( + BSON("a" << 40 << "b" << 0), BSON("a" << 50 << "b" << 0), version)); - ASSERT(errMsg.empty()); ASSERT_EQUALS(2u, getCollMetadata().getNumChunks()); ASSERT_EQUALS(3u, cloned->getNumChunks()); @@ -756,19 +723,6 @@ TEST_F(TwoChunksWithGapCompoundKeyFixture, ClonePlusBasic) { ASSERT_FALSE(cloned->keyBelongsToMe(BSON("a" << 50 << "b" << 0))); } -TEST_F(TwoChunksWithGapCompoundKeyFixture, ClonePlusOverlappingRange) { - ChunkType chunk; - chunk.setMin(BSON("a" << 15 << "b" << 0)); - chunk.setMax(BSON("a" << 25 << "b" << 0)); - - string errMsg; - unique_ptr<CollectionMetadata> cloned( - getCollMetadata().clonePlusChunk(chunk, ChunkVersion(1, 0, OID()), &errMsg)); - ASSERT(cloned == NULL); - ASSERT_FALSE(errMsg.empty()); - ASSERT_EQUALS(2u, getCollMetadata().getNumChunks()); -} - TEST_F(TwoChunksWithGapCompoundKeyFixture, CloneMinusBasic) { ChunkType chunk; chunk.setMin(BSON("a" << 10 << "b" << 0)); @@ -1051,17 +1005,12 @@ TEST_F(ThreeChunkWithRangeGapFixture, MergeChunkMinKey) { TEST_F(ThreeChunkWithRangeGapFixture, MergeChunkMaxKey) { string errMsg; - unique_ptr<CollectionMetadata> cloned; ChunkVersion newShardVersion(5, 0, getCollMetadata().getShardVersion().epoch()); // Add one chunk to complete the range - ChunkType 
chunk; - chunk.setMin(BSON("a" << 20)); - chunk.setMax(BSON("a" << 30)); - cloned.reset(getCollMetadata().clonePlusChunk(chunk, newShardVersion, &errMsg)); - ASSERT_EQUALS(errMsg, ""); + unique_ptr<CollectionMetadata> cloned( + getCollMetadata().clonePlusChunk(BSON("a" << 20), BSON("a" << 30), newShardVersion)); ASSERT_EQUALS(cloned->getNumChunks(), 4u); - ASSERT(cloned != NULL); // Try to merge highest chunks together newShardVersion.incMajor(); @@ -1077,17 +1026,12 @@ TEST_F(ThreeChunkWithRangeGapFixture, MergeChunkMaxKey) { TEST_F(ThreeChunkWithRangeGapFixture, MergeChunkFullRange) { string errMsg; - unique_ptr<CollectionMetadata> cloned; ChunkVersion newShardVersion(5, 0, getCollMetadata().getShardVersion().epoch()); // Add one chunk to complete the range - ChunkType chunk; - chunk.setMin(BSON("a" << 20)); - chunk.setMax(BSON("a" << 30)); - cloned.reset(getCollMetadata().clonePlusChunk(chunk, newShardVersion, &errMsg)); - ASSERT_EQUALS(errMsg, ""); + unique_ptr<CollectionMetadata> cloned( + getCollMetadata().clonePlusChunk(BSON("a" << 20), BSON("a" << 30), newShardVersion)); ASSERT_EQUALS(cloned->getNumChunks(), 4u); - ASSERT(cloned != NULL); // Try to merge all chunks together newShardVersion.incMajor(); @@ -1104,17 +1048,12 @@ TEST_F(ThreeChunkWithRangeGapFixture, MergeChunkFullRange) { TEST_F(ThreeChunkWithRangeGapFixture, MergeChunkMiddleRange) { string errMsg; - unique_ptr<CollectionMetadata> cloned; ChunkVersion newShardVersion(5, 0, getCollMetadata().getShardVersion().epoch()); // Add one chunk to complete the range - ChunkType chunk; - chunk.setMin(BSON("a" << 20)); - chunk.setMax(BSON("a" << 30)); - cloned.reset(getCollMetadata().clonePlusChunk(chunk, newShardVersion, &errMsg)); - ASSERT_EQUALS(errMsg, ""); + unique_ptr<CollectionMetadata> cloned( + getCollMetadata().clonePlusChunk(BSON("a" << 20), BSON("a" << 30), newShardVersion)); ASSERT_EQUALS(cloned->getNumChunks(), 4u); - ASSERT(cloned != NULL); // Try to merge middle two chunks 
newShardVersion.incMajor(); |