author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2016-10-07 15:37:23 -0400
committer Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2016-10-10 18:05:27 -0400
commit    a43da01b5124d89dea500819cbac0e55eb76961a (patch)
tree      a7035c490bc06b80740f1a8d111a49005b18503e /src/mongo/db
parent    8189acb7e18a8cb2c9564ae53c11d2e4c8ea05ae (diff)
SERVER-25665 Remove cloneSplit and cloneMerge from CollectionMetadata
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/s/collection_metadata.cpp       144
-rw-r--r--  src/mongo/db/s/collection_metadata.h           29
-rw-r--r--  src/mongo/db/s/collection_metadata_test.cpp   247
-rw-r--r--  src/mongo/db/s/merge_chunks_command.cpp         8
-rw-r--r--  src/mongo/db/s/split_chunk_command.cpp         84
5 files changed, 37 insertions(+), 475 deletions(-)
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index 1e5efe95b2a..b2cf4f443f8 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -140,150 +140,6 @@ std::unique_ptr<CollectionMetadata> CollectionMetadata::clonePlusPending(
return metadata;
}
-StatusWith<std::unique_ptr<CollectionMetadata>> CollectionMetadata::cloneSplit(
- const BSONObj& minKey,
- const BSONObj& maxKey,
- const std::vector<BSONObj>& splitKeys,
- const ChunkVersion& newShardVersion) const {
- invariant(newShardVersion.epoch() == _shardVersion.epoch());
- invariant(newShardVersion > _shardVersion);
-
- // The version required in both resulting chunks could be simply an increment in the
- // minor portion of the current version. However, we are enforcing uniqueness over the
- // attributes <ns, version> of the configdb collection 'chunks'. So in practice, a
- // migration elsewhere may force this split to pick up a version whose major
- // portion is higher than the one this shard has been using.
- //
- // TODO drop the uniqueness constraint and tighten the check below so that only the
- // minor portion of version changes
-
- // Check that we have the exact chunk that will be subtracted.
- if (!rangeMapContains(_chunksMap, minKey, maxKey)) {
- stream errMsg;
- errMsg << "cannot split chunk " << rangeToString(minKey, maxKey)
- << ", this shard does not contain the chunk";
-
- if (rangeMapOverlaps(_chunksMap, minKey, maxKey)) {
- RangeVector overlap;
- getRangeMapOverlap(_chunksMap, minKey, maxKey, &overlap);
-
- errMsg << " and it overlaps " << overlapToString(overlap);
- }
-
- return {ErrorCodes::IllegalOperation, errMsg};
- }
-
- unique_ptr<CollectionMetadata> metadata(stdx::make_unique<CollectionMetadata>());
- metadata->_keyPattern = _keyPattern.getOwned();
- metadata->fillKeyPatternFields();
- metadata->_pendingMap = _pendingMap;
- metadata->_chunksMap = _chunksMap;
- metadata->_shardVersion = newShardVersion; // will increment 2nd, 3rd,... chunks below
-
- BSONObj startKey = minKey;
- for (const auto& split : splitKeys) {
- // Check that the split key is valid
- if (!rangeContains(minKey, maxKey, split)) {
- return {ErrorCodes::IllegalOperation,
- stream() << "Cannot split chunk " << rangeToString(minKey, maxKey) << " at key "
- << split};
- }
-
- // Check that the split keys are in order
- if (split.woCompare(startKey) <= 0) {
- // The split keys came in out of order; this probably indicates a bug, so fail the
- // operation. Re-iterate splitKeys to build a useful error message including the array
- // of splitKeys in the order received.
- str::stream errMsg;
- errMsg << "Invalid input to splitChunk, split keys must be in order, got: [";
- for (auto it2 = splitKeys.cbegin(); it2 != splitKeys.cend(); ++it2) {
- if (it2 != splitKeys.begin()) {
- errMsg << ", ";
- }
- errMsg << it2->toString();
- }
- errMsg << "]";
- return {ErrorCodes::IllegalOperation, errMsg};
- }
-
- metadata->_chunksMap[startKey] = split.getOwned();
- metadata->_chunksMap.insert(make_pair(split.getOwned(), maxKey.getOwned()));
- metadata->_shardVersion.incMinor();
- startKey = split;
- }
-
- metadata->_collVersion =
- metadata->_shardVersion > _collVersion ? metadata->_shardVersion : _collVersion;
- metadata->fillRanges();
-
- invariant(metadata->isValid());
- return std::move(metadata);
-}
-
-StatusWith<std::unique_ptr<CollectionMetadata>> CollectionMetadata::cloneMerge(
- const BSONObj& minKey, const BSONObj& maxKey, const ChunkVersion& newShardVersion) const {
- invariant(newShardVersion.epoch() == _shardVersion.epoch());
- invariant(newShardVersion > _shardVersion);
-
- RangeVector overlap;
- getRangeMapOverlap(_chunksMap, minKey, maxKey, &overlap);
-
- if (overlap.empty() || overlap.size() == 1) {
- return {ErrorCodes::IllegalOperation,
- stream() << "cannot merge range " << rangeToString(minKey, maxKey)
- << (overlap.empty() ? ", no chunks found in this range"
- : ", only one chunk found in this range")};
- }
-
- bool validStartEnd = true;
- bool validNoHoles = true;
-
- if (overlap.begin()->first.woCompare(minKey) != 0) {
- // First chunk doesn't start with minKey
- validStartEnd = false;
- } else if (overlap.rbegin()->second.woCompare(maxKey) != 0) {
- // Last chunk doesn't end with maxKey
- validStartEnd = false;
- } else {
- // Check that there are no holes
- BSONObj prevMaxKey = minKey;
- for (RangeVector::iterator it = overlap.begin(); it != overlap.end(); ++it) {
- if (it->first.woCompare(prevMaxKey) != 0) {
- validNoHoles = false;
- break;
- }
- prevMaxKey = it->second;
- }
- }
-
- if (!validStartEnd || !validNoHoles) {
- return {ErrorCodes::IllegalOperation,
- stream() << "cannot merge range " << rangeToString(minKey, maxKey)
- << ", overlapping chunks "
- << overlapToString(overlap)
- << (!validStartEnd ? " do not have the same min and max key"
- : " are not all adjacent")};
- }
-
- unique_ptr<CollectionMetadata> metadata(stdx::make_unique<CollectionMetadata>());
- metadata->_keyPattern = _keyPattern.getOwned();
- metadata->fillKeyPatternFields();
- metadata->_pendingMap = _pendingMap;
- metadata->_chunksMap = _chunksMap;
- metadata->_rangesMap = _rangesMap;
- metadata->_shardVersion = newShardVersion;
- metadata->_collVersion = newShardVersion > _collVersion ? newShardVersion : this->_collVersion;
-
- for (RangeVector::iterator it = overlap.begin(); it != overlap.end(); ++it) {
- metadata->_chunksMap.erase(it->first);
- }
-
- metadata->_chunksMap.insert(make_pair(minKey, maxKey));
-
- invariant(metadata->isValid());
- return std::move(metadata);
-}
-
bool CollectionMetadata::keyBelongsToMe(const BSONObj& key) const {
// For now, collections don't move. So if the collection is not sharded, assume
// the document with the given key can be accessed.
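The bulk of the removed cloneMerge body is validation: a merge is legal only when the chunks overlapping the requested range start exactly at minKey, end exactly at maxKey, and sit adjacent to one another with no holes. A minimal standalone sketch of that check over integer keys (hypothetical stand-ins for the BSONObj chunk bounds in the real code; this is not MongoDB source):

#include <utility>
#include <vector>

// 'overlap' holds the [min, max) bounds of the chunks intersecting the
// requested range, sorted by min key, mirroring the RangeVector that
// getRangeMapOverlap produced in the removed code.
bool canMergeRange(const std::vector<std::pair<int, int>>& overlap,
                   int minKey,
                   int maxKey) {
    if (overlap.size() < 2)
        return false;  // zero or one chunk in range: nothing to merge
    if (overlap.front().first != minKey || overlap.back().second != maxKey)
        return false;  // first chunk must start at minKey, last must end at maxKey
    int prevMaxKey = minKey;
    for (const auto& chunk : overlap) {
        if (chunk.first != prevMaxKey)
            return false;  // hole between consecutive chunks
        prevMaxKey = chunk.second;
    }
    return true;
}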
diff --git a/src/mongo/db/s/collection_metadata.h b/src/mongo/db/s/collection_metadata.h
index 796f1d92515..2e9d6694220 100644
--- a/src/mongo/db/s/collection_metadata.h
+++ b/src/mongo/db/s/collection_metadata.h
@@ -83,32 +83,6 @@ public:
std::unique_ptr<CollectionMetadata> clonePlusPending(const ChunkType& chunk) const;
/**
- * Returns a new metadata instance by splitting an existing 'chunk' at the points
- * described by 'splitKeys'. The first resulting chunk will have 'newShardVersion', and
- * each subsequent chunk will have the minor version incremented once more. The
- * caller owns the metadata.
- *
- * If a new metadata can't be created, returns a failed status.
- *
- * Note: 'splitKeys' must be sorted in ascending order.
- */
- StatusWith<std::unique_ptr<CollectionMetadata>> cloneSplit(
- const BSONObj& minKey,
- const BSONObj& maxKey,
- const std::vector<BSONObj>& splitKeys,
- const ChunkVersion& newShardVersion) const;
-
- /**
- * Returns a new metadata instance by merging a key range which starts and ends at existing
- * chunks into a single chunk. The range may not have holes. The resulting metadata will
- * have the 'newShardVersion'.
- *
- * If a new metadata can't be created, returns a failed status.
- */
- StatusWith<std::unique_ptr<CollectionMetadata>> cloneMerge(
- const BSONObj& minKey, const BSONObj& maxKey, const ChunkVersion& newShardVersion) const;
-
- /**
* Returns true if the document key 'key' is a valid instance of a shard key for this
* metadata. The 'key' must contain exactly the same fields as the shard key pattern.
*/
@@ -205,6 +179,9 @@ public:
*/
void toBSONPending(BSONArrayBuilder& bb) const;
+ /**
+ * String output of the collection and shard versions.
+ */
std::string toStringBasic() const;
/**
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index 6ff125ebef9..948701095dd 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -218,14 +218,6 @@ TEST_F(NoChunkFixture, OverlappingPendingChunks) {
ASSERT(!cloned->keyIsPending(BSON("a" << 45)));
}
-TEST_F(NoChunkFixture, MergeChunkEmpty) {
- ASSERT_NOT_OK(getCollMetadata()
- .cloneMerge(BSON("a" << 15),
- BSON("a" << 25),
- ChunkVersion(2, 0, getCollMetadata().getCollVersion().epoch()))
- .getStatus());
-}
-
TEST_F(NoChunkFixture, OrphanedDataRangeBegin) {
const CollectionMetadata& metadata = getCollMetadata();
@@ -390,102 +382,6 @@ TEST_F(SingleChunkFixture, PlusPendingChunk) {
ASSERT(cloned->keyIsPending(BSON("a" << 25)));
}
-TEST_F(SingleChunkFixture, SingleSplit) {
- ChunkVersion version = getCollMetadata().getCollVersion();
- version.incMinor();
-
- ChunkType chunk;
- chunk.setMin(BSON("a" << 10));
- chunk.setMax(BSON("a" << 20));
-
- vector<BSONObj> splitPoints;
- splitPoints.push_back(BSON("a" << 14));
-
- unique_ptr<CollectionMetadata> cloned(assertGet(
- getCollMetadata().cloneSplit(chunk.getMin(), chunk.getMax(), splitPoints, version)));
-
- ChunkVersion newVersion(cloned->getCollVersion());
- ASSERT_EQUALS(version.epoch(), newVersion.epoch());
- ASSERT_EQUALS(version.majorVersion(), newVersion.majorVersion());
- ASSERT_EQUALS(version.minorVersion() + 1, newVersion.minorVersion());
-
- ASSERT(cloned->getNextChunk(BSON("a" << MINKEY), &chunk));
- ASSERT(chunk.getMin().woCompare(BSON("a" << 10)) == 0);
- ASSERT(chunk.getMax().woCompare(BSON("a" << 14)) == 0);
-
- ASSERT(cloned->getNextChunk(BSON("a" << 14), &chunk));
- ASSERT(chunk.getMin().woCompare(BSON("a" << 14)) == 0);
- ASSERT(chunk.getMax().woCompare(BSON("a" << 20)) == 0);
-
- ASSERT_FALSE(cloned->getNextChunk(BSON("a" << 20), &chunk));
-}
-
-TEST_F(SingleChunkFixture, MultiSplit) {
- ChunkType chunk;
- chunk.setMin(BSON("a" << 10));
- chunk.setMax(BSON("a" << 20));
-
- vector<BSONObj> splitPoints;
- splitPoints.push_back(BSON("a" << 14));
- splitPoints.push_back(BSON("a" << 16));
-
- ChunkVersion version = getCollMetadata().getCollVersion();
- version.incMinor();
-
- unique_ptr<CollectionMetadata> cloned(assertGet(
- getCollMetadata().cloneSplit(chunk.getMin(), chunk.getMax(), splitPoints, version)));
-
- ChunkVersion newVersion(cloned->getCollVersion());
- ASSERT_EQUALS(version.epoch(), newVersion.epoch());
- ASSERT_EQUALS(version.majorVersion(), newVersion.majorVersion());
- ASSERT_EQUALS(version.minorVersion() + 2, newVersion.minorVersion());
-
- ASSERT(cloned->getNextChunk(BSON("a" << MINKEY), &chunk));
- ASSERT(chunk.getMin().woCompare(BSON("a" << 10)) == 0);
- ASSERT(chunk.getMax().woCompare(BSON("a" << 14)) == 0);
-
- ASSERT(cloned->getNextChunk(BSON("a" << 14), &chunk));
- ASSERT(chunk.getMin().woCompare(BSON("a" << 14)) == 0);
- ASSERT(chunk.getMax().woCompare(BSON("a" << 16)) == 0);
-
- ASSERT(cloned->getNextChunk(BSON("a" << 16), &chunk));
- ASSERT(chunk.getMin().woCompare(BSON("a" << 16)) == 0);
- ASSERT(chunk.getMax().woCompare(BSON("a" << 20)) == 0);
-
- ASSERT_FALSE(cloned->getNextChunk(BSON("a" << 20), &chunk));
-}
-
-TEST_F(SingleChunkFixture, SplitChunkWithPending) {
- ChunkType chunk;
- chunk.setMin(BSON("a" << 20));
- chunk.setMax(BSON("a" << 30));
-
- unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
- ASSERT(cloned->keyIsPending(BSON("a" << 25)));
- ASSERT(!cloned->keyIsPending(BSON("a" << 35)));
-
- vector<BSONObj> splitPoints;
- splitPoints.push_back(BSON("a" << 14));
- splitPoints.push_back(BSON("a" << 16));
-
- cloned = assertGet(cloned->cloneSplit(BSON("a" << 10),
- BSON("a" << 20),
- splitPoints,
- ChunkVersion(cloned->getCollVersion().majorVersion() + 1,
- 0,
- cloned->getCollVersion().epoch())));
- ASSERT(cloned->keyIsPending(BSON("a" << 25)));
- ASSERT(!cloned->keyIsPending(BSON("a" << 35)));
-}
-
-TEST_F(SingleChunkFixture, MergeChunkSingle) {
- ASSERT_NOT_OK(getCollMetadata()
- .cloneMerge(BSON("a" << 10),
- BSON("a" << 20),
- ChunkVersion(2, 0, getCollMetadata().getCollVersion().epoch()))
- .getStatus());
-}
-
TEST_F(SingleChunkFixture, ChunkOrphanedDataRanges) {
KeyRange keyRange;
ASSERT(getCollMetadata().getNextOrphanRange(getCollMetadata().getMinKey(), &keyRange));
@@ -635,59 +531,6 @@ private:
const HostAndPort configHost{HostAndPort(CONFIG_HOST_PORT)};
};
-TEST_F(TwoChunksWithGapCompoundKeyFixture, CloneSplitBasic) {
- const BSONObj min(BSON("a" << 10 << "b" << 0));
- const BSONObj max(BSON("a" << 20 << "b" << 0));
-
- const BSONObj split1(BSON("a" << 15 << "b" << 0));
- const BSONObj split2(BSON("a" << 18 << "b" << 0));
- vector<BSONObj> splitKeys;
- splitKeys.push_back(split1);
- splitKeys.push_back(split2);
- ChunkVersion version(
- 1, 99, getCollMetadata().getCollVersion().epoch()); // first chunk 1|99, second 1|100
-
- unique_ptr<CollectionMetadata> cloned(
- assertGet(getCollMetadata().cloneSplit(min, max, splitKeys, version)));
-
- version.incMinor(); // second chunk 1|100, first split point
- version.incMinor(); // third chunk 1|101, second split point
- ASSERT_EQUALS(cloned->getShardVersion().toLong(), version.toLong() /* 1|101 */);
- ASSERT_EQUALS(cloned->getCollVersion().toLong(), version.toLong());
- ASSERT_EQUALS(getCollMetadata().getNumChunks(), 2u);
- ASSERT_EQUALS(cloned->getNumChunks(), 4u);
- ASSERT(cloned->keyBelongsToMe(min));
- ASSERT(cloned->keyBelongsToMe(split1));
- ASSERT(cloned->keyBelongsToMe(split2));
- ASSERT(!cloned->keyBelongsToMe(max));
-}
-
-TEST_F(TwoChunksWithGapCompoundKeyFixture, CloneSplitOutOfRangeSplitPoint) {
- vector<BSONObj> splitKeys;
- splitKeys.push_back(BSON("a" << 5 << "b" << 0));
-
- ASSERT_NOT_OK(getCollMetadata()
- .cloneSplit(BSON("a" << 10 << "b" << 0),
- BSON("a" << 20 << "b" << 0),
- splitKeys,
- ChunkVersion(1, 1, getCollMetadata().getCollVersion().epoch()))
- .getStatus());
- ASSERT_EQUALS(2u, getCollMetadata().getNumChunks());
-}
-
-TEST_F(TwoChunksWithGapCompoundKeyFixture, CloneSplitBadChunkRange) {
- vector<BSONObj> splitKeys;
- splitKeys.push_back(BSON("a" << 15 << "b" << 0));
-
- ASSERT_NOT_OK(getCollMetadata()
- .cloneSplit(BSON("a" << 10 << "b" << 0),
- BSON("a" << 25 << "b" << 0),
- splitKeys,
- ChunkVersion(1, 1, getCollMetadata().getCollVersion().epoch()))
- .getStatus());
- ASSERT_EQUALS(2u, getCollMetadata().getNumChunks());
-}
-
TEST_F(TwoChunksWithGapCompoundKeyFixture, ChunkGapOrphanedDataRanges) {
KeyRange keyRange;
ASSERT(getCollMetadata().getNextOrphanRange(getCollMetadata().getMinKey(), &keyRange));
@@ -866,95 +709,5 @@ TEST_F(ThreeChunkWithRangeGapFixture, GetDifferentFromLast) {
ASSERT_EQUALS(0, differentChunk.getMax().woCompare(BSON("a" << 10)));
}
-TEST_F(ThreeChunkWithRangeGapFixture, MergeChunkHoleInRange) {
- ChunkVersion newShardVersion(5, 0, getCollMetadata().getShardVersion().epoch());
- ASSERT_NOT_OK(getCollMetadata()
- .cloneMerge(BSON("a" << 10), BSON("a" << MAXKEY), newShardVersion)
- .getStatus());
-}
-
-TEST_F(ThreeChunkWithRangeGapFixture, MergeChunkDiffEndKey) {
- ChunkVersion newShardVersion(5, 0, getCollMetadata().getShardVersion().epoch());
- ASSERT_NOT_OK(getCollMetadata()
- .cloneMerge(BSON("a" << MINKEY), BSON("a" << 19), newShardVersion)
- .getStatus());
-}
-
-TEST_F(ThreeChunkWithRangeGapFixture, MergeChunkMinKey) {
- ASSERT_EQUALS(getCollMetadata().getNumChunks(), 3u);
-
- // Try to merge lowest chunks together
- ChunkVersion newShardVersion(5, 0, getCollMetadata().getShardVersion().epoch());
- unique_ptr<CollectionMetadata> cloned(assertGet(
- getCollMetadata().cloneMerge(BSON("a" << MINKEY), BSON("a" << 20), newShardVersion)));
-
- ASSERT(cloned->keyBelongsToMe(BSON("a" << 10)));
- ASSERT_EQUALS(cloned->getNumChunks(), 2u);
- ASSERT_EQUALS(cloned->getShardVersion().majorVersion(), 5);
-}
-
-TEST_F(ThreeChunkWithRangeGapFixture, MergeChunkMaxKey) {
- ChunkVersion newShardVersion(5, 0, getCollMetadata().getShardVersion().epoch());
-
- // Add one chunk to complete the range
- unique_ptr<CollectionMetadata> cloned(
- getCollMetadata().clonePlusChunk(BSON("a" << 20), BSON("a" << 30), newShardVersion));
- ASSERT_EQUALS(cloned->getNumChunks(), 4u);
-
- // Try to merge highest chunks together
- newShardVersion.incMajor();
- cloned = assertGet(cloned->cloneMerge(BSON("a" << 20), BSON("a" << MAXKEY), newShardVersion));
-
- ASSERT(cloned->keyBelongsToMe(BSON("a" << 30)));
- ASSERT_EQUALS(cloned->getNumChunks(), 3u);
- ASSERT_EQUALS(cloned->getShardVersion().majorVersion(), 6);
-}
-
-TEST_F(ThreeChunkWithRangeGapFixture, MergeChunkFullRange) {
- ChunkVersion newShardVersion(5, 0, getCollMetadata().getShardVersion().epoch());
-
- // Add one chunk to complete the range
- unique_ptr<CollectionMetadata> cloned(
- getCollMetadata().clonePlusChunk(BSON("a" << 20), BSON("a" << 30), newShardVersion));
- ASSERT_EQUALS(cloned->getNumChunks(), 4u);
-
- // Try to merge all chunks together
- newShardVersion.incMajor();
- cloned =
- assertGet(cloned->cloneMerge(BSON("a" << MINKEY), BSON("a" << MAXKEY), newShardVersion));
-
- ASSERT(cloned->keyBelongsToMe(BSON("a" << 10)));
- ASSERT(cloned->keyBelongsToMe(BSON("a" << 30)));
- ASSERT_EQUALS(cloned->getNumChunks(), 1u);
- ASSERT_EQUALS(cloned->getShardVersion().majorVersion(), 6);
-}
-
-TEST_F(ThreeChunkWithRangeGapFixture, MergeChunkMiddleRange) {
- ChunkVersion newShardVersion(5, 0, getCollMetadata().getShardVersion().epoch());
-
- // Add one chunk to complete the range
- unique_ptr<CollectionMetadata> cloned(
- getCollMetadata().clonePlusChunk(BSON("a" << 20), BSON("a" << 30), newShardVersion));
- ASSERT_EQUALS(cloned->getNumChunks(), 4u);
-
- // Try to merge middle two chunks
- newShardVersion.incMajor();
- cloned = assertGet(cloned->cloneMerge(BSON("a" << 10), BSON("a" << 30), newShardVersion));
-
- ASSERT(cloned->keyBelongsToMe(BSON("a" << 20)));
- ASSERT_EQUALS(cloned->getNumChunks(), 3u);
- ASSERT_EQUALS(cloned->getShardVersion().majorVersion(), 6);
-}
-
-TEST_F(ThreeChunkWithRangeGapFixture, CannotMergeWithHole) {
- ChunkVersion newShardVersion(5, 0, getCollMetadata().getShardVersion().epoch());
-
- // Try to merge middle two chunks with a hole in the middle.
- newShardVersion.incMajor();
- ASSERT_NOT_OK(getCollMetadata()
- .cloneMerge(BSON("a" << 10), BSON("a" << 30), newShardVersion)
- .getStatus());
-}
-
} // namespace
} // namespace mongo
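All of the removed split tests exercise one piece of version arithmetic: cloneSplit assigned 'newShardVersion' to the first resulting chunk and bumped the minor version once per split point, which is why CloneSplitBasic starts at 1|99 with two split points and expects 1|101. A standalone sketch with a hypothetical Version type standing in for ChunkVersion (not the real API):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Hypothetical (major, minor) pair standing in for ChunkVersion.
struct Version {
    uint32_t major;
    uint32_t minor;
};

// Mirrors the removed cloneSplit bookkeeping: the first resulting chunk
// keeps newShardVersion; every split point bumps the minor version once.
Version versionAfterSplit(Version newShardVersion, std::size_t numSplitPoints) {
    Version v = newShardVersion;
    for (std::size_t i = 0; i < numSplitPoints; ++i)
        ++v.minor;
    return v;
}

int main() {
    // Matches the removed CloneSplitBasic expectations: two split points
    // applied at 1|99 leave the shard and collection versions at 1|101.
    Version v = versionAfterSplit({1, 99}, 2);
    assert(v.major == 1 && v.minor == 101);
    return 0;
}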
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index ea290611b17..e7728f27560 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -254,14 +254,6 @@ Status mergeChunks(OperationContext* txn,
}
}
- {
- // Ensure that the newly applied chunks would result in a correct metadata state
- ChunkVersion mergeVersion = metadata->getCollVersion();
- mergeVersion.incMinor();
-
- uassertStatusOK(metadata->cloneMerge(minKey, maxKey, mergeVersion));
- }
-
//
// Run _configsvrCommitChunkMerge.
//
diff --git a/src/mongo/db/s/split_chunk_command.cpp b/src/mongo/db/s/split_chunk_command.cpp
index 25fc4681104..90394366218 100644
--- a/src/mongo/db/s/split_chunk_command.cpp
+++ b/src/mongo/db/s/split_chunk_command.cpp
@@ -330,18 +330,6 @@ public:
nss.toString(), msg, expectedCollectionVersion, shardVersion);
}
- auto newShardVersion = collVersion;
- // Increment the minor version once. cloneSplit will increment the minor version
- // once for every split point past the first.
- //
- // TODO: Revisit this interface, it's a bit clunky
- newShardVersion.incMinor();
-
- // Ensure that the newly applied chunks would result in a correct metadata state
- uassertStatusOK(collMetadata->cloneSplit(min, max, splitKeys, newShardVersion));
-
- log() << "splitChunk accepted at version " << shardVersion;
-
auto request = SplitChunkRequest(
nss, shardName, expectedCollectionVersion.epoch(), chunkRange, splitKeys);
@@ -360,8 +348,8 @@ public:
// Refresh chunk metadata regardless of whether or not the split succeeded
//
{
- ChunkVersion shardVersionAfterSplit;
- refreshStatus = shardingState->refreshMetadataNow(txn, nss, &shardVersionAfterSplit);
+ ChunkVersion unusedShardVersion;
+ refreshStatus = shardingState->refreshMetadataNow(txn, nss, &unusedShardVersion);
if (!refreshStatus.isOK()) {
errmsg = str::stream() << "failed to refresh metadata for split chunk ["
@@ -414,46 +402,42 @@ public:
return appendCommandStatus(result, writeConcernStatus);
}
- {
- // Select chunk to move out for "top chunk optimization".
- KeyPattern shardKeyPattern(collMetadata->getKeyPattern());
-
- AutoGetCollection autoColl(txn, nss, MODE_IS);
+ // Select chunk to move out for "top chunk optimization".
+ KeyPattern shardKeyPattern(collMetadata->getKeyPattern());
- Collection* const collection = autoColl.getCollection();
- if (!collection) {
- warning() << "will not perform top-chunk checking since " << nss.toString()
- << " does not exist after splitting";
- return true;
- }
+ AutoGetCollection autoColl(txn, nss, MODE_IS);
- // Allow multiKey based on the invariant that shard keys must be
- // single-valued. Therefore, any multi-key index prefixed by shard
- // key cannot be multikey over the shard key fields.
- IndexDescriptor* idx =
- collection->getIndexCatalog()->findShardKeyPrefixedIndex(txn, keyPatternObj, false);
+ Collection* const collection = autoColl.getCollection();
+ if (!collection) {
+ warning() << "will not perform top-chunk checking since " << nss.toString()
+ << " does not exist after splitting";
+ return true;
+ }
- if (idx == NULL) {
- return true;
- }
+ // Allow multiKey based on the invariant that shard keys must be single-valued. Therefore,
+ // any multi-key index prefixed by shard key cannot be multikey over the shard key fields.
+ IndexDescriptor* idx =
+ collection->getIndexCatalog()->findShardKeyPrefixedIndex(txn, keyPatternObj, false);
+ if (!idx) {
+ return true;
+ }
- auto backChunk = ChunkType();
- backChunk.setMin(splitKeys.back());
- backChunk.setMax(max);
-
- auto frontChunk = ChunkType();
- frontChunk.setMin(min);
- frontChunk.setMax(splitKeys.front());
-
- if (shardKeyPattern.globalMax().woCompare(backChunk.getMax()) == 0 &&
- checkIfSingleDoc(txn, collection, idx, &backChunk)) {
- result.append("shouldMigrate",
- BSON("min" << backChunk.getMin() << "max" << backChunk.getMax()));
- } else if (shardKeyPattern.globalMin().woCompare(frontChunk.getMin()) == 0 &&
- checkIfSingleDoc(txn, collection, idx, &frontChunk)) {
- result.append("shouldMigrate",
- BSON("min" << frontChunk.getMin() << "max" << frontChunk.getMax()));
- }
+ auto backChunk = ChunkType();
+ backChunk.setMin(splitKeys.back());
+ backChunk.setMax(max);
+
+ auto frontChunk = ChunkType();
+ frontChunk.setMin(min);
+ frontChunk.setMax(splitKeys.front());
+
+ if (shardKeyPattern.globalMax().woCompare(backChunk.getMax()) == 0 &&
+ checkIfSingleDoc(txn, collection, idx, &backChunk)) {
+ result.append("shouldMigrate",
+ BSON("min" << backChunk.getMin() << "max" << backChunk.getMax()));
+ } else if (shardKeyPattern.globalMin().woCompare(frontChunk.getMin()) == 0 &&
+ checkIfSingleDoc(txn, collection, idx, &frontChunk)) {
+ result.append("shouldMigrate",
+ BSON("min" << frontChunk.getMin() << "max" << frontChunk.getMax()));
}
return true;
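The re-indented block above is the "top chunk optimization": after a successful split, if the resulting chunk touching the global minimum or maximum of the shard key space holds only a single document (per checkIfSingleDoc), the command reports it under 'shouldMigrate' so the caller can schedule a migration; this typically pays off when a monotonically increasing shard key funnels all inserts into the top chunk. A standalone sketch of just that decision, using hypothetical integer bounds and booleans in place of the BSON comparisons and the index scan (not the real API):

#include <optional>
#include <utility>

// Hypothetical integer stand-ins for the BSON shard key bounds; the real
// code compares against shardKeyPattern.globalMin()/globalMax() with
// woCompare and runs checkIfSingleDoc over the shard-key-prefixed index.
using Chunk = std::pair<int, int>;  // [min, max)

std::optional<Chunk> chunkToMigrate(Chunk frontChunk,  // [min, splitKeys.front())
                                    Chunk backChunk,   // [splitKeys.back(), max)
                                    int globalMin,
                                    int globalMax,
                                    bool backChunkHasSingleDoc,
                                    bool frontChunkHasSingleDoc) {
    // A single-document chunk that caps the key space suggests a monotonic
    // shard key; nominating it for migration spreads future inserts.
    if (backChunk.second == globalMax && backChunkHasSingleDoc)
        return backChunk;
    if (frontChunk.first == globalMin && frontChunkHasSingleDoc)
        return frontChunk;
    return std::nullopt;
}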