summaryrefslogtreecommitdiff
path: root/src/mongo/db
diff options
context:
space:
mode:
authorKaloian Manassiev <kaloian.manassiev@mongodb.com>2016-09-16 18:08:29 -0400
committerKaloian Manassiev <kaloian.manassiev@mongodb.com>2016-11-02 16:46:47 -0400
commitf0ada5e29eb5218c257897bfd1a8fb1e36e06733 (patch)
treeaf7cb64596f73165d813e7eb68a030e6028c769d /src/mongo/db
parentfd16deb6dd3d08756f15c181facc707cb53f4e15 (diff)
downloadmongo-f0ada5e29eb5218c257897bfd1a8fb1e36e06733.tar.gz
SERVER-25665 Make splitChunk and moveChunk commands use 'chunkVersion'
This change makes the collection metadata on the shard also include the chunk version and makes the splitChunk and moveChunk commands use it when checking for consistency.
Diffstat (limited to 'src/mongo/db')
-rw-r--r--src/mongo/db/range_arithmetic.cpp12
-rw-r--r--src/mongo/db/range_arithmetic.h33
-rw-r--r--src/mongo/db/range_arithmetic_test.cpp81
-rw-r--r--src/mongo/db/s/collection_metadata.cpp145
-rw-r--r--src/mongo/db/s/collection_metadata.h17
-rw-r--r--src/mongo/db/s/collection_metadata_test.cpp41
-rw-r--r--src/mongo/db/s/metadata_loader.cpp9
-rw-r--r--src/mongo/db/s/metadata_manager.cpp26
-rw-r--r--src/mongo/db/s/metadata_manager_test.cpp25
-rw-r--r--src/mongo/db/s/migration_source_manager.cpp30
-rw-r--r--src/mongo/db/s/split_chunk_command.cpp36
11 files changed, 276 insertions, 179 deletions
diff --git a/src/mongo/db/range_arithmetic.cpp b/src/mongo/db/range_arithmetic.cpp
index 603dcd324f5..c02daa7c03c 100644
--- a/src/mongo/db/range_arithmetic.cpp
+++ b/src/mongo/db/range_arithmetic.cpp
@@ -37,6 +37,9 @@ using std::pair;
using std::string;
using std::stringstream;
+CachedChunkInfo::CachedChunkInfo(BSONObj maxKey, ChunkVersion version)
+ : _maxKey(std::move(maxKey)), _version(version) {}
+
bool rangeContains(const BSONObj& inclusiveLower,
const BSONObj& exclusiveUpper,
const BSONObj& point) {
@@ -78,7 +81,7 @@ OverlapBounds rangeMapOverlapBounds(const RangeMap& ranges,
--low;
// If the previous range's max value is lte our min value
- if (low->second.woCompare(inclusiveLower) < 1) {
+ if (low->second.getMaxKey().woCompare(inclusiveLower) < 1) {
low = next;
}
}
@@ -97,7 +100,7 @@ void getRangeMapOverlap(const RangeMap& ranges,
overlap->clear();
OverlapBounds bounds = rangeMapOverlapBounds(ranges, inclusiveLower, exclusiveUpper);
for (RangeMap::const_iterator it = bounds.first; it != bounds.second; ++it) {
- overlap->push_back(make_pair(it->first, it->second));
+ overlap->push_back(make_pair(it->first, it->second.getMaxKey()));
}
}
@@ -116,7 +119,7 @@ bool rangeMapContains(const RangeMap& ranges,
return false;
return bounds.first->first.woCompare(inclusiveLower) == 0 &&
- bounds.first->second.woCompare(exclusiveUpper) == 0;
+ bounds.first->second.getMaxKey().woCompare(exclusiveUpper) == 0;
}
string rangeToString(const BSONObj& inclusiveLower, const BSONObj& exclusiveUpper) {
@@ -134,4 +137,5 @@ string overlapToString(RangeVector overlap) {
}
return ss.str();
}
-}
+
+} // namespace mongo
diff --git a/src/mongo/db/range_arithmetic.h b/src/mongo/db/range_arithmetic.h
index f15e955aeba..411602c2ffd 100644
--- a/src/mongo/db/range_arithmetic.h
+++ b/src/mongo/db/range_arithmetic.h
@@ -34,6 +34,7 @@
#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/db/jsobj.h"
+#include "mongo/s/chunk_version.h"
namespace mongo {
@@ -103,13 +104,32 @@ int compareRanges(const BSONObj& rangeMin1,
const BSONObj& rangeMax2);
/**
- * A RangeMap is a mapping of a BSON range from lower->upper (lower maps to upper), using
- * standard BSON woCompare. Upper bound is exclusive.
+ * Represents cached chunk information on the shard.
+ */
+class CachedChunkInfo {
+public:
+ CachedChunkInfo(BSONObj maxKey, ChunkVersion version);
+
+ const BSONObj& getMaxKey() const {
+ return _maxKey;
+ }
+
+ const ChunkVersion& getVersion() const {
+ return _version;
+ }
+
+private:
+ BSONObj _maxKey;
+ ChunkVersion _version;
+};
+
+/**
+ * A RangeMap is a mapping of an inclusive lower BSON key to an upper key and chunk version, using
+ * standard BSON woCompare. The upper bound is exclusive.
*
- * NOTE: For overlap testing to work correctly, there may be no overlaps present in the map
- * itself.
+ * NOTE: For overlap testing to work correctly, there may be no overlaps present in the map itself.
*/
-typedef BSONObjIndexedMap<BSONObj> RangeMap;
+typedef BSONObjIndexedMap<CachedChunkInfo> RangeMap;
/**
* A RangeVector is a list of [lower,upper) ranges.
@@ -150,4 +170,5 @@ std::string rangeToString(const BSONObj& inclusiveLower, const BSONObj& exclusiv
* std::string representation of overlapping ranges as a list "[range1),[range2),..."
*/
std::string overlapToString(RangeVector overlap);
-}
+
+} // namespace mongo
diff --git a/src/mongo/db/range_arithmetic_test.cpp b/src/mongo/db/range_arithmetic_test.cpp
index afec7e05e36..8f20b4e1a34 100644
--- a/src/mongo/db/range_arithmetic_test.cpp
+++ b/src/mongo/db/range_arithmetic_test.cpp
@@ -29,16 +29,9 @@
#include "mongo/db/range_arithmetic.h"
#include "mongo/unittest/unittest.h"
+namespace mongo {
namespace {
-using mongo::BSONObj;
-using mongo::MAXKEY;
-using mongo::MINKEY;
-using mongo::RangeMap;
-using mongo::RangeVector;
-using mongo::SimpleBSONObjComparator;
-using mongo::rangeMapOverlaps;
-using mongo::rangeOverlaps;
using std::make_pair;
TEST(BSONRange, SmallerLowerRangeNonSubset) {
@@ -78,11 +71,15 @@ TEST(BSONRange, EqualRange) {
}
TEST(RangeMap, RangeMapOverlap) {
- SimpleBSONObjComparator bsonCmp;
- RangeMap rangeMap = bsonCmp.makeBSONObjIndexedMap<BSONObj>();
- rangeMap.insert(make_pair(BSON("x" << 100), BSON("x" << 200)));
- rangeMap.insert(make_pair(BSON("x" << 200), BSON("x" << 300)));
- rangeMap.insert(make_pair(BSON("x" << 300), BSON("x" << 400)));
+ const OID epoch = OID::gen();
+
+ RangeMap rangeMap = SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>();
+ rangeMap.insert(
+ make_pair(BSON("x" << 100), CachedChunkInfo(BSON("x" << 200), ChunkVersion(1, 0, epoch))));
+ rangeMap.insert(
+ make_pair(BSON("x" << 200), CachedChunkInfo(BSON("x" << 300), ChunkVersion(2, 0, epoch))));
+ rangeMap.insert(
+ make_pair(BSON("x" << 300), CachedChunkInfo(BSON("x" << 400), ChunkVersion(3, 0, epoch))));
RangeVector overlap;
getRangeMapOverlap(rangeMap, BSON("x" << 50), BSON("x" << 350), &overlap);
@@ -92,10 +89,13 @@ TEST(RangeMap, RangeMapOverlap) {
}
TEST(RangeMap, RangeMapOverlapPartial) {
- SimpleBSONObjComparator bsonCmp;
- RangeMap rangeMap = bsonCmp.makeBSONObjIndexedMap<BSONObj>();
- rangeMap.insert(make_pair(BSON("x" << 100), BSON("x" << 200)));
- rangeMap.insert(make_pair(BSON("x" << 200), BSON("x" << 300)));
+ const OID epoch = OID::gen();
+
+ RangeMap rangeMap = SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>();
+ rangeMap.insert(
+ make_pair(BSON("x" << 100), CachedChunkInfo(BSON("x" << 200), ChunkVersion(1, 0, epoch))));
+ rangeMap.insert(
+ make_pair(BSON("x" << 200), CachedChunkInfo(BSON("x" << 300), ChunkVersion(2, 0, epoch))));
RangeVector overlap;
getRangeMapOverlap(rangeMap, BSON("x" << 150), BSON("x" << 250), &overlap);
@@ -105,9 +105,11 @@ TEST(RangeMap, RangeMapOverlapPartial) {
}
TEST(RangeMap, RangeMapOverlapInner) {
- SimpleBSONObjComparator bsonCmp;
- RangeMap rangeMap = bsonCmp.makeBSONObjIndexedMap<BSONObj>();
- rangeMap.insert(make_pair(BSON("x" << 100), BSON("x" << 200)));
+ const OID epoch = OID::gen();
+
+ RangeMap rangeMap = SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>();
+ rangeMap.insert(
+ make_pair(BSON("x" << 100), CachedChunkInfo(BSON("x" << 200), ChunkVersion(1, 0, epoch))));
RangeVector overlap;
getRangeMapOverlap(rangeMap, BSON("x" << 125), BSON("x" << 150), &overlap);
@@ -117,10 +119,13 @@ TEST(RangeMap, RangeMapOverlapInner) {
}
TEST(RangeMap, RangeMapNoOverlap) {
- SimpleBSONObjComparator bsonCmp;
- RangeMap rangeMap = bsonCmp.makeBSONObjIndexedMap<BSONObj>();
- rangeMap.insert(make_pair(BSON("x" << 100), BSON("x" << 200)));
- rangeMap.insert(make_pair(BSON("x" << 300), BSON("x" << 400)));
+ const OID epoch = OID::gen();
+
+ RangeMap rangeMap = SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>();
+ rangeMap.insert(
+ make_pair(BSON("x" << 100), CachedChunkInfo(BSON("x" << 200), ChunkVersion(1, 0, epoch))));
+ rangeMap.insert(
+ make_pair(BSON("x" << 300), CachedChunkInfo(BSON("x" << 400), ChunkVersion(2, 0, epoch))));
RangeVector overlap;
getRangeMapOverlap(rangeMap, BSON("x" << 200), BSON("x" << 300), &overlap);
@@ -129,9 +134,11 @@ TEST(RangeMap, RangeMapNoOverlap) {
}
TEST(RangeMap, RangeMapOverlaps) {
- SimpleBSONObjComparator bsonCmp;
- RangeMap rangeMap = bsonCmp.makeBSONObjIndexedMap<BSONObj>();
- rangeMap.insert(make_pair(BSON("x" << 100), BSON("x" << 200)));
+ const OID epoch = OID::gen();
+
+ RangeMap rangeMap = SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>();
+ rangeMap.insert(
+ make_pair(BSON("x" << 100), CachedChunkInfo(BSON("x" << 200), ChunkVersion(1, 0, epoch))));
ASSERT(rangeMapOverlaps(rangeMap, BSON("x" << 100), BSON("x" << 200)));
ASSERT(rangeMapOverlaps(rangeMap, BSON("x" << 99), BSON("x" << 200)));
@@ -142,9 +149,11 @@ TEST(RangeMap, RangeMapOverlaps) {
}
TEST(RangeMap, RangeMapContains) {
- SimpleBSONObjComparator bsonCmp;
- RangeMap rangeMap = bsonCmp.makeBSONObjIndexedMap<BSONObj>();
- rangeMap.insert(make_pair(BSON("x" << 100), BSON("x" << 200)));
+ const OID epoch = OID::gen();
+
+ RangeMap rangeMap = SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>();
+ rangeMap.insert(
+ make_pair(BSON("x" << 100), CachedChunkInfo(BSON("x" << 200), ChunkVersion(1, 0, epoch))));
ASSERT(rangeMapContains(rangeMap, BSON("x" << 100), BSON("x" << 200)));
ASSERT(!rangeMapContains(rangeMap, BSON("x" << 99), BSON("x" << 200)));
@@ -152,12 +161,16 @@ TEST(RangeMap, RangeMapContains) {
}
TEST(RangeMap, RangeMapContainsMinMax) {
- SimpleBSONObjComparator bsonCmp;
- RangeMap rangeMap = bsonCmp.makeBSONObjIndexedMap<BSONObj>();
- rangeMap.insert(make_pair(BSON("x" << MINKEY), BSON("x" << MAXKEY)));
+ const OID epoch = OID::gen();
+
+ RangeMap rangeMap = SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>();
+ rangeMap.insert(make_pair(BSON("x" << MINKEY),
+ CachedChunkInfo(BSON("x" << MAXKEY), ChunkVersion(1, 0, epoch))));
ASSERT(rangeMapContains(rangeMap, BSON("x" << MINKEY), BSON("x" << MAXKEY)));
ASSERT(!rangeMapContains(rangeMap, BSON("x" << 1), BSON("x" << MAXKEY)));
ASSERT(!rangeMapContains(rangeMap, BSON("x" << MINKEY), BSON("x" << 1)));
}
-}
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index b2cf4f443f8..de872578c61 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -48,24 +48,25 @@ using std::vector;
using str::stream;
CollectionMetadata::CollectionMetadata()
- : _pendingMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<BSONObj>()),
- _chunksMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<BSONObj>()),
- _rangesMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<BSONObj>()) {}
+ : _pendingMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()),
+ _chunksMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()),
+ _rangesMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()) {}
CollectionMetadata::CollectionMetadata(const BSONObj& keyPattern, ChunkVersion collectionVersion)
: _collVersion(collectionVersion),
_shardVersion(ChunkVersion(0, 0, collectionVersion.epoch())),
_keyPattern(keyPattern.getOwned()),
- _pendingMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<BSONObj>()),
- _chunksMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<BSONObj>()),
- _rangesMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<BSONObj>()) {}
+ _pendingMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()),
+ _chunksMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()),
+ _rangesMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()) {}
CollectionMetadata::~CollectionMetadata() = default;
unique_ptr<CollectionMetadata> CollectionMetadata::clonePlusChunk(
- const BSONObj& minKey, const BSONObj& maxKey, const ChunkVersion& newShardVersion) const {
- invariant(newShardVersion.epoch() == _shardVersion.epoch());
- invariant(newShardVersion.isSet());
+ const BSONObj& minKey, const BSONObj& maxKey, const ChunkVersion& chunkVersion) const {
+ invariant(chunkVersion.epoch() == _shardVersion.epoch());
+ invariant(chunkVersion.isSet());
+ invariant(chunkVersion > _shardVersion);
invariant(minKey.woCompare(maxKey) < 0);
invariant(!rangeMapOverlaps(_chunksMap, minKey, maxKey));
@@ -74,9 +75,10 @@ unique_ptr<CollectionMetadata> CollectionMetadata::clonePlusChunk(
metadata->fillKeyPatternFields();
metadata->_pendingMap = _pendingMap;
metadata->_chunksMap = _chunksMap;
- metadata->_chunksMap.insert(make_pair(minKey.getOwned(), maxKey.getOwned()));
- metadata->_shardVersion = newShardVersion;
- metadata->_collVersion = newShardVersion > _collVersion ? newShardVersion : _collVersion;
+ metadata->_chunksMap.insert(
+ make_pair(minKey.getOwned(), CachedChunkInfo(maxKey.getOwned(), chunkVersion)));
+ metadata->_shardVersion = chunkVersion;
+ metadata->_collVersion = chunkVersion > _collVersion ? chunkVersion : _collVersion;
metadata->fillRanges();
invariant(metadata->isValid());
@@ -115,11 +117,11 @@ std::unique_ptr<CollectionMetadata> CollectionMetadata::clonePlusPending(
metadata->_shardVersion = _shardVersion;
metadata->_collVersion = _collVersion;
- // If there are any pending chunks on the interval to be added this is ok, since pending
- // chunks aren't officially tracked yet and something may have changed on servers we do not
- // see yet.
- // We remove any chunks we overlap, the remote request starting a chunk migration must have
- // been authoritative.
+ // If there are any pending chunks on the interval to be added this is ok, since pending chunks
+ // aren't officially tracked yet and something may have changed on servers we do not see yet.
+ //
+ // We remove any chunks we overlap because the remote request starting a chunk migration is what
+ // is authoritative.
if (rangeMapOverlaps(_pendingMap, chunk.getMin(), chunk.getMax())) {
RangeVector pendingOverlap;
@@ -134,7 +136,10 @@ std::unique_ptr<CollectionMetadata> CollectionMetadata::clonePlusPending(
}
}
- metadata->_pendingMap.insert(make_pair(chunk.getMin(), chunk.getMax()));
+ // The pending map entry cannot contain a specific chunk version because we don't know what
+ // version would be generated for it at commit time. That's why we insert an IGNORED value.
+ metadata->_pendingMap.insert(
+ make_pair(chunk.getMin(), CachedChunkInfo(chunk.getMax(), ChunkVersion::IGNORED())));
invariant(metadata->isValid());
return metadata;
@@ -155,22 +160,7 @@ bool CollectionMetadata::keyBelongsToMe(const BSONObj& key) const {
if (it != _rangesMap.begin())
it--;
- bool good = rangeContains(it->first, it->second, key);
-
-#if 0
- // DISABLED because of SERVER-11175 - huge amount of logging
- // Logs if the point doesn't belong here.
- if ( !good ) {
- log() << "bad: " << key << " " << it->first << " " << key.woCompare( it->first ) << " "
- << key.woCompare( it->second );
-
- for ( RangeMap::const_iterator i = _rangesMap.begin(); i != _rangesMap.end(); ++i ) {
- log() << "\t" << i->first << "\t" << i->second << "\t";
- }
- }
-#endif
-
- return good;
+ return rangeContains(it->first, it->second.getMaxKey(), key);
}
bool CollectionMetadata::keyIsPending(const BSONObj& key) const {
@@ -187,7 +177,7 @@ bool CollectionMetadata::keyIsPending(const BSONObj& key) const {
if (it != _pendingMap.begin())
it--;
- bool isPending = rangeContains(it->first, it->second, key);
+ bool isPending = rangeContains(it->first, it->second.getMaxKey(), key);
return isPending;
}
@@ -201,15 +191,18 @@ bool CollectionMetadata::getNextChunk(const BSONObj& lookupKey, ChunkType* chunk
lowerChunkIt = _chunksMap.end();
}
- if (lowerChunkIt != _chunksMap.end() && lowerChunkIt->second.woCompare(lookupKey) > 0) {
+ if (lowerChunkIt != _chunksMap.end() &&
+ lowerChunkIt->second.getMaxKey().woCompare(lookupKey) > 0) {
chunk->setMin(lowerChunkIt->first);
- chunk->setMax(lowerChunkIt->second);
+ chunk->setMax(lowerChunkIt->second.getMaxKey());
+ chunk->setVersion(lowerChunkIt->second.getVersion());
return true;
}
if (upperChunkIt != _chunksMap.end()) {
chunk->setMin(upperChunkIt->first);
- chunk->setMax(upperChunkIt->second);
+ chunk->setMax(upperChunkIt->second.getMaxKey());
+ chunk->setVersion(upperChunkIt->second.getVersion());
return true;
}
@@ -224,7 +217,8 @@ bool CollectionMetadata::getDifferentChunk(const BSONObj& chunkMinKey,
while (lowerChunkIt != upperChunkIt) {
if (lowerChunkIt->first.woCompare(chunkMinKey) != 0) {
differentChunk->setMin(lowerChunkIt->first);
- differentChunk->setMax(lowerChunkIt->second);
+ differentChunk->setMax(lowerChunkIt->second.getMaxKey());
+ differentChunk->setVersion(lowerChunkIt->second.getVersion());
return true;
}
++lowerChunkIt;
@@ -233,6 +227,37 @@ bool CollectionMetadata::getDifferentChunk(const BSONObj& chunkMinKey,
return false;
}
+Status CollectionMetadata::checkChunkIsValid(const ChunkType& chunk) {
+ ChunkType existingChunk;
+
+ if (!getNextChunk(chunk.getMin(), &existingChunk)) {
+ return {ErrorCodes::IncompatibleShardingMetadata,
+ str::stream() << "Chunk with bounds "
+ << ChunkRange(chunk.getMin(), chunk.getMax()).toString()
+ << " is not owned by this shard."};
+ }
+
+ if (existingChunk.getMin().woCompare(chunk.getMin()) ||
+ existingChunk.getMax().woCompare(chunk.getMax())) {
+ return {ErrorCodes::IncompatibleShardingMetadata,
+ str::stream() << "Unable to find chunk with the exact bounds "
+ << ChunkRange(chunk.getMin(), chunk.getMax()).toString()
+ << " at collection version "
+ << getCollVersion().toString()};
+ }
+
+ if (chunk.isVersionSet() && !chunk.getVersion().isStrictlyEqualTo(existingChunk.getVersion())) {
+ return {ErrorCodes::IncompatibleShardingMetadata,
+ str::stream() << "Chunk with the specified bounds exists but the version does not "
+ "match. Expected: "
+ << chunk.getVersion().toString()
+ << ", actual: "
+ << existingChunk.getVersion().toString()};
+ }
+
+ return Status::OK();
+}
+
void CollectionMetadata::toBSONBasic(BSONObjBuilder& bb) const {
_collVersion.addToBSON(bb, "collVersion");
_shardVersion.addToBSON(bb, "shardVersion");
@@ -246,7 +271,7 @@ void CollectionMetadata::toBSONChunks(BSONArrayBuilder& bb) const {
for (RangeMap::const_iterator it = _chunksMap.begin(); it != _chunksMap.end(); ++it) {
BSONArrayBuilder chunkBB(bb.subarrayStart());
chunkBB.append(it->first);
- chunkBB.append(it->second);
+ chunkBB.append(it->second.getMaxKey());
chunkBB.done();
}
}
@@ -258,7 +283,7 @@ void CollectionMetadata::toBSONPending(BSONArrayBuilder& bb) const {
for (RangeMap::const_iterator it = _pendingMap.begin(); it != _pendingMap.end(); ++it) {
BSONArrayBuilder pendingBB(bb.subarrayStart());
pendingBB.append(it->first);
- pendingBB.append(it->second);
+ pendingBB.append(it->second.getMaxKey());
pendingBB.done();
}
}
@@ -289,8 +314,9 @@ bool CollectionMetadata::getNextOrphanRange(const BSONObj& origLookupKey, KeyRan
// If we overlap, continue after the overlap
// TODO: Could optimize slightly by finding next non-contiguous chunk
- if (lowerChunkIt != _chunksMap.end() && lowerChunkIt->second.woCompare(lookupKey) > 0) {
- lookupKey = lowerChunkIt->second;
+ if (lowerChunkIt != _chunksMap.end() &&
+ lowerChunkIt->second.getMaxKey().woCompare(lookupKey) > 0) {
+ lookupKey = lowerChunkIt->second.getMaxKey();
continue;
}
@@ -309,8 +335,8 @@ bool CollectionMetadata::getNextOrphanRange(const BSONObj& origLookupKey, KeyRan
// If we overlap, continue after the overlap
// TODO: Could optimize slightly by finding next non-contiguous chunk
if (lowerPendingIt != _pendingMap.end() &&
- lowerPendingIt->second.woCompare(lookupKey) > 0) {
- lookupKey = lowerPendingIt->second;
+ lowerPendingIt->second.getMaxKey().woCompare(lookupKey) > 0) {
+ lookupKey = lowerPendingIt->second.getMaxKey();
continue;
}
@@ -324,8 +350,9 @@ bool CollectionMetadata::getNextOrphanRange(const BSONObj& origLookupKey, KeyRan
range->minKey = getMinKey();
range->maxKey = maxKey;
- if (lowerChunkIt != _chunksMap.end() && lowerChunkIt->second.woCompare(range->minKey) > 0) {
- range->minKey = lowerChunkIt->second;
+ if (lowerChunkIt != _chunksMap.end() &&
+ lowerChunkIt->second.getMaxKey().woCompare(range->minKey) > 0) {
+ range->minKey = lowerChunkIt->second.getMaxKey();
}
if (upperChunkIt != _chunksMap.end() && upperChunkIt->first.woCompare(range->maxKey) < 0) {
@@ -333,8 +360,8 @@ bool CollectionMetadata::getNextOrphanRange(const BSONObj& origLookupKey, KeyRan
}
if (lowerPendingIt != _pendingMap.end() &&
- lowerPendingIt->second.woCompare(range->minKey) > 0) {
- range->minKey = lowerPendingIt->second;
+ lowerPendingIt->second.getMaxKey().woCompare(range->minKey) > 0) {
+ range->minKey = lowerPendingIt->second.getMaxKey();
}
if (upperPendingIt != _pendingMap.end() &&
@@ -401,14 +428,12 @@ void CollectionMetadata::fillRanges() {
if (_chunksMap.empty())
return;
- // Load the chunk information, coallesceing their ranges. The version for this shard
- // would be the highest version for any of the chunks.
- RangeMap::const_iterator it = _chunksMap.begin();
+ // Load the chunk information, coalescing their ranges. The version for this shard would be
+ // the highest version for any of the chunks.
BSONObj min, max;
- while (it != _chunksMap.end()) {
- BSONObj currMin = it->first;
- BSONObj currMax = it->second;
- ++it;
+ for (const auto& entry : _chunksMap) {
+ BSONObj currMin = entry.first;
+ BSONObj currMax = entry.second.getMaxKey();
// coalesce the chunk's bounds in ranges if they are adjacent chunks
if (min.isEmpty()) {
@@ -421,14 +446,16 @@ void CollectionMetadata::fillRanges() {
continue;
}
- _rangesMap.insert(make_pair(min, max));
+ _rangesMap.insert(make_pair(min, CachedChunkInfo(max, ChunkVersion::IGNORED())));
min = currMin;
max = currMax;
}
- dassert(!min.isEmpty());
- _rangesMap.insert(make_pair(min, max));
+ invariant(!min.isEmpty());
+ invariant(!max.isEmpty());
+
+ _rangesMap.insert(make_pair(min, CachedChunkInfo(max, ChunkVersion::IGNORED())));
}
void CollectionMetadata::fillKeyPatternFields() {
diff --git a/src/mongo/db/s/collection_metadata.h b/src/mongo/db/s/collection_metadata.h
index 4a4c637cffe..b3c1c92a6d0 100644
--- a/src/mongo/db/s/collection_metadata.h
+++ b/src/mongo/db/s/collection_metadata.h
@@ -114,6 +114,13 @@ public:
bool getDifferentChunk(const BSONObj& chunkMinKey, ChunkType* differentChunk) const;
/**
+ * Validates that the passed-in chunk's bounds exactly match a chunk in the metadata cache. If
+ * the chunk's version has been set as well (it might not be in the case of request coming from
+ * a 3.2 shard), also ensures that the versions are the same.
+ */
+ Status checkChunkIsValid(const ChunkType& chunk);
+
+ /**
* Given a key in the shard key range, get the next range which overlaps or is greater than
* this key.
*
@@ -184,14 +191,16 @@ public:
std::string toStringBasic() const;
/**
- * This method is used only for unit-tests and it returns a new metadata's instance based on
- * 'this's state by adding 'chunk'. The new metadata can never be zero.
+ * This method is used only for unit-tests and it returns a new metadata's instance based on the
+ * current state by adding a chunk with the specified bounds and version. The chunk's version
+ * must be higher than that of all chunks which are in the cache.
*
- * It will fassert if the chunk bounds are incorrect or overlap an existing chunk.
+ * It will fassert if the chunk bounds are incorrect or overlap an existing chunk or if the
+ * chunk version is lower than the maximum one.
*/
std::unique_ptr<CollectionMetadata> clonePlusChunk(const BSONObj& minKey,
const BSONObj& maxKey,
- const ChunkVersion& newShardVersion) const;
+ const ChunkVersion& chunkVersion) const;
/**
* Returns true if this metadata was loaded with all necessary information.
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index cbb456c0c9e..e3c0cc318e8 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -285,16 +285,12 @@ protected:
setRemote(HostAndPort("FakeRemoteClient:34567"));
configTargeter()->setFindHostReturnValue(configHost);
- OID epoch = OID::gen();
-
- ChunkVersion chunkVersion = ChunkVersion(1, 0, epoch);
-
CollectionType collType;
collType.setNs(NamespaceString{"test.foo"});
collType.setKeyPattern(BSON("a" << 1));
collType.setUnique(false);
collType.setUpdatedAt(Date_t::fromMillisSinceEpoch(1));
- collType.setEpoch(epoch);
+ collType.setEpoch(chunkVersion.epoch());
BSONObj fooSingle = BSON(
ChunkType::name("test.foo-a_10")
@@ -302,7 +298,7 @@ protected:
<< ChunkType::min(BSON("a" << 10))
<< ChunkType::max(BSON("a" << 20))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(chunkVersion.toLong()))
- << ChunkType::DEPRECATED_epoch(epoch)
+ << ChunkType::DEPRECATED_epoch(chunkVersion.epoch())
<< ChunkType::shard("shard0000"));
std::vector<BSONObj> chunksToSend{fooSingle};
@@ -326,6 +322,8 @@ protected:
return _metadata;
}
+ const ChunkVersion chunkVersion{ChunkVersion(1, 0, OID::gen())};
+
private:
CollectionMetadata _metadata;
const HostAndPort configHost{HostAndPort(CONFIG_HOST_PORT)};
@@ -355,6 +353,7 @@ TEST_F(SingleChunkFixture, getNextFromEmpty) {
ASSERT(getCollMetadata().getNextChunk(getCollMetadata().getMinKey(), &nextChunk));
ASSERT_EQUALS(0, nextChunk.getMin().woCompare(BSON("a" << 10)));
ASSERT_EQUALS(0, nextChunk.getMax().woCompare(BSON("a" << 20)));
+ ASSERT_EQUALS(chunkVersion, nextChunk.getVersion());
}
TEST_F(SingleChunkFixture, GetLastChunkIsFalse) {
@@ -447,6 +446,8 @@ protected:
return _metadata;
}
+ const ChunkVersion chunkVersion{ChunkVersion(1, 0, OID::gen())};
+
private:
CollectionMetadata _metadata;
const HostAndPort configHost{HostAndPort(CONFIG_HOST_PORT)};
@@ -473,16 +474,14 @@ protected:
setRemote(HostAndPort("FakeRemoteClient:34567"));
configTargeter()->setFindHostReturnValue(configHost);
- OID epoch = OID::gen();
-
- ChunkVersion chunkVersion = ChunkVersion(1, 0, epoch);
+ ChunkVersion chunkVersion = ChunkVersion(1, 0, OID::gen());
CollectionType collType;
collType.setNs(NamespaceString{"test.foo"});
collType.setKeyPattern(BSON("a" << 1));
collType.setUnique(false);
collType.setUpdatedAt(Date_t::fromMillisSinceEpoch(1));
- collType.setEpoch(epoch);
+ collType.setEpoch(chunkVersion.epoch());
std::vector<BSONObj> chunksToSend;
chunksToSend.push_back(BSON(
@@ -491,15 +490,18 @@ protected:
<< ChunkType::min(BSON("a" << 10 << "b" << 0))
<< ChunkType::max(BSON("a" << 20 << "b" << 0))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(chunkVersion.toLong()))
- << ChunkType::DEPRECATED_epoch(epoch)
+ << ChunkType::DEPRECATED_epoch(chunkVersion.epoch())
<< ChunkType::shard("shard0000")));
+
+ chunkVersion.incMinor();
+
chunksToSend.push_back(BSON(
ChunkType::name("test.foo-a_10")
<< ChunkType::ns("test.foo")
<< ChunkType::min(BSON("a" << 30 << "b" << 0))
<< ChunkType::max(BSON("a" << 40 << "b" << 0))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(chunkVersion.toLong()))
- << ChunkType::DEPRECATED_epoch(epoch)
+ << ChunkType::DEPRECATED_epoch(chunkVersion.epoch())
<< ChunkType::shard("shard0000")));
auto future = launchAsync([this] {
@@ -650,6 +652,21 @@ private:
const HostAndPort configHost{HostAndPort(CONFIG_HOST_PORT)};
};
+TEST_F(ThreeChunkWithRangeGapFixture, ChunkVersionsMatch) {
+ const OID epoch = getCollMetadata().getCollVersion().epoch();
+
+ ChunkType chunk;
+
+ ASSERT(getCollMetadata().getNextChunk(BSON("a" << MINKEY), &chunk));
+ ASSERT_EQ(ChunkVersion(1, 1, epoch), chunk.getVersion());
+
+ ASSERT(getCollMetadata().getNextChunk(BSON("a" << 30), &chunk));
+ ASSERT_EQ(ChunkVersion(1, 2, epoch), chunk.getVersion());
+
+ ASSERT(getCollMetadata().getNextChunk(BSON("a" << 10), &chunk));
+ ASSERT_EQ(ChunkVersion(1, 3, epoch), chunk.getVersion());
+}
+
TEST_F(ThreeChunkWithRangeGapFixture, ShardOwnsDoc) {
ASSERT(getCollMetadata().keyBelongsToMe(BSON("a" << 5)));
ASSERT(getCollMetadata().keyBelongsToMe(BSON("a" << 10)));
diff --git a/src/mongo/db/s/metadata_loader.cpp b/src/mongo/db/s/metadata_loader.cpp
index ea9a0ed87a4..43e718dbe9b 100644
--- a/src/mongo/db/s/metadata_loader.cpp
+++ b/src/mongo/db/s/metadata_loader.cpp
@@ -57,22 +57,23 @@ namespace {
*
* The mongod adapter here tracks only a single shard, and stores ranges by (min, max).
*/
-class SCMConfigDiffTracker : public ConfigDiffTracker<BSONObj> {
+class SCMConfigDiffTracker : public ConfigDiffTracker<CachedChunkInfo> {
public:
SCMConfigDiffTracker(const std::string& ns,
RangeMap* currMap,
ChunkVersion* maxVersion,
MaxChunkVersionMap* maxShardVersions,
const ShardId& currShard)
- : ConfigDiffTracker<BSONObj>(ns, currMap, maxVersion, maxShardVersions),
+ : ConfigDiffTracker<CachedChunkInfo>(ns, currMap, maxVersion, maxShardVersions),
_currShard(currShard) {}
virtual bool isTracked(const ChunkType& chunk) const {
return chunk.getShard() == _currShard;
}
- virtual pair<BSONObj, BSONObj> rangeFor(OperationContext* txn, const ChunkType& chunk) const {
- return make_pair(chunk.getMin(), chunk.getMax());
+ virtual pair<BSONObj, CachedChunkInfo> rangeFor(OperationContext* txn,
+ const ChunkType& chunk) const {
+ return make_pair(chunk.getMin(), CachedChunkInfo(chunk.getMax(), chunk.getVersion()));
}
virtual ShardId shardFor(OperationContext* txn, const ShardId& name) const {
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index a40dac135ab..77c83bb3626 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -47,7 +47,7 @@ MetadataManager::MetadataManager(ServiceContext* sc, NamespaceString nss)
: _nss(std::move(nss)),
_serviceContext(sc),
_activeMetadataTracker(stdx::make_unique<CollectionMetadataTracker>(nullptr)),
- _receivingChunks(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<BSONObj>()),
+ _receivingChunks(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()),
_rangesToClean(
SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<RangeToCleanDescriptor>()) {}
@@ -135,7 +135,7 @@ void MetadataManager::refreshActiveMetadata(std::unique_ptr<CollectionMetadata>
// Resolve any receiving chunks, which might have completed by now
for (auto it = _receivingChunks.begin(); it != _receivingChunks.end();) {
const BSONObj min = it->first;
- const BSONObj max = it->second;
+ const BSONObj max = it->second.getMaxKey();
// Our pending range overlaps at least one chunk
if (rangeMapContains(remoteMetadata->getChunks(), min, max)) {
@@ -164,7 +164,7 @@ void MetadataManager::refreshActiveMetadata(std::unique_ptr<CollectionMetadata>
auto itRecv = _receivingChunks.find(overlapChunkMin.first);
invariant(itRecv != _receivingChunks.end());
- const ChunkRange receivingRange(itRecv->first, itRecv->second);
+ const ChunkRange receivingRange(itRecv->first, itRecv->second.getMaxKey());
_receivingChunks.erase(itRecv);
@@ -181,7 +181,7 @@ void MetadataManager::refreshActiveMetadata(std::unique_ptr<CollectionMetadata>
for (const auto& receivingChunk : _receivingChunks) {
ChunkType chunk;
chunk.setMin(receivingChunk.first);
- chunk.setMax(receivingChunk.second);
+ chunk.setMax(receivingChunk.second.getMaxKey());
remoteMetadata = remoteMetadata->clonePlusPending(chunk);
}
@@ -203,7 +203,7 @@ void MetadataManager::beginReceive(const ChunkRange& range) {
auto itRecv = _receivingChunks.find(overlapChunkMin.first);
invariant(itRecv != _receivingChunks.end());
- const ChunkRange receivingRange(itRecv->first, itRecv->second);
+ const ChunkRange receivingRange(itRecv->first, itRecv->second.getMaxKey());
_receivingChunks.erase(itRecv);
@@ -214,7 +214,9 @@ void MetadataManager::beginReceive(const ChunkRange& range) {
// Need to ensure that the background range deleter task won't delete the range we are about to
// receive
_removeRangeToClean_inlock(range, Status::OK());
- _receivingChunks.insert(std::make_pair(range.getMin().getOwned(), range.getMax().getOwned()));
+ _receivingChunks.insert(
+ std::make_pair(range.getMin().getOwned(),
+ CachedChunkInfo(range.getMax().getOwned(), ChunkVersion::IGNORED())));
// For compatibility with the current range deleter, update the pending chunks on the collection
// metadata to include the chunk being received
@@ -232,7 +234,8 @@ void MetadataManager::forgetReceive(const ChunkRange& range) {
invariant(it != _receivingChunks.end());
// Verify entire ChunkRange is identical, not just the min key.
- invariant(SimpleBSONObjComparator::kInstance.evaluate(it->second == range.getMax()));
+ invariant(
+ SimpleBSONObjComparator::kInstance.evaluate(it->second.getMaxKey() == range.getMax()));
_receivingChunks.erase(it);
}
@@ -347,9 +350,10 @@ RangeMap MetadataManager::getCopyOfRangesToClean() {
}
RangeMap MetadataManager::_getCopyOfRangesToClean_inlock() {
- RangeMap ranges = SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<BSONObj>();
+ RangeMap ranges = SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>();
for (auto it = _rangesToClean.begin(); it != _rangesToClean.end(); ++it) {
- ranges.insert(std::make_pair(it->first, it->second.getMax()));
+ ranges.insert(std::make_pair(
+ it->first, CachedChunkInfo(it->second.getMax(), ChunkVersion::IGNORED())));
}
return ranges;
}
@@ -428,7 +432,7 @@ void MetadataManager::append(BSONObjBuilder* builder) {
BSONArrayBuilder pcArr(builder->subarrayStart("pendingChunks"));
for (const auto& entry : _receivingChunks) {
BSONObjBuilder obj;
- ChunkRange r = ChunkRange(entry.first, entry.second);
+ ChunkRange r = ChunkRange(entry.first, entry.second.getMaxKey());
r.append(&obj);
pcArr.append(obj.done());
}
@@ -437,7 +441,7 @@ void MetadataManager::append(BSONObjBuilder* builder) {
BSONArrayBuilder amrArr(builder->subarrayStart("activeMetadataRanges"));
for (const auto& entry : _activeMetadataTracker->metadata->getChunks()) {
BSONObjBuilder obj;
- ChunkRange r = ChunkRange(entry.first, entry.second);
+ ChunkRange r = ChunkRange(entry.first, entry.second.getMaxKey());
r.append(&obj);
amrArr.append(obj.done());
}
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 977b35a5f70..08f26ac3298 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -58,7 +58,7 @@ protected:
->setScheduleCleanupFunctionForTest([](const NamespaceString& nss) {});
}
- std::unique_ptr<CollectionMetadata> makeEmptyMetadata() {
+ static std::unique_ptr<CollectionMetadata> makeEmptyMetadata() {
return stdx::make_unique<CollectionMetadata>(BSON("key" << 1),
ChunkVersion(1, 0, OID::gen()));
}
@@ -95,7 +95,7 @@ TEST_F(MetadataManagerTest, ResetActiveMetadata) {
ASSERT_EQ(cm2Ptr, scopedMetadata2.getMetadata());
};
-TEST_F(MetadataManagerTest, AddAndRemoveRanges) {
+TEST_F(MetadataManagerTest, AddAndRemoveRangesToClean) {
MetadataManager manager(getServiceContext(), NamespaceString("TestDb", "CollDB"));
ChunkRange cr1 = ChunkRange(BSON("key" << 0), BSON("key" << 10));
ChunkRange cr2 = ChunkRange(BSON("key" << 10), BSON("key" << 20));
@@ -111,7 +111,7 @@ TEST_F(MetadataManagerTest, AddAndRemoveRanges) {
ASSERT_EQ(manager.getCopyOfRangesToClean().size(), 1UL);
auto ranges = manager.getCopyOfRangesToClean();
auto it = ranges.find(cr2.getMin());
- ChunkRange remainingChunk = ChunkRange(it->first, it->second);
+ ChunkRange remainingChunk = ChunkRange(it->first, it->second.getMaxKey());
ASSERT_EQ(remainingChunk.toString(), cr2.toString());
manager.removeRangeToClean(cr2);
}
@@ -129,12 +129,12 @@ TEST_F(MetadataManagerTest, RemoveRangeInMiddleOfRange) {
auto ranges = manager.getCopyOfRangesToClean();
auto it = ranges.find(BSON("key" << 0));
ChunkRange expectedChunk = ChunkRange(BSON("key" << 0), BSON("key" << 4));
- ChunkRange remainingChunk = ChunkRange(it->first, it->second);
+ ChunkRange remainingChunk = ChunkRange(it->first, it->second.getMaxKey());
ASSERT_EQ(remainingChunk.toString(), expectedChunk.toString());
it++;
expectedChunk = ChunkRange(BSON("key" << 6), BSON("key" << 10));
- remainingChunk = ChunkRange(it->first, it->second);
+ remainingChunk = ChunkRange(it->first, it->second.getMaxKey());
ASSERT_EQ(remainingChunk.toString(), expectedChunk.toString());
manager.removeRangeToClean(cr1);
@@ -151,7 +151,7 @@ TEST_F(MetadataManagerTest, RemoveRangeWithSingleRangeOverlap) {
ASSERT_EQ(manager.getCopyOfRangesToClean().size(), 1UL);
auto ranges = manager.getCopyOfRangesToClean();
auto it = ranges.find(BSON("key" << 5));
- ChunkRange remainingChunk = ChunkRange(it->first, it->second);
+ ChunkRange remainingChunk = ChunkRange(it->first, it->second.getMaxKey());
ChunkRange expectedChunk = ChunkRange(BSON("key" << 5), BSON("key" << 10));
ASSERT_EQ(remainingChunk.toString(), expectedChunk.toString());
@@ -159,7 +159,7 @@ TEST_F(MetadataManagerTest, RemoveRangeWithSingleRangeOverlap) {
ASSERT_EQ(manager.getCopyOfRangesToClean().size(), 1UL);
ranges = manager.getCopyOfRangesToClean();
it = ranges.find(BSON("key" << 6));
- remainingChunk = ChunkRange(it->first, it->second);
+ remainingChunk = ChunkRange(it->first, it->second.getMaxKey());
expectedChunk = ChunkRange(BSON("key" << 6), BSON("key" << 10));
ASSERT_EQ(remainingChunk.toString(), expectedChunk.toString());
@@ -167,7 +167,7 @@ TEST_F(MetadataManagerTest, RemoveRangeWithSingleRangeOverlap) {
ASSERT_EQ(manager.getCopyOfRangesToClean().size(), 1UL);
ranges = manager.getCopyOfRangesToClean();
it = ranges.find(BSON("key" << 6));
- remainingChunk = ChunkRange(it->first, it->second);
+ remainingChunk = ChunkRange(it->first, it->second.getMaxKey());
expectedChunk = ChunkRange(BSON("key" << 6), BSON("key" << 9));
ASSERT_EQ(remainingChunk.toString(), expectedChunk.toString());
@@ -191,11 +191,11 @@ TEST_F(MetadataManagerTest, RemoveRangeWithMultipleRangeOverlaps) {
ASSERT_EQ(manager.getCopyOfRangesToClean().size(), 2UL);
auto ranges = manager.getCopyOfRangesToClean();
auto it = ranges.find(BSON("key" << 0));
- ChunkRange remainingChunk = ChunkRange(it->first, it->second);
+ ChunkRange remainingChunk = ChunkRange(it->first, it->second.getMaxKey());
ChunkRange expectedChunk = ChunkRange(BSON("key" << 0), BSON("key" << 8));
ASSERT_EQ(remainingChunk.toString(), expectedChunk.toString());
it++;
- remainingChunk = ChunkRange(it->first, it->second);
+ remainingChunk = ChunkRange(it->first, it->second.getMaxKey());
expectedChunk = ChunkRange(BSON("key" << 22), BSON("key" << 30));
ASSERT_EQ(remainingChunk.toString(), expectedChunk.toString());
@@ -356,7 +356,7 @@ TEST_F(MetadataManagerTest, BeginReceiveWithOverlappingRange) {
const auto it = copyOfPending.find(BSON("key" << 5));
ASSERT(it != copyOfPending.end());
- ASSERT_BSONOBJ_EQ(it->second, BSON("key" << 35));
+ ASSERT_BSONOBJ_EQ(it->second.getMaxKey(), BSON("key" << 35));
}
TEST_F(MetadataManagerTest, RefreshMetadataAfterDropAndRecreate) {
@@ -383,7 +383,8 @@ TEST_F(MetadataManagerTest, RefreshMetadataAfterDropAndRecreate) {
const auto chunkEntry = manager.getActiveMetadata()->getChunks().begin();
ASSERT_BSONOBJ_EQ(BSON("key" << 20), chunkEntry->first);
- ASSERT_BSONOBJ_EQ(BSON("key" << 30), chunkEntry->second);
+ ASSERT_BSONOBJ_EQ(BSON("key" << 30), chunkEntry->second.getMaxKey());
+ ASSERT_EQ(newVersion, chunkEntry->second.getVersion());
}
// Tests membership functions for _rangesToClean
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 3c09cfad2fe..bbb40821b2e 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -142,26 +142,20 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* txn, MoveChunkR
// With nonzero shard version, we must have a shard key
invariant(!_collectionMetadata->getKeyPattern().isEmpty());
- ChunkType origChunk;
- if (!_collectionMetadata->getNextChunk(_args.getMinKey(), &origChunk) ||
- origChunk.getMin().woCompare(_args.getMinKey()) != 0) {
- // If this assertion is hit, it means that whoever called the shard moveChunk command
- // (mongos or the CSRS balancer) did not check whether the chunk actually belongs to this
- // shard. It is a benign error and does not indicate data corruption.
- uasserted(40145,
- str::stream() << "Chunk " << _args.toString() << " is not owned by this shard.");
+ ChunkType chunkToMove;
+ chunkToMove.setMin(_args.getMinKey());
+ chunkToMove.setMax(_args.getMaxKey());
+ if (_args.hasChunkVersion()) {
+ chunkToMove.setVersion(_args.getChunkVersion());
}
- uassert(40146,
- str::stream()
- << "Unable to find a chunk '"
- << _args.toString()
- << "' at collection version '"
- << collectionVersion.toString()
- << "'. Instead found chunk with bounds '"
- << origChunk.toString()
- << "'. The chunk must have been split before the moveChunk command was issued.",
- origChunk.getMax().woCompare(_args.getMaxKey()) == 0);
+ Status chunkValidateStatus = _collectionMetadata->checkChunkIsValid(chunkToMove);
+ if (!chunkValidateStatus.isOK()) {
+ uasserted(chunkValidateStatus.code(),
+ str::stream() << "Unable to move chunk with arguments '" << _args.toString()
+ << "' due to error "
+ << chunkValidateStatus.reason());
+ }
}
MigrationSourceManager::~MigrationSourceManager() {
diff --git a/src/mongo/db/s/split_chunk_command.cpp b/src/mongo/db/s/split_chunk_command.cpp
index 90394366218..e88555ac0fe 100644
--- a/src/mongo/db/s/split_chunk_command.cpp
+++ b/src/mongo/db/s/split_chunk_command.cpp
@@ -63,6 +63,8 @@ using std::vector;
namespace {
+const char kChunkVersion[] = "chunkVersion";
+
const ReadPreferenceSetting kPrimaryOnlyReadPreference{ReadPreference::PrimaryOnly};
bool checkIfSingleDoc(OperationContext* txn,
@@ -196,14 +198,19 @@ public:
keyPatternObj = keyPatternElem.Obj();
}
- auto chunkRangeStatus = ChunkRange::fromBSON(cmdObj);
- if (!chunkRangeStatus.isOK())
- return appendCommandStatus(result, chunkRangeStatus.getStatus());
-
- auto chunkRange = chunkRangeStatus.getValue();
+ auto chunkRange = uassertStatusOK(ChunkRange::fromBSON(cmdObj));
const BSONObj min = chunkRange.getMin();
const BSONObj max = chunkRange.getMax();
+ boost::optional<ChunkVersion> expectedChunkVersion;
+ auto statusWithChunkVersion =
+ ChunkVersion::parseFromBSONWithFieldForCommands(cmdObj, kChunkVersion);
+ if (statusWithChunkVersion.isOK()) {
+ expectedChunkVersion = std::move(statusWithChunkVersion.getValue());
+ } else if (statusWithChunkVersion != ErrorCodes::NoSuchKey) {
+ uassertStatusOK(statusWithChunkVersion);
+ }
+
vector<BSONObj> splitKeys;
{
BSONElement splitKeysElem;
@@ -318,16 +325,15 @@ public:
// With nonzero shard version, we must have a coll version >= our shard version
invariant(collVersion >= shardVersion);
- ChunkType origChunk;
- if (!collMetadata->getNextChunk(min, &origChunk) || origChunk.getMin().woCompare(min) ||
- origChunk.getMax().woCompare(max)) {
- // Our boundaries are different from those passed in
- std::string msg = str::stream() << "splitChunk cannot find chunk "
- << "[" << redact(min) << "," << redact(max) << ") "
- << " to split, the chunk boundaries may be stale";
- warning() << msg;
- throw SendStaleConfigException(
- nss.toString(), msg, expectedCollectionVersion, shardVersion);
+ {
+ ChunkType chunkToMove;
+ chunkToMove.setMin(min);
+ chunkToMove.setMax(max);
+ if (expectedChunkVersion) {
+ chunkToMove.setVersion(*expectedChunkVersion);
+ }
+
+ uassertStatusOK(collMetadata->checkChunkIsValid(chunkToMove));
}
auto request = SplitChunkRequest(