summaryrefslogtreecommitdiff
path: root/src/mongo
diff options
context:
space:
mode:
authorKaloian Manassiev <kaloian.manassiev@mongodb.com>2017-12-05 13:47:54 -0500
committerKaloian Manassiev <kaloian.manassiev@mongodb.com>2017-12-06 09:04:36 -0500
commitd5be73dbe4d28211994746dc1f5b47fb840ccdbd (patch)
tree4a27750cfaad68c90671bc14ef83759e0e607a47 /src/mongo
parenta18859168f73428522d4338fee982329d9d431ed (diff)
downloadmongo-d5be73dbe4d28211994746dc1f5b47fb840ccdbd.tar.gz
SERVER-31056 Remove the KeyRange type from non-test code
Diffstat (limited to 'src/mongo')
-rw-r--r--src/mongo/db/range_arithmetic.h39
-rw-r--r--src/mongo/db/s/cleanup_orphaned_cmd.cpp54
-rw-r--r--src/mongo/db/s/collection_metadata.cpp31
-rw-r--r--src/mongo/db/s/collection_metadata.h11
-rw-r--r--src/mongo/db/s/collection_metadata_test.cpp49
-rw-r--r--src/mongo/db/s/collection_sharding_state.cpp3
-rw-r--r--src/mongo/db/s/collection_sharding_state.h2
-rw-r--r--src/mongo/db/s/metadata_manager.cpp26
-rw-r--r--src/mongo/db/s/metadata_manager.h12
-rw-r--r--src/mongo/s/catalog/type_chunk.h4
-rw-r--r--src/mongo/s/write_ops/mock_ns_targeter.h60
11 files changed, 139 insertions, 152 deletions
diff --git a/src/mongo/db/range_arithmetic.h b/src/mongo/db/range_arithmetic.h
index 75707ca9b44..91e35d8bd6f 100644
--- a/src/mongo/db/range_arithmetic.h
+++ b/src/mongo/db/range_arithmetic.h
@@ -38,45 +38,6 @@
namespace mongo {
/**
- * A KeyRange represents a range over keys of documents in a namespace, qualified by a
- * key pattern which defines the documents that are in the key range.
- *
- * There may be many different expressions to generate the same key fields from a document - the
- * keyPattern tells us these expressions.
- *
- * Ex:
- * DocA : { field : "aaaa" }
- * DocB : { field : "bbb" }
- * DocC : { field : "ccccc" }
- *
- * keyPattern : { field : 1 }
- * minKey : { field : "aaaa" } : Id(DocA)
- * maxKey : { field : "ccccc" } : Id(DocB)
- *
- * contains Id(DocB)
- *
- * keyPattern : { field : "numberofletters" }
- * minKey : { field : 4 } : numberofletters(DocA)
- * maxKey : { field : 5 } : numberofletters(DocC)
- *
- * does not contain numberofletters(DocB)
- */
-struct KeyRange {
- KeyRange(const std::string& ns,
- const BSONObj& minKey,
- const BSONObj& maxKey,
- const BSONObj& keyPattern)
- : ns(ns), minKey(minKey), maxKey(maxKey), keyPattern(keyPattern) {}
-
- KeyRange() {}
-
- std::string ns;
- BSONObj minKey;
- BSONObj maxKey;
- BSONObj keyPattern;
-};
-
-/**
* Returns true if the point is within the range [inclusiveLower, exclusiveUpper).
*/
bool rangeContains(const BSONObj& inclusiveLower,
diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
index 5ab210ec520..69e7b311b79 100644
--- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
@@ -30,6 +30,7 @@
#include "mongo/platform/basic.h"
+#include <boost/optional.hpp>
#include <string>
#include <vector>
@@ -52,13 +53,7 @@
#include "mongo/s/migration_secondary_throttle_options.h"
#include "mongo/util/log.h"
-#include <boost/optional.hpp>
-
namespace mongo {
-
-using std::string;
-using str::stream;
-
namespace {
enum CleanupResult { CleanupResult_Done, CleanupResult_Continue, CleanupResult_Error };
@@ -78,22 +73,20 @@ CleanupResult cleanupOrphanedData(OperationContext* opCtx,
const BSONObj& startingFromKeyConst,
const WriteConcernOptions& secondaryThrottle,
BSONObj* stoppedAtKey,
- string* errMsg) {
-
+ std::string* errMsg) {
BSONObj startingFromKey = startingFromKeyConst;
boost::optional<ChunkRange> targetRange;
CollectionShardingState::CleanupNotification notifn;
- OID epoch;
+
{
AutoGetCollection autoColl(opCtx, ns, MODE_IX);
- auto css = CollectionShardingState::get(opCtx, ns.toString());
+ const auto css = CollectionShardingState::get(opCtx, ns.toString());
auto metadata = css->getMetadata();
if (!metadata) {
log() << "skipping orphaned data cleanup for " << ns.toString()
<< ", collection is not sharded";
return CleanupResult_Done;
}
- epoch = metadata->getCollVersion().epoch();
BSONObj keyPattern = metadata->getKeyPattern();
if (!startingFromKey.isEmpty()) {
@@ -109,18 +102,16 @@ CleanupResult cleanupOrphanedData(OperationContext* opCtx,
startingFromKey = metadata->getMinKey();
}
- boost::optional<KeyRange> orphanRange = css->getNextOrphanRange(startingFromKey);
- if (!orphanRange) {
+ targetRange = css->getNextOrphanRange(startingFromKey);
+ if (!targetRange) {
LOG(1) << "cleanupOrphaned requested for " << ns.toString() << " starting from "
<< redact(startingFromKey) << ", no orphan ranges remain";
return CleanupResult_Done;
}
- orphanRange->ns = ns.ns();
- *stoppedAtKey = orphanRange->maxKey;
- targetRange.emplace(
- ChunkRange(orphanRange->minKey.getOwned(), orphanRange->maxKey.getOwned()));
+ *stoppedAtKey = targetRange->getMax();
+
notifn = css->cleanUpRange(*targetRange, CollectionShardingState::kNow);
}
@@ -130,13 +121,17 @@ CleanupResult cleanupOrphanedData(OperationContext* opCtx,
LOG(1) << "cleanupOrphaned requested for " << ns.toString() << " starting from "
<< redact(startingFromKey) << ", removing next orphan range "
<< redact(targetRange->toString()) << "; waiting...";
+
Status result = notifn.waitStatus(opCtx);
+
LOG(1) << "Finished waiting for last " << ns.toString() << " orphan range cleanup";
+
if (!result.isOK()) {
log() << redact(result.reason());
*errMsg = result.reason();
return CleanupResult_Error;
}
+
return CleanupResult_Continue;
}
@@ -172,16 +167,17 @@ class CleanupOrphanedCommand : public ErrmsgCommandDeprecated {
public:
CleanupOrphanedCommand() : ErrmsgCommandDeprecated("cleanupOrphaned") {}
- virtual bool slaveOk() const {
+ bool slaveOk() const override {
return false;
}
- virtual bool adminOnly() const {
+
+ bool adminOnly() const override {
return true;
}
- virtual Status checkAuthForCommand(Client* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
+ Status checkAuthForCommand(Client* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) override {
if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
ResourcePattern::forClusterResource(), ActionType::cleanupOrphaned)) {
return Status(ErrorCodes::Unauthorized, "Not authorized for cleanupOrphaned command.");
@@ -189,23 +185,23 @@ public:
return Status::OK();
}
- virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
+ bool supportsWriteConcern(const BSONObj& cmd) const override {
return true;
}
// Input
- static BSONField<string> nsField;
+ static BSONField<std::string> nsField;
static BSONField<BSONObj> startingFromKeyField;
// Output
static BSONField<BSONObj> stoppedAtKeyField;
bool errmsgRun(OperationContext* opCtx,
- string const& db,
+ std::string const& db,
const BSONObj& cmdObj,
- string& errmsg,
- BSONObjBuilder& result) {
- string ns;
+ std::string& errmsg,
+ BSONObjBuilder& result) override {
+ std::string ns;
if (!FieldParser::extract(cmdObj, nsField, &ns, &errmsg)) {
return false;
}
@@ -255,7 +251,7 @@ public:
} cleanupOrphanedCmd;
-BSONField<string> CleanupOrphanedCommand::nsField("cleanupOrphaned");
+BSONField<std::string> CleanupOrphanedCommand::nsField("cleanupOrphaned");
BSONField<BSONObj> CleanupOrphanedCommand::startingFromKeyField("startingFromKey");
BSONField<BSONObj> CleanupOrphanedCommand::stoppedAtKeyField("stoppedAtKey");
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index 964370cc4e8..20b419d0bc5 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -208,16 +208,15 @@ std::string CollectionMetadata::toStringBasic() const {
<< ", shard version: " << _shardVersion.toString();
}
-boost::optional<KeyRange> CollectionMetadata::getNextOrphanRange(
+boost::optional<ChunkRange> CollectionMetadata::getNextOrphanRange(
RangeMap const& receivingChunks, BSONObj const& origLookupKey) const {
-
+ const BSONObj maxKey = getMaxKey();
BSONObj lookupKey = origLookupKey;
- BSONObj maxKey = getMaxKey(); // so we don't keep rebuilding
- while (lookupKey.woCompare(maxKey) < 0) {
+ while (lookupKey.woCompare(maxKey) < 0) {
using Its = std::pair<RangeMap::const_iterator, RangeMap::const_iterator>;
- auto patchLookupKey = [&](RangeMap const& map) -> boost::optional<Its> {
+ const auto patchLookupKey = [&](RangeMap const& map) -> boost::optional<Its> {
auto lowerIt = map.end(), upperIt = map.end();
if (!map.empty()) {
@@ -230,6 +229,7 @@ boost::optional<KeyRange> CollectionMetadata::getNextOrphanRange(
}
// If we overlap, continue after the overlap
+ //
// TODO: Could optimize slightly by finding next non-contiguous chunk
if (lowerIt != map.end() && lowerIt->second.woCompare(lookupKey) > 0) {
lookupKey = lowerIt->second; // note side effect
@@ -245,26 +245,29 @@ boost::optional<KeyRange> CollectionMetadata::getNextOrphanRange(
continue;
}
- boost::optional<KeyRange> range =
- KeyRange("", getMinKey(), maxKey, _cm->getShardKeyPattern().toBSON());
+ BSONObj rangeMin = getMinKey();
+ BSONObj rangeMax = maxKey;
- auto patchArgRange = [&range](RangeMap const& map, Its its) {
+ const auto patchArgRange = [&rangeMin, &rangeMax](RangeMap const& map, Its const& its) {
// We know that the lookup key is not covered by a chunk or pending range, and where the
// previous chunk and pending chunks are. Now we fill in the bounds as the closest
// bounds of the surrounding ranges in both maps.
- auto lowerIt = its.first, upperIt = its.second;
+ const auto& lowerIt = its.first;
+ const auto& upperIt = its.second;
- if (lowerIt != map.end() && lowerIt->second.woCompare(range->minKey) > 0) {
- range->minKey = lowerIt->second;
+ if (lowerIt != map.end() && lowerIt->second.woCompare(rangeMin) > 0) {
+ rangeMin = lowerIt->second;
}
- if (upperIt != map.end() && upperIt->first.woCompare(range->maxKey) < 0) {
- range->maxKey = upperIt->first;
+
+ if (upperIt != map.end() && upperIt->first.woCompare(rangeMax) < 0) {
+ rangeMax = upperIt->first;
}
};
patchArgRange(_chunksMap, *chunksIts);
patchArgRange(receivingChunks, *pendingIts);
- return range;
+
+ return ChunkRange(rangeMin.getOwned(), rangeMax.getOwned());
}
return boost::none;
diff --git a/src/mongo/db/s/collection_metadata.h b/src/mongo/db/s/collection_metadata.h
index 57f0974d848..38d5c11819f 100644
--- a/src/mongo/db/s/collection_metadata.h
+++ b/src/mongo/db/s/collection_metadata.h
@@ -104,19 +104,20 @@ public:
*
* This allows us to do the following to iterate over all orphan ranges:
*
- * KeyRange range;
+ * ChunkRange range;
* BSONObj lookupKey = metadata->getMinKey();
- * boost::optional<KeyRange> range;
+ * boost::optional<ChunkRange> range;
* while((range = metadata->getNextOrphanRange(receiveMap, lookupKey))) {
* lookupKey = range->getMax();
* }
*
* @param lookupKey passing a key that does not belong to this metadata is undefined.
* @param receiveMap is an extra set of chunks not considered orphaned.
- * @param orphanRange the output range. Note that the NS is not set.
+ *
+ * @return The next orphan range as a ChunkRange, or boost::none if no orphan ranges remain.
*/
- boost::optional<KeyRange> getNextOrphanRange(RangeMap const& receiveMap,
- BSONObj const& lookupKey) const;
+ boost::optional<ChunkRange> getNextOrphanRange(RangeMap const& receiveMap,
+ BSONObj const& lookupKey) const;
ChunkVersion getCollVersion() const {
return _cm->getVersion();
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index 14229bc3965..9f619b1ca12 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -127,11 +127,11 @@ TEST_F(NoChunkFixture, OrphanedDataRangeBegin) {
auto keyRange = metadata->getNextOrphanRange(pending, lookupKey);
ASSERT(keyRange);
- ASSERT(keyRange->minKey.woCompare(metadata->getMinKey()) == 0);
- ASSERT(keyRange->maxKey.woCompare(metadata->getMaxKey()) == 0);
+ ASSERT(keyRange->getMin().woCompare(metadata->getMinKey()) == 0);
+ ASSERT(keyRange->getMax().woCompare(metadata->getMaxKey()) == 0);
// Make sure we don't have any more ranges
- ASSERT(!metadata->getNextOrphanRange(pending, keyRange->maxKey));
+ ASSERT(!metadata->getNextOrphanRange(pending, keyRange->getMax()));
}
TEST_F(NoChunkFixture, OrphanedDataRangeMiddle) {
@@ -142,12 +142,11 @@ TEST_F(NoChunkFixture, OrphanedDataRangeMiddle) {
auto keyRange = metadata->getNextOrphanRange(pending, lookupKey);
ASSERT(keyRange);
- ASSERT(keyRange->minKey.woCompare(metadata->getMinKey()) == 0);
- ASSERT(keyRange->maxKey.woCompare(metadata->getMaxKey()) == 0);
- ASSERT(keyRange->keyPattern.woCompare(metadata->getKeyPattern()) == 0);
+ ASSERT(keyRange->getMin().woCompare(metadata->getMinKey()) == 0);
+ ASSERT(keyRange->getMax().woCompare(metadata->getMaxKey()) == 0);
// Make sure we don't have any more ranges
- ASSERT(!metadata->getNextOrphanRange(pending, keyRange->maxKey));
+ ASSERT(!metadata->getNextOrphanRange(pending, keyRange->getMax()));
}
TEST_F(NoChunkFixture, OrphanedDataRangeEnd) {
@@ -213,17 +212,15 @@ TEST_F(SingleChunkFixture, ChunkOrphanedDataRanges) {
pending, makeCollectionMetadata()->getMinKey());
ASSERT(keyRange);
- ASSERT(keyRange->minKey.woCompare(makeCollectionMetadata()->getMinKey()) == 0);
- ASSERT(keyRange->maxKey.woCompare(BSON("a" << 10)) == 0);
- ASSERT(keyRange->keyPattern.woCompare(makeCollectionMetadata()->getKeyPattern()) == 0);
+ ASSERT(keyRange->getMin().woCompare(makeCollectionMetadata()->getMinKey()) == 0);
+ ASSERT(keyRange->getMax().woCompare(BSON("a" << 10)) == 0);
- keyRange = makeCollectionMetadata()->getNextOrphanRange(pending, keyRange->maxKey);
+ keyRange = makeCollectionMetadata()->getNextOrphanRange(pending, keyRange->getMax());
ASSERT(keyRange);
- ASSERT(keyRange->minKey.woCompare(BSON("a" << 20)) == 0);
- ASSERT(keyRange->maxKey.woCompare(makeCollectionMetadata()->getMaxKey()) == 0);
- ASSERT(keyRange->keyPattern.woCompare(makeCollectionMetadata()->getKeyPattern()) == 0);
+ ASSERT(keyRange->getMin().woCompare(BSON("a" << 20)) == 0);
+ ASSERT(keyRange->getMax().woCompare(makeCollectionMetadata()->getMaxKey()) == 0);
- ASSERT(!makeCollectionMetadata()->getNextOrphanRange(pending, keyRange->maxKey));
+ ASSERT(!makeCollectionMetadata()->getNextOrphanRange(pending, keyRange->getMax()));
}
/**
@@ -266,26 +263,24 @@ protected:
TEST_F(TwoChunksWithGapCompoundKeyFixture, ChunkGapOrphanedDataRanges) {
stRangeMap pending;
+
auto keyRange = makeCollectionMetadata()->getNextOrphanRange(
pending, makeCollectionMetadata()->getMinKey());
ASSERT(keyRange);
- ASSERT(keyRange->minKey.woCompare(makeCollectionMetadata()->getMinKey()) == 0);
- ASSERT(keyRange->maxKey.woCompare(BSON("a" << 10 << "b" << 0)) == 0);
- ASSERT(keyRange->keyPattern.woCompare(makeCollectionMetadata()->getKeyPattern()) == 0);
+ ASSERT(keyRange->getMin().woCompare(makeCollectionMetadata()->getMinKey()) == 0);
+ ASSERT(keyRange->getMax().woCompare(BSON("a" << 10 << "b" << 0)) == 0);
- keyRange = makeCollectionMetadata()->getNextOrphanRange(pending, keyRange->maxKey);
+ keyRange = makeCollectionMetadata()->getNextOrphanRange(pending, keyRange->getMax());
ASSERT(keyRange);
- ASSERT(keyRange->minKey.woCompare(BSON("a" << 20 << "b" << 0)) == 0);
- ASSERT(keyRange->maxKey.woCompare(BSON("a" << 30 << "b" << 0)) == 0);
- ASSERT(keyRange->keyPattern.woCompare(makeCollectionMetadata()->getKeyPattern()) == 0);
+ ASSERT(keyRange->getMin().woCompare(BSON("a" << 20 << "b" << 0)) == 0);
+ ASSERT(keyRange->getMax().woCompare(BSON("a" << 30 << "b" << 0)) == 0);
- keyRange = makeCollectionMetadata()->getNextOrphanRange(pending, keyRange->maxKey);
+ keyRange = makeCollectionMetadata()->getNextOrphanRange(pending, keyRange->getMax());
ASSERT(keyRange);
- ASSERT(keyRange->minKey.woCompare(BSON("a" << 40 << "b" << 0)) == 0);
- ASSERT(keyRange->maxKey.woCompare(makeCollectionMetadata()->getMaxKey()) == 0);
- ASSERT(keyRange->keyPattern.woCompare(makeCollectionMetadata()->getKeyPattern()) == 0);
+ ASSERT(keyRange->getMin().woCompare(BSON("a" << 40 << "b" << 0)) == 0);
+ ASSERT(keyRange->getMax().woCompare(makeCollectionMetadata()->getMaxKey()) == 0);
- ASSERT(!makeCollectionMetadata()->getNextOrphanRange(pending, keyRange->maxKey));
+ ASSERT(!makeCollectionMetadata()->getNextOrphanRange(pending, keyRange->getMax()));
}
/**
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index 3ead8021941..872f8fbc11a 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -272,6 +272,7 @@ Status CollectionShardingState::waitForClean(OperationContext* opCtx,
<< result.reason()};
}
}
+
MONGO_UNREACHABLE;
}
@@ -280,7 +281,7 @@ auto CollectionShardingState::trackOrphanedDataCleanup(ChunkRange const& range)
return _metadataManager->trackOrphanedDataCleanup(range);
}
-boost::optional<KeyRange> CollectionShardingState::getNextOrphanRange(BSONObj const& from) {
+boost::optional<ChunkRange> CollectionShardingState::getNextOrphanRange(BSONObj const& from) {
return _metadataManager->getNextOrphanRange(from);
}
diff --git a/src/mongo/db/s/collection_sharding_state.h b/src/mongo/db/s/collection_sharding_state.h
index 87d18a8c5d4..7af282f3463 100644
--- a/src/mongo/db/s/collection_sharding_state.h
+++ b/src/mongo/db/s/collection_sharding_state.h
@@ -215,7 +215,7 @@ public:
* Returns a range _not_ owned by this shard that starts no lower than the specified
* startingFrom key value, if any, or boost::none if there is no such range.
*/
- boost::optional<KeyRange> getNextOrphanRange(BSONObj const& startingFrom);
+ boost::optional<ChunkRange> getNextOrphanRange(BSONObj const& startingFrom);
/**
* Replication oplog OpObserver hooks. Informs the sharding system of changes that may be
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index a4ab501ff88..4bc2a85de26 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -215,7 +215,7 @@ void MetadataManager::refreshActiveMetadata(std::unique_ptr<CollectionMetadata>
// Collection is becoming unsharded
if (!remoteMetadata) {
log() << "Marking collection " << _nss.ns() << " with "
- << redact(_metadata.back()->metadata.toStringBasic()) << " as no longer sharded";
+ << redact(_metadata.back()->metadata.toStringBasic()) << " as unsharded";
_receivingChunks.clear();
_clearAllCleanups(lg);
@@ -260,20 +260,19 @@ void MetadataManager::refreshActiveMetadata(std::unique_ptr<CollectionMetadata>
log() << "Updating collection metadata for " << _nss.ns() << " from "
<< activeMetadata->toStringBasic() << " to " << remoteMetadata->toStringBasic();
- // Resolve any receiving chunks, which might have completed by now.
- // Should be no more than one.
+ // Resolve any receiving chunks, which might have completed by now
for (auto it = _receivingChunks.begin(); it != _receivingChunks.end();) {
- BSONObj const& min = it->first;
- BSONObj const& max = it->second;
+ const ChunkRange receivingRange(it->first, it->second);
- if (!remoteMetadata->rangeOverlapsChunk(ChunkRange(min, max))) {
+ if (!remoteMetadata->rangeOverlapsChunk(receivingRange)) {
++it;
continue;
}
- // The remote metadata contains a chunk we were earlier in the process of receiving, so
- // we deem it successfully received.
- LOG(2) << "Verified chunk " << ChunkRange(min, max) << " for collection " << _nss.ns()
- << " has been migrated to this shard earlier";
+
+ // The remote metadata contains a chunk we were earlier in the process of receiving, so we
+ // deem it successfully received
+ LOG(2) << "Verified chunk " << redact(receivingRange.toString()) << " for collection "
+ << _nss.ns() << " has been migrated to this shard earlier";
_receivingChunks.erase(it);
it = _receivingChunks.begin();
@@ -532,7 +531,7 @@ auto MetadataManager::_overlapsInUseCleanups(WithLock, ChunkRange const& range)
return boost::none;
}
-boost::optional<KeyRange> MetadataManager::getNextOrphanRange(BSONObj const& from) const {
+boost::optional<ChunkRange> MetadataManager::getNextOrphanRange(BSONObj const& from) const {
stdx::lock_guard<stdx::mutex> lg(_managerLock);
invariant(!_metadata.empty());
return _metadata.back()->metadata.getNextOrphanRange(_receivingChunks, from);
@@ -557,8 +556,11 @@ ScopedCollectionMetadata::ScopedCollectionMetadata(ScopedCollectionMetadata&& ot
ScopedCollectionMetadata& ScopedCollectionMetadata::operator=(ScopedCollectionMetadata&& other) {
if (this != &other) {
_clear();
+
_metadataManager = std::move(other._metadataManager);
- _metadataTracker = other._metadataTracker;
+ _metadataTracker = std::move(other._metadataTracker);
+
+ other._metadataManager = nullptr;
other._metadataTracker = nullptr;
}
return *this;
diff --git a/src/mongo/db/s/metadata_manager.h b/src/mongo/db/s/metadata_manager.h
index afe8ab85a9d..999588fa27a 100644
--- a/src/mongo/db/s/metadata_manager.h
+++ b/src/mongo/db/s/metadata_manager.h
@@ -140,7 +140,7 @@ public:
*/
boost::optional<CleanupNotification> trackOrphanedDataCleanup(ChunkRange const& orphans) const;
- boost::optional<KeyRange> getNextOrphanRange(BSONObj const& from) const;
+ boost::optional<ChunkRange> getNextOrphanRange(BSONObj const& from) const;
private:
// Management of the _metadata list is implemented in ScopedCollectionMetadata
@@ -298,16 +298,6 @@ public:
*/
BSONObj extractDocumentKey(BSONObj const& doc) const;
- /**
- * Checks whether both objects refer to the identically the same metadata.
- */
- bool operator==(ScopedCollectionMetadata const& other) const {
- return _metadataTracker == other._metadataTracker;
- }
- bool operator!=(ScopedCollectionMetadata const& other) const {
- return _metadataTracker != other._metadataTracker;
- }
-
private:
friend ScopedCollectionMetadata MetadataManager::getActiveMetadata(
std::shared_ptr<MetadataManager>);
diff --git a/src/mongo/s/catalog/type_chunk.h b/src/mongo/s/catalog/type_chunk.h
index f792510c109..2852955deb6 100644
--- a/src/mongo/s/catalog/type_chunk.h
+++ b/src/mongo/s/catalog/type_chunk.h
@@ -100,8 +100,8 @@ public:
ChunkRange unionWith(ChunkRange const& other) const;
private:
- const BSONObj _minKey;
- const BSONObj _maxKey;
+ BSONObj _minKey;
+ BSONObj _maxKey;
};
/**
diff --git a/src/mongo/s/write_ops/mock_ns_targeter.h b/src/mongo/s/write_ops/mock_ns_targeter.h
index 5046c60c277..213a5abcb6a 100644
--- a/src/mongo/s/write_ops/mock_ns_targeter.h
+++ b/src/mongo/s/write_ops/mock_ns_targeter.h
@@ -32,6 +32,7 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/range_arithmetic.h"
+#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/ns_targeter.h"
#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
@@ -39,6 +40,45 @@
namespace mongo {
/**
+ * A KeyRange represents a range over keys of documents in a namespace, qualified by a
+ * key pattern which defines the documents that are in the key range.
+ *
+ * There may be many different expressions to generate the same key fields from a document - the
+ * keyPattern tells us these expressions.
+ *
+ * Ex:
+ * DocA : { field : "aaaa" }
+ * DocB : { field : "bbb" }
+ * DocC : { field : "ccccc" }
+ *
+ * keyPattern : { field : 1 }
+ * minKey : { field : "aaaa" } : Id(DocA)
+ * maxKey : { field : "ccccc" } : Id(DocC)
+ *
+ * contains Id(DocB)
+ *
+ * keyPattern : { field : "numberofletters" }
+ * minKey : { field : 4 } : numberofletters(DocA)
+ * maxKey : { field : 5 } : numberofletters(DocC)
+ *
+ * does not contain numberofletters(DocB)
+ */
+struct KeyRange {
+ KeyRange(const std::string& ns,
+ const BSONObj& minKey,
+ const BSONObj& maxKey,
+ const BSONObj& keyPattern)
+ : ns(ns), minKey(minKey), maxKey(maxKey), keyPattern(keyPattern) {}
+
+ KeyRange() {}
+
+ std::string ns;
+ BSONObj minKey;
+ BSONObj maxKey;
+ BSONObj keyPattern;
+};
+
+/**
* A MockRange represents a range with endpoint that a MockNSTargeter uses to direct writes to
* a particular endpoint.
*/
@@ -155,14 +195,12 @@ public:
}
private:
- KeyRange parseRange(const BSONObj& query) const {
- std::string fieldName = query.firstElement().fieldName();
+ ChunkRange parseRange(const BSONObj& query) const {
+ const std::string fieldName = query.firstElement().fieldName();
if (query.firstElement().isNumber()) {
- return KeyRange("",
- BSON(fieldName << query.firstElement().numberInt()),
- BSON(fieldName << query.firstElement().numberInt() + 1),
- BSON(fieldName << 1));
+ return ChunkRange(BSON(fieldName << query.firstElement().numberInt()),
+ BSON(fieldName << query.firstElement().numberInt() + 1));
} else if (query.firstElement().type() == Object) {
BSONObj queryRange = query.firstElement().Obj();
@@ -174,11 +212,11 @@ private:
BSONObjBuilder maxKeyB;
maxKeyB.appendAs(queryRange[LT.l_], fieldName);
- return KeyRange("", minKeyB.obj(), maxKeyB.obj(), BSON(fieldName << 1));
+ return ChunkRange(minKeyB.obj(), maxKeyB.obj());
}
ASSERT(false);
- return KeyRange("", BSONObj(), BSONObj(), BSONObj());
+ return ChunkRange({}, {});
}
/**
@@ -187,15 +225,15 @@ private:
*/
Status targetQuery(const BSONObj& query,
std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const {
- KeyRange queryRange = parseRange(query);
+ ChunkRange queryRange(parseRange(query));
const std::vector<MockRange*>& ranges = getRanges();
for (std::vector<MockRange*>::const_iterator it = ranges.begin(); it != ranges.end();
++it) {
const MockRange* range = *it;
- if (rangeOverlaps(queryRange.minKey,
- queryRange.maxKey,
+ if (rangeOverlaps(queryRange.getMin(),
+ queryRange.getMax(),
range->range.minKey,
range->range.maxKey)) {
endpoints->push_back(stdx::make_unique<ShardEndpoint>(range->endpoint));