author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2017-03-14 17:33:49 -0400
committer Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2017-03-19 22:42:29 -0400
commit    3e53f0ce9cdff926473276a34a351972ea963a57 (patch)
tree      73d8525cc4ed68faf3bb1c983510150841c4e872 /src/mongo/db/s
parent    914bbbd26a686e032fdddec964b109ea78c6e6f6 (diff)
download  mongo-3e53f0ce9cdff926473276a34a351972ea963a57.tar.gz
SERVER-27681 Use CatalogCache to load metadata on shards
This change gets rid of the MetadataLoader in favor of using the CatalogCache for loading chunk metadata on shards. This ensures that the same concurrency control and rate limiting apply on both mongos and mongod, and obviates the need for different kinds of ChunkDiffer.
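A minimal sketch (not part of the commit) of the refresh pattern this change standardizes on: shards now force metadata refreshes through ShardingState, which is backed by the CatalogCache, and simply propagate failures, as the updated cleanup_orphaned_cmd.cpp below does. The helper function, its return value, and the exact header paths are illustrative assumptions; the individual calls mirror ones visible in the diff.

// Sketch only -- helper name and header paths are assumptions; the calls mirror the diff below.
#include "mongo/db/db_raii.h"                       // AutoGetCollection (assumed header)
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/s/chunk_version.h"

namespace mongo {

// Returns true if the collection is sharded after a forced metadata refresh.
bool refreshAndCheckSharded(OperationContext* opCtx, const NamespaceString& nss) {
    // Refresh through the shard's metadata machinery (now the CatalogCache);
    // any error propagates to the caller via uassertStatusOK.
    ChunkVersion unusedShardVersion;
    uassertStatusOK(
        ShardingState::get(opCtx)->refreshMetadataNow(opCtx, nss, &unusedShardVersion));

    // Read the freshly installed metadata under a collection lock.
    AutoGetCollection autoColl(opCtx, nss, MODE_IS);
    auto metadata = CollectionShardingState::get(opCtx, nss.ns())->getMetadata();
    return static_cast<bool>(metadata);
}

}  // namespace mongo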
Diffstat (limited to 'src/mongo/db/s')
-rw-r--r--  src/mongo/db/s/SConscript  2
-rw-r--r--  src/mongo/db/s/cleanup_orphaned_cmd.cpp  15
-rw-r--r--  src/mongo/db/s/collection_metadata.cpp  225
-rw-r--r--  src/mongo/db/s/collection_metadata.h  65
-rw-r--r--  src/mongo/db/s/collection_metadata_test.cpp  562
-rw-r--r--  src/mongo/db/s/collection_range_deleter_test.cpp  9
-rw-r--r--  src/mongo/db/s/merge_chunks_command.cpp  2
-rw-r--r--  src/mongo/db/s/metadata_loader.cpp  246
-rw-r--r--  src/mongo/db/s/metadata_loader.h  137
-rw-r--r--  src/mongo/db/s/metadata_loader_test.cpp  461
-rw-r--r--  src/mongo/db/s/metadata_manager.cpp  6
-rw-r--r--  src/mongo/db/s/metadata_manager.h  4
-rw-r--r--  src/mongo/db/s/metadata_manager_test.cpp  69
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp  3
-rw-r--r--  src/mongo/db/s/sharding_state.cpp  131
-rw-r--r--  src/mongo/db/s/sharding_state.h  12
-rw-r--r--  src/mongo/db/s/sharding_state_test.cpp  316
17 files changed, 371 insertions, 1894 deletions
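Before the per-file diffs, a minimal sketch (assumptions noted in the comments) of how CollectionMetadata is now constructed directly with its four-argument constructor, the pattern the rewritten unit-test fixtures below use now that MetadataLoader is gone:

// Sketch only: closely mirrors SingleChunkFixture in the rewritten
// collection_metadata_test.cpp below; the free function name is illustrative.
#include "mongo/db/s/collection_metadata.h"
#include "mongo/s/chunk_version.h"
#include "mongo/stdx/memory.h"

namespace mongo {

std::unique_ptr<CollectionMetadata> makeSingleChunkMetadataForTest() {
    const OID epoch = OID::gen();

    // The shard owns a single chunk [{a: 10}, {a: 20}).
    auto shardChunksMap =
        SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>();
    shardChunksMap.emplace(BSON("a" << 10),
                           CachedChunkInfo(BSON("a" << 20), ChunkVersion(1, 0, epoch)));

    // Collection version and shard version must share the same epoch; the
    // constructor itself now coalesces adjacent chunks into ranges, replacing
    // the removed fillRanges() helper.
    return stdx::make_unique<CollectionMetadata>(BSON("a" << 1),
                                                 ChunkVersion(1, 0, epoch) /* collection version */,
                                                 ChunkVersion(1, 0, epoch) /* shard version */,
                                                 std::move(shardChunksMap));
}

}  // namespace mongo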
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 386bbd73d5b..e4ec6233845 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -8,7 +8,6 @@ env.Library(
target='metadata',
source=[
'collection_metadata.cpp',
- 'metadata_loader.cpp',
],
LIBDEPS=[
'shard_metadata_util',
@@ -221,7 +220,6 @@ env.CppUnitTest(
target='sharding_metadata_test',
source=[
'collection_metadata_test.cpp',
- 'metadata_loader_test.cpp',
'shard_metadata_util_test.cpp',
],
LIBDEPS=[
diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
index d6690f872ef..d0e58779a3b 100644
--- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
@@ -86,7 +86,7 @@ CleanupResult cleanupOrphanedData(OperationContext* opCtx,
metadata = CollectionShardingState::get(opCtx, ns.toString())->getMetadata();
}
- if (!metadata || metadata->getKeyPattern().isEmpty()) {
+ if (!metadata) {
warning() << "skipping orphaned data cleanup for " << ns.toString()
<< ", collection is not sharded";
@@ -237,17 +237,8 @@ public:
return false;
}
- ChunkVersion shardVersion;
- Status status = shardingState->refreshMetadataNow(opCtx, nss, &shardVersion);
- if (!status.isOK()) {
- if (status.code() == ErrorCodes::RemoteChangeDetected) {
- warning() << "Shard version in transition detected while refreshing "
- << "metadata for " << ns << " at version " << shardVersion;
- } else {
- errmsg = str::stream() << "failed to refresh shard metadata: " << redact(status);
- return false;
- }
- }
+ ChunkVersion unusedShardVersion;
+ uassertStatusOK(shardingState->refreshMetadataNow(opCtx, nss, &unusedShardVersion));
BSONObj stoppedAtKey;
CleanupResult cleanupResult =
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index e3c95a06e93..fbda424de67 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -41,66 +41,81 @@
namespace mongo {
-using std::unique_ptr;
-using std::make_pair;
-using std::string;
-using std::vector;
-using str::stream;
-
-CollectionMetadata::CollectionMetadata()
- : _pendingMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()),
- _chunksMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()),
- _rangesMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()) {}
-
-CollectionMetadata::CollectionMetadata(const BSONObj& keyPattern, ChunkVersion collectionVersion)
- : _collVersion(collectionVersion),
- _shardVersion(ChunkVersion(0, 0, collectionVersion.epoch())),
- _keyPattern(keyPattern.getOwned()),
+CollectionMetadata::CollectionMetadata(const BSONObj& keyPattern,
+ ChunkVersion collectionVersion,
+ ChunkVersion shardVersion,
+ RangeMap shardChunksMap)
+ : _keyPattern(keyPattern.getOwned()),
+ _collVersion(collectionVersion),
+ _shardVersion(shardVersion),
+ _chunksMap(std::move(shardChunksMap)),
_pendingMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()),
- _chunksMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()),
- _rangesMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()) {}
+ _rangesMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()) {
-CollectionMetadata::~CollectionMetadata() = default;
+ invariant(!_keyPattern.isEmpty());
+ invariant(_collVersion.epoch() == _shardVersion.epoch());
+ invariant(_collVersion.isSet());
+ invariant(_collVersion >= _shardVersion);
-unique_ptr<CollectionMetadata> CollectionMetadata::clonePlusChunk(
- const BSONObj& minKey, const BSONObj& maxKey, const ChunkVersion& chunkVersion) const {
- invariant(chunkVersion.epoch() == _shardVersion.epoch());
- invariant(chunkVersion.isSet());
- invariant(chunkVersion > _shardVersion);
- invariant(minKey.woCompare(maxKey) < 0);
- invariant(!rangeMapOverlaps(_chunksMap, minKey, maxKey));
-
- unique_ptr<CollectionMetadata> metadata(stdx::make_unique<CollectionMetadata>());
- metadata->_keyPattern = _keyPattern.getOwned();
- metadata->fillKeyPatternFields();
- metadata->_pendingMap = _pendingMap;
- metadata->_chunksMap = _chunksMap;
- metadata->_chunksMap.insert(
- make_pair(minKey.getOwned(), CachedChunkInfo(maxKey.getOwned(), chunkVersion)));
- metadata->_shardVersion = chunkVersion;
- metadata->_collVersion = chunkVersion > _collVersion ? chunkVersion : _collVersion;
- metadata->fillRanges();
-
- invariant(metadata->isValid());
- return metadata;
+ // Parse the shard keys into the states 'keys' and 'keySet' members.
+ for (const auto& current : _keyPattern) {
+ auto newFieldRef(stdx::make_unique<FieldRef>());
+ newFieldRef->parse(current.fieldNameStringData());
+
+ _keyFields.emplace_back(std::move(newFieldRef));
+ }
+
+ if (_chunksMap.empty()) {
+ invariant(!_shardVersion.isSet());
+ return;
+ }
+
+ invariant(_shardVersion.isSet());
+
+    // Load the chunk information, coalescing their ranges. The version for this shard would be
+ // the highest version for any of the chunks.
+ BSONObj min, max;
+
+ for (const auto& entry : _chunksMap) {
+ BSONObj currMin = entry.first;
+ BSONObj currMax = entry.second.getMaxKey();
+
+ // Coalesce the chunk's bounds in ranges if they are adjacent chunks
+ if (min.isEmpty()) {
+ min = currMin;
+ max = currMax;
+ continue;
+ }
+
+ if (SimpleBSONObjComparator::kInstance.evaluate(max == currMin)) {
+ max = currMax;
+ continue;
+ }
+
+ _rangesMap.emplace(min, CachedChunkInfo(max, ChunkVersion::IGNORED()));
+
+ min = currMin;
+ max = currMax;
+ }
+
+ invariant(!min.isEmpty());
+ invariant(!max.isEmpty());
+
+ _rangesMap.emplace(min, CachedChunkInfo(max, ChunkVersion::IGNORED()));
}
+CollectionMetadata::~CollectionMetadata() = default;
+
std::unique_ptr<CollectionMetadata> CollectionMetadata::cloneMinusPending(
const ChunkType& chunk) const {
invariant(rangeMapContains(_pendingMap, chunk.getMin(), chunk.getMax()));
- unique_ptr<CollectionMetadata> metadata(stdx::make_unique<CollectionMetadata>());
- metadata->_keyPattern = _keyPattern.getOwned();
- metadata->fillKeyPatternFields();
+ auto metadata(stdx::make_unique<CollectionMetadata>(
+ _keyPattern, getCollVersion(), getShardVersion(), getChunks()));
metadata->_pendingMap = _pendingMap;
- metadata->_pendingMap.erase(chunk.getMin());
- metadata->_chunksMap = _chunksMap;
- metadata->_rangesMap = _rangesMap;
- metadata->_shardVersion = _shardVersion;
- metadata->_collVersion = _collVersion;
+ metadata->_pendingMap.erase(chunk.getMin());
- invariant(metadata->isValid());
return metadata;
}
@@ -108,14 +123,9 @@ std::unique_ptr<CollectionMetadata> CollectionMetadata::clonePlusPending(
const ChunkType& chunk) const {
invariant(!rangeMapOverlaps(_chunksMap, chunk.getMin(), chunk.getMax()));
- unique_ptr<CollectionMetadata> metadata(stdx::make_unique<CollectionMetadata>());
- metadata->_keyPattern = _keyPattern.getOwned();
- metadata->fillKeyPatternFields();
+ auto metadata(stdx::make_unique<CollectionMetadata>(
+ _keyPattern, getCollVersion(), getShardVersion(), getChunks()));
metadata->_pendingMap = _pendingMap;
- metadata->_chunksMap = _chunksMap;
- metadata->_rangesMap = _rangesMap;
- metadata->_shardVersion = _shardVersion;
- metadata->_collVersion = _collVersion;
// If there are any pending chunks on the interval to be added this is ok, since pending chunks
// aren't officially tracked yet and something may have changed on servers we do not see yet.
@@ -138,25 +148,18 @@ std::unique_ptr<CollectionMetadata> CollectionMetadata::clonePlusPending(
// The pending map entry cannot contain a specific chunk version because we don't know what
// version would be generated for it at commit time. That's why we insert an IGNORED value.
- metadata->_pendingMap.insert(
- make_pair(chunk.getMin(), CachedChunkInfo(chunk.getMax(), ChunkVersion::IGNORED())));
+ metadata->_pendingMap.emplace(chunk.getMin(),
+ CachedChunkInfo(chunk.getMax(), ChunkVersion::IGNORED()));
- invariant(metadata->isValid());
return metadata;
}
bool CollectionMetadata::keyBelongsToMe(const BSONObj& key) const {
- // For now, collections don't move. So if the collection is not sharded, assume
- // the document with the given key can be accessed.
- if (_keyPattern.isEmpty()) {
- return true;
- }
-
- if (_rangesMap.size() <= 0) {
+ if (_rangesMap.empty()) {
return false;
}
- RangeMap::const_iterator it = _rangesMap.upper_bound(key);
+ auto it = _rangesMap.upper_bound(key);
if (it != _rangesMap.begin())
it--;
@@ -164,21 +167,15 @@ bool CollectionMetadata::keyBelongsToMe(const BSONObj& key) const {
}
bool CollectionMetadata::keyIsPending(const BSONObj& key) const {
- // If we aren't sharded, then the key is never pending (though it belongs-to-me)
- if (_keyPattern.isEmpty()) {
- return false;
- }
-
- if (_pendingMap.size() <= 0) {
+ if (_pendingMap.empty()) {
return false;
}
- RangeMap::const_iterator it = _pendingMap.upper_bound(key);
+ auto it = _pendingMap.upper_bound(key);
if (it != _pendingMap.begin())
it--;
- bool isPending = rangeContains(it->first, it->second.getMaxKey(), key);
- return isPending;
+ return rangeContains(it->first, it->second.getMaxKey(), key);
}
bool CollectionMetadata::getNextChunk(const BSONObj& lookupKey, ChunkType* chunk) const {
@@ -279,15 +276,12 @@ void CollectionMetadata::toBSONPending(BSONArrayBuilder& bb) const {
}
}
-string CollectionMetadata::toStringBasic() const {
- return stream() << "collection version: " << _collVersion.toString()
- << ", shard version: " << _shardVersion.toString();
+std::string CollectionMetadata::toStringBasic() const {
+ return str::stream() << "collection version: " << _collVersion.toString()
+ << ", shard version: " << _shardVersion.toString();
}
bool CollectionMetadata::getNextOrphanRange(const BSONObj& origLookupKey, KeyRange* range) const {
- if (_keyPattern.isEmpty())
- return false;
-
BSONObj lookupKey = origLookupKey;
BSONObj maxKey = getMaxKey(); // so we don't keep rebuilding
while (lookupKey.woCompare(maxKey) < 0) {
@@ -382,29 +376,6 @@ BSONObj CollectionMetadata::getMaxKey() const {
return maxKeyB.obj();
}
-bool CollectionMetadata::isValid() const {
- if (_shardVersion > _collVersion)
- return false;
- if (_collVersion.majorVersion() == 0)
- return false;
- if (_collVersion.epoch() != _shardVersion.epoch())
- return false;
-
- if (_shardVersion.majorVersion() > 0) {
- // Must be chunks
- if (_rangesMap.size() == 0 || _chunksMap.size() == 0)
- return false;
- } else {
- // No chunks
- if (_shardVersion.minorVersion() > 0)
- return false;
- if (_rangesMap.size() > 0 || _chunksMap.size() > 0)
- return false;
- }
-
- return true;
-}
-
bool CollectionMetadata::isValidKey(const BSONObj& key) const {
BSONObjIterator it(_keyPattern);
while (it.more()) {
@@ -415,50 +386,4 @@ bool CollectionMetadata::isValidKey(const BSONObj& key) const {
return key.nFields() == _keyPattern.nFields();
}
-void CollectionMetadata::fillRanges() {
- if (_chunksMap.empty())
- return;
-
- // Load the chunk information, coallesceing their ranges. The version for this shard would be
- // the highest version for any of the chunks.
- BSONObj min, max;
- for (const auto& entry : _chunksMap) {
- BSONObj currMin = entry.first;
- BSONObj currMax = entry.second.getMaxKey();
-
- // coalesce the chunk's bounds in ranges if they are adjacent chunks
- if (min.isEmpty()) {
- min = currMin;
- max = currMax;
- continue;
- }
- if (SimpleBSONObjComparator::kInstance.evaluate(max == currMin)) {
- max = currMax;
- continue;
- }
-
- _rangesMap.insert(make_pair(min, CachedChunkInfo(max, ChunkVersion::IGNORED())));
-
- min = currMin;
- max = currMax;
- }
-
- invariant(!min.isEmpty());
- invariant(!max.isEmpty());
-
- _rangesMap.insert(make_pair(min, CachedChunkInfo(max, ChunkVersion::IGNORED())));
-}
-
-void CollectionMetadata::fillKeyPatternFields() {
- // Parse the shard keys into the states 'keys' and 'keySet' members.
- BSONObjIterator patternIter = _keyPattern.begin();
- while (patternIter.more()) {
- BSONElement current = patternIter.next();
-
- _keyFields.push_back(stdx::make_unique<FieldRef>());
- FieldRef* const newFieldRef = _keyFields.back().get();
- newFieldRef->parse(current.fieldNameStringData());
- }
-}
-
} // namespace mongo
diff --git a/src/mongo/db/s/collection_metadata.h b/src/mongo/db/s/collection_metadata.h
index 8d3dec9e6ef..51845fc3c1d 100644
--- a/src/mongo/db/s/collection_metadata.h
+++ b/src/mongo/db/s/collection_metadata.h
@@ -28,8 +28,6 @@
#pragma once
-#include "mongo/base/disallow_copying.h"
-#include "mongo/base/owned_pointer_vector.h"
#include "mongo/db/field_ref_set.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/range_arithmetic.h"
@@ -52,8 +50,6 @@ class ChunkType;
* This class is immutable once constructed.
*/
class CollectionMetadata {
- MONGO_DISALLOW_COPYING(CollectionMetadata);
-
public:
/**
* The main way to construct CollectionMetadata is through MetadataLoader or the clone*()
@@ -61,8 +57,11 @@ public:
*
* The constructors should not be used directly outside of tests.
*/
- CollectionMetadata();
- CollectionMetadata(const BSONObj& keyPattern, ChunkVersion collectionVersion);
+ CollectionMetadata(const BSONObj& keyPattern,
+ ChunkVersion collectionVersion,
+ ChunkVersion shardVersion,
+ RangeMap shardChunksMap);
+
~CollectionMetadata();
/**
@@ -148,7 +147,7 @@ public:
return _chunksMap;
}
- BSONObj getKeyPattern() const {
+ const BSONObj& getKeyPattern() const {
return _keyPattern;
}
@@ -164,10 +163,6 @@ public:
return _chunksMap.size();
}
- std::size_t getNumPending() const {
- return _pendingMap.size();
- }
-
/**
* BSON output of the basic metadata information (chunk and shard version).
*/
@@ -188,70 +183,30 @@ public:
*/
std::string toStringBasic() const;
- /**
- * This method is used only for unit-tests and it returns a new metadata's instance based on the
- * current state by adding a chunk with the specified bounds and version. The chunk's version
- * must be higher than that of all chunks which are in the cache.
- *
- * It will fassert if the chunk bounds are incorrect or overlap an existing chunk or if the
- * chunk version is lower than the maximum one.
- */
- std::unique_ptr<CollectionMetadata> clonePlusChunk(const BSONObj& minKey,
- const BSONObj& maxKey,
- const ChunkVersion& chunkVersion) const;
-
- /**
- * Returns true if this metadata was loaded with all necessary information.
- */
- bool isValid() const;
-
private:
- // Effectively, the MetadataLoader is this class's builder. So we open an exception and grant it
- // friendship.
- friend class MetadataLoader;
+ // Shard key pattern for the collection
+ BSONObj _keyPattern;
// a version for this collection that identifies the collection incarnation (ie, a
// dropped and recreated collection with the same name would have a different version)
ChunkVersion _collVersion;
- //
- // sharded state below, for when the collection gets sharded
- //
-
// highest ChunkVersion for which this metadata's information is accurate
ChunkVersion _shardVersion;
- // key pattern for chunks under this range
- BSONObj _keyPattern;
-
// A vector owning the FieldRefs parsed from the shard-key pattern of field names.
std::vector<std::unique_ptr<FieldRef>> _keyFields;
- //
- // RangeMaps represent chunks by mapping the min key to the chunk's max key, allowing
- // efficient lookup and intersection.
- //
+ // Map of chunks tracked by this shard
+ RangeMap _chunksMap;
// Map of ranges of chunks that are migrating but have not been confirmed added yet
RangeMap _pendingMap;
- // Map of chunks tracked by this shard
- RangeMap _chunksMap;
-
// A second map from a min key into a range or contiguous chunks. The map is redundant
// w.r.t. _chunkMap but we expect high chunk contiguity, especially in small
// installations.
RangeMap _rangesMap;
-
- /**
- * Try to find chunks that are adjacent and record these intervals in the _rangesMap
- */
- void fillRanges();
-
- /**
- * Creates the _keyField* local data
- */
- void fillKeyPatternFields();
};
} // namespace mongo
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index cdb469610bc..c765c67ee3a 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -29,113 +29,66 @@
#include "mongo/platform/basic.h"
#include "mongo/base/status.h"
-#include "mongo/client/remote_command_targeter_mock.h"
-#include "mongo/db/commands.h"
#include "mongo/db/s/collection_metadata.h"
-#include "mongo/db/s/metadata_loader.h"
#include "mongo/s/catalog/type_chunk.h"
-#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/chunk_version.h"
-#include "mongo/s/shard_server_test_fixture.h"
-#include "mongo/util/scopeguard.h"
+#include "mongo/stdx/memory.h"
+#include "mongo/unittest/unittest.h"
namespace mongo {
namespace {
-using std::string;
-using std::unique_ptr;
-using std::vector;
using unittest::assertGet;
-class NoChunkFixture : public ShardServerTestFixture {
+class NoChunkFixture : public unittest::Test {
protected:
- void setUp() {
- ShardServerTestFixture::setUp();
-
- OID epoch = OID::gen();
-
- CollectionType collType;
- collType.setNs(NamespaceString{"test.foo"});
- collType.setKeyPattern(BSON("a" << 1));
- collType.setUnique(false);
- collType.setUpdatedAt(Date_t::fromMillisSinceEpoch(1));
- collType.setEpoch(epoch);
- ASSERT_OK(collType.validate());
-
- // Need a chunk on another shard, otherwise the chunks are invalid in general and we
- // can't load metadata
- ChunkType chunkType;
- chunkType.setNS(NamespaceString{"test.foo"}.ns());
- chunkType.setShard(ShardId("shard0001"));
- chunkType.setMin(BSON("a" << MINKEY));
- chunkType.setMax(BSON("a" << MAXKEY));
- chunkType.setVersion(ChunkVersion(1, 0, epoch));
- ASSERT_OK(chunkType.validate());
- std::vector<BSONObj> chunksToSend{chunkType.toConfigBSON()};
-
- auto future = launchAsync([this] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
- auto opCtx = cc().makeOperationContext();
-
- auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
- catalogClient(),
- "test.foo",
- "shard0000",
- NULL, /* no old metadata */
- &_metadata);
- ASSERT_OK(status);
- ASSERT_EQUALS(0u, _metadata.getNumChunks());
- });
-
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{collType.toBSON()});
- expectFindOnConfigSendBSONObjVector(chunksToSend);
-
- future.timed_get(kFutureTimeout);
+ std::unique_ptr<CollectionMetadata> makeCollectionMetadata() const {
+ const OID epoch = OID::gen();
+
+ return stdx::make_unique<CollectionMetadata>(
+ BSON("a" << 1),
+ ChunkVersion(1, 0, epoch),
+ ChunkVersion(0, 0, epoch),
+ SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>());
}
-
- const CollectionMetadata& getCollMetadata() const {
- return _metadata;
- }
-
-private:
- CollectionMetadata _metadata;
};
TEST_F(NoChunkFixture, BasicBelongsToMe) {
- ASSERT_FALSE(getCollMetadata().keyBelongsToMe(BSON("a" << MINKEY)));
- ASSERT_FALSE(getCollMetadata().keyBelongsToMe(BSON("a" << 10)));
+ ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MINKEY)));
+ ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 10)));
}
TEST_F(NoChunkFixture, CompoundKeyBelongsToMe) {
- ASSERT_FALSE(getCollMetadata().keyBelongsToMe(BSON("a" << 1 << "b" << 2)));
+ ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 1 << "b" << 2)));
}
TEST_F(NoChunkFixture, IsKeyValid) {
- ASSERT_TRUE(getCollMetadata().isValidKey(BSON("a"
- << "abcde")));
- ASSERT_TRUE(getCollMetadata().isValidKey(BSON("a" << 3)));
- ASSERT_FALSE(getCollMetadata().isValidKey(BSON("a"
- << "abcde"
- << "b"
- << 1)));
- ASSERT_FALSE(getCollMetadata().isValidKey(BSON("c"
- << "abcde")));
+ ASSERT_TRUE(makeCollectionMetadata()->isValidKey(BSON("a"
+ << "abcde")));
+ ASSERT_TRUE(makeCollectionMetadata()->isValidKey(BSON("a" << 3)));
+ ASSERT_FALSE(makeCollectionMetadata()->isValidKey(BSON("a"
+ << "abcde"
+ << "b"
+ << 1)));
+ ASSERT_FALSE(makeCollectionMetadata()->isValidKey(BSON("c"
+ << "abcde")));
}
TEST_F(NoChunkFixture, getNextFromEmpty) {
ChunkType nextChunk;
- ASSERT(!getCollMetadata().getNextChunk(getCollMetadata().getMinKey(), &nextChunk));
+ ASSERT(
+ !makeCollectionMetadata()->getNextChunk(makeCollectionMetadata()->getMinKey(), &nextChunk));
}
TEST_F(NoChunkFixture, getDifferentFromEmpty) {
ChunkType differentChunk;
- ASSERT(!getCollMetadata().getDifferentChunk(getCollMetadata().getMinKey(), &differentChunk));
+ ASSERT(!makeCollectionMetadata()->getDifferentChunk(makeCollectionMetadata()->getMinKey(),
+ &differentChunk));
}
TEST_F(NoChunkFixture, NoPendingChunks) {
- ASSERT(!getCollMetadata().keyIsPending(BSON("a" << 15)));
- ASSERT(!getCollMetadata().keyIsPending(BSON("a" << 25)));
+ ASSERT(!makeCollectionMetadata()->keyIsPending(BSON("a" << 15)));
+ ASSERT(!makeCollectionMetadata()->keyIsPending(BSON("a" << 25)));
}
TEST_F(NoChunkFixture, FirstPendingChunk) {
@@ -143,7 +96,7 @@ TEST_F(NoChunkFixture, FirstPendingChunk) {
chunk.setMin(BSON("a" << 10));
chunk.setMax(BSON("a" << 20));
- unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
+ auto cloned(makeCollectionMetadata()->clonePlusPending(chunk));
ASSERT(cloned->keyIsPending(BSON("a" << 15)));
ASSERT(!cloned->keyIsPending(BSON("a" << 25)));
ASSERT(cloned->keyIsPending(BSON("a" << 10)));
@@ -155,7 +108,7 @@ TEST_F(NoChunkFixture, EmptyMultiPendingChunk) {
chunk.setMin(BSON("a" << 10));
chunk.setMax(BSON("a" << 20));
- unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
+ auto cloned(makeCollectionMetadata()->clonePlusPending(chunk));
chunk.setMin(BSON("a" << 40));
chunk.setMax(BSON("a" << 50));
@@ -172,7 +125,7 @@ TEST_F(NoChunkFixture, MinusPendingChunk) {
chunk.setMin(BSON("a" << 10));
chunk.setMax(BSON("a" << 20));
- unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
+ auto cloned(makeCollectionMetadata()->clonePlusPending(chunk));
cloned = cloned->cloneMinusPending(chunk);
ASSERT(!cloned->keyIsPending(BSON("a" << 15)));
@@ -184,7 +137,7 @@ TEST_F(NoChunkFixture, OverlappingPendingChunk) {
chunk.setMin(BSON("a" << 10));
chunk.setMax(BSON("a" << 30));
- unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
+ auto cloned(makeCollectionMetadata()->clonePlusPending(chunk));
chunk.setMin(BSON("a" << 20));
chunk.setMax(BSON("a" << 40));
@@ -201,7 +154,7 @@ TEST_F(NoChunkFixture, OverlappingPendingChunks) {
chunk.setMin(BSON("a" << 10));
chunk.setMax(BSON("a" << 30));
- unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
+ auto cloned(makeCollectionMetadata()->clonePlusPending(chunk));
chunk.setMin(BSON("a" << 30));
chunk.setMax(BSON("a" << 50));
@@ -220,39 +173,39 @@ TEST_F(NoChunkFixture, OverlappingPendingChunks) {
}
TEST_F(NoChunkFixture, OrphanedDataRangeBegin) {
- const CollectionMetadata& metadata = getCollMetadata();
+ auto metadata(makeCollectionMetadata());
KeyRange keyRange;
- BSONObj lookupKey = metadata.getMinKey();
- ASSERT(metadata.getNextOrphanRange(lookupKey, &keyRange));
+ BSONObj lookupKey = metadata->getMinKey();
+ ASSERT(metadata->getNextOrphanRange(lookupKey, &keyRange));
- ASSERT(keyRange.minKey.woCompare(metadata.getMinKey()) == 0);
- ASSERT(keyRange.maxKey.woCompare(metadata.getMaxKey()) == 0);
+ ASSERT(keyRange.minKey.woCompare(metadata->getMinKey()) == 0);
+ ASSERT(keyRange.maxKey.woCompare(metadata->getMaxKey()) == 0);
// Make sure we don't have any more ranges
- ASSERT(!metadata.getNextOrphanRange(keyRange.maxKey, &keyRange));
+ ASSERT(!metadata->getNextOrphanRange(keyRange.maxKey, &keyRange));
}
TEST_F(NoChunkFixture, OrphanedDataRangeMiddle) {
- const CollectionMetadata& metadata = getCollMetadata();
+ auto metadata(makeCollectionMetadata());
KeyRange keyRange;
BSONObj lookupKey = BSON("a" << 20);
- ASSERT(metadata.getNextOrphanRange(lookupKey, &keyRange));
+ ASSERT(metadata->getNextOrphanRange(lookupKey, &keyRange));
- ASSERT(keyRange.minKey.woCompare(metadata.getMinKey()) == 0);
- ASSERT(keyRange.maxKey.woCompare(metadata.getMaxKey()) == 0);
- ASSERT(keyRange.keyPattern.woCompare(metadata.getKeyPattern()) == 0);
+ ASSERT(keyRange.minKey.woCompare(metadata->getMinKey()) == 0);
+ ASSERT(keyRange.maxKey.woCompare(metadata->getMaxKey()) == 0);
+ ASSERT(keyRange.keyPattern.woCompare(metadata->getKeyPattern()) == 0);
// Make sure we don't have any more ranges
- ASSERT(!metadata.getNextOrphanRange(keyRange.maxKey, &keyRange));
+ ASSERT(!metadata->getNextOrphanRange(keyRange.maxKey, &keyRange));
}
TEST_F(NoChunkFixture, OrphanedDataRangeEnd) {
- const CollectionMetadata& metadata = getCollMetadata();
+ auto metadata(makeCollectionMetadata());
KeyRange keyRange;
- ASSERT(!metadata.getNextOrphanRange(metadata.getMaxKey(), &keyRange));
+ ASSERT(!metadata->getNextOrphanRange(metadata->getMaxKey(), &keyRange));
}
TEST_F(NoChunkFixture, PendingOrphanedDataRanges) {
@@ -260,7 +213,7 @@ TEST_F(NoChunkFixture, PendingOrphanedDataRanges) {
chunk.setMin(BSON("a" << 10));
chunk.setMax(BSON("a" << 20));
- unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
+ auto cloned(makeCollectionMetadata()->clonePlusPending(chunk));
KeyRange keyRange;
ASSERT(cloned->getNextOrphanRange(cloned->getMinKey(), &keyRange));
@@ -280,93 +233,59 @@ TEST_F(NoChunkFixture, PendingOrphanedDataRanges) {
* Fixture with single chunk containing:
* [10->20)
*/
-class SingleChunkFixture : public ShardServerTestFixture {
+class SingleChunkFixture : public unittest::Test {
protected:
- void setUp() {
- ShardServerTestFixture::setUp();
-
- CollectionType collType;
- collType.setNs(NamespaceString{"test.foo"});
- collType.setKeyPattern(BSON("a" << 1));
- collType.setUnique(false);
- collType.setUpdatedAt(Date_t::fromMillisSinceEpoch(1));
- collType.setEpoch(chunkVersion.epoch());
-
- BSONObj fooSingle = BSON(
- ChunkType::name("test.foo-a_10")
- << ChunkType::ns("test.foo")
- << ChunkType::min(BSON("a" << 10))
- << ChunkType::max(BSON("a" << 20))
- << ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(chunkVersion.toLong()))
- << ChunkType::DEPRECATED_epoch(chunkVersion.epoch())
- << ChunkType::shard("shard0000"));
- std::vector<BSONObj> chunksToSend{fooSingle};
-
- auto future = launchAsync([this] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
- auto opCtx = cc().makeOperationContext();
-
- auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
- catalogClient(),
- "test.foo",
- "shard0000",
- NULL, /* no old metadata */
- &_metadata);
- ASSERT_OK(status);
- });
-
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{collType.toBSON()});
- expectFindOnConfigSendBSONObjVector(chunksToSend);
-
- future.timed_get(kFutureTimeout);
- }
-
- const CollectionMetadata& getCollMetadata() const {
- return _metadata;
+ std::unique_ptr<CollectionMetadata> makeCollectionMetadata() const {
+ const OID epoch = OID::gen();
+
+ auto shardChunksMap =
+ SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>();
+ shardChunksMap.emplace(BSON("a" << 10),
+ CachedChunkInfo(BSON("a" << 20), ChunkVersion(1, 0, epoch)));
+
+ return stdx::make_unique<CollectionMetadata>(BSON("a" << 1),
+ ChunkVersion(1, 0, epoch),
+ ChunkVersion(1, 0, epoch),
+ std::move(shardChunksMap));
}
-
- const ChunkVersion chunkVersion{ChunkVersion(1, 0, OID::gen())};
-
-private:
- CollectionMetadata _metadata;
};
TEST_F(SingleChunkFixture, BasicBelongsToMe) {
- ASSERT(getCollMetadata().keyBelongsToMe(BSON("a" << 10)));
- ASSERT(getCollMetadata().keyBelongsToMe(BSON("a" << 15)));
- ASSERT(getCollMetadata().keyBelongsToMe(BSON("a" << 19)));
+ ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 10)));
+ ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 15)));
+ ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 19)));
}
TEST_F(SingleChunkFixture, DoesntBelongsToMe) {
- ASSERT_FALSE(getCollMetadata().keyBelongsToMe(BSON("a" << 0)));
- ASSERT_FALSE(getCollMetadata().keyBelongsToMe(BSON("a" << 9)));
- ASSERT_FALSE(getCollMetadata().keyBelongsToMe(BSON("a" << 20)));
- ASSERT_FALSE(getCollMetadata().keyBelongsToMe(BSON("a" << 1234)));
- ASSERT_FALSE(getCollMetadata().keyBelongsToMe(BSON("a" << MINKEY)));
- ASSERT_FALSE(getCollMetadata().keyBelongsToMe(BSON("a" << MAXKEY)));
+ ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 0)));
+ ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 9)));
+ ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 20)));
+ ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 1234)));
+ ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MINKEY)));
+ ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MAXKEY)));
}
TEST_F(SingleChunkFixture, CompoudKeyBelongsToMe) {
- ASSERT(getCollMetadata().keyBelongsToMe(BSON("a" << 15 << "a" << 14)));
+ ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 15 << "a" << 14)));
}
TEST_F(SingleChunkFixture, getNextFromEmpty) {
ChunkType nextChunk;
- ASSERT(getCollMetadata().getNextChunk(getCollMetadata().getMinKey(), &nextChunk));
+ ASSERT(
+ makeCollectionMetadata()->getNextChunk(makeCollectionMetadata()->getMinKey(), &nextChunk));
ASSERT_EQUALS(0, nextChunk.getMin().woCompare(BSON("a" << 10)));
ASSERT_EQUALS(0, nextChunk.getMax().woCompare(BSON("a" << 20)));
- ASSERT_EQUALS(chunkVersion, nextChunk.getVersion());
}
TEST_F(SingleChunkFixture, GetLastChunkIsFalse) {
ChunkType nextChunk;
- ASSERT(!getCollMetadata().getNextChunk(getCollMetadata().getMaxKey(), &nextChunk));
+ ASSERT(
+ !makeCollectionMetadata()->getNextChunk(makeCollectionMetadata()->getMaxKey(), &nextChunk));
}
TEST_F(SingleChunkFixture, getDifferentFromOneIsFalse) {
ChunkType differentChunk;
- ASSERT(!getCollMetadata().getDifferentChunk(BSON("a" << 10), &differentChunk));
+ ASSERT(!makeCollectionMetadata()->getDifferentChunk(BSON("a" << 10), &differentChunk));
}
TEST_F(SingleChunkFixture, PlusPendingChunk) {
@@ -374,7 +293,7 @@ TEST_F(SingleChunkFixture, PlusPendingChunk) {
chunk.setMin(BSON("a" << 20));
chunk.setMax(BSON("a" << 30));
- unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
+ auto cloned(makeCollectionMetadata()->clonePlusPending(chunk));
ASSERT(cloned->keyBelongsToMe(BSON("a" << 15)));
ASSERT(!cloned->keyBelongsToMe(BSON("a" << 25)));
@@ -384,174 +303,96 @@ TEST_F(SingleChunkFixture, PlusPendingChunk) {
TEST_F(SingleChunkFixture, ChunkOrphanedDataRanges) {
KeyRange keyRange;
- ASSERT(getCollMetadata().getNextOrphanRange(getCollMetadata().getMinKey(), &keyRange));
- ASSERT(keyRange.minKey.woCompare(getCollMetadata().getMinKey()) == 0);
+ ASSERT(makeCollectionMetadata()->getNextOrphanRange(makeCollectionMetadata()->getMinKey(),
+ &keyRange));
+ ASSERT(keyRange.minKey.woCompare(makeCollectionMetadata()->getMinKey()) == 0);
ASSERT(keyRange.maxKey.woCompare(BSON("a" << 10)) == 0);
- ASSERT(keyRange.keyPattern.woCompare(getCollMetadata().getKeyPattern()) == 0);
+ ASSERT(keyRange.keyPattern.woCompare(makeCollectionMetadata()->getKeyPattern()) == 0);
- ASSERT(getCollMetadata().getNextOrphanRange(keyRange.maxKey, &keyRange));
+ ASSERT(makeCollectionMetadata()->getNextOrphanRange(keyRange.maxKey, &keyRange));
ASSERT(keyRange.minKey.woCompare(BSON("a" << 20)) == 0);
- ASSERT(keyRange.maxKey.woCompare(getCollMetadata().getMaxKey()) == 0);
- ASSERT(keyRange.keyPattern.woCompare(getCollMetadata().getKeyPattern()) == 0);
+ ASSERT(keyRange.maxKey.woCompare(makeCollectionMetadata()->getMaxKey()) == 0);
+ ASSERT(keyRange.keyPattern.woCompare(makeCollectionMetadata()->getKeyPattern()) == 0);
- ASSERT(!getCollMetadata().getNextOrphanRange(keyRange.maxKey, &keyRange));
+ ASSERT(!makeCollectionMetadata()->getNextOrphanRange(keyRange.maxKey, &keyRange));
}
/**
* Fixture with single chunk containing:
* [(min, min)->(max, max))
*/
-class SingleChunkMinMaxCompoundKeyFixture : public ShardServerTestFixture {
+class SingleChunkMinMaxCompoundKeyFixture : public unittest::Test {
protected:
- void setUp() {
- ShardServerTestFixture::setUp();
-
- OID epoch = OID::gen();
-
- ChunkVersion chunkVersion = ChunkVersion(1, 0, epoch);
-
- CollectionType collType;
- collType.setNs(NamespaceString{"test.foo"});
- collType.setKeyPattern(BSON("a" << 1));
- collType.setUnique(false);
- collType.setUpdatedAt(Date_t::fromMillisSinceEpoch(1));
- collType.setEpoch(epoch);
-
- BSONObj fooSingle = BSON(
- ChunkType::name("test.foo-a_MinKey")
- << ChunkType::ns("test.foo")
- << ChunkType::min(BSON("a" << MINKEY << "b" << MINKEY))
- << ChunkType::max(BSON("a" << MAXKEY << "b" << MAXKEY))
- << ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(chunkVersion.toLong()))
- << ChunkType::DEPRECATED_epoch(epoch)
- << ChunkType::shard("shard0000"));
- std::vector<BSONObj> chunksToSend{fooSingle};
-
- auto future = launchAsync([this] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
- auto opCtx = cc().makeOperationContext();
-
- auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
- catalogClient(),
- "test.foo",
- "shard0000",
- NULL, /* no old metadata */
- &_metadata);
- ASSERT_OK(status);
- });
-
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{collType.toBSON()});
- expectFindOnConfigSendBSONObjVector(chunksToSend);
-
- future.timed_get(kFutureTimeout);
- }
-
- const CollectionMetadata& getCollMetadata() const {
- return _metadata;
+ std::unique_ptr<CollectionMetadata> makeCollectionMetadata() const {
+ const OID epoch = OID::gen();
+
+ auto shardChunksMap =
+ SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>();
+ shardChunksMap.emplace(
+ BSON("a" << MINKEY << "b" << MINKEY),
+ CachedChunkInfo(BSON("a" << MAXKEY << "b" << MAXKEY), ChunkVersion(1, 0, epoch)));
+
+ return stdx::make_unique<CollectionMetadata>(BSON("a" << 1 << "b" << 1),
+ ChunkVersion(1, 0, epoch),
+ ChunkVersion(1, 0, epoch),
+ std::move(shardChunksMap));
}
-
- const ChunkVersion chunkVersion{ChunkVersion(1, 0, OID::gen())};
-
-private:
- CollectionMetadata _metadata;
};
// Note: no tests for single key belongsToMe because they are not allowed
// if shard key is compound.
TEST_F(SingleChunkMinMaxCompoundKeyFixture, CompoudKeyBelongsToMe) {
- ASSERT(getCollMetadata().keyBelongsToMe(BSON("a" << MINKEY << "b" << MINKEY)));
- ASSERT_FALSE(getCollMetadata().keyBelongsToMe(BSON("a" << MAXKEY << "b" << MAXKEY)));
- ASSERT(getCollMetadata().keyBelongsToMe(BSON("a" << MINKEY << "b" << 10)));
- ASSERT(getCollMetadata().keyBelongsToMe(BSON("a" << 10 << "b" << 20)));
+ ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MINKEY << "b" << MINKEY)));
+ ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MAXKEY << "b" << MAXKEY)));
+ ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MINKEY << "b" << 10)));
+ ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 10 << "b" << 20)));
}
/**
* Fixture with chunks:
* [(10, 0)->(20, 0)), [(30, 0)->(40, 0))
*/
-class TwoChunksWithGapCompoundKeyFixture : public ShardServerTestFixture {
+class TwoChunksWithGapCompoundKeyFixture : public unittest::Test {
protected:
- void setUp() {
- ShardServerTestFixture::setUp();
-
- ChunkVersion chunkVersion = ChunkVersion(1, 0, OID::gen());
-
- CollectionType collType;
- collType.setNs(NamespaceString{"test.foo"});
- collType.setKeyPattern(BSON("a" << 1));
- collType.setUnique(false);
- collType.setUpdatedAt(Date_t::fromMillisSinceEpoch(1));
- collType.setEpoch(chunkVersion.epoch());
-
- std::vector<BSONObj> chunksToSend;
- chunksToSend.push_back(BSON(
- ChunkType::name("test.foo-a_10")
- << ChunkType::ns("test.foo")
- << ChunkType::min(BSON("a" << 10 << "b" << 0))
- << ChunkType::max(BSON("a" << 20 << "b" << 0))
- << ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(chunkVersion.toLong()))
- << ChunkType::DEPRECATED_epoch(chunkVersion.epoch())
- << ChunkType::shard("shard0000")));
-
- chunkVersion.incMinor();
-
- chunksToSend.push_back(BSON(
- ChunkType::name("test.foo-a_10")
- << ChunkType::ns("test.foo")
- << ChunkType::min(BSON("a" << 30 << "b" << 0))
- << ChunkType::max(BSON("a" << 40 << "b" << 0))
- << ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(chunkVersion.toLong()))
- << ChunkType::DEPRECATED_epoch(chunkVersion.epoch())
- << ChunkType::shard("shard0000")));
-
- auto future = launchAsync([this] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
- auto opCtx = cc().makeOperationContext();
-
- auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
- catalogClient(),
- "test.foo",
- "shard0000",
- NULL, /* no old metadata */
- &_metadata);
- ASSERT_OK(status);
- });
-
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{collType.toBSON()});
- expectFindOnConfigSendBSONObjVector(chunksToSend);
-
- future.timed_get(kFutureTimeout);
+ std::unique_ptr<CollectionMetadata> makeCollectionMetadata() const {
+ const OID epoch = OID::gen();
+
+ auto shardChunksMap =
+ SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>();
+ shardChunksMap.emplace(
+ BSON("a" << 10 << "b" << 0),
+ CachedChunkInfo(BSON("a" << 20 << "b" << 0), ChunkVersion(1, 0, epoch)));
+ shardChunksMap.emplace(
+ BSON("a" << 30 << "b" << 0),
+ CachedChunkInfo(BSON("a" << 40 << "b" << 0), ChunkVersion(1, 1, epoch)));
+
+ return stdx::make_unique<CollectionMetadata>(BSON("a" << 1 << "b" << 1),
+ ChunkVersion(1, 1, epoch),
+ ChunkVersion(1, 1, epoch),
+ std::move(shardChunksMap));
}
-
- const CollectionMetadata& getCollMetadata() const {
- return _metadata;
- }
-
-private:
- CollectionMetadata _metadata;
};
TEST_F(TwoChunksWithGapCompoundKeyFixture, ChunkGapOrphanedDataRanges) {
KeyRange keyRange;
- ASSERT(getCollMetadata().getNextOrphanRange(getCollMetadata().getMinKey(), &keyRange));
- ASSERT(keyRange.minKey.woCompare(getCollMetadata().getMinKey()) == 0);
+ ASSERT(makeCollectionMetadata()->getNextOrphanRange(makeCollectionMetadata()->getMinKey(),
+ &keyRange));
+ ASSERT(keyRange.minKey.woCompare(makeCollectionMetadata()->getMinKey()) == 0);
ASSERT(keyRange.maxKey.woCompare(BSON("a" << 10 << "b" << 0)) == 0);
- ASSERT(keyRange.keyPattern.woCompare(getCollMetadata().getKeyPattern()) == 0);
+ ASSERT(keyRange.keyPattern.woCompare(makeCollectionMetadata()->getKeyPattern()) == 0);
- ASSERT(getCollMetadata().getNextOrphanRange(keyRange.maxKey, &keyRange));
+ ASSERT(makeCollectionMetadata()->getNextOrphanRange(keyRange.maxKey, &keyRange));
ASSERT(keyRange.minKey.woCompare(BSON("a" << 20 << "b" << 0)) == 0);
ASSERT(keyRange.maxKey.woCompare(BSON("a" << 30 << "b" << 0)) == 0);
- ASSERT(keyRange.keyPattern.woCompare(getCollMetadata().getKeyPattern()) == 0);
+ ASSERT(keyRange.keyPattern.woCompare(makeCollectionMetadata()->getKeyPattern()) == 0);
- ASSERT(getCollMetadata().getNextOrphanRange(keyRange.maxKey, &keyRange));
+ ASSERT(makeCollectionMetadata()->getNextOrphanRange(keyRange.maxKey, &keyRange));
ASSERT(keyRange.minKey.woCompare(BSON("a" << 40 << "b" << 0)) == 0);
- ASSERT(keyRange.maxKey.woCompare(getCollMetadata().getMaxKey()) == 0);
- ASSERT(keyRange.keyPattern.woCompare(getCollMetadata().getKeyPattern()) == 0);
+ ASSERT(keyRange.maxKey.woCompare(makeCollectionMetadata()->getMaxKey()) == 0);
+ ASSERT(keyRange.keyPattern.woCompare(makeCollectionMetadata()->getKeyPattern()) == 0);
- ASSERT(!getCollMetadata().getNextOrphanRange(keyRange.maxKey, &keyRange));
+ ASSERT(!makeCollectionMetadata()->getNextOrphanRange(keyRange.maxKey, &keyRange));
}
TEST_F(TwoChunksWithGapCompoundKeyFixture, ChunkGapAndPendingOrphanedDataRanges) {
@@ -559,7 +400,7 @@ TEST_F(TwoChunksWithGapCompoundKeyFixture, ChunkGapAndPendingOrphanedDataRanges)
chunk.setMin(BSON("a" << 20 << "b" << 0));
chunk.setMax(BSON("a" << 30 << "b" << 0));
- unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
+ auto cloned(makeCollectionMetadata()->clonePlusPending(chunk));
KeyRange keyRange;
ASSERT(cloned->getNextOrphanRange(cloned->getMinKey(), &keyRange));
@@ -579,150 +420,97 @@ TEST_F(TwoChunksWithGapCompoundKeyFixture, ChunkGapAndPendingOrphanedDataRanges)
* Fixture with chunk containing:
* [min->10) , [10->20) , <gap> , [30->max)
*/
-class ThreeChunkWithRangeGapFixture : public ShardServerTestFixture {
+class ThreeChunkWithRangeGapFixture : public unittest::Test {
protected:
- void setUp() {
- ShardServerTestFixture::setUp();
-
- OID epoch = OID::gen();
-
- CollectionType collType;
- collType.setNs(NamespaceString{"x.y"});
- collType.setKeyPattern(BSON("a" << 1));
- collType.setUnique(false);
- collType.setUpdatedAt(Date_t::fromMillisSinceEpoch(1));
- collType.setEpoch(epoch);
-
- std::vector<BSONObj> chunksToSend;
- {
- ChunkVersion version(1, 1, epoch);
- chunksToSend.push_back(BSON(
- ChunkType::name("x.y-a_MinKey")
- << ChunkType::ns("x.y")
- << ChunkType::min(BSON("a" << MINKEY))
- << ChunkType::max(BSON("a" << 10))
- << ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(version.toLong()))
- << ChunkType::DEPRECATED_epoch(version.epoch())
- << ChunkType::shard("shard0000")));
- }
-
- {
- ChunkVersion version(1, 3, epoch);
- chunksToSend.push_back(BSON(
- ChunkType::name("x.y-a_10")
- << ChunkType::ns("x.y")
- << ChunkType::min(BSON("a" << 10))
- << ChunkType::max(BSON("a" << 20))
- << ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(version.toLong()))
- << ChunkType::DEPRECATED_epoch(version.epoch())
- << ChunkType::shard("shard0000")));
- }
-
- {
- ChunkVersion version(1, 2, epoch);
- chunksToSend.push_back(BSON(
- ChunkType::name("x.y-a_30")
- << ChunkType::ns("x.y")
- << ChunkType::min(BSON("a" << 30))
- << ChunkType::max(BSON("a" << MAXKEY))
- << ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(version.toLong()))
- << ChunkType::DEPRECATED_epoch(version.epoch())
- << ChunkType::shard("shard0000")));
- }
-
- auto future = launchAsync([this] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
- auto opCtx = cc().makeOperationContext();
-
- auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
- catalogClient(),
- "test.foo",
- "shard0000",
- NULL, /* no old metadata */
- &_metadata);
- ASSERT_OK(status);
- });
-
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{collType.toBSON()});
- expectFindOnConfigSendBSONObjVector(chunksToSend);
-
- future.timed_get(kFutureTimeout);
+ std::unique_ptr<CollectionMetadata> makeCollectionMetadata() const {
+ const OID epoch = OID::gen();
+
+ auto shardChunksMap =
+ SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>();
+ shardChunksMap.emplace(BSON("a" << MINKEY),
+ CachedChunkInfo(BSON("a" << 10), ChunkVersion(1, 1, epoch)));
+ shardChunksMap.emplace(BSON("a" << 10),
+ CachedChunkInfo(BSON("a" << 20), ChunkVersion(1, 3, epoch)));
+ shardChunksMap.emplace(BSON("a" << 30),
+ CachedChunkInfo(BSON("a" << MAXKEY), ChunkVersion(1, 2, epoch)));
+
+ return stdx::make_unique<CollectionMetadata>(BSON("a" << 1),
+ ChunkVersion(1, 3, epoch),
+ ChunkVersion(1, 3, epoch),
+ std::move(shardChunksMap));
}
-
- const CollectionMetadata& getCollMetadata() const {
- return _metadata;
- }
-
-private:
- CollectionMetadata _metadata;
};
TEST_F(ThreeChunkWithRangeGapFixture, ChunkVersionsMatch) {
- const OID epoch = getCollMetadata().getCollVersion().epoch();
+ auto metadata(makeCollectionMetadata());
ChunkType chunk;
- ASSERT(getCollMetadata().getNextChunk(BSON("a" << MINKEY), &chunk));
- ASSERT_EQ(ChunkVersion(1, 1, epoch), chunk.getVersion());
+ ASSERT(metadata->getNextChunk(BSON("a" << MINKEY), &chunk));
+ ASSERT_EQ(ChunkVersion(1, 1, metadata->getCollVersion().epoch()), chunk.getVersion());
+ ASSERT_BSONOBJ_EQ(metadata->getMinKey(), chunk.getMin());
- ASSERT(getCollMetadata().getNextChunk(BSON("a" << 30), &chunk));
- ASSERT_EQ(ChunkVersion(1, 2, epoch), chunk.getVersion());
+ ASSERT(metadata->getNextChunk(BSON("a" << 10), &chunk));
+ ASSERT_EQ(ChunkVersion(1, 3, metadata->getCollVersion().epoch()), chunk.getVersion());
- ASSERT(getCollMetadata().getNextChunk(BSON("a" << 10), &chunk));
- ASSERT_EQ(ChunkVersion(1, 3, epoch), chunk.getVersion());
+ ASSERT(metadata->getNextChunk(BSON("a" << 30), &chunk));
+ ASSERT_EQ(ChunkVersion(1, 2, metadata->getCollVersion().epoch()), chunk.getVersion());
+ ASSERT_BSONOBJ_EQ(metadata->getMaxKey(), chunk.getMax());
}
TEST_F(ThreeChunkWithRangeGapFixture, ShardOwnsDoc) {
- ASSERT(getCollMetadata().keyBelongsToMe(BSON("a" << 5)));
- ASSERT(getCollMetadata().keyBelongsToMe(BSON("a" << 10)));
- ASSERT(getCollMetadata().keyBelongsToMe(BSON("a" << 30)));
- ASSERT(getCollMetadata().keyBelongsToMe(BSON("a" << 40)));
+ ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 5)));
+ ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 10)));
+ ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 30)));
+ ASSERT(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 40)));
}
TEST_F(ThreeChunkWithRangeGapFixture, ShardDoesntOwnDoc) {
- ASSERT_FALSE(getCollMetadata().keyBelongsToMe(BSON("a" << 25)));
- ASSERT_FALSE(getCollMetadata().keyBelongsToMe(BSON("a" << MAXKEY)));
+ ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << 25)));
+ ASSERT_FALSE(makeCollectionMetadata()->keyBelongsToMe(BSON("a" << MAXKEY)));
}
TEST_F(ThreeChunkWithRangeGapFixture, GetNextFromEmpty) {
ChunkType nextChunk;
- ASSERT(getCollMetadata().getNextChunk(getCollMetadata().getMinKey(), &nextChunk));
+ ASSERT(
+ makeCollectionMetadata()->getNextChunk(makeCollectionMetadata()->getMinKey(), &nextChunk));
ASSERT_EQUALS(0, nextChunk.getMin().woCompare(BSON("a" << MINKEY)));
ASSERT_EQUALS(0, nextChunk.getMax().woCompare(BSON("a" << 10)));
}
TEST_F(ThreeChunkWithRangeGapFixture, GetNextFromMiddle) {
ChunkType nextChunk;
- ASSERT(getCollMetadata().getNextChunk(BSON("a" << 20), &nextChunk));
+ ASSERT(makeCollectionMetadata()->getNextChunk(BSON("a" << 20), &nextChunk));
ASSERT_EQUALS(0, nextChunk.getMin().woCompare(BSON("a" << 30)));
ASSERT_EQUALS(0, nextChunk.getMax().woCompare(BSON("a" << MAXKEY)));
}
TEST_F(ThreeChunkWithRangeGapFixture, GetNextFromLast) {
ChunkType nextChunk;
- ASSERT(getCollMetadata().getNextChunk(BSON("a" << 30), &nextChunk));
+ ASSERT(makeCollectionMetadata()->getNextChunk(BSON("a" << 30), &nextChunk));
ASSERT_EQUALS(0, nextChunk.getMin().woCompare(BSON("a" << 30)));
ASSERT_EQUALS(0, nextChunk.getMax().woCompare(BSON("a" << MAXKEY)));
}
TEST_F(ThreeChunkWithRangeGapFixture, GetDifferentFromBeginning) {
+ auto metadata(makeCollectionMetadata());
+
ChunkType differentChunk;
- ASSERT(getCollMetadata().getDifferentChunk(getCollMetadata().getMinKey(), &differentChunk));
- ASSERT_EQUALS(0, differentChunk.getMin().woCompare(BSON("a" << 10)));
- ASSERT_EQUALS(0, differentChunk.getMax().woCompare(BSON("a" << 20)));
+ ASSERT(metadata->getDifferentChunk(metadata->getMinKey(), &differentChunk));
+ ASSERT_BSONOBJ_EQ(BSON("a" << 10), differentChunk.getMin());
+ ASSERT_BSONOBJ_EQ(BSON("a" << 20), differentChunk.getMax());
}
TEST_F(ThreeChunkWithRangeGapFixture, GetDifferentFromMiddle) {
ChunkType differentChunk;
- ASSERT(getCollMetadata().getDifferentChunk(BSON("a" << 10), &differentChunk));
+ ASSERT(makeCollectionMetadata()->getDifferentChunk(BSON("a" << 10), &differentChunk));
ASSERT_EQUALS(0, differentChunk.getMin().woCompare(BSON("a" << MINKEY)));
ASSERT_EQUALS(0, differentChunk.getMax().woCompare(BSON("a" << 10)));
}
TEST_F(ThreeChunkWithRangeGapFixture, GetDifferentFromLast) {
ChunkType differentChunk;
- ASSERT(getCollMetadata().getDifferentChunk(BSON("a" << 30), &differentChunk));
+ ASSERT(makeCollectionMetadata()->getDifferentChunk(BSON("a" << 30), &differentChunk));
ASSERT_EQUALS(0, differentChunk.getMin().woCompare(BSON("a" << MINKEY)));
ASSERT_EQUALS(0, differentChunk.getMax().woCompare(BSON("a" << 10)));
}
diff --git a/src/mongo/db/s/collection_range_deleter_test.cpp b/src/mongo/db/s/collection_range_deleter_test.cpp
index f00a345caca..2927379ff1c 100644
--- a/src/mongo/db/s/collection_range_deleter_test.cpp
+++ b/src/mongo/db/s/collection_range_deleter_test.cpp
@@ -72,13 +72,20 @@ void CollectionRangeDeleterTest::setUp() {
repl::getGlobalReplicationCoordinator()->setFollowerMode(repl::MemberState::RS_PRIMARY);
_opCtx = getServiceContext()->makeOperationContext(&cc());
_dbDirectClient = stdx::make_unique<DBDirectClient>(operationContext());
+
{
+ const OID epoch = OID::gen();
+
AutoGetCollection autoColl(operationContext(), kNamespaceString, MODE_IX);
auto collectionShardingState =
CollectionShardingState::get(operationContext(), kNamespaceString);
collectionShardingState->refreshMetadata(
operationContext(),
- stdx::make_unique<CollectionMetadata>(kKeyPattern, ChunkVersion(1, 1, OID::gen())));
+ stdx::make_unique<CollectionMetadata>(
+ kKeyPattern,
+ ChunkVersion(1, 0, epoch),
+ ChunkVersion(0, 0, epoch),
+ SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>()));
_metadataManager = collectionShardingState->getMetadataManagerForTest();
}
}
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index f03e7948374..98975244838 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -133,7 +133,7 @@ Status mergeChunks(OperationContext* opCtx,
AutoGetCollection autoColl(opCtx, nss, MODE_IS);
metadata = CollectionShardingState::get(opCtx, nss.ns())->getMetadata();
- if (!metadata || metadata->getKeyPattern().isEmpty()) {
+ if (!metadata) {
std::string errmsg = stream() << "could not merge chunks, collection " << nss.ns()
<< " is not sharded";
diff --git a/src/mongo/db/s/metadata_loader.cpp b/src/mongo/db/s/metadata_loader.cpp
deleted file mode 100644
index ee3714b71ac..00000000000
--- a/src/mongo/db/s/metadata_loader.cpp
+++ /dev/null
@@ -1,246 +0,0 @@
-/**
- * Copyright (C) 2012 10gen Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects
- * for all of the code used other than as permitted herein. If you modify
- * file(s) with this exception, you may extend this exception to your
- * version of the file(s), but you are not obligated to do so. If you do not
- * wish to do so, delete this exception statement from your version. If you
- * delete this exception statement from all source files in the program,
- * then also delete it in the license file.
- */
-
-#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kSharding
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/db/s/metadata_loader.h"
-
-#include <vector>
-
-#include "mongo/client/dbclientinterface.h"
-#include "mongo/db/dbdirectclient.h"
-#include "mongo/db/repl/replication_coordinator_impl.h"
-#include "mongo/db/s/collection_metadata.h"
-#include "mongo/db/s/shard_metadata_util.h"
-#include "mongo/rpc/get_status_from_command_result.h"
-#include "mongo/rpc/unique_message.h"
-#include "mongo/s/catalog/sharding_catalog_client.h"
-#include "mongo/s/catalog/type_chunk.h"
-#include "mongo/s/catalog/type_collection.h"
-#include "mongo/s/catalog/type_shard_collection.h"
-#include "mongo/s/chunk_diff.h"
-#include "mongo/s/chunk_version.h"
-#include "mongo/s/write_ops/batched_command_request.h"
-#include "mongo/util/log.h"
-
-namespace mongo {
-
-using std::make_pair;
-using std::map;
-using std::pair;
-using std::string;
-
-namespace {
-
-/**
- * This is an adapter so we can use config diffs - mongos and mongod do them slightly
- * differently.
- *
- * The mongod adapter here tracks only a single shard, and stores ranges by (min, max).
- */
-class SCMConfigDiffTracker : public ConfigDiffTracker<CachedChunkInfo> {
-public:
- SCMConfigDiffTracker(const std::string& ns,
- RangeMap* currMap,
- ChunkVersion* maxVersion,
- MaxChunkVersionMap* maxShardVersions,
- const ShardId& currShard)
- : ConfigDiffTracker<CachedChunkInfo>(ns, currMap, maxVersion, maxShardVersions),
- _currShard(currShard) {}
-
- virtual bool isTracked(const ChunkType& chunk) const {
- return chunk.getShard() == _currShard;
- }
-
- virtual pair<BSONObj, CachedChunkInfo> rangeFor(OperationContext* opCtx,
- const ChunkType& chunk) const {
- return make_pair(chunk.getMin(), CachedChunkInfo(chunk.getMax(), chunk.getVersion()));
- }
-
- virtual ShardId shardFor(OperationContext* opCtx, const ShardId& name) const {
- return name;
- }
-
- virtual string nameFrom(const string& shard) const {
- return shard;
- }
-
-private:
- const ShardId _currShard;
-};
-
-} // namespace
-
-Status MetadataLoader::makeCollectionMetadata(OperationContext* opCtx,
- ShardingCatalogClient* catalogClient,
- const string& ns,
- const string& shard,
- const CollectionMetadata* oldMetadata,
- CollectionMetadata* metadata) {
- invariant(serverGlobalParams.clusterRole == ClusterRole::ShardServer);
-
- bool isShardPrimary = false;
- if (repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase_UNSAFE(opCtx, ns)) {
- isShardPrimary = true;
- }
-
- Status initCollectionStatus =
- _initCollection(opCtx, catalogClient, ns, metadata, isShardPrimary);
- if (!initCollectionStatus.isOK()) {
- return initCollectionStatus;
- }
-
- return _initChunks(opCtx, catalogClient, ns, shard, oldMetadata, metadata, isShardPrimary);
-}
-
-
-Status MetadataLoader::_initCollection(OperationContext* opCtx,
- ShardingCatalogClient* catalogClient,
- const string& ns,
- CollectionMetadata* metadata,
- bool isShardPrimary) {
- StatusWith<std::pair<BSONObj, OID>> statusWithShardKeyAndEpoch =
- shardmetadatautil::getCollectionShardKeyAndEpoch(
- opCtx, catalogClient, NamespaceString(ns), isShardPrimary);
- if (!statusWithShardKeyAndEpoch.isOK()) {
- return statusWithShardKeyAndEpoch.getStatus();
- }
- std::pair<BSONObj, OID> shardKeyAndEpoch = statusWithShardKeyAndEpoch.getValue();
-
- metadata->_keyPattern = shardKeyAndEpoch.first;
- metadata->fillKeyPatternFields();
- metadata->_shardVersion = ChunkVersion(0, 0, shardKeyAndEpoch.second);
- metadata->_collVersion = ChunkVersion(0, 0, shardKeyAndEpoch.second);
-
- return Status::OK();
-}
-
-Status MetadataLoader::_initChunks(OperationContext* opCtx,
- ShardingCatalogClient* catalogClient,
- const string& ns,
- const string& shard,
- const CollectionMetadata* oldMetadata,
- CollectionMetadata* metadata,
- bool isShardPrimary) {
- const OID epoch = metadata->getCollVersion().epoch();
-
- SCMConfigDiffTracker::MaxChunkVersionMap versionMap;
- versionMap[shard] = metadata->_shardVersion;
-
- bool fullReload = true;
-
- // Check to see if we should use the old version or not.
- if (oldMetadata) {
- // If our epochs are compatible, it's useful to use the old metadata for diffs: this leads
- // to a performance gain because not all the chunks must be reloaded, just the ones this
- // shard has not seen -- they will have higher versions than present in oldMetadata.
- if (oldMetadata->getCollVersion().hasEqualEpoch(epoch)) {
- fullReload = false;
- invariant(oldMetadata->isValid());
-
- versionMap[shard] = oldMetadata->_shardVersion;
- metadata->_collVersion = oldMetadata->_collVersion;
-
- // TODO: This could be made more efficient if copying were not required, but
- // this is not reloaded as frequently as in mongos.
- metadata->_chunksMap = oldMetadata->_chunksMap;
-
- LOG(2) << "loading new chunks for collection " << ns
- << " using old metadata w/ version " << oldMetadata->getShardVersion() << " and "
- << metadata->_chunksMap.size() << " chunks";
- } else {
- log() << "reloading collection metadata for " << ns << " with new epoch "
- << epoch.toString() << ", the current epoch is "
- << oldMetadata->getCollVersion().epoch().toString();
- }
- }
-
- // Exposes the new metadata's range map and version to the "differ", which is ultimately
- // responsible for filling them in
- SCMConfigDiffTracker differ(
- ns, &metadata->_chunksMap, &metadata->_collVersion, &versionMap, shard);
-
- try {
- auto statusWithChunks = shardmetadatautil::getChunks(
- opCtx, catalogClient, NamespaceString(ns), metadata->_collVersion, isShardPrimary);
- if (!statusWithChunks.isOK()) {
- return statusWithChunks.getStatus();
- }
- auto chunks = statusWithChunks.getValue();
-
- //
- // The diff tracker should always find at least one chunk (the highest chunk we saw
- // last time). If not, something has changed on the config server (potentially between
- // when we read the collection data and when we read the chunks data).
- //
- int diffsApplied = differ.calculateConfigDiff(opCtx, chunks);
- if (diffsApplied > 0) {
- // Chunks found, return ok
- LOG(2) << "loaded " << diffsApplied << " chunks into new metadata for " << ns
- << " with version " << metadata->_collVersion;
-
- // If the last chunk was moved off of this shard, the shardVersion should be reset to
- // zero (if we did not conduct a full reload and oldMetadata was present,
- // versionMap[shard] was previously set to the oldMetadata's shardVersion for
- // performance gains).
- if (!fullReload && metadata->_chunksMap.empty()) {
- versionMap[shard] = ChunkVersion(0, 0, epoch);
- }
-
- metadata->_shardVersion = versionMap[shard];
- metadata->fillRanges();
-
- invariant(metadata->isValid());
- return Status::OK();
- } else if (diffsApplied == 0) {
- // No chunks found, the collection is dropping or we're confused
- // If this is a full reload, assume it is a drop for backwards compatibility
- // TODO: drop the config.collections entry *before* the chunks and eliminate this
- // ambiguity
- return {fullReload ? ErrorCodes::NamespaceNotFound : ErrorCodes::RemoteChangeDetected,
- str::stream() << "No chunks found when reloading " << ns
- << ", previous version was "
- << metadata->_collVersion.toString()
- << (fullReload ? ", this is a drop" : "")};
- } else {
- // Invalid chunks found, our epoch may have changed because we dropped/recreated the
- // collection
- return {ErrorCodes::RemoteChangeDetected,
- str::stream() << "Invalid chunks found when reloading " << ns
- << ", previous version was "
- << metadata->_collVersion.toString()
- << ", this should be rare"};
- }
- } catch (const DBException& ex) {
- return ex.toStatus();
- }
-}
-
-} // namespace mongo
diff --git a/src/mongo/db/s/metadata_loader.h b/src/mongo/db/s/metadata_loader.h
deleted file mode 100644
index 6ce6d901223..00000000000
--- a/src/mongo/db/s/metadata_loader.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
- * Copyright (C) 2012 10gen Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects
- * for all of the code used other than as permitted herein. If you modify
- * file(s) with this exception, you may extend this exception to your
- * version of the file(s), but you are not obligated to do so. If you do not
- * wish to do so, delete this exception statement from your version. If you
- * delete this exception statement from all source files in the program,
- * then also delete it in the license file.
- */
-
-#pragma once
-
-#include <string>
-#include <vector>
-
-#include "mongo/base/status.h"
-
-namespace mongo {
-
-class CollectionMetadata;
-class CollectionType;
-class NamespaceString;
-class OID;
-class OperationContext;
-class ShardingCatalogClient;
-
-/**
- * The MetadataLoader is responsible for interfacing with the config servers and previous
- * metadata to build new instances of CollectionMetadata. MetadataLoader is the "builder"
- * class for metadata.
- *
- * CollectionMetadata has both persisted and volatile state (for now) - the persisted
- * config server chunk state and the volatile pending state which is only tracked locally
- * while a server is the primary. This requires a two-step loading process - the persisted
- * chunk state *cannot* be loaded in a DBLock lock while the pending chunk state *must* be.
- *
- * Example usage:
- * beforeMetadata = <get latest local metadata>;
- * remoteMetadata = makeCollectionMetadata( beforeMetadata, remoteMetadata );
- * DBLock lock(opCtx, dbname, MODE_X);
- * afterMetadata = <get latest local metadata>;
- *
- * The loader will go out of its way to fetch the smallest amount of data possible
- * from the config server without sacrificing the freshness and accuracy of the metadata it
- * builds. (See ConfigDiffTracker class.)
- *
- */
-class MetadataLoader {
-public:
- /**
- * Fills a new metadata instance representing the chunkset of the collection 'ns' (or its
- * entirety, if not sharded) that lives on 'shard' with data either from the config server if
- * primary or a shard persisted copy if secondary. Optionally, uses an 'oldMetadata' for the
- * same 'ns'/'shard'; the contents of 'oldMetadata' can help reduce the amount of data read from
- * the config servers.
- *
- * Locking note:
- * + Must not be called in a DBLock, since this loads over the network
- *
- * OK on success.
- *
- * Failure return values:
- * Abnormal:
- * @return FailedToParse if there was an error parsing the remote config data
- * Normal:
- * @return NamespaceNotFound if the collection no longer exists
- * @return HostUnreachable if there was an error contacting the config servers
- * @return RemoteChangeDetected if the data loaded was modified by another operation
- */
- static Status makeCollectionMetadata(OperationContext* opCtx,
- ShardingCatalogClient* catalogClient,
- const std::string& ns,
- const std::string& shard,
- const CollectionMetadata* oldMetadata,
- CollectionMetadata* metadata);
-
-private:
- /**
- * Returns OK and fills in the internal state of 'metadata' with general collection
- * information, not including chunks. Gets the collection information from the config server if
- * 'isShardPrimary' is true, else from a shard persisted copy.
- *
- * If information about the collection cannot be accessed or is invalid, returns:
- * @return NamespaceNotFound if the collection no longer exists
- * @return FailedToParse if there was an error parsing the remote config data
- * @return HostUnreachable if there was an error contacting the config servers
- * @return RemoteChangeDetected if the collection doc loaded is unexpectedly different
- *
- */
- static Status _initCollection(OperationContext* opCtx,
- ShardingCatalogClient* catalogClient,
- const std::string& ns,
- CollectionMetadata* metadata,
- bool isShardPrimary);
-
- /**
- * Returns OK and fills in the chunk state of 'metadata' to portray the chunks of the collection
- * 'ns' that sit in 'shard'. If provided, uses the contents of 'oldMetadata' as a base (see
- * description in initCollection above). If 'isShardPrimary' is true, 'chunks' is persisted on
- * the shard so that secondaries receive the new chunks through replication.
- *
- * If information about the chunks cannot be accessed or is invalid, returns:
- * @return HostUnreachable if there was an error contacting the config servers
- * @return RemoteChangeDetected if the chunks loaded are unexpectedly different
- *
- * For backwards compatibility,
- * @return NamespaceNotFound if there are no chunks loaded and an epoch change is detected
- * TODO: @return FailedToParse
- */
- static Status _initChunks(OperationContext* opCtx,
- ShardingCatalogClient* catalogClient,
- const std::string& ns,
- const std::string& shard,
- const CollectionMetadata* oldMetadata,
- CollectionMetadata* metadata,
- bool isShardPrimary);
-};
-
-} // namespace mongo
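A hedged sketch of the two-step usage pattern the class comment above describes, assuming opCtx, catalogClient, nss and shardId are in scope (before this change the real call site was ShardingState's refresh path):

    // Step 1: load over the network; must not be called while holding a DBLock.
    auto remoteMetadata = stdx::make_unique<CollectionMetadata>();
    uassertStatusOK(MetadataLoader::makeCollectionMetadata(opCtx,
                                                           catalogClient,
                                                           nss.ns(),
                                                           shardId.toString(),
                                                           nullptr /* no old metadata */,
                                                           remoteMetadata.get()));
    // Step 2: only then take the exclusive collection lock and install the result.
    AutoGetCollection autoColl(opCtx, nss, MODE_IX, MODE_X);
    CollectionShardingState::get(opCtx, nss)->refreshMetadata(opCtx, std::move(remoteMetadata));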
diff --git a/src/mongo/db/s/metadata_loader_test.cpp b/src/mongo/db/s/metadata_loader_test.cpp
deleted file mode 100644
index cbf69c5e185..00000000000
--- a/src/mongo/db/s/metadata_loader_test.cpp
+++ /dev/null
@@ -1,461 +0,0 @@
-/**
- * Copyright (C) 2012 10gen Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects
- * for all of the code used other than as permitted herein. If you modify
- * file(s) with this exception, you may extend this exception to your
- * version of the file(s), but you are not obligated to do so. If you do not
- * wish to do so, delete this exception statement from your version. If you
- * delete this exception statement from all source files in the program,
- * then also delete it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/base/status.h"
-#include "mongo/client/dbclientinterface.h"
-#include "mongo/client/remote_command_targeter_mock.h"
-#include "mongo/db/commands.h"
-#include "mongo/db/dbdirectclient.h"
-#include "mongo/db/repl/replication_coordinator_mock.h"
-#include "mongo/db/s/collection_metadata.h"
-#include "mongo/db/s/metadata_loader.h"
-#include "mongo/db/server_options.h"
-#include "mongo/s/catalog/dist_lock_catalog_mock.h"
-#include "mongo/s/catalog/dist_lock_manager_mock.h"
-#include "mongo/s/catalog/sharding_catalog_client_impl.h"
-#include "mongo/s/catalog/type_chunk.h"
-#include "mongo/s/catalog/type_collection.h"
-#include "mongo/s/catalog/type_shard_collection.h"
-#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/shard_server_test_fixture.h"
-#include "mongo/stdx/memory.h"
-#include "mongo/util/scopeguard.h"
-
-namespace mongo {
-namespace {
-
-using std::string;
-using std::unique_ptr;
-using std::vector;
-using unittest::assertGet;
-
-const HostAndPort kConfigHostAndPort = HostAndPort("dummy", 123);
-const NamespaceString kNss = NamespaceString("test.foo");
-const NamespaceString kChunkMetadataNss = NamespaceString("config.chunks.test.foo");
-const ShardId kShardId = ShardId("shard0");
-
-class MetadataLoaderTest : public ShardServerTestFixture {
-protected:
- /**
- * Goes through the chunks in 'metadata' and uses a DBDirectClient to check that they are all
- * persisted. Also checks that 'totalNumChunksPersisted' is correct because 'metadata' is only
- * aware of a shard's chunks.
- */
- void checkCollectionMetadataChunksMatchPersistedChunks(
- const NamespaceString& nss,
- const CollectionMetadata& metadata,
- unsigned long long totalNumChunksPersisted) {
- try {
- DBDirectClient client(operationContext());
- ASSERT_EQUALS(client.count(nss.ns()), totalNumChunksPersisted);
-
- auto chunks = metadata.getChunks();
- for (auto& chunk : chunks) {
- Query query(BSON(ChunkType::minShardID() << chunk.first << ChunkType::max()
- << chunk.second.getMaxKey()));
- query.readPref(ReadPreference::Nearest, BSONArray());
-
- std::unique_ptr<DBClientCursor> cursor = client.query(nss.ns().c_str(), query, 1);
- ASSERT(cursor);
-
- ASSERT(cursor->more());
- BSONObj queryResult = cursor->nextSafe();
- ChunkType foundChunk = assertGet(
- ChunkType::fromShardBSON(queryResult, chunk.second.getVersion().epoch()));
- ASSERT_BSONOBJ_EQ(chunk.first, foundChunk.getMin());
- ASSERT_BSONOBJ_EQ(chunk.second.getMaxKey(), foundChunk.getMax());
- }
- } catch (const DBException& ex) {
- ASSERT(false);
- }
- }
-
- void checkCollectionsEntryExists(const NamespaceString& nss,
- const CollectionMetadata& metadata,
- bool hasLastConsistentCollectionVersion) {
- try {
- DBDirectClient client(operationContext());
- Query query(BSON(ShardCollectionType::uuid() << nss.ns()));
- query.readPref(ReadPreference::Nearest, BSONArray());
- std::unique_ptr<DBClientCursor> cursor =
- client.query(CollectionType::ConfigNS.c_str(), query, 1);
- ASSERT(cursor);
- ASSERT(cursor->more());
- BSONObj queryResult = cursor->nextSafe();
-
- ShardCollectionType shardCollectionEntry =
- assertGet(ShardCollectionType::fromBSON(queryResult));
-
- BSONObjBuilder builder;
- builder.append(ShardCollectionType::uuid(), nss.ns());
- builder.append(ShardCollectionType::ns(), nss.ns());
- builder.append(ShardCollectionType::keyPattern(), metadata.getKeyPattern());
- if (hasLastConsistentCollectionVersion) {
- metadata.getCollVersion().appendWithFieldForCommands(
- &builder, ShardCollectionType::lastConsistentCollectionVersion());
- }
-
- ASSERT_BSONOBJ_EQ(shardCollectionEntry.toBSON(), builder.obj());
- } catch (const DBException& ex) {
- ASSERT(false);
- }
- }
-
- void checkCollectionsEntryDoesNotExist(const NamespaceString& nss) {
- try {
- DBDirectClient client(operationContext());
- Query query(BSON(ShardCollectionType::uuid() << nss.ns()));
- query.readPref(ReadPreference::Nearest, BSONArray());
- std::unique_ptr<DBClientCursor> cursor =
- client.query(ShardCollectionType::ConfigNS.c_str(), query, 1);
- ASSERT(cursor);
- ASSERT(!cursor->more());
- } catch (const DBException& ex) {
- ASSERT(false);
- }
- }
-
- void expectFindOnConfigSendCollectionDefault() {
- CollectionType collType;
- collType.setNs(kNss);
- collType.setKeyPattern(BSON("a" << 1));
- collType.setUnique(false);
- collType.setUpdatedAt(Date_t::fromMillisSinceEpoch(1));
- collType.setEpoch(_maxCollVersion.epoch());
- ASSERT_OK(collType.validate());
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{collType.toBSON()});
- }
-
- void expectFindOnConfigSendChunksDefault() {
- BSONObj chunk = BSON(
- ChunkType::name("test.foo-a_MinKey")
- << ChunkType::ns(kNss.ns())
- << ChunkType::min(BSON("a" << MINKEY))
- << ChunkType::max(BSON("a" << MAXKEY))
- << ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(_maxCollVersion.toLong()))
- << ChunkType::DEPRECATED_epoch(_maxCollVersion.epoch())
- << ChunkType::shard(kShardId.toString()));
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{chunk});
- }
-
- /**
- * Helper to make a number of chunks that can then be manipulated in various ways in the tests.
- */
- std::vector<BSONObj> makeFourChunks() {
- std::vector<BSONObj> chunks;
- std::string names[] = {
- "test.foo-a_MinKey", "test.foo-a_10", "test.foo-a_50", "test.foo-a_100"};
- BSONObj mins[] = {BSON("a" << MINKEY), BSON("a" << 10), BSON("a" << 50), BSON("a" << 100)};
- BSONObj maxs[] = {BSON("a" << 10), BSON("a" << 50), BSON("a" << 100), BSON("a" << MAXKEY)};
- for (int i = 0; i < 4; ++i) {
- _maxCollVersion.incMajor();
- BSONObj chunk = BSON(ChunkType::name(names[i])
- << ChunkType::ns(kNss.ns())
- << ChunkType::min(mins[i])
- << ChunkType::max(maxs[i])
- << ChunkType::DEPRECATED_lastmod(
- Date_t::fromMillisSinceEpoch(_maxCollVersion.toLong()))
- << ChunkType::DEPRECATED_epoch(_maxCollVersion.epoch())
- << ChunkType::shard(kShardId.toString()));
- chunks.push_back(std::move(chunk));
- }
- return chunks;
- }
-
- ChunkVersion getMaxCollVersion() const {
- return _maxCollVersion;
- }
-
- ChunkVersion getMaxShardVersion() const {
- return _maxCollVersion;
- }
-
-private:
- ChunkVersion _maxCollVersion{1, 0, OID::gen()};
-};
-
-// TODO: Test config server down
-// TODO: Test read of chunks with new epoch
-// TODO: Test that you can properly load config using format with deprecated fields?
-
-TEST_F(MetadataLoaderTest, DroppedColl) {
- CollectionType collType;
- collType.setNs(kNss);
- collType.setKeyPattern(BSON("a" << 1));
- collType.setUpdatedAt(Date_t());
- collType.setEpoch(OID());
- collType.setDropped(true);
- ASSERT_OK(collType.validate());
-
- // The config.collections entry indicates that the collection was dropped, failing the refresh.
- auto future = launchAsync([this] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
- auto opCtx = cc().makeOperationContext();
-
- CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
- catalogClient(),
- kNss.ns(),
- kShardId.toString(),
- NULL, /* no old metadata */
- &metadata);
- ASSERT_EQUALS(status.code(), ErrorCodes::NamespaceNotFound);
- checkCollectionsEntryDoesNotExist(kNss);
- });
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{collType.toBSON()});
- future.timed_get(kFutureTimeout);
-}
-
-TEST_F(MetadataLoaderTest, EmptyColl) {
- // Fail due to no config.collections entry found.
- auto future = launchAsync([this] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
- auto opCtx = cc().makeOperationContext();
-
- CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
- catalogClient(),
- kNss.ns(),
- kShardId.toString(),
- NULL, /* no old metadata */
- &metadata);
- ASSERT_EQUALS(status.code(), ErrorCodes::NamespaceNotFound);
- checkCollectionsEntryDoesNotExist(kNss);
- });
- expectFindOnConfigSendErrorCode(ErrorCodes::NamespaceNotFound);
- future.timed_get(kFutureTimeout);
-}
-
-TEST_F(MetadataLoaderTest, BadColl) {
- BSONObj badCollToSend = BSON(CollectionType::fullNs(kNss.ns()));
-
- // Providing an invalid config.collections document should fail the refresh.
- auto future = launchAsync([this] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
- auto opCtx = cc().makeOperationContext();
-
- CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
- catalogClient(),
- kNss.ns(),
- kShardId.toString(),
- NULL, /* no old metadata */
- &metadata);
- ASSERT_EQUALS(status.code(), ErrorCodes::NoSuchKey);
- checkCollectionsEntryDoesNotExist(kNss);
- });
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{badCollToSend});
- future.timed_get(kFutureTimeout);
-}
-
-TEST_F(MetadataLoaderTest, BadChunk) {
- ChunkType chunkInfo;
- chunkInfo.setNS(kNss.ns());
- chunkInfo.setVersion(ChunkVersion(1, 0, getMaxCollVersion().epoch()));
- ASSERT(!chunkInfo.validate().isOK());
-
- // Providing an invalid config.chunks document should fail the refresh.
- auto future = launchAsync([this] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
- auto opCtx = cc().makeOperationContext();
-
- CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
- catalogClient(),
- kNss.ns(),
- kShardId.toString(),
- NULL, /* no old metadata */
- &metadata);
- ASSERT_EQUALS(status.code(), ErrorCodes::NoSuchKey);
- checkCollectionsEntryExists(kNss, metadata, false);
- });
-
- expectFindOnConfigSendCollectionDefault();
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{chunkInfo.toConfigBSON()});
- future.timed_get(kFutureTimeout);
-}
-
-TEST_F(MetadataLoaderTest, NoChunksIsDropped) {
- // Finding no chunks in config.chunks indicates that the collection was dropped, even if an
- // entry was previously found in config.collections indicating that it wasn't dropped.
- auto future = launchAsync([this] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
- auto opCtx = cc().makeOperationContext();
-
- CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
- catalogClient(),
- kNss.ns(),
- kShardId.toString(),
- NULL, /* no old metadata */
- &metadata);
- // This is interpreted as a dropped ns, since we drop the chunks first
- ASSERT_EQUALS(status.code(), ErrorCodes::NamespaceNotFound);
-
- checkCollectionMetadataChunksMatchPersistedChunks(kChunkMetadataNss, metadata, 0);
- });
- expectFindOnConfigSendCollectionDefault();
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{});
-
- future.timed_get(kFutureTimeout);
-}
-
-TEST_F(MetadataLoaderTest, CheckNumChunk) {
- // Need a chunk on another shard, otherwise the chunks are invalid in general and we
- // can't load metadata
- ChunkType chunkType;
- chunkType.setNS(kNss.ns());
- chunkType.setShard(ShardId("altshard"));
- chunkType.setMin(BSON("a" << MINKEY));
- chunkType.setMax(BSON("a" << MAXKEY));
- chunkType.setVersion(ChunkVersion(1, 0, getMaxCollVersion().epoch()));
- ASSERT(chunkType.validate().isOK());
-
- // Check that finding no new chunks for the shard works smoothly.
- auto future = launchAsync([this] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
- auto opCtx = cc().makeOperationContext();
-
- CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
- catalogClient(),
- kNss.ns(),
- kShardId.toString(),
- NULL, /* no old metadata */
- &metadata);
- ASSERT_OK(status);
- ASSERT_EQUALS(0U, metadata.getNumChunks());
- ASSERT_EQUALS(1, metadata.getCollVersion().majorVersion());
- ASSERT_EQUALS(0, metadata.getShardVersion().majorVersion());
-
- checkCollectionMetadataChunksMatchPersistedChunks(kChunkMetadataNss, metadata, 1);
- checkCollectionsEntryExists(kNss, metadata, true);
- });
-
- expectFindOnConfigSendCollectionDefault();
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{chunkType.toConfigBSON()});
-
- future.timed_get(kFutureTimeout);
-}
-
-TEST_F(MetadataLoaderTest, SingleChunkCheckNumChunk) {
- // Check that loading a single chunk for the shard works successfully.
- auto future = launchAsync([this] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
- auto opCtx = cc().makeOperationContext();
-
- CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
- catalogClient(),
- kNss.ns(),
- kShardId.toString(),
- NULL, /* no old metadata */
- &metadata);
- ASSERT_OK(status);
- ASSERT_EQUALS(1U, metadata.getNumChunks());
- ASSERT_EQUALS(getMaxCollVersion(), metadata.getCollVersion());
- ASSERT_EQUALS(getMaxCollVersion(), metadata.getShardVersion());
-
- checkCollectionMetadataChunksMatchPersistedChunks(kChunkMetadataNss, metadata, 1);
- checkCollectionsEntryExists(kNss, metadata, true);
- });
-
- expectFindOnConfigSendCollectionDefault();
- expectFindOnConfigSendChunksDefault();
-
- future.timed_get(kFutureTimeout);
-}
-
-TEST_F(MetadataLoaderTest, SeveralChunksCheckNumChunks) {
- // Check that loading several chunks for the shard works successfully.
- auto future = launchAsync([this] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
- auto opCtx = cc().makeOperationContext();
-
- CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
- catalogClient(),
- kNss.ns(),
- kShardId.toString(),
- NULL, /* no old metadata */
- &metadata);
- ASSERT_OK(status);
- ASSERT_EQUALS(4U, metadata.getNumChunks());
- ASSERT_EQUALS(getMaxCollVersion(), metadata.getCollVersion());
- ASSERT_EQUALS(getMaxCollVersion(), metadata.getShardVersion());
-
- checkCollectionMetadataChunksMatchPersistedChunks(kChunkMetadataNss, metadata, 4);
- checkCollectionsEntryExists(kNss, metadata, true);
- });
-
- expectFindOnConfigSendCollectionDefault();
- expectFindOnConfigSendBSONObjVector(makeFourChunks());
-
- future.timed_get(kFutureTimeout);
-}
-
-TEST_F(MetadataLoaderTest, CollectionMetadataSetUp) {
- // Check that the CollectionMetadata is set up correctly.
- auto future = launchAsync([this] {
- ON_BLOCK_EXIT([&] { Client::destroy(); });
- Client::initThreadIfNotAlready("Test");
- auto opCtx = cc().makeOperationContext();
-
- CollectionMetadata metadata;
- auto status = MetadataLoader::makeCollectionMetadata(opCtx.get(),
- catalogClient(),
- kNss.ns(),
- kShardId.toString(),
- NULL, /* no old metadata */
- &metadata);
- ASSERT_BSONOBJ_EQ(metadata.getKeyPattern(), BSON("a" << 1));
- ASSERT_TRUE(getMaxCollVersion().equals(metadata.getCollVersion()));
- ASSERT_TRUE(getMaxShardVersion().equals(metadata.getShardVersion()));
-
- checkCollectionMetadataChunksMatchPersistedChunks(kChunkMetadataNss, metadata, 1);
- checkCollectionsEntryExists(kNss, metadata, true);
- });
-
- expectFindOnConfigSendCollectionDefault();
- expectFindOnConfigSendChunksDefault();
-
- future.timed_get(kFutureTimeout);
-}
-
-} // namespace
-} // namespace mongo
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index 77c83bb3626..faa062e9476 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -257,8 +257,6 @@ RangeMap MetadataManager::getCopyOfReceivingChunks() {
}
void MetadataManager::_setActiveMetadata_inlock(std::unique_ptr<CollectionMetadata> newMetadata) {
- invariant(!newMetadata || newMetadata->isValid());
-
if (_activeMetadataTracker->usageCounter > 0) {
_metadataInUse.push_front(std::move(_activeMetadataTracker));
}
@@ -300,11 +298,11 @@ ScopedCollectionMetadata::~ScopedCollectionMetadata() {
_decrementUsageCounter();
}
-CollectionMetadata* ScopedCollectionMetadata::operator->() {
+CollectionMetadata* ScopedCollectionMetadata::operator->() const {
return _tracker->metadata.get();
}
-CollectionMetadata* ScopedCollectionMetadata::getMetadata() {
+CollectionMetadata* ScopedCollectionMetadata::getMetadata() const {
return _tracker->metadata.get();
}
diff --git a/src/mongo/db/s/metadata_manager.h b/src/mongo/db/s/metadata_manager.h
index 44ab0dbbf71..5c2e7f2e64a 100644
--- a/src/mongo/db/s/metadata_manager.h
+++ b/src/mongo/db/s/metadata_manager.h
@@ -243,8 +243,8 @@ public:
/**
* Dereferencing the ScopedCollectionMetadata will dereference the internal CollectionMetadata.
*/
- CollectionMetadata* operator->();
- CollectionMetadata* getMetadata();
+ CollectionMetadata* operator->() const;
+ CollectionMetadata* getMetadata() const;
/**
* True if the ScopedCollectionMetadata stores a metadata (is not empty)
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 1ca00820b66..d4416cbfbea 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -42,14 +42,12 @@
#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
-#include "mongo/util/scopeguard.h"
namespace mongo {
+namespace {
using unittest::assertGet;
-namespace {
-
class MetadataManagerTest : public ServiceContextMongoDTest {
protected:
void setUp() override {
@@ -59,11 +57,42 @@ protected:
}
static std::unique_ptr<CollectionMetadata> makeEmptyMetadata() {
- return stdx::make_unique<CollectionMetadata>(BSON("key" << 1),
- ChunkVersion(1, 0, OID::gen()));
+ const OID epoch = OID::gen();
+
+ return stdx::make_unique<CollectionMetadata>(
+ BSON("key" << 1),
+ ChunkVersion(1, 0, epoch),
+ ChunkVersion(0, 0, epoch),
+ SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>());
}
-};
+ /**
+ * Returns a new metadata instance based on the current state by adding a chunk with the
+ * specified bounds and version. The chunk's version must be higher than that of all chunks
+ * which are in the input metadata.
+ *
+ * It will fassert if the chunk bounds are incorrect or overlap an existing chunk or if the
+ * chunk version is lower than the maximum one.
+ */
+ static std::unique_ptr<CollectionMetadata> cloneMetadataPlusChunk(
+ const CollectionMetadata& metadata,
+ const BSONObj& minKey,
+ const BSONObj& maxKey,
+ const ChunkVersion& chunkVersion) {
+ invariant(chunkVersion.epoch() == metadata.getShardVersion().epoch());
+ invariant(chunkVersion.isSet());
+ invariant(chunkVersion > metadata.getCollVersion());
+ invariant(minKey.woCompare(maxKey) < 0);
+ invariant(!rangeMapOverlaps(metadata.getChunks(), minKey, maxKey));
+
+ auto chunksMap = metadata.getChunks();
+ chunksMap.insert(
+ std::make_pair(minKey.getOwned(), CachedChunkInfo(maxKey.getOwned(), chunkVersion)));
+
+ return stdx::make_unique<CollectionMetadata>(
+ metadata.getKeyPattern(), chunkVersion, chunkVersion, std::move(chunksMap));
+ }
+};
TEST_F(MetadataManagerTest, SetAndGetActiveMetadata) {
MetadataManager manager(getServiceContext(), NamespaceString("TestDb", "CollDB"));
@@ -85,8 +114,8 @@ TEST_F(MetadataManagerTest, ResetActiveMetadata) {
ChunkVersion newVersion = scopedMetadata1->getCollVersion();
newVersion.incMajor();
- std::unique_ptr<CollectionMetadata> cm2 =
- scopedMetadata1->clonePlusChunk(BSON("key" << 0), BSON("key" << 10), newVersion);
+ std::unique_ptr<CollectionMetadata> cm2 = cloneMetadataPlusChunk(
+ *scopedMetadata1.getMetadata(), BSON("key" << 0), BSON("key" << 10), newVersion);
auto cm2Ptr = cm2.get();
manager.refreshActiveMetadata(std::move(cm2));
@@ -274,8 +303,8 @@ TEST_F(MetadataManagerTest, RefreshAfterSuccessfulMigrationSinglePending) {
ChunkVersion version = manager.getActiveMetadata()->getCollVersion();
version.incMajor();
- manager.refreshActiveMetadata(
- manager.getActiveMetadata()->clonePlusChunk(cr1.getMin(), cr1.getMax(), version));
+ manager.refreshActiveMetadata(cloneMetadataPlusChunk(
+ *manager.getActiveMetadata().getMetadata(), cr1.getMin(), cr1.getMax(), version));
ASSERT_EQ(manager.getCopyOfReceivingChunks().size(), 0UL);
ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 1UL);
}
@@ -297,8 +326,8 @@ TEST_F(MetadataManagerTest, RefreshAfterSuccessfulMigrationMultiplePending) {
ChunkVersion version = manager.getActiveMetadata()->getCollVersion();
version.incMajor();
- manager.refreshActiveMetadata(
- manager.getActiveMetadata()->clonePlusChunk(cr1.getMin(), cr1.getMax(), version));
+ manager.refreshActiveMetadata(cloneMetadataPlusChunk(
+ *manager.getActiveMetadata().getMetadata(), cr1.getMin(), cr1.getMax(), version));
ASSERT_EQ(manager.getCopyOfReceivingChunks().size(), 1UL);
ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 1UL);
}
@@ -307,8 +336,8 @@ TEST_F(MetadataManagerTest, RefreshAfterSuccessfulMigrationMultiplePending) {
ChunkVersion version = manager.getActiveMetadata()->getCollVersion();
version.incMajor();
- manager.refreshActiveMetadata(
- manager.getActiveMetadata()->clonePlusChunk(cr2.getMin(), cr2.getMax(), version));
+ manager.refreshActiveMetadata(cloneMetadataPlusChunk(
+ *manager.getActiveMetadata().getMetadata(), cr2.getMin(), cr2.getMax(), version));
ASSERT_EQ(manager.getCopyOfReceivingChunks().size(), 0UL);
ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 2UL);
}
@@ -330,8 +359,8 @@ TEST_F(MetadataManagerTest, RefreshAfterNotYetCompletedMigrationMultiplePending)
ChunkVersion version = manager.getActiveMetadata()->getCollVersion();
version.incMajor();
- manager.refreshActiveMetadata(
- manager.getActiveMetadata()->clonePlusChunk(BSON("key" << 50), BSON("key" << 60), version));
+ manager.refreshActiveMetadata(cloneMetadataPlusChunk(
+ *manager.getActiveMetadata().getMetadata(), BSON("key" << 50), BSON("key" << 60), version));
ASSERT_EQ(manager.getCopyOfReceivingChunks().size(), 2UL);
ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 1UL);
}
@@ -368,8 +397,8 @@ TEST_F(MetadataManagerTest, RefreshMetadataAfterDropAndRecreate) {
ChunkVersion newVersion = metadata->getCollVersion();
newVersion.incMajor();
- manager.refreshActiveMetadata(
- metadata->clonePlusChunk(BSON("key" << 0), BSON("key" << 10), newVersion));
+ manager.refreshActiveMetadata(cloneMetadataPlusChunk(
+ *metadata.getMetadata(), BSON("key" << 0), BSON("key" << 10), newVersion));
}
// Now, pretend that the collection was dropped and recreated
@@ -377,8 +406,8 @@ TEST_F(MetadataManagerTest, RefreshMetadataAfterDropAndRecreate) {
ChunkVersion newVersion = recreateMetadata->getCollVersion();
newVersion.incMajor();
- manager.refreshActiveMetadata(
- recreateMetadata->clonePlusChunk(BSON("key" << 20), BSON("key" << 30), newVersion));
+ manager.refreshActiveMetadata(cloneMetadataPlusChunk(
+ *recreateMetadata, BSON("key" << 20), BSON("key" << 30), newVersion));
ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 1UL);
const auto chunkEntry = manager.getActiveMetadata()->getChunks().begin();
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index fdb92230763..374b859ec15 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -137,9 +137,6 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx,
// With nonzero shard version, we must have a coll version >= our shard version
invariant(collectionVersion >= shardVersion);
- // With nonzero shard version, we must have a shard key
- invariant(!_collectionMetadata->getKeyPattern().isEmpty());
-
ChunkType chunkToMove;
chunkToMove.setMin(_args.getMinKey());
chunkToMove.setMax(_args.getMaxKey());
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index 1320ee15465..2646af582dd 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -47,7 +47,6 @@
#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/collection_sharding_state.h"
-#include "mongo/db/s/metadata_loader.h"
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/s/sharded_connection_info.h"
#include "mongo/db/s/sharding_initialization_mongod.h"
@@ -59,6 +58,7 @@
#include "mongo/rpc/metadata/metadata_hook.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
+#include "mongo/s/catalog_cache.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/client/sharding_network_connection_hook.h"
@@ -85,14 +85,6 @@ namespace {
const auto getShardingState = ServiceContext::declareDecoration<ShardingState>();
-// Max number of concurrent config server refresh threads
-// TODO: temporarily decreased from 3 to 1 to serialize refresh writes. Alternate per collection
-// serialization must be implemented: SERVER-28118
-const int kMaxConfigServerRefreshThreads = 1;
-
-// Maximum number of times to try to refresh the collection metadata if conflicts are occurring
-const int kMaxNumMetadataRefreshAttempts = 3;
-
/**
* Updates the config server field of the shardIdentity document with the given connection string
* if setName is equal to the config server replica set name.
@@ -148,7 +140,6 @@ MONGO_INITIALIZER_WITH_PREREQUISITES(MongoDLocalShardingInfo, ("SetGlobalEnviron
ShardingState::ShardingState()
: _initializationState(static_cast<uint32_t>(InitializationState::kNew)),
_initializationStatus(Status(ErrorCodes::InternalError, "Uninitialized value")),
- _configServerTickets(kMaxConfigServerRefreshThreads),
_globalInit(&initializeGlobalShardingStateForMongod),
_scheduleWorkFn([](NamespaceString nss) {}) {}
@@ -289,30 +280,24 @@ Status ShardingState::onStaleShardVersion(OperationContext* opCtx,
}
}
- auto refreshStatusAndVersion =
- _refreshMetadata(opCtx, nss, (currentMetadata ? currentMetadata.getMetadata() : nullptr));
- return refreshStatusAndVersion.getStatus();
+ try {
+ _refreshMetadata(opCtx, nss);
+ return Status::OK();
+ } catch (const DBException& ex) {
+ log() << "Failed to refresh metadata for collection" << nss << causedBy(redact(ex));
+ return ex.toStatus();
+ }
}
Status ShardingState::refreshMetadataNow(OperationContext* opCtx,
const NamespaceString& nss,
ChunkVersion* latestShardVersion) {
- ScopedCollectionMetadata currentMetadata;
-
- {
- AutoGetCollection autoColl(opCtx, nss, MODE_IS);
-
- currentMetadata = CollectionShardingState::get(opCtx, nss)->getMetadata();
- }
-
- auto refreshLatestShardVersionStatus =
- _refreshMetadata(opCtx, nss, currentMetadata.getMetadata());
- if (!refreshLatestShardVersionStatus.isOK()) {
- return refreshLatestShardVersionStatus.getStatus();
+ try {
+ *latestShardVersion = _refreshMetadata(opCtx, nss);
+ return Status::OK();
+ } catch (const DBException& ex) {
+ return ex.toStatus();
}
-
- *latestShardVersion = refreshLatestShardVersionStatus.getValue();
- return Status::OK();
}
// NOTE: This method can be called inside a database lock so it should never take any database
@@ -495,84 +480,58 @@ StatusWith<bool> ShardingState::initializeShardingAwarenessIfNeeded(OperationCon
}
}
-StatusWith<ChunkVersion> ShardingState::_refreshMetadata(
- OperationContext* opCtx,
- const NamespaceString& nss,
- const CollectionMetadata* metadataForDiff) {
+ChunkVersion ShardingState::_refreshMetadata(OperationContext* opCtx, const NamespaceString& nss) {
invariant(!opCtx->lockState()->isLocked());
-
invariant(enabled());
- // We can't reload if a shard name has not yet been set
- {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
-
- if (_shardName.empty()) {
- string errMsg = str::stream() << "cannot refresh metadata for " << nss.ns()
- << " before shard name has been set";
- warning() << errMsg;
- return {ErrorCodes::NotYetInitialized, errMsg};
- }
- }
+ const ShardId shardId = getShardName();
- Status status = {ErrorCodes::InternalError, "metadata refresh not performed"};
- Timer t;
- int numAttempts = 0;
- std::unique_ptr<CollectionMetadata> remoteMetadata;
+ uassert(ErrorCodes::NotYetInitialized,
+ str::stream() << "Cannot refresh metadata for " << nss.ns()
+ << " before shard name has been set",
+ shardId.isValid());
- do {
- // The _configServerTickets serializes this process such that only a small number of threads
- // can try to refresh at the same time in order to avoid overloading the config server.
- _configServerTickets.waitForTicket();
- TicketHolderReleaser needTicketFrom(&_configServerTickets);
+ auto newCollectionMetadata = [&]() -> std::unique_ptr<CollectionMetadata> {
+ auto const catalogCache = Grid::get(opCtx)->catalogCache();
+ catalogCache->invalidateShardedCollection(nss);
- if (status == ErrorCodes::RemoteChangeDetected) {
- metadataForDiff = nullptr;
- log() << "Refresh failed and will be retried as full reload " << status;
+ const auto routingInfo =
+ uassertStatusOK(catalogCache->getCollectionRoutingInfo(opCtx, nss));
+ const auto cm = routingInfo.cm();
+ if (!cm) {
+ return nullptr;
}
- log() << "MetadataLoader loading chunks for " << nss.ns() << " based on: "
- << (metadataForDiff ? metadataForDiff->getCollVersion().toString() : "(empty)");
+ RangeMap shardChunksMap =
+ SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>();
- remoteMetadata = stdx::make_unique<CollectionMetadata>();
- status = MetadataLoader::makeCollectionMetadata(opCtx,
- grid.catalogClient(opCtx),
- nss.ns(),
- getShardName(),
- metadataForDiff,
- remoteMetadata.get());
- } while (status == ErrorCodes::RemoteChangeDetected &&
- ++numAttempts < kMaxNumMetadataRefreshAttempts);
+ for (const auto& chunkMapEntry : cm->chunkMap()) {
+ const auto& chunk = chunkMapEntry.second;
- if (!status.isOK() && status != ErrorCodes::NamespaceNotFound) {
- warning() << "MetadataLoader failed after " << t.millis() << " ms"
- << causedBy(redact(status));
+ if (chunk->getShardId() != shardId)
+ continue;
- return status;
- }
+ shardChunksMap.emplace(chunk->getMin(),
+ CachedChunkInfo(chunk->getMax(), chunk->getLastmod()));
+ }
+
+ return stdx::make_unique<CollectionMetadata>(cm->getShardKeyPattern().toBSON(),
+ cm->getVersion(),
+ cm->getVersion(shardId),
+ std::move(shardChunksMap));
+ }();
// Exclusive collection lock needed since we're now changing the metadata
AutoGetCollection autoColl(opCtx, nss, MODE_IX, MODE_X);
auto css = CollectionShardingState::get(opCtx, nss);
+ css->refreshMetadata(opCtx, std::move(newCollectionMetadata));
- if (!status.isOK()) {
- invariant(status == ErrorCodes::NamespaceNotFound);
- css->refreshMetadata(opCtx, nullptr);
-
- log() << "MetadataLoader took " << t.millis() << " ms and did not find the namespace";
-
+ if (!css->getMetadata()) {
return ChunkVersion::UNSHARDED();
}
- css->refreshMetadata(opCtx, std::move(remoteMetadata));
-
- auto metadata = css->getMetadata();
-
- log() << "MetadataLoader took " << t.millis() << " ms and found version "
- << metadata->getCollVersion();
-
- return metadata->getShardVersion();
+ return css->getMetadata()->getShardVersion();
}
StatusWith<ScopedRegisterDonateChunk> ShardingState::registerDonateChunk(
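For reference, a minimal hedged sketch of the four-argument CollectionMetadata construction the refresh above relies on, with made-up shard key and versions (illustration only):

    const OID epoch = OID::gen();
    RangeMap chunks = SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<CachedChunkInfo>();
    // One chunk [{x: 0}, {x: 100}) owned by this shard.
    chunks.emplace(BSON("x" << 0), CachedChunkInfo(BSON("x" << 100), ChunkVersion(1, 2, epoch)));
    auto metadata = stdx::make_unique<CollectionMetadata>(BSON("x" << 1),            // shard key pattern
                                                          ChunkVersion(1, 3, epoch), // collection version
                                                          ChunkVersion(1, 2, epoch), // this shard's version
                                                          std::move(chunks));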
diff --git a/src/mongo/db/s/sharding_state.h b/src/mongo/db/s/sharding_state.h
index 96b650ad803..e5a74e081a9 100644
--- a/src/mongo/db/s/sharding_state.h
+++ b/src/mongo/db/s/sharding_state.h
@@ -43,8 +43,6 @@
#include "mongo/stdx/memory.h"
#include "mongo/stdx/mutex.h"
#include "mongo/stdx/unordered_map.h"
-#include "mongo/util/concurrency/ticketholder.h"
-#include "mongo/util/time_support.h"
namespace mongo {
@@ -303,13 +301,8 @@ private:
* Refreshes collection metadata by asking the config server for the latest information and
* returns the latest version at the time the reload was done. This call does network I/O and
* should never be called with a lock.
- *
- * The metadataForDiff argument indicates that the specified metadata should be used as a base
- * from which to only load the differences. If nullptr is passed, a full reload will be done.
*/
- StatusWith<ChunkVersion> _refreshMetadata(OperationContext* opCtx,
- const NamespaceString& nss,
- const CollectionMetadata* metadataForDiff);
+ ChunkVersion _refreshMetadata(OperationContext* opCtx, const NamespaceString& nss);
// Initializes a TaskExecutor for cleaning up orphaned ranges
void _initializeRangeDeleterTaskExecutor();
@@ -335,9 +328,6 @@ private:
// Sets the shard name for this host (comes through setShardVersion)
std::string _shardName;
- // Protects from hitting the config server from too many threads at once
- TicketHolder _configServerTickets;
-
// Cache of collection metadata on this shard. It is not safe to look-up values from this map
// without holding some form of collection lock. It is only safe to add/remove values when
// holding X lock on the respective namespace.
diff --git a/src/mongo/db/s/sharding_state_test.cpp b/src/mongo/db/s/sharding_state_test.cpp
index aa63085b60a..6e233262e2a 100644
--- a/src/mongo/db/s/sharding_state_test.cpp
+++ b/src/mongo/db/s/sharding_state_test.cpp
@@ -33,7 +33,6 @@
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/query/query_request.h"
#include "mongo/db/repl/replication_coordinator_mock.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/type_shard_identity.h"
@@ -42,8 +41,6 @@
#include "mongo/db/storage/storage_options.h"
#include "mongo/s/catalog/dist_lock_manager_mock.h"
#include "mongo/s/catalog/sharding_catalog_client_impl.h"
-#include "mongo/s/catalog/type_chunk.h"
-#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/sharding_mongod_test_fixture.h"
@@ -63,31 +60,6 @@ public:
return _shardName.toString();
}
- void setupCollectionMetadata(const NamespaceString& nss,
- const OID& epoch,
- const std::vector<BSONObj>& initChunks) {
- auto future = launchAsync([this, &nss] {
- ChunkVersion latestShardVersion;
- Client::initThreadIfNotAlready();
- ASSERT_OK(
- shardingState()->refreshMetadataNow(operationContext(), nss, &latestShardVersion));
- });
-
- ChunkVersion initVersion(1, 0, epoch);
- onFindCommand([&nss, &initVersion](const RemoteCommandRequest&) {
- CollectionType coll;
- coll.setNs(nss);
- coll.setUpdatedAt(Date_t());
- coll.setEpoch(initVersion.epoch());
- coll.setKeyPattern(BSON("x" << 1));
- return std::vector<BSONObj>{coll.toBSON()};
- });
-
- onFindCommand([&initChunks](const RemoteCommandRequest&) { return initChunks; });
-
- future.timed_get(kFutureTimeout);
- }
-
protected:
// Used to write to set up local collections before exercising server logic.
std::unique_ptr<DBDirectClient> _dbDirectClient;
@@ -522,293 +494,5 @@ TEST_F(ShardingStateTest,
ASSERT_FALSE(swShardingInitialized.getValue());
}
-TEST_F(ShardingStateTest, MetadataRefreshShouldUseDiffQuery) {
- ShardIdentityType shardIdentity;
- shardIdentity.setConfigsvrConnString(
- ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
- shardIdentity.setShardName(shardName());
- shardIdentity.setClusterId(OID::gen());
-
- ASSERT_OK(shardingState()->initializeFromShardIdentity(operationContext(), shardIdentity));
-
- const NamespaceString nss("test.user");
- const OID initEpoch(OID::gen());
-
- {
- ChunkType chunk;
- chunk.setNS(nss.ns());
- chunk.setMin(BSON("x" << 0));
- chunk.setMax(BSON("x" << 10));
- chunk.setShard(ShardId(shardName()));
- chunk.setVersion(ChunkVersion(2, 0, initEpoch));
- setupCollectionMetadata(nss, initEpoch, std::vector<BSONObj>{chunk.toConfigBSON()});
- }
-
- const ChunkVersion newVersion(3, 0, initEpoch);
- auto future = launchAsync([&] {
- Client::initThreadIfNotAlready();
- ASSERT_OK(shardingState()->onStaleShardVersion(operationContext(), nss, newVersion));
- });
-
- onFindCommand([&nss, &initEpoch](const RemoteCommandRequest&) {
- CollectionType coll;
- coll.setNs(nss);
- coll.setUpdatedAt(Date_t());
- coll.setEpoch(initEpoch);
- coll.setKeyPattern(BSON("x" << 1));
- return std::vector<BSONObj>{coll.toBSON()};
- });
-
- onFindCommand([this, &nss, &initEpoch](const RemoteCommandRequest& request) {
- auto diffQueryStatus = QueryRequest::makeFromFindCommand(nss, request.cmdObj, false);
- ASSERT_OK(diffQueryStatus.getStatus());
-
- auto diffQuery = std::move(diffQueryStatus.getValue());
- ASSERT_BSONOBJ_EQ(BSON("ns" << nss.ns() << "lastmod" << BSON("$gte" << Timestamp(2, 0))),
- diffQuery->getFilter());
-
- ChunkType chunk;
- chunk.setNS(nss.ns());
- chunk.setMin(BSON("x" << 10));
- chunk.setMax(BSON("x" << 20));
- chunk.setShard(ShardId(shardName()));
- chunk.setVersion(ChunkVersion(3, 10, initEpoch));
- return std::vector<BSONObj>{chunk.toConfigBSON()};
- });
-
- future.timed_get(kFutureTimeout);
-}
-
-/**
- * Test where the epoch changed right before the chunk diff query.
- */
-TEST_F(ShardingStateTest, MetadataRefreshShouldUseFullQueryOnEpochMismatch) {
- ShardIdentityType shardIdentity;
- shardIdentity.setConfigsvrConnString(
- ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
- shardIdentity.setShardName(shardName());
- shardIdentity.setClusterId(OID::gen());
-
- ASSERT_OK(shardingState()->initializeFromShardIdentity(operationContext(), shardIdentity));
-
- const NamespaceString nss("test.user");
- const OID initEpoch(OID::gen());
-
- {
- ChunkType chunk;
- chunk.setNS(nss.ns());
- chunk.setMin(BSON("x" << 0));
- chunk.setMax(BSON("x" << 10));
- chunk.setShard(ShardId(shardName()));
- chunk.setVersion(ChunkVersion(2, 0, initEpoch));
- setupCollectionMetadata(nss, initEpoch, std::vector<BSONObj>{chunk.toConfigBSON()});
- }
-
-
- auto future = launchAsync([&] {
- Client::initThreadIfNotAlready();
- ASSERT_OK(shardingState()->onStaleShardVersion(
- operationContext(), nss, ChunkVersion(3, 0, initEpoch)));
- });
-
- onFindCommand([&nss, &initEpoch](const RemoteCommandRequest&) {
- CollectionType coll;
- coll.setNs(nss);
- coll.setUpdatedAt(Date_t());
- coll.setEpoch(initEpoch);
- coll.setKeyPattern(BSON("x" << 1));
- return std::vector<BSONObj>{coll.toBSON()};
- });
-
- // Now when the diff query is performed, it will get chunks with a different epoch.
- const ChunkVersion newVersion(3, 0, OID::gen());
- onFindCommand([this, &nss, &newVersion](const RemoteCommandRequest& request) {
- auto diffQueryStatus = QueryRequest::makeFromFindCommand(nss, request.cmdObj, false);
- ASSERT_OK(diffQueryStatus.getStatus());
-
- auto diffQuery = std::move(diffQueryStatus.getValue());
- ASSERT_BSONOBJ_EQ(BSON("ns" << nss.ns() << "lastmod" << BSON("$gte" << Timestamp(2, 0))),
- diffQuery->getFilter());
-
- ChunkType chunk;
- chunk.setNS(nss.ns());
- chunk.setMin(BSON("x" << 10));
- chunk.setMax(BSON("x" << 20));
- chunk.setShard(ShardId(shardName()));
- chunk.setVersion(ChunkVersion(3, 10, newVersion.epoch()));
- return std::vector<BSONObj>{chunk.toConfigBSON()};
- });
-
- // Retry the refresh again. Now doing a full reload.
-
- onFindCommand([&nss, &newVersion](const RemoteCommandRequest&) {
- CollectionType coll;
- coll.setNs(nss);
- coll.setUpdatedAt(Date_t());
- coll.setEpoch(newVersion.epoch());
- coll.setKeyPattern(BSON("x" << 1));
- return std::vector<BSONObj>{coll.toBSON()};
- });
-
- onFindCommand([this, &nss, &newVersion](const RemoteCommandRequest& request) {
- auto diffQueryStatus = QueryRequest::makeFromFindCommand(nss, request.cmdObj, false);
- ASSERT_OK(diffQueryStatus.getStatus());
-
- auto diffQuery = std::move(diffQueryStatus.getValue());
- ASSERT_BSONOBJ_EQ(BSON("ns" << nss.ns() << "lastmod" << BSON("$gte" << Timestamp(0, 0))),
- diffQuery->getFilter());
-
- ChunkType chunk;
- chunk.setNS(nss.ns());
- chunk.setMin(BSON("x" << 10));
- chunk.setMax(BSON("x" << 20));
- chunk.setShard(ShardId(shardName()));
- chunk.setVersion(ChunkVersion(3, 10, newVersion.epoch()));
- return std::vector<BSONObj>{chunk.toConfigBSON()};
- });
-
- future.timed_get(kFutureTimeout);
-}
-
-TEST_F(ShardingStateTest, FullMetadataOnEpochMismatchShouldStopAfterMaxRetries) {
- ShardIdentityType shardIdentity;
- shardIdentity.setConfigsvrConnString(
- ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
- shardIdentity.setShardName(shardName());
- shardIdentity.setClusterId(OID::gen());
-
- ASSERT_OK(shardingState()->initializeFromShardIdentity(operationContext(), shardIdentity));
-
- const NamespaceString nss("test.user");
- const OID initEpoch(OID::gen());
-
- {
- ChunkType chunk;
- chunk.setNS(nss.ns());
- chunk.setMin(BSON("x" << 0));
- chunk.setMax(BSON("x" << 10));
- chunk.setShard(ShardId(shardName()));
- chunk.setVersion(ChunkVersion(2, 0, initEpoch));
- setupCollectionMetadata(nss, initEpoch, std::vector<BSONObj>{chunk.toConfigBSON()});
- }
-
-
- auto future = launchAsync([&] {
- Client::initThreadIfNotAlready();
- auto status = shardingState()->onStaleShardVersion(
- operationContext(), nss, ChunkVersion(3, 0, initEpoch));
- ASSERT_EQ(ErrorCodes::RemoteChangeDetected, status);
- });
-
- OID lastEpoch(initEpoch);
- OID nextEpoch(OID::gen());
- for (int tries = 0; tries < 3; tries++) {
- onFindCommand([&nss, &lastEpoch](const RemoteCommandRequest&) {
- CollectionType coll;
- coll.setNs(nss);
- coll.setUpdatedAt(Date_t());
- coll.setEpoch(lastEpoch);
- coll.setKeyPattern(BSON("x" << 1));
- return std::vector<BSONObj>{coll.toBSON()};
- });
-
- onFindCommand([this, &nss, &nextEpoch, tries](const RemoteCommandRequest& request) {
- auto diffQueryStatus = QueryRequest::makeFromFindCommand(nss, request.cmdObj, false);
- ASSERT_OK(diffQueryStatus.getStatus());
-
- auto diffQuery = std::move(diffQueryStatus.getValue());
- Timestamp expectedLastMod = (tries == 0) ? Timestamp(2, 0) : Timestamp(0, 0);
- ASSERT_BSONOBJ_EQ(
- BSON("ns" << nss.ns() << "lastmod" << BSON("$gte" << expectedLastMod)),
- diffQuery->getFilter());
-
- ChunkType chunk;
- chunk.setNS(nss.ns());
- chunk.setMin(BSON("x" << 10));
- chunk.setMax(BSON("x" << 20));
- chunk.setShard(ShardId(shardName()));
- chunk.setVersion(ChunkVersion(3, 10, nextEpoch));
- return std::vector<BSONObj>{chunk.toConfigBSON()};
- });
-
- lastEpoch = nextEpoch;
- nextEpoch = OID::gen();
- }
-
- future.timed_get(kFutureTimeout);
-}
-
-TEST_F(ShardingStateTest, MetadataRefreshShouldBeOkWhenCollectionWasDropped) {
- ShardIdentityType shardIdentity;
- shardIdentity.setConfigsvrConnString(
- ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
- shardIdentity.setShardName(shardName());
- shardIdentity.setClusterId(OID::gen());
-
- ASSERT_OK(shardingState()->initializeFromShardIdentity(operationContext(), shardIdentity));
-
- const NamespaceString nss("test.user");
- const OID initEpoch(OID::gen());
-
- {
- ChunkType chunk;
- chunk.setNS(nss.ns());
- chunk.setMin(BSON("x" << 0));
- chunk.setMax(BSON("x" << 10));
- chunk.setShard(ShardId(shardName()));
- chunk.setVersion(ChunkVersion(2, 0, initEpoch));
- setupCollectionMetadata(nss, initEpoch, std::vector<BSONObj>{chunk.toConfigBSON()});
- }
-
- const ChunkVersion newVersion(3, 0, initEpoch);
- auto future = launchAsync([&] {
- Client::initThreadIfNotAlready();
- ASSERT_OK(shardingState()->onStaleShardVersion(operationContext(), nss, newVersion));
- });
-
- onFindCommand([&nss, &initEpoch](const RemoteCommandRequest&) {
- CollectionType coll;
- coll.setNs(nss);
- coll.setUpdatedAt(Date_t());
- coll.setEpoch(initEpoch);
- coll.setKeyPattern(BSON("x" << 1));
- coll.setDropped(true);
- return std::vector<BSONObj>{coll.toBSON()};
- });
-
- future.timed_get(kFutureTimeout);
-}
-
-TEST_F(ShardingStateTest, MetadataRefreshShouldNotRetryOtherTypesOfError) {
- ShardIdentityType shardIdentity;
- shardIdentity.setConfigsvrConnString(
- ConnectionString(ConnectionString::SET, "a:1,b:2", "config"));
- shardIdentity.setShardName(shardName());
- shardIdentity.setClusterId(OID::gen());
-
- ASSERT_OK(shardingState()->initializeFromShardIdentity(operationContext(), shardIdentity));
-
- const NamespaceString nss("test.user");
- const OID initEpoch(OID::gen());
-
- {
- ChunkType chunk;
- chunk.setNS(nss.ns());
- chunk.setMin(BSON("x" << 0));
- chunk.setMax(BSON("x" << 10));
- chunk.setShard(ShardId(shardName()));
- chunk.setVersion(ChunkVersion(2, 0, initEpoch));
- setupCollectionMetadata(nss, initEpoch, std::vector<BSONObj>{chunk.toConfigBSON()});
- }
-
- auto configTargeter =
- RemoteCommandTargeterMock::get(shardRegistry()->getConfigShard()->getTargeter());
- configTargeter->setFindHostReturnValue({ErrorCodes::HostNotFound, "host erased by test"});
-
- auto status = shardingState()->onStaleShardVersion(
- operationContext(), nss, ChunkVersion(3, 0, initEpoch));
- ASSERT_EQ(ErrorCodes::HostNotFound, status);
-}
-
} // namespace
} // namespace mongo