From c9c340ad6e9e1f33cb001a8375c62d6b16138c74 Mon Sep 17 00:00:00 2001
From: Kaloian Manassiev <kaloian.manassiev@mongodb.com>
Date: Sun, 1 Jul 2018 12:52:02 -0400
Subject: SERVER-36054 Remove ScopedCollectionMetadata's operator bool

---
 src/mongo/db/s/SConscript                          |   1 +
 src/mongo/db/s/cleanup_orphaned_cmd.cpp            |   2 +-
 src/mongo/db/s/collection_metadata.cpp             |  57 ++++-
 src/mongo/db/s/collection_metadata.h               |  85 +++++---
 .../db/s/collection_metadata_filtering_test.cpp    | 229 +++++++++++++++++++++
 src/mongo/db/s/collection_range_deleter.cpp        |  12 +-
 src/mongo/db/s/collection_sharding_state.cpp       |  10 +-
 src/mongo/db/s/collection_sharding_state_test.cpp  |   2 +-
 src/mongo/db/s/get_shard_version_command.cpp       |   6 +-
 src/mongo/db/s/merge_chunks_command.cpp            |   4 +-
 src/mongo/db/s/metadata_manager.cpp                |  29 +--
 src/mongo/db/s/metadata_manager.h                  |  32 +--
 src/mongo/db/s/metadata_manager_test.cpp           | 170 +++++++--------
 .../migration_chunk_cloner_source_legacy_test.cpp  |  26 ++-
 src/mongo/db/s/migration_destination_manager.cpp   |   4 +-
 src/mongo/db/s/migration_source_manager.cpp        |  10 +-
 src/mongo/db/s/set_shard_version_command.cpp       |  16 +-
 .../db/s/shard_filtering_metadata_refresh.cpp      |   7 +-
 src/mongo/db/s/shard_server_op_observer.cpp        |   7 +-
 src/mongo/db/s/split_chunk.cpp                     |   2 +-
 20 files changed, 499 insertions(+), 212 deletions(-)
 create mode 100644 src/mongo/db/s/collection_metadata_filtering_test.cpp

diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 4571f53be06..f1c615a0940 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -354,6 +354,7 @@ env.CppUnitTest(
 env.CppUnitTest(
     target='collection_sharding_state_test',
     source=[
+        'collection_metadata_filtering_test.cpp',
         'collection_metadata_test.cpp',
         'collection_range_deleter_test.cpp',
         'collection_sharding_state_test.cpp',
diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
index fdea6235019..8e8ab3572eb 100644
--- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
@@ -80,7 +80,7 @@ CleanupResult cleanupOrphanedData(OperationContext* opCtx,
         AutoGetCollection autoColl(opCtx, ns, MODE_IX);
         const auto css = CollectionShardingState::get(opCtx, ns);
         auto metadata = css->getMetadata(opCtx);
-        if (!metadata) {
+        if (!metadata->isSharded()) {
             log() << "skipping orphaned data cleanup for " << ns.toString()
                   << ", collection is not sharded";
             return CleanupResult_Done;
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index fa2455ee981..dde0c3dea81 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -34,6 +34,7 @@
 
 #include "mongo/bson/simple_bsonobj_comparator.h"
 #include "mongo/bson/util/builder.h"
+#include "mongo/db/bson/dotted_path_support.h"
 #include "mongo/s/catalog/type_chunk.h"
 #include "mongo/stdx/memory.h"
 #include "mongo/util/log.h"
@@ -42,13 +43,11 @@ namespace mongo {
 
 CollectionMetadata::CollectionMetadata(std::shared_ptr<ChunkManager> cm, const ShardId& thisShardId)
-    : _cm(std::move(cm)), _thisShardId(thisShardId) {
-
-    invariant(_cm->getVersion().isSet());
-    invariant(_cm->getVersion() >= getShardVersion());
-}
+    : _cm(std::move(cm)), _thisShardId(thisShardId) {}
 
 RangeMap CollectionMetadata::getChunks() const {
+    invariant(isSharded());
+
     RangeMap chunksMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<BSONObj>());
 
     for (const auto& chunk : _cm->chunks()) {
@@ -62,6 +61,8 @@ RangeMap CollectionMetadata::getChunks() const {
 }
 
 bool CollectionMetadata::getNextChunk(const BSONObj& lookupKey, ChunkType* chunk) const {
+    invariant(isSharded());
+
     auto foundIt = _cm->getNextChunkOnShard(lookupKey, _thisShardId);
     if (foundIt.begin() == foundIt.end())
         return false;
@@ -73,6 +74,8 @@ bool CollectionMetadata::getNextChunk(const BSONObj& lookupKey, ChunkType* chunk
 }
 
 Status CollectionMetadata::checkChunkIsValid(const ChunkType& chunk) const {
+    invariant(isSharded());
+
     ChunkType existingChunk;
 
     if (!getNextChunk(chunk.getMin(), &existingChunk)) {
@@ -94,13 +97,41 @@ Status CollectionMetadata::checkChunkIsValid(const ChunkType& chunk) const {
     return Status::OK();
 }
 
+BSONObj CollectionMetadata::extractDocumentKey(BSONObj const& doc) const {
+    BSONObj key;
+
+    if (isSharded()) {
+        auto const& pattern = getChunkManager()->getShardKeyPattern();
+        key = dotted_path_support::extractElementsBasedOnTemplate(doc, pattern.toBSON());
+        if (pattern.hasId()) {
+            return key;
+        }
+        // else, try to append an _id field from the document.
+    }
+
+    if (auto id = doc["_id"]) {
+        return key.isEmpty() ? id.wrap() : BSONObjBuilder(std::move(key)).append(id).obj();
+    }
+
+    // For legacy documents that lack an _id, use the document itself as its key.
+    return doc;
+}
+
 void CollectionMetadata::toBSONBasic(BSONObjBuilder& bb) const {
-    _cm->getVersion().appendLegacyWithField(&bb, "collVersion");
-    getShardVersion().appendLegacyWithField(&bb, "shardVersion");
-    bb.append("keyPattern", _cm->getShardKeyPattern().toBSON());
+    if (isSharded()) {
+        _cm->getVersion().appendLegacyWithField(&bb, "collVersion");
+        getShardVersion().appendLegacyWithField(&bb, "shardVersion");
+        bb.append("keyPattern", _cm->getShardKeyPattern().toBSON());
+    } else {
+        ChunkVersion::UNSHARDED().appendLegacyWithField(&bb, "collVersion");
+        ChunkVersion::UNSHARDED().appendLegacyWithField(&bb, "shardVersion");
+    }
 }
 
 void CollectionMetadata::toBSONChunks(BSONArrayBuilder& bb) const {
+    if (!isSharded())
+        return;
+
     for (const auto& chunk : _cm->chunks()) {
         if (chunk.getShardId() == _thisShardId) {
             BSONArrayBuilder chunkBB(bb.subarrayStart());
@@ -112,12 +143,18 @@ void CollectionMetadata::toBSONChunks(BSONArrayBuilder& bb) const {
 }
 
 std::string CollectionMetadata::toStringBasic() const {
-    return str::stream() << "collection version: " << _cm->getVersion().toString()
-                         << ", shard version: " << getShardVersion().toString();
+    if (isSharded()) {
+        return str::stream() << "collection version: " << _cm->getVersion().toString()
+                             << ", shard version: " << getShardVersion().toString();
+    } else {
+        return "collection version: <unsharded>";
+    }
 }
 
 boost::optional<ChunkRange> CollectionMetadata::getNextOrphanRange(
     RangeMap const& receivingChunks, BSONObj const& origLookupKey) const {
+    invariant(isSharded());
+
     const BSONObj maxKey = getMaxKey();
     BSONObj lookupKey = origLookupKey;
diff --git a/src/mongo/db/s/collection_metadata.h b/src/mongo/db/s/collection_metadata.h
index 0c97a0531ec..c744cc15a77 100644
--- a/src/mongo/db/s/collection_metadata.h
+++ b/src/mongo/db/s/collection_metadata.h
@@ -47,6 +47,12 @@ namespace mongo {
  */
 class CollectionMetadata {
 public:
+    /**
+     * Instantiates a metadata object which represents an unsharded collection. Calling
+     * 'isSharded' on this object will return false and it is illegal to use it for filtering.
+     */
+    CollectionMetadata() = default;
+
     /**
      * The main way to construct CollectionMetadata is through MetadataLoader or clone() methods.
     *
@@ -55,10 +61,54 @@ public:
      */
     CollectionMetadata(std::shared_ptr<ChunkManager> cm, const ShardId& thisShardId);
 
+    /**
+     * Returns whether this metadata object represents a sharded collection which requires
+     * filtering.
+     */
+    bool isSharded() const {
+        return bool(_cm);
+    }
+
+    /**
+     * Returns the current shard version for the collection or UNSHARDED if it is not sharded.
+     */
+    ChunkVersion getShardVersion() const {
+        return (isSharded() ? _cm->getVersion(_thisShardId) : ChunkVersion::UNSHARDED());
+    }
+
+    /**
+     * Returns the current collection version or UNSHARDED if it is not sharded.
+     */
+    ChunkVersion getCollVersion() const {
+        return (isSharded() ? _cm->getVersion() : ChunkVersion::UNSHARDED());
+    }
+
+    /**
+     * Returns just the shard key fields, if the collection is sharded, and the _id field, from
+     * `doc`. Does not alter any field values (e.g. by hashing); values are copied verbatim.
+     */
+    BSONObj extractDocumentKey(const BSONObj& doc) const;
+
+    /**
+     * BSON output of the basic metadata information (chunk and shard version).
+     */
+    void toBSONBasic(BSONObjBuilder& bb) const;
+
+    /**
+     * BSON output of the chunks metadata into a BSONArray
+     */
+    void toBSONChunks(BSONArrayBuilder& bb) const;
+
+    /**
+     * String output of the collection and shard versions.
+     */
+    std::string toStringBasic() const;
+
     /**
      * Obtains the shard id with which this collection metadata is configured.
      */
     const ShardId& shardId() const {
+        invariant(isSharded());
         return _thisShardId;
     }
 
@@ -66,6 +116,7 @@ public:
      * Returns true if 'key' contains exactly the same fields as the shard key pattern.
      */
     bool isValidKey(const BSONObj& key) const {
+        invariant(isSharded());
         return _cm->getShardKeyPattern().isShardKey(key);
     }
 
@@ -74,6 +125,7 @@ public:
      * returns false. If key is not a valid shard key, the behaviour is undefined.
      */
     bool keyBelongsToMe(const BSONObj& key) const {
+        invariant(isSharded());
         return _cm->keyBelongsToShard(key, _thisShardId);
     }
 
@@ -94,6 +146,7 @@ public:
      * Returns true if the argument range overlaps any chunk.
      */
     bool rangeOverlapsChunk(ChunkRange const& range) const {
+        invariant(isSharded());
         return _cm->rangeOverlapsShard(range, _thisShardId);
     }
 
@@ -118,52 +171,38 @@ public:
     boost::optional<ChunkRange> getNextOrphanRange(RangeMap const& receiveMap,
                                                    BSONObj const& lookupKey) const;
 
-    ChunkVersion getCollVersion() const {
-        return _cm->getVersion();
-    }
-
-    ChunkVersion getShardVersion() const {
-        return _cm->getVersion(_thisShardId);
-    }
-
+    /**
+     * Returns all the chunks which are contained on this shard.
+     */
     RangeMap getChunks() const;
 
     const BSONObj& getKeyPattern() const {
+        invariant(isSharded());
         return _cm->getShardKeyPattern().toBSON();
     }
 
     const std::vector<std::unique_ptr<FieldRef>>& getKeyPatternFields() const {
+        invariant(isSharded());
         return _cm->getShardKeyPattern().getKeyPatternFields();
     }
 
     BSONObj getMinKey() const {
+        invariant(isSharded());
         return _cm->getShardKeyPattern().getKeyPattern().globalMin();
     }
 
     BSONObj getMaxKey() const {
+        invariant(isSharded());
         return _cm->getShardKeyPattern().getKeyPattern().globalMax();
     }
 
-    /**
-     * BSON output of the basic metadata information (chunk and shard version).
-     */
-    void toBSONBasic(BSONObjBuilder& bb) const;
-
-    /**
-     * BSON output of the chunks metadata into a BSONArray
-     */
-    void toBSONChunks(BSONArrayBuilder& bb) const;
-
-    /**
-     * String output of the collection and shard versions.
-     */
-    std::string toStringBasic() const;
-
     std::shared_ptr<ChunkManager> getChunkManager() const {
+        invariant(isSharded());
         return _cm;
     }
 
     bool uuidMatches(UUID uuid) const {
+        invariant(isSharded());
         return _cm->uuidMatches(uuid);
     }
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
new file mode 100644
index 00000000000..c2fe5fd9272
--- /dev/null
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -0,0 +1,229 @@
+/**
+ *    Copyright (C) 2018 MongoDB Inc.
+ *
+ *    This program is free software: you can redistribute it and/or modify
+ *    it under the terms of the GNU Affero General Public License, version 3,
+ *    as published by the Free Software Foundation.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *    GNU Affero General Public License for more details.
+ *
+ *    You should have received a copy of the GNU Affero General Public License
+ *    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ *    As a special exception, the copyright holders give permission to link the
+ *    code of portions of this program with the OpenSSL library under certain
+ *    conditions as described in each individual source file and distribute
+ *    linked combinations including the program with the OpenSSL library. You
+ *    must comply with the GNU Affero General Public License in all respects for
+ *    all of the code used other than as permitted herein. If you modify file(s)
+ *    with this exception, you may extend this exception to your version of the
+ *    file(s), but you are not obligated to do so. If you do not wish to do so,
+ *    delete this exception statement from your version. If you delete this
+ *    exception statement from all source files in the program, then also delete
+ *    it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/catalog_raii.h"
+#include "mongo/db/s/collection_sharding_state.h"
+#include "mongo/s/catalog/type_chunk.h"
+#include "mongo/s/shard_server_test_fixture.h"
+
+namespace mongo {
+namespace {
+
+const NamespaceString kNss("TestDB", "TestColl");
+
+class CollectionMetadataFilteringTest : public ShardServerTestFixture {
+protected:
+    void setUp() override {
+        ShardServerTestFixture::setUp();
+        _manager = std::make_shared<MetadataManager>(getServiceContext(), kNss, executor());
+    }
+
+    // Prepares data with a history array populated:
+    //      chunk1 - [min, -100)
+    //      chunk2 - [-100, 0)
+    //      chunk3 - [0, 100)
+    //      chunk4 - [100, max)
+    // and the history:
+    //      time (now,75) shard0(chunk1, chunk3) shard1(chunk2, chunk4)
+    //      time (75,25) shard0(chunk2, chunk4) shard1(chunk1, chunk3)
+    //      time (25,0) - no history
+    void prepareTestData() {
+        const OID epoch = OID::gen();
+        const ShardKeyPattern shardKeyPattern(BSON("_id" << 1));
+
+        auto rt = RoutingTableHistory::makeNew(
+            kNss, UUID::gen(), shardKeyPattern.getKeyPattern(), nullptr, false, epoch, [&] {
+                ChunkVersion version(1, 0, epoch);
+
+                ChunkType chunk1(kNss,
+                                 {shardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << -100)},
+                                 version,
+                                 {"0"});
+                chunk1.setHistory({ChunkHistory(Timestamp(75, 0), ShardId("0")),
+                                   ChunkHistory(Timestamp(25, 0), ShardId("1"))});
+                version.incMinor();
+
+                ChunkType chunk2(kNss, {BSON("_id" << -100), BSON("_id" << 0)}, version, {"1"});
+                chunk2.setHistory({ChunkHistory(Timestamp(75, 0), ShardId("1")),
+                                   ChunkHistory(Timestamp(25, 0), ShardId("0"))});
+                version.incMinor();
+
+                ChunkType chunk3(kNss, {BSON("_id" << 0), BSON("_id" << 100)}, version, {"0"});
+                chunk3.setHistory({ChunkHistory(Timestamp(75, 0), ShardId("0")),
+                                   ChunkHistory(Timestamp(25, 0), ShardId("1"))});
+                version.incMinor();
+
+                ChunkType chunk4(kNss,
+                                 {BSON("_id" << 100), shardKeyPattern.getKeyPattern().globalMax()},
+                                 version,
+                                 {"1"});
+                chunk4.setHistory({ChunkHistory(Timestamp(75, 0), ShardId("1")),
+                                   ChunkHistory(Timestamp(25, 0), ShardId("0"))});
+                version.incMinor();
+
+                return std::vector<ChunkType>{chunk1, chunk2, chunk3, chunk4};
+            }());
+
+        auto cm = std::make_shared<ChunkManager>(rt, Timestamp(100, 0));
+        ASSERT_EQ(4, cm->numChunks());
+        {
+            AutoGetCollection autoColl(operationContext(), kNss, MODE_X);
+            auto const css = CollectionShardingState::get(operationContext(), kNss);
+            css->refreshMetadata(operationContext(),
+                                 std::make_unique<CollectionMetadata>(cm, ShardId("0")));
+        }
+
+        _manager->refreshActiveMetadata(std::make_unique<CollectionMetadata>(cm, ShardId("0")));
+    }
+
+    std::shared_ptr<MetadataManager> _manager;
+};
+
+// Verifies that the right set of documents is visible.
+TEST_F(CollectionMetadataFilteringTest, FilterDocumentsPresent) {
+    prepareTestData();
+
+    auto metadata = _manager->getActiveMetadata(_manager, LogicalTime(Timestamp(100, 0)));
+
+    ASSERT_TRUE(metadata->keyBelongsToMe(BSON("_id" << -500)));
+    ASSERT_TRUE(metadata->keyBelongsToMe(BSON("_id" << 50)));
+    ASSERT_FALSE(metadata->keyBelongsToMe(BSON("_id" << -50)));
+    ASSERT_FALSE(metadata->keyBelongsToMe(BSON("_id" << 500)));
+}
+
+// Verifies that a different set of documents is visible for a timestamp in the past.
+TEST_F(CollectionMetadataFilteringTest, FilterDocumentsPast) { + prepareTestData(); + + auto metadata = _manager->getActiveMetadata(_manager, LogicalTime(Timestamp(50, 0))); + + ASSERT_FALSE(metadata->keyBelongsToMe(BSON("_id" << -500))); + ASSERT_FALSE(metadata->keyBelongsToMe(BSON("_id" << 50))); + ASSERT_TRUE(metadata->keyBelongsToMe(BSON("_id" << -50))); + ASSERT_TRUE(metadata->keyBelongsToMe(BSON("_id" << 500))); +} + +// Verifies that when accessing too far into the past we get the stale error. +TEST_F(CollectionMetadataFilteringTest, FilterDocumentsStale) { + prepareTestData(); + + auto metadata = _manager->getActiveMetadata(_manager, LogicalTime(Timestamp(10, 0))); + + ASSERT_THROWS_CODE(metadata->keyBelongsToMe(BSON("_id" << -500)), + AssertionException, + ErrorCodes::StaleChunkHistory); + ASSERT_THROWS_CODE(metadata->keyBelongsToMe(BSON("_id" << 50)), + AssertionException, + ErrorCodes::StaleChunkHistory); + ASSERT_THROWS_CODE(metadata->keyBelongsToMe(BSON("_id" << -50)), + AssertionException, + ErrorCodes::StaleChunkHistory); + ASSERT_THROWS_CODE(metadata->keyBelongsToMe(BSON("_id" << 500)), + AssertionException, + ErrorCodes::StaleChunkHistory); +} + +// The same test as FilterDocumentsPresent but using "readConcern" +TEST_F(CollectionMetadataFilteringTest, FilterDocumentsPresentShardingState) { + prepareTestData(); + + BSONObj readConcern = BSON("readConcern" << BSON("level" + << "snapshot" + << "atClusterTime" + << Timestamp(100, 0))); + + auto&& readConcernArgs = repl::ReadConcernArgs::get(operationContext()); + ASSERT_OK(readConcernArgs.initialize(readConcern["readConcern"])); + + AutoGetCollection autoColl(operationContext(), kNss, MODE_IS); + auto const css = CollectionShardingState::get(operationContext(), kNss); + auto metadata = css->getMetadata(operationContext()); + + ASSERT_TRUE(metadata->keyBelongsToMe(BSON("_id" << -500))); + ASSERT_TRUE(metadata->keyBelongsToMe(BSON("_id" << 50))); + ASSERT_FALSE(metadata->keyBelongsToMe(BSON("_id" << -50))); + ASSERT_FALSE(metadata->keyBelongsToMe(BSON("_id" << 500))); +} + +// The same test as FilterDocumentsPast but using "readConcern" +TEST_F(CollectionMetadataFilteringTest, FilterDocumentsPastShardingState) { + prepareTestData(); + + BSONObj readConcern = BSON("readConcern" << BSON("level" + << "snapshot" + << "atClusterTime" + << Timestamp(50, 0))); + + auto&& readConcernArgs = repl::ReadConcernArgs::get(operationContext()); + ASSERT_OK(readConcernArgs.initialize(readConcern["readConcern"])); + + AutoGetCollection autoColl(operationContext(), kNss, MODE_IS); + auto const css = CollectionShardingState::get(operationContext(), kNss); + auto metadata = css->getMetadata(operationContext()); + + ASSERT_FALSE(metadata->keyBelongsToMe(BSON("_id" << -500))); + ASSERT_FALSE(metadata->keyBelongsToMe(BSON("_id" << 50))); + ASSERT_TRUE(metadata->keyBelongsToMe(BSON("_id" << -50))); + ASSERT_TRUE(metadata->keyBelongsToMe(BSON("_id" << 500))); +} + +// The same test as FilterDocumentsStale but using "readConcern" +TEST_F(CollectionMetadataFilteringTest, FilterDocumentsStaleShardingState) { + prepareTestData(); + + BSONObj readConcern = BSON("readConcern" << BSON("level" + << "snapshot" + << "atClusterTime" + << Timestamp(10, 0))); + + auto&& readConcernArgs = repl::ReadConcernArgs::get(operationContext()); + ASSERT_OK(readConcernArgs.initialize(readConcern["readConcern"])); + + AutoGetCollection autoColl(operationContext(), kNss, MODE_IS); + auto const css = CollectionShardingState::get(operationContext(), kNss); + auto metadata = 
css->getMetadata(operationContext());
+
+    ASSERT_THROWS_CODE(metadata->keyBelongsToMe(BSON("_id" << -500)),
+                       AssertionException,
+                       ErrorCodes::StaleChunkHistory);
+    ASSERT_THROWS_CODE(metadata->keyBelongsToMe(BSON("_id" << 50)),
+                       AssertionException,
+                       ErrorCodes::StaleChunkHistory);
+    ASSERT_THROWS_CODE(metadata->keyBelongsToMe(BSON("_id" << -50)),
+                       AssertionException,
+                       ErrorCodes::StaleChunkHistory);
+    ASSERT_THROWS_CODE(metadata->keyBelongsToMe(BSON("_id" << 500)),
+                       AssertionException,
+                       ErrorCodes::StaleChunkHistory);
+}
+
+}  // namespace
+}  // namespace mongo
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index 3ccebb88675..cd78d1b5aab 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -117,11 +117,13 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
     auto* const collection = autoColl.getCollection();
 
     auto* const css = CollectionShardingState::get(opCtx, nss);
-    auto* const self = forTestOnly ? forTestOnly : &css->_metadataManager->_rangesToClean;
+    auto& metadataManager = css->_metadataManager;
+    auto* const self = forTestOnly ? forTestOnly : &metadataManager->_rangesToClean;
 
-    auto scopedCollectionMetadata = css->getMetadata(opCtx);
+    const auto scopedCollectionMetadata =
+        metadataManager->getActiveMetadata(metadataManager, boost::none);
 
-    if (!forTestOnly && (!collection || !scopedCollectionMetadata)) {
+    if (!forTestOnly && (!collection || !scopedCollectionMetadata->isSharded())) {
         if (!collection) {
             LOG(0) << "Abandoning any range deletions left over from dropped " << nss.ns();
         } else {
@@ -206,8 +208,8 @@ boost::optional<Date_t> CollectionRangeDeleter::cleanUpNextRange(
     }
 
     try {
-        const auto keyPattern = scopedCollectionMetadata->getKeyPattern();
-        wrote = self->_doDeletion(opCtx, collection, keyPattern, *range, maxToDelete);
+        wrote = self->_doDeletion(
+            opCtx, collection, scopedCollectionMetadata->getKeyPattern(), *range, maxToDelete);
     } catch (const DBException& e) {
         wrote = e.toStatus();
         warning() << e.what();
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index ebbfc83891b..59f5b09f4d2 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -143,7 +143,7 @@ public:
 
         for (auto& coll : _collections) {
             ScopedCollectionMetadata metadata = coll.second->getMetadata(opCtx);
-            if (metadata) {
+            if (metadata->isSharded()) {
                 versionB.appendTimestamp(coll.first, metadata->getShardVersion().toLong());
             } else {
                 versionB.appendTimestamp(coll.first, ChunkVersion::UNSHARDED().toLong());
@@ -280,7 +280,7 @@ void CollectionShardingState::checkShardVersionOrThrow(OperationContext* opCtx)
         // Set this for error messaging purposes before potentially returning false.
         auto metadata = getMetadata(opCtx);
         const auto wantedShardVersion =
-            metadata ? metadata->getShardVersion() : ChunkVersion::UNSHARDED();
+            metadata->isSharded() ? metadata->getShardVersion() : ChunkVersion::UNSHARDED();
 
         auto criticalSectionSignal = _critSec.getSignal(opCtx->lockState()->isWriteLocked() ?
ShardingMigrationCriticalSection::kWrite @@ -352,8 +352,10 @@ Status CollectionShardingState::waitForClean(OperationContext* opCtx, // not hold reference on it, which would make it appear in use auto metadata = css->_metadataManager->getActiveMetadata(css->_metadataManager, boost::none); - if (!metadata || metadata->getCollVersion().epoch() != epoch) { - return {ErrorCodes::StaleShardVersion, "Collection being migrated was dropped"}; + + if (!metadata->isSharded() || metadata->getCollVersion().epoch() != epoch) { + return {ErrorCodes::ConflictingOperationInProgress, + "Collection being migrated was dropped"}; } } diff --git a/src/mongo/db/s/collection_sharding_state_test.cpp b/src/mongo/db/s/collection_sharding_state_test.cpp index 401edadd7a0..6934bddb5e3 100644 --- a/src/mongo/db/s/collection_sharding_state_test.cpp +++ b/src/mongo/db/s/collection_sharding_state_test.cpp @@ -29,11 +29,11 @@ #include "mongo/db/catalog_raii.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/namespace_string.h" #include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/shard_server_op_observer.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/s/type_shard_identity.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/shard_server_test_fixture.h" namespace mongo { diff --git a/src/mongo/db/s/get_shard_version_command.cpp b/src/mongo/db/s/get_shard_version_command.cpp index cd5a8febb8e..19f2d965b56 100644 --- a/src/mongo/db/s/get_shard_version_command.cpp +++ b/src/mongo/db/s/get_shard_version_command.cpp @@ -110,15 +110,15 @@ public: CollectionShardingState* const css = CollectionShardingState::get(opCtx, nss); const auto metadata = css->getMetadata(opCtx); - if (metadata) { + if (metadata->isSharded()) { result.appendTimestamp("global", metadata->getShardVersion().toLong()); } else { - result.appendTimestamp("global", ChunkVersion(0, 0, OID()).toLong()); + result.appendTimestamp("global", ChunkVersion::UNSHARDED().toLong()); } if (cmdObj["fullMetadata"].trueValue()) { BSONObjBuilder metadataBuilder(result.subobjStart("metadata")); - if (metadata) { + if (metadata->isSharded()) { metadata->toBSONBasic(metadataBuilder); BSONArrayBuilder chunksArr(metadataBuilder.subarrayStart("chunks")); diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp index a1fab846dc3..c8d67321745 100644 --- a/src/mongo/db/s/merge_chunks_command.cpp +++ b/src/mongo/db/s/merge_chunks_command.cpp @@ -67,7 +67,7 @@ bool checkMetadataForSuccess(OperationContext* opCtx, uassert(ErrorCodes::StaleEpoch, str::stream() << "Collection " << nss.ns() << " became unsharded", - metadataAfterMerge); + metadataAfterMerge->isSharded()); ChunkType chunk; if (!metadataAfterMerge->getNextChunk(minKey, &chunk)) { @@ -110,7 +110,7 @@ Status mergeChunks(OperationContext* opCtx, return CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx); }(); - if (!metadata) { + if (!metadata->isSharded()) { std::string errmsg = stream() << "could not merge chunks, collection " << nss.ns() << " is not sharded"; diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp index 9a0131cc8f2..50442916a0c 100644 --- a/src/mongo/db/s/metadata_manager.cpp +++ b/src/mongo/db/s/metadata_manager.cpp @@ -35,7 +35,6 @@ #include "mongo/base/string_data.h" #include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/bson/util/builder.h" -#include "mongo/db/bson/dotted_path_support.h" #include "mongo/db/query/internal_plans.h" #include "mongo/db/range_arithmetic.h" 
#include "mongo/db/s/collection_sharding_state.h" @@ -185,7 +184,8 @@ ScopedCollectionMetadata MetadataManager::getActiveMetadata( std::shared_ptr self, const boost::optional& atClusterTime) { stdx::lock_guard lg(_managerLock); if (_metadata.empty()) { - return ScopedCollectionMetadata(); + return ScopedCollectionMetadata( + std::make_shared(CollectionMetadata())); } auto metadataTracker = _metadata.back(); @@ -540,8 +540,6 @@ boost::optional MetadataManager::getNextOrphanRange(BSONObj const& f return _metadata.back()->metadata.getNextOrphanRange(_receivingChunks, from); } -ScopedCollectionMetadata::ScopedCollectionMetadata() = default; - ScopedCollectionMetadata::ScopedCollectionMetadata( WithLock, std::shared_ptr metadataManager, @@ -575,29 +573,6 @@ ScopedCollectionMetadata& ScopedCollectionMetadata::operator=(ScopedCollectionMe return *this; } -CollectionMetadata* ScopedCollectionMetadata::getMetadata() const { - return _metadataTracker ? &_metadataTracker->metadata : nullptr; -} - -BSONObj ScopedCollectionMetadata::extractDocumentKey(BSONObj const& doc) const { - BSONObj key; - if (*this) { // is sharded - auto const& pattern = _metadataTracker->metadata.getChunkManager()->getShardKeyPattern(); - key = dotted_path_support::extractElementsBasedOnTemplate(doc, pattern.toBSON()); - if (pattern.hasId()) { - return key; - } - // else, try to append an _id field from the document. - } - - if (auto id = doc["_id"]) { - return key.isEmpty() ? id.wrap() : BSONObjBuilder(std::move(key)).append(id).obj(); - } - - // For legacy documents that lack an _id, use the document itself as its key. - return doc; -} - void ScopedCollectionMetadata::_clear() { if (!_metadataManager) { return; diff --git a/src/mongo/db/s/metadata_manager.h b/src/mongo/db/s/metadata_manager.h index 9bc61361c8b..e9256de46fe 100644 --- a/src/mongo/db/s/metadata_manager.h +++ b/src/mongo/db/s/metadata_manager.h @@ -265,28 +265,18 @@ public: ScopedCollectionMetadata(ScopedCollectionMetadata&& other); ScopedCollectionMetadata& operator=(ScopedCollectionMetadata&& other); - /** - * Dereferencing the ScopedCollectionMetadata dereferences the private CollectionMetadata. - */ - CollectionMetadata* getMetadata() const; - - CollectionMetadata* operator->() const { - return getMetadata(); + const CollectionMetadata& get() const { + invariant(_metadataTracker); + return _metadataTracker->metadata; } - /** - * True if the ScopedCollectionMetadata stores a metadata (is not empty) and the collection is - * sharded. - */ - operator bool() const { - return getMetadata() != nullptr; + const auto* operator-> () const { + return &get(); } - /** - * Returns just the shard key fields, if collection is sharded, and the _id field, from `doc`. - * Does not alter any field values (e.g. by hashing); values are copied verbatim. - */ - BSONObj extractDocumentKey(BSONObj const& doc) const; + const auto& operator*() const { + return get(); + } private: friend ScopedCollectionMetadata MetadataManager::getActiveMetadata( @@ -295,12 +285,6 @@ private: friend std::vector MetadataManager::overlappingMetadata( std::shared_ptr const&, ChunkRange const&); - /** - * Creates an empty ScopedCollectionMetadata, which is interpreted as if the collection is - * unsharded. - */ - ScopedCollectionMetadata(); - /** * Increments the usageCounter in the specified CollectionMetadata. 
     *
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 5f5f5ddbc7a..bd60db74e23 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -33,10 +33,8 @@
 #include "mongo/bson/bsonobjbuilder.h"
 #include "mongo/db/catalog_raii.h"
 #include "mongo/db/client.h"
-#include "mongo/db/dbdirectclient.h"
 #include "mongo/db/jsobj.h"
 #include "mongo/db/namespace_string.h"
-#include "mongo/db/repl/replication_coordinator_mock.h"
 #include "mongo/db/s/metadata_manager.h"
 #include "mongo/db/s/sharding_state.h"
 #include "mongo/db/server_options.h"
@@ -68,6 +66,9 @@ protected:
         _manager = std::make_shared<MetadataManager>(getServiceContext(), kNss, executor());
     }
 
+    /**
+     * Returns an instance of CollectionMetadata which has no chunks owned by 'thisShard'.
+     */
     static std::unique_ptr<CollectionMetadata> makeEmptyMetadata() {
         const OID epoch = OID::gen();
 
@@ -97,59 +98,58 @@ protected:
      * chunk version is lower than the maximum one.
      */
     static std::unique_ptr<CollectionMetadata> cloneMetadataPlusChunk(
-        const CollectionMetadata& metadata, const BSONObj& minKey, const BSONObj& maxKey) {
-        invariant(minKey.woCompare(maxKey) < 0);
-        invariant(!rangeMapOverlaps(metadata.getChunks(), minKey, maxKey));
+        const ScopedCollectionMetadata& metadata, const ChunkRange range) {
+        const BSONObj& minKey = range.getMin();
+        const BSONObj& maxKey = range.getMax();
+
+        ASSERT(SimpleBSONObjComparator::kInstance.evaluate(minKey < maxKey))
+            << "minKey == " << minKey << "; maxKey == " << maxKey;
+        ASSERT(!rangeMapOverlaps(metadata->getChunks(), minKey, maxKey));
 
-        auto cm = metadata.getChunkManager();
+        auto cm = metadata->getChunkManager();
 
         const auto chunkToSplit = cm->findIntersectingChunkWithSimpleCollation(minKey);
         ASSERT(SimpleBSONObjComparator::kInstance.evaluate(maxKey <= chunkToSplit.getMax()))
             << "maxKey == " << maxKey
             << " and chunkToSplit.getMax() == " << chunkToSplit.getMax();
+
         auto v1 = cm->getVersion();
         v1.incMajor();
         auto v2 = v1;
         v2.incMajor();
         auto v3 = v2;
         v3.incMajor();
+
         auto rt = cm->getRoutingHistory()->makeUpdated(
             {ChunkType{kNss, ChunkRange{chunkToSplit.getMin(), minKey}, v1, kOtherShard},
              ChunkType{kNss, ChunkRange{minKey, maxKey}, v2, kThisShard},
              ChunkType{kNss, ChunkRange{maxKey, chunkToSplit.getMax()}, v3, kOtherShard}});
-        cm = std::make_shared<ChunkManager>(rt, boost::none);
-
-        return stdx::make_unique<CollectionMetadata>(cm, kThisShard);
-    }
-
-    CollectionMetadata* addChunk(std::shared_ptr<MetadataManager>& manager) {
-        ScopedCollectionMetadata scopedMetadata1 = manager->getActiveMetadata(manager, boost::none);
 
-        std::unique_ptr<CollectionMetadata> cm2 = cloneMetadataPlusChunk(
-            *scopedMetadata1.getMetadata(), BSON("key" << 0), BSON("key" << 20));
-        auto cm2Ptr = cm2.get();
-
-        manager->refreshActiveMetadata(std::move(cm2));
-        return cm2Ptr;
+        return stdx::make_unique<CollectionMetadata>(
+            std::make_shared<ChunkManager>(rt, boost::none), kThisShard);
     }
 
     std::shared_ptr<MetadataManager> _manager;
 };
 
-// In the following tests, the ranges-to-clean is not drained by the background deleter thread
-// because the collection involved has no CollectionShardingState, so the task just returns without
-// doing anything.
-
 TEST_F(MetadataManagerTest, CleanUpForMigrateIn) {
     _manager->refreshActiveMetadata(makeEmptyMetadata());
 
+    // Sanity checks
+    ASSERT(_manager->getActiveMetadata(_manager, boost::none)->isSharded());
+    ASSERT_EQ(0UL, _manager->getActiveMetadata(_manager, boost::none)->getChunks().size());
+
     ChunkRange range1(BSON("key" << 0), BSON("key" << 10));
     ChunkRange range2(BSON("key" << 10), BSON("key" << 20));
+
     auto notif1 = _manager->beginReceive(range1);
-    ASSERT_TRUE(!notif1.ready());
+    ASSERT(!notif1.ready());
+
     auto notif2 = _manager->beginReceive(range2);
-    ASSERT_TRUE(!notif2.ready());
-    ASSERT_EQ(_manager->numberOfRangesToClean(), 2UL);
-    ASSERT_EQ(_manager->numberOfRangesToCleanStillInUse(), 0UL);
+    ASSERT(!notif2.ready());
+
+    ASSERT_EQ(2UL, _manager->numberOfRangesToClean());
+    ASSERT_EQ(0UL, _manager->numberOfRangesToCleanStillInUse());
+
     notif1.abandon();
     notif2.abandon();
 }
@@ -170,89 +170,94 @@ TEST_F(MetadataManagerTest, AddRangeNotificationsBlockAndYield) {
 }
 
 TEST_F(MetadataManagerTest, NotificationBlocksUntilDeletion) {
-    ChunkRange cr1(BSON("key" << 20), BSON("key" << 30));
     _manager->refreshActiveMetadata(makeEmptyMetadata());
+
+    ChunkRange cr1(BSON("key" << 20), BSON("key" << 30));
     auto optNotif = _manager->trackOrphanedDataCleanup(cr1);
-    ASSERT_FALSE(optNotif);  // nothing to track yet
+    ASSERT(!optNotif);
+
     {
         ASSERT_EQ(_manager->numberOfMetadataSnapshots(), 0UL);
         ASSERT_EQ(_manager->numberOfRangesToClean(), 0UL);
 
         auto scm1 = _manager->getActiveMetadata(_manager, boost::none);  // and increment refcount
-        ASSERT_TRUE(bool(scm1));
-        ASSERT_EQ(0ULL, scm1->getChunks().size());
 
-        addChunk(_manager);  // push new metadata
+        const auto addChunk = [this] {
+            _manager->refreshActiveMetadata(
+                cloneMetadataPlusChunk(_manager->getActiveMetadata(_manager, boost::none),
+                                       {BSON("key" << 0), BSON("key" << 20)}));
+        };
+
+        addChunk();  // push new metadata
         auto scm2 = _manager->getActiveMetadata(_manager, boost::none);  // and increment refcount
         ASSERT_EQ(1ULL, scm2->getChunks().size());
 
-        // this is here solely to pacify an invariant in addChunk
+        // Simulate drop and recreate
         _manager->refreshActiveMetadata(makeEmptyMetadata());
 
-        addChunk(_manager);  // push new metadata
+        addChunk();  // push new metadata
         auto scm3 = _manager->getActiveMetadata(_manager, boost::none);  // and increment refcount
         ASSERT_EQ(1ULL, scm3->getChunks().size());
 
         auto overlaps = _manager->overlappingMetadata(
             _manager, ChunkRange(BSON("key" << 0), BSON("key" << 10)));
         ASSERT_EQ(2ULL, overlaps.size());
-        std::vector<ScopedCollectionMetadata> ref;
-        ref.push_back(std::move(scm3));
-        ref.push_back(std::move(scm2));
-        ASSERT(ref == overlaps);
+
+        ASSERT_EQ(scm2->getShardVersion(), overlaps[1]->getShardVersion());
+        ASSERT_EQ(scm3->getShardVersion(), overlaps[0]->getShardVersion());
 
         ASSERT_EQ(_manager->numberOfMetadataSnapshots(), 3UL);
         ASSERT_EQ(_manager->numberOfRangesToClean(), 0UL);  // not yet...
optNotif = _manager->cleanUpRange(cr1, Date_t{}); + ASSERT(optNotif); ASSERT_EQ(_manager->numberOfMetadataSnapshots(), 3UL); ASSERT_EQ(_manager->numberOfRangesToClean(), 1UL); - } // scm1,2,3 destroyed, refcount of each metadata goes to zero + } + + // At this point scm1,2,3 above are destroyed and the refcount of each metadata goes to zero + ASSERT_EQ(_manager->numberOfMetadataSnapshots(), 0UL); ASSERT_EQ(_manager->numberOfRangesToClean(), 1UL); - ASSERT_FALSE(optNotif->ready()); + ASSERT(!optNotif->ready()); + auto optNotif2 = _manager->trackOrphanedDataCleanup(cr1); // now tracking it in _rangesToClean - ASSERT_TRUE(optNotif && !optNotif->ready()); - ASSERT_TRUE(optNotif2 && !optNotif2->ready()); + ASSERT(optNotif2); + + ASSERT(!optNotif->ready()); + ASSERT(!optNotif2->ready()); ASSERT(*optNotif == *optNotif2); + optNotif->abandon(); optNotif2->abandon(); } TEST_F(MetadataManagerTest, RefreshAfterSuccessfulMigrationSinglePending) { _manager->refreshActiveMetadata(makeEmptyMetadata()); - const ChunkRange cr1(BSON("key" << 0), BSON("key" << 10)); - ASSERT_EQ(_manager->getActiveMetadata(_manager, boost::none)->getChunks().size(), 0UL); + + ChunkRange cr1(BSON("key" << 0), BSON("key" << 10)); _manager->refreshActiveMetadata( - cloneMetadataPlusChunk(*_manager->getActiveMetadata(_manager, boost::none).getMetadata(), - cr1.getMin(), - cr1.getMax())); + cloneMetadataPlusChunk(_manager->getActiveMetadata(_manager, boost::none), cr1)); ASSERT_EQ(_manager->getActiveMetadata(_manager, boost::none)->getChunks().size(), 1UL); } - TEST_F(MetadataManagerTest, RefreshAfterSuccessfulMigrationMultiplePending) { _manager->refreshActiveMetadata(makeEmptyMetadata()); - const ChunkRange cr1(BSON("key" << 0), BSON("key" << 10)); - const ChunkRange cr2(BSON("key" << 30), BSON("key" << 40)); - ASSERT_EQ(_manager->getActiveMetadata(_manager, boost::none)->getChunks().size(), 0UL); + ChunkRange cr1(BSON("key" << 0), BSON("key" << 10)); + ChunkRange cr2(BSON("key" << 30), BSON("key" << 40)); { - _manager->refreshActiveMetadata(cloneMetadataPlusChunk( - *_manager->getActiveMetadata(_manager, boost::none).getMetadata(), - cr1.getMin(), - cr1.getMax())); + _manager->refreshActiveMetadata( + cloneMetadataPlusChunk(_manager->getActiveMetadata(_manager, boost::none), cr1)); ASSERT_EQ(_manager->numberOfRangesToClean(), 0UL); ASSERT_EQ(_manager->getActiveMetadata(_manager, boost::none)->getChunks().size(), 1UL); } { - _manager->refreshActiveMetadata(cloneMetadataPlusChunk( - *_manager->getActiveMetadata(_manager, boost::none).getMetadata(), - cr2.getMin(), - cr2.getMax())); + _manager->refreshActiveMetadata( + cloneMetadataPlusChunk(_manager->getActiveMetadata(_manager, boost::none), cr2)); ASSERT_EQ(_manager->getActiveMetadata(_manager, boost::none)->getChunks().size(), 2UL); } } @@ -260,43 +265,42 @@ TEST_F(MetadataManagerTest, RefreshAfterSuccessfulMigrationMultiplePending) { TEST_F(MetadataManagerTest, RefreshAfterNotYetCompletedMigrationMultiplePending) { _manager->refreshActiveMetadata(makeEmptyMetadata()); - const ChunkRange cr1(BSON("key" << 0), BSON("key" << 10)); - const ChunkRange cr2(BSON("key" << 30), BSON("key" << 40)); - ASSERT_EQ(_manager->getActiveMetadata(_manager, boost::none)->getChunks().size(), 0UL); + ChunkRange cr1(BSON("key" << 0), BSON("key" << 10)); + ChunkRange cr2(BSON("key" << 30), BSON("key" << 40)); _manager->refreshActiveMetadata( - cloneMetadataPlusChunk(*_manager->getActiveMetadata(_manager, boost::none).getMetadata(), - BSON("key" << 50), - BSON("key" << 60))); + 
cloneMetadataPlusChunk(_manager->getActiveMetadata(_manager, boost::none), + {BSON("key" << 50), BSON("key" << 60)})); ASSERT_EQ(_manager->getActiveMetadata(_manager, boost::none)->getChunks().size(), 1UL); } TEST_F(MetadataManagerTest, BeginReceiveWithOverlappingRange) { _manager->refreshActiveMetadata(makeEmptyMetadata()); - const ChunkRange cr1(BSON("key" << 0), BSON("key" << 10)); - const ChunkRange cr2(BSON("key" << 30), BSON("key" << 40)); - const ChunkRange crOverlap(BSON("key" << 5), BSON("key" << 35)); + ChunkRange cr1(BSON("key" << 0), BSON("key" << 10)); + ChunkRange cr2(BSON("key" << 30), BSON("key" << 40)); - ASSERT_EQ(_manager->getActiveMetadata(_manager, boost::none)->getChunks().size(), 0UL); + _manager->refreshActiveMetadata( + cloneMetadataPlusChunk(_manager->getActiveMetadata(_manager, boost::none), cr1)); + _manager->refreshActiveMetadata( + cloneMetadataPlusChunk(_manager->getActiveMetadata(_manager, boost::none), cr2)); + + ChunkRange crOverlap(BSON("key" << 5), BSON("key" << 35)); } TEST_F(MetadataManagerTest, RefreshMetadataAfterDropAndRecreate) { _manager->refreshActiveMetadata(makeEmptyMetadata()); - - { - auto metadata = _manager->getActiveMetadata(_manager, boost::none); - _manager->refreshActiveMetadata( - cloneMetadataPlusChunk(*metadata.getMetadata(), BSON("key" << 0), BSON("key" << 10))); - } + _manager->refreshActiveMetadata(cloneMetadataPlusChunk( + _manager->getActiveMetadata(_manager, boost::none), {BSON("key" << 0), BSON("key" << 10)})); // Now, pretend that the collection was dropped and recreated - auto recreateMetadata = makeEmptyMetadata(); + _manager->refreshActiveMetadata(makeEmptyMetadata()); _manager->refreshActiveMetadata( - cloneMetadataPlusChunk(*recreateMetadata, BSON("key" << 20), BSON("key" << 30))); - ASSERT_EQ(_manager->getActiveMetadata(_manager, boost::none)->getChunks().size(), 1UL); + cloneMetadataPlusChunk(_manager->getActiveMetadata(_manager, boost::none), + {BSON("key" << 20), BSON("key" << 30)})); const auto chunks = _manager->getActiveMetadata(_manager, boost::none)->getChunks(); + ASSERT_EQ(1UL, chunks.size()); const auto chunkEntry = chunks.begin(); ASSERT_BSONOBJ_EQ(BSON("key" << 20), chunkEntry->first); ASSERT_BSONOBJ_EQ(BSON("key" << 30), chunkEntry->second); @@ -306,12 +310,14 @@ TEST_F(MetadataManagerTest, RefreshMetadataAfterDropAndRecreate) { TEST_F(MetadataManagerTest, RangesToCleanMembership) { _manager->refreshActiveMetadata(makeEmptyMetadata()); - ASSERT(_manager->numberOfRangesToClean() == 0UL); + ChunkRange cr(BSON("key" << 0), BSON("key" << 10)); - ChunkRange cr1 = ChunkRange(BSON("key" << 0), BSON("key" << 10)); - auto notifn = _manager->cleanUpRange(cr1, Date_t{}); + ASSERT_EQ(0UL, _manager->numberOfRangesToClean()); + + auto notifn = _manager->cleanUpRange(cr, Date_t{}); ASSERT(!notifn.ready()); - ASSERT(_manager->numberOfRangesToClean() == 1UL); + ASSERT_EQ(1UL, _manager->numberOfRangesToClean()); + notifn.abandon(); } diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp index e92f24d0468..911792e33bc 100644 --- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp +++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp @@ -116,17 +116,25 @@ protected: return _client.get_ptr(); } + /** + * Inserts the specified docs in 'kNss' and ensures the insert succeeded. 
+     */
+    void insertDocsInShardedCollection(const std::vector<BSONObj>& docs) {
+        if (docs.empty())
+            return;
+
+        client()->insert(kNss.ns(), docs);
+        ASSERT_EQ("", client()->getLastError());
+    }
+
     /**
      * Creates a collection, which contains an index corresponding to kShardKeyPattern and inserts
      * the specified initial documents.
      */
-    void createShardedCollection(std::vector<BSONObj> initialDocs) {
+    void createShardedCollection(const std::vector<BSONObj>& initialDocs) {
         ASSERT(_client->createCollection(kNss.ns()));
         _client->createIndex(kNss.ns(), kShardKeyPattern);
-
-        if (!initialDocs.empty()) {
-            _client->insert(kNss.ns(), initialDocs);
-        }
+        insertDocsInShardedCollection(initialDocs);
     }
 
     /**
@@ -232,13 +240,13 @@ TEST_F(MigrationChunkClonerSourceLegacyTest, CorrectDocumentsFetched) {
     }
 
     // Insert some documents in the chunk range to be included for migration
-    client()->insert(kNss.ns(), createCollectionDocument(150));
-    client()->insert(kNss.ns(), createCollectionDocument(151));
+    insertDocsInShardedCollection({createCollectionDocument(150)});
+    insertDocsInShardedCollection({createCollectionDocument(151)});
 
     // Insert some documents which are outside of the chunk range and should not be included for
     // migration
-    client()->insert(kNss.ns(), createCollectionDocument(90));
-    client()->insert(kNss.ns(), createCollectionDocument(210));
+    insertDocsInShardedCollection({createCollectionDocument(90)});
+    insertDocsInShardedCollection({createCollectionDocument(210)});
 
     // Normally the insert above and the onInsert/onDelete callbacks below will happen under the
     // same lock and write unit of work
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 674213d8d81..791c82f5261 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -1114,7 +1114,7 @@ CollectionShardingState::CleanupNotification MigrationDestinationManager::_noteP
 
     // This can currently happen because drops aren't synchronized with in-migrations. The idea for
     // checking this here is that in the future we shouldn't have this problem.
-    if (!metadata || metadata->getCollVersion().epoch() != _epoch) {
+    if (!metadata->isSharded() || metadata->getCollVersion().epoch() != _epoch) {
         return Status{ErrorCodes::StaleShardVersion,
                       str::stream() << "not noting chunk " << redact(range.toString())
                                     << " as pending because the epoch of "
@@ -1145,7 +1145,7 @@ void MigrationDestinationManager::_forgetPending(OperationContext* opCtx, ChunkR
 
     // This can currently happen because drops aren't synchronized with in-migrations. The idea for
     // checking this here is that in the future we shouldn't have this problem.
- if (!metadata || metadata->getCollVersion().epoch() != _epoch) { + if (!metadata->isSharded() || metadata->getCollVersion().epoch() != _epoch) { log() << "no need to forget pending chunk " << redact(range.toString()) << " because the epoch for " << _nss.ns() << " changed"; return; diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp index 7391648af31..e0855bfbf60 100644 --- a/src/mongo/db/s/migration_source_manager.cpp +++ b/src/mongo/db/s/migration_source_manager.cpp @@ -110,7 +110,7 @@ void refreshRecipientRoutingTable(OperationContext* opCtx, } Status checkCollectionEpochMatches(const ScopedCollectionMetadata& metadata, OID expectedEpoch) { - if (metadata && metadata->getCollVersion().epoch() == expectedEpoch) + if (metadata->isSharded() && metadata->getCollVersion().epoch() == expectedEpoch) return Status::OK(); return {ErrorCodes::IncompatibleShardingMetadata, @@ -118,8 +118,8 @@ Status checkCollectionEpochMatches(const ScopedCollectionMetadata& metadata, OID << "Expected collection epoch: " << expectedEpoch.toString() << ", but found: " - << (metadata ? metadata->getCollVersion().epoch().toString() - : "unsharded collection.")}; + << (metadata->isSharded() ? metadata->getCollVersion().epoch().toString() + : "unsharded collection.")}; } } // namespace @@ -170,7 +170,7 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx, auto metadata = CollectionShardingState::get(opCtx, getNss())->getMetadata(opCtx); uassert(ErrorCodes::IncompatibleShardingMetadata, str::stream() << "cannot move chunks for an unsharded collection", - metadata); + metadata->isSharded()); return std::make_tuple(std::move(metadata), std::move(collectionUUID)); }(); @@ -529,7 +529,7 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC return CollectionShardingState::get(opCtx, getNss())->getMetadata(opCtx); }(); - if (!refreshedMetadata) { + if (!refreshedMetadata->isSharded()) { return {ErrorCodes::NamespaceNotSharded, str::stream() << "Chunk move failed because collection '" << getNss().ns() << "' is no longer sharded. The migration commit error was: " diff --git a/src/mongo/db/s/set_shard_version_command.cpp b/src/mongo/db/s/set_shard_version_command.cpp index acfd347345c..034eec0b9ae 100644 --- a/src/mongo/db/s/set_shard_version_command.cpp +++ b/src/mongo/db/s/set_shard_version_command.cpp @@ -230,10 +230,12 @@ public: boost::optional collLock; collLock.emplace(opCtx->lockState(), nss.ns(), MODE_IS); - auto css = CollectionShardingState::get(opCtx, nss); - const ChunkVersion collectionShardVersion = - (css->getMetadata(opCtx) ? css->getMetadata(opCtx)->getShardVersion() - : ChunkVersion::UNSHARDED()); + auto const css = CollectionShardingState::get(opCtx, nss); + const ChunkVersion collectionShardVersion = [&] { + auto metadata = css->getMetadata(opCtx); + return metadata->isSharded() ? 
metadata->getShardVersion() + : ChunkVersion::UNSHARDED(); + }(); if (requestedVersion.isWriteCompatibleWith(collectionShardVersion)) { // MongoS and MongoD agree on what is the collection's shard version @@ -347,9 +349,9 @@ public: AutoGetCollection autoColl(opCtx, nss, MODE_IS); ChunkVersion currVersion = ChunkVersion::UNSHARDED(); - auto collMetadata = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx); - if (collMetadata) { - currVersion = collMetadata->getShardVersion(); + auto metadata = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx); + if (metadata->isSharded()) { + currVersion = metadata->getShardVersion(); } if (!status.isOK()) { diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp index 53a63ccc122..ae7d67b8253 100644 --- a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp +++ b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp @@ -75,7 +75,7 @@ Status onShardVersionMismatch(OperationContext* opCtx, const auto currentShardVersion = [&] { AutoGetCollection autoColl(opCtx, nss, MODE_IS); const auto currentMetadata = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx); - if (currentMetadata) { + if (currentMetadata->isSharded()) { return currentMetadata->getShardVersion(); } @@ -129,7 +129,8 @@ ChunkVersion forceShardFilteringMetadataRefresh(OperationContext* opCtx, auto metadata = CollectionShardingState::get(opCtx, nss)->getMetadata(opCtx); // We already have newer version - if (metadata && metadata->getCollVersion().epoch() == cm->getVersion().epoch() && + if (metadata->isSharded() && + metadata->getCollVersion().epoch() == cm->getVersion().epoch() && metadata->getCollVersion() >= cm->getVersion()) { LOG(1) << "Skipping refresh of metadata for " << nss << " " << metadata->getCollVersion() << " with an older " << cm->getVersion(); @@ -144,7 +145,7 @@ ChunkVersion forceShardFilteringMetadataRefresh(OperationContext* opCtx, auto metadata = css->getMetadata(opCtx); // We already have newer version - if (metadata && metadata->getCollVersion().epoch() == cm->getVersion().epoch() && + if (metadata->isSharded() && metadata->getCollVersion().epoch() == cm->getVersion().epoch() && metadata->getCollVersion() >= cm->getVersion()) { LOG(1) << "Skipping refresh of metadata for " << nss << " " << metadata->getCollVersion() << " with an older " << cm->getVersion(); diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp index 75c533f34cf..9bf83e498f3 100644 --- a/src/mongo/db/s/shard_server_op_observer.cpp +++ b/src/mongo/db/s/shard_server_op_observer.cpp @@ -214,7 +214,7 @@ void ShardServerOpObserver::onInserts(OperationContext* opCtx, } } - if (metadata) { + if (metadata->isSharded()) { incrementChunkOnInsertOrUpdate( opCtx, nss, *metadata->getChunkManager(), insertedDoc, insertedDoc.objsize()); } @@ -311,7 +311,7 @@ void ShardServerOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateE } } - if (metadata) { + if (metadata->isSharded()) { incrementChunkOnInsertOrUpdate(opCtx, args.nss, *metadata->getChunkManager(), @@ -430,7 +430,8 @@ ShardObserverDeleteState ShardObserverDeleteState::make(OperationContext* opCtx, CollectionShardingState* css, const BSONObj& docToDelete) { auto msm = MigrationSourceManager::get(css); - return {css->getMetadata(opCtx).extractDocumentKey(docToDelete).getOwned(), + auto metadata = css->getMetadata(opCtx); + return {metadata->extractDocumentKey(docToDelete).getOwned(), msm && 
msm->getCloner()->isDocumentInMigratingChunk(docToDelete)}; } diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp index 0b7e287a774..12470da163a 100644 --- a/src/mongo/db/s/split_chunk.cpp +++ b/src/mongo/db/s/split_chunk.cpp @@ -103,7 +103,7 @@ bool checkMetadataForSuccessfulSplitChunk(OperationContext* opCtx, uassert(ErrorCodes::StaleEpoch, str::stream() << "Collection " << nss.ns() << " became unsharded", - metadataAfterSplit); + metadataAfterSplit->isSharded()); auto newChunkBounds(splitKeys); auto startKey = chunkRange.getMin(); -- cgit v1.2.1
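
For context, the calling convention this patch converges on can be sketched as a standalone example. The sketch below is illustrative only, not part of the patch: the helper name exampleFilteringCheck is hypothetical, and an {_id: 1} shard key is assumed (as in the new collection_metadata_filtering_test.cpp), so the extracted document key doubles as the shard key.

    // Sketch only: mirrors the post-patch calling convention. Assumes the
    // caller already holds the collection lock and that opCtx, nss and doc
    // come from the surrounding command code.
    void exampleFilteringCheck(OperationContext* opCtx,
                               const NamespaceString& nss,
                               const BSONObj& doc) {
        auto* const css = CollectionShardingState::get(opCtx, nss);
        const auto metadata = css->getMetadata(opCtx);

        // extractDocumentKey() now lives on CollectionMetadata and is valid
        // for both sharded and unsharded collections.
        const BSONObj docKey = metadata->extractDocumentKey(doc);

        // Filtering methods such as keyBelongsToMe() invariant isSharded(),
        // so the old `if (metadata)` truthiness checks become explicit
        // isSharded() checks. (docKey is a valid shard key here only under
        // the assumed {_id: 1} pattern.)
        if (metadata->isSharded() && metadata->keyBelongsToMe(docKey)) {
            // ... the document is owned by this shard ...
        }
    }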