Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp                      |   2
-rw-r--r--  src/mongo/db/commands/dbcommands.cpp                             |   3
-rw-r--r--  src/mongo/db/commands/distinct.cpp                               |  15
-rw-r--r--  src/mongo/db/exec/update_stage.cpp                               |   8
-rw-r--r--  src/mongo/db/exec/upsert_stage.cpp                               |   2
-rw-r--r--  src/mongo/db/index_builds_coordinator.cpp                        |   2
-rw-r--r--  src/mongo/db/op_observer_impl.cpp                                |   2
-rw-r--r--  src/mongo/db/query/get_executor.cpp                              |   4
-rw-r--r--  src/mongo/db/s/cleanup_orphaned_cmd.cpp                          |  12
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime.cpp                   |  10
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime.h                     |   2
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime_test.cpp              | 104
-rw-r--r--  src/mongo/db/s/collection_sharding_state.h                       |   8
-rw-r--r--  src/mongo/db/s/collection_sharding_state_factory_embedded.cpp    |   2
-rw-r--r--  src/mongo/db/s/collection_sharding_state_factory_standalone.cpp  |   2
-rw-r--r--  src/mongo/db/s/database_sharding_state.cpp                       |   4
-rw-r--r--  src/mongo/db/s/migration_util.cpp                                |  32
17 files changed, 119 insertions, 95 deletions
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index e87efd91d74..ac0d9bcdd2d 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -73,7 +73,7 @@ boost::optional<NamespaceString> getNamespaceFromUUID(OperationContext* opCtx, c
 
 bool isCollectionSharded(OperationContext* opCtx, const NamespaceString& nss) {
     return opCtx->writesAreReplicated() &&
-        CollectionShardingState::get(opCtx, nss)->getCollectionDescription().isSharded();
+        CollectionShardingState::get(opCtx, nss)->getCollectionDescription(opCtx).isSharded();
 }
 
 // From a replicated to an unreplicated collection or vice versa.
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index 514dc83c026..49df53c4224 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -458,7 +458,8 @@ public:
         AutoGetCollectionForReadCommand ctx(opCtx, nss);
         Collection* collection = ctx.getCollection();
 
-        const auto collDesc = CollectionShardingState::get(opCtx, nss)->getCollectionDescription();
+        const auto collDesc =
+            CollectionShardingState::get(opCtx, nss)->getCollectionDescription(opCtx);
 
         if (collDesc.isSharded()) {
             const ShardKeyPattern shardKeyPattern(collDesc.getKeyPattern());
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 7854a3445bf..09451fa653d 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -195,13 +195,14 @@ public:
 
         // Distinct doesn't filter orphan documents so it is not allowed to run on sharded
         // collections in multi-document transactions.
-        uassert(
-            ErrorCodes::OperationNotSupportedInTransaction,
-            "Cannot run 'distinct' on a sharded collection in a multi-document transaction. "
-            "Please see http://dochub.mongodb.org/core/transaction-distinct for a recommended "
-            "alternative.",
-            !opCtx->inMultiDocumentTransaction() ||
-                !CollectionShardingState::get(opCtx, nss)->getCollectionDescription().isSharded());
+        uassert(ErrorCodes::OperationNotSupportedInTransaction,
+                "Cannot run 'distinct' on a sharded collection in a multi-document transaction. "
" + "Please see http://dochub.mongodb.org/core/transaction-distinct for a recommended " + "alternative.", + !opCtx->inMultiDocumentTransaction() || + !CollectionShardingState::get(opCtx, nss) + ->getCollectionDescription(opCtx) + .isSharded()); const ExtensionsCallbackReal extensionsCallback(opCtx, &nss); auto defaultCollation = diff --git a/src/mongo/db/exec/update_stage.cpp b/src/mongo/db/exec/update_stage.cpp index ab30f56867f..dd7f405115e 100644 --- a/src/mongo/db/exec/update_stage.cpp +++ b/src/mongo/db/exec/update_stage.cpp @@ -181,8 +181,8 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco const bool isInsert = false; FieldRefSet immutablePaths; if (isUserInitiatedWrite) { - const auto collDesc = - CollectionShardingState::get(opCtx(), collection()->ns())->getCollectionDescription(); + const auto collDesc = CollectionShardingState::get(opCtx(), collection()->ns()) + ->getCollectionDescription(opCtx()); if (collDesc.isSharded() && !OperationShardingState::isOperationVersioned(opCtx())) { immutablePaths.fillFrom(collDesc.getKeyPatternFields()); } @@ -255,7 +255,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco args.update = logObj; if (isUserInitiatedWrite) { args.criteria = CollectionShardingState::get(opCtx(), collection()->ns()) - ->getCollectionDescription() + ->getCollectionDescription(opCtx()) .extractDocumentKey(newObj); } else { const auto docId = newObj[idFieldName]; @@ -705,7 +705,7 @@ PlanStage::StageState UpdateStage::prepareToRetryWSM(WorkingSetID idToRetry, Wor bool UpdateStage::checkUpdateChangesShardKeyFields(const Snapshotted<BSONObj>& oldObj) { auto* const css = CollectionShardingState::get(opCtx(), collection()->ns()); - const auto collDesc = css->getCollectionDescription(); + const auto collDesc = css->getCollectionDescription(opCtx()); if (!collDesc.isSharded()) { return false; } diff --git a/src/mongo/db/exec/upsert_stage.cpp b/src/mongo/db/exec/upsert_stage.cpp index a157ca9b762..dc81610bb96 100644 --- a/src/mongo/db/exec/upsert_stage.cpp +++ b/src/mongo/db/exec/upsert_stage.cpp @@ -175,7 +175,7 @@ BSONObj UpsertStage::_produceNewDocumentForInsert() { if (!isInternalRequest) { optCollDesc.emplace( CollectionShardingState::get(opCtx(), _params.request->getNamespaceString()) - ->getCollectionDescription()); + ->getCollectionDescription(opCtx())); // If the collection is sharded, add all fields from the shard key to the 'shardKeyPaths' // set. diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp index 8cb25a3c15b..1d11dead00c 100644 --- a/src/mongo/db/index_builds_coordinator.cpp +++ b/src/mongo/db/index_builds_coordinator.cpp @@ -90,7 +90,7 @@ void checkShardKeyRestrictions(OperationContext* opCtx, const BSONObj& newIdxKey) { UncommittedCollections::get(opCtx).invariantHasExclusiveAccessToCollection(opCtx, nss); - const auto collDesc = CollectionShardingState::get(opCtx, nss)->getCollectionDescription(); + const auto collDesc = CollectionShardingState::get(opCtx, nss)->getCollectionDescription(opCtx); if (!collDesc.isSharded()) return; diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp index 8735e82e287..6c755ee2390 100644 --- a/src/mongo/db/op_observer_impl.cpp +++ b/src/mongo/db/op_observer_impl.cpp @@ -260,7 +260,7 @@ OpObserverImpl::DocumentKey OpObserverImpl::getDocumentKey(OperationContext* opC // if running on standalone or primary. 
    // not expected to have the collection metadata cached.
     if (opCtx->writesAreReplicated()) {
-        auto collDesc = CollectionShardingState::get(opCtx, nss)->getCollectionDescription();
+        auto collDesc = CollectionShardingState::get(opCtx, nss)->getCollectionDescription(opCtx);
         if (collDesc.isSharded()) {
             shardKey = dotted_path_support::extractElementsBasedOnTemplate(doc,
                                                                            collDesc.getKeyPattern())
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 9a512acafdb..b1bae642936 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -287,8 +287,8 @@ void fillOutPlannerParams(OperationContext* opCtx,
 
     // If the caller wants a shard filter, make sure we're actually sharded.
     if (plannerParams->options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
-        auto collDesc =
-            CollectionShardingState::get(opCtx, canonicalQuery->nss())->getCollectionDescription();
+        auto collDesc = CollectionShardingState::get(opCtx, canonicalQuery->nss())
+                            ->getCollectionDescription(opCtx);
         if (collDesc.isSharded()) {
             plannerParams->shardKey = collDesc.getKeyPattern();
         } else {
diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
index 97b78e600ba..4278d442a5f 100644
--- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
@@ -85,9 +85,9 @@ CleanupResult cleanupOrphanedData(OperationContext* opCtx,
         }
         collectionUuid.emplace(autoColl.getCollection()->uuid());
 
-        auto* const css = CollectionShardingRuntime::get(opCtx, ns);
-        const auto collDesc = css->getCollectionDescription();
-        if (!collDesc.isSharded()) {
+        auto* const csr = CollectionShardingRuntime::get(opCtx, ns);
+        const auto optCollDescr = csr->getCurrentMetadataIfKnown();
+        if (!optCollDescr || !optCollDescr->isSharded()) {
             LOGV2(4416001,
                   "cleanupOrphaned skipping waiting for orphaned data cleanup because "
                   "{namespace} is not sharded",
@@ -96,13 +96,13 @@ CleanupResult cleanupOrphanedData(OperationContext* opCtx,
                   "namespace"_attr = ns.ns());
             return CleanupResult::kDone;
         }
-        range.emplace(collDesc.getMinKey(), collDesc.getMaxKey());
+        range.emplace(optCollDescr->getMinKey(), optCollDescr->getMaxKey());
 
         // Though the 'startingFromKey' parameter is not used as the min key of the range to
         // wait for, we still validate that 'startingFromKey' in the same way as the original
         // cleanupOrphaned logic did if 'startingFromKey' is present.
-        BSONObj keyPattern = collDesc.getKeyPattern();
-        if (!startingFromKeyConst.isEmpty() && !collDesc.isValidKey(startingFromKeyConst)) {
+        BSONObj keyPattern = optCollDescr->getKeyPattern();
+        if (!startingFromKeyConst.isEmpty() && !optCollDescr->isValidKey(startingFromKeyConst)) {
             LOGV2_ERROR_OPTIONS(
                 4416002,
                 {logv2::UserAssertAfterLog(ErrorCodes::OrphanedRangeCleanUpFailed)},
diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp
index 92722a29439..54b7cce9178 100644
--- a/src/mongo/db/s/collection_sharding_runtime.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime.cpp
@@ -123,10 +123,14 @@ ScopedCollectionFilter CollectionShardingRuntime::getOwnershipFilter(
         repl::ReadConcernArgs::get(opCtx).getArgsAtClusterTime());
 }
 
-ScopedCollectionDescription CollectionShardingRuntime::getCollectionDescription() {
+ScopedCollectionDescription CollectionShardingRuntime::getCollectionDescription(
+    OperationContext* opCtx) {
+    auto& oss = OperationShardingState::get(opCtx);
     // If the server has been started with --shardsvr, but hasn't been added to a cluster we should
-    // consider all collections as unsharded.
-    if (!ShardingState::get(_serviceContext)->enabled()) {
+    // consider all collections as unsharded. Also, return unsharded if no shard version or db
+    // version is present on the context.
+    if (!ShardingState::get(_serviceContext)->enabled() ||
+        (!oss.hasShardVersion() && !oss.hasDbVersion())) {
         return {kUnshardedCollection};
     }
 
diff --git a/src/mongo/db/s/collection_sharding_runtime.h b/src/mongo/db/s/collection_sharding_runtime.h
index 45511c3a19a..6908536c817 100644
--- a/src/mongo/db/s/collection_sharding_runtime.h
+++ b/src/mongo/db/s/collection_sharding_runtime.h
@@ -87,7 +87,7 @@ public:
     ScopedCollectionFilter getOwnershipFilter(OperationContext* opCtx,
                                               OrphanCleanupPolicy orphanCleanupPolicy) override;
 
-    ScopedCollectionDescription getCollectionDescription() override;
+    ScopedCollectionDescription getCollectionDescription(OperationContext* opCtx) override;
     ScopedCollectionDescription getCollectionDescription_DEPRECATED() override;
 
     void checkShardVersionOrThrow(OperationContext* opCtx) override;
diff --git a/src/mongo/db/s/collection_sharding_runtime_test.cpp b/src/mongo/db/s/collection_sharding_runtime_test.cpp
index 47d0edc786a..c20f65db9d0 100644
--- a/src/mongo/db/s/collection_sharding_runtime_test.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime_test.cpp
@@ -33,6 +33,7 @@
 #include "mongo/db/db_raii.h"
 #include "mongo/db/dbdirectclient.h"
 #include "mongo/db/s/collection_sharding_runtime.h"
+#include "mongo/db/s/operation_sharding_state.h"
 #include "mongo/db/s/wait_for_majority_service.h"
 #include "mongo/s/shard_server_test_fixture.h"
 #include "mongo/util/fail_point.h"
@@ -46,20 +47,31 @@ const BSONObj kShardKeyPattern = BSON(kShardKey << 1);
 
 using CollectionShardingRuntimeTest = ShardServerTestFixture;
 
-CollectionMetadata makeShardedMetadata(UUID uuid = UUID::gen()) {
+CollectionMetadata makeShardedMetadata(OperationContext* opCtx, UUID uuid = UUID::gen()) {
     const OID epoch = OID::gen();
     auto range = ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY));
     auto chunk = ChunkType(kTestNss, std::move(range), ChunkVersion(1, 0, epoch), ShardId("other"));
     auto rt = RoutingTableHistory::makeNew(
         kTestNss, uuid, kShardKeyPattern, nullptr, false, epoch, {std::move(chunk)});
     std::shared_ptr<ChunkManager> cm = std::make_shared<ChunkManager>(rt, boost::none);
-    return CollectionMetadata(std::move(cm), ShardId("this"));
+
+    auto& oss = OperationShardingState::get(opCtx);
+    if (!oss.hasShardVersion()) {
+        const auto version = cm->getVersion(ShardId("0"));
+        BSONObjBuilder builder;
+        version.appendToCommand(&builder);
+        oss.initializeClientRoutingVersionsFromCommand(kTestNss, builder.obj());
+    }
+    return CollectionMetadata(std::move(cm), ShardId("0"));
 }
 
 TEST_F(CollectionShardingRuntimeTest,
-       GetCollectionDescriptionThrowsStaleConfigBeforeSetFilteringMetadataIsCalled) {
+       GetCollectionDescriptionThrowsStaleConfigBeforeSetFilteringMetadataIsCalledAndNoOSSSet) {
+    OperationContext* opCtx = operationContext();
     CollectionShardingRuntime csr(getServiceContext(), kTestNss, executor());
-    ASSERT_THROWS_CODE(csr.getCollectionDescription(), DBException, ErrorCodes::StaleConfig);
+    ASSERT_FALSE(csr.getCollectionDescription(opCtx).isSharded());
+    makeShardedMetadata(opCtx);
+    ASSERT_THROWS_CODE(csr.getCollectionDescription(opCtx), DBException, ErrorCodes::StaleConfig);
 }
 
 TEST_F(
@@ -67,14 +79,15 @@ TEST_F(
     GetCollectionDescriptionReturnsUnshardedAfterSetFilteringMetadataIsCalledWithUnshardedMetadata) {
     CollectionShardingRuntime csr(getServiceContext(), kTestNss, executor());
     csr.setFilteringMetadata(operationContext(), CollectionMetadata());
-    ASSERT_FALSE(csr.getCollectionDescription().isSharded());
+    ASSERT_FALSE(csr.getCollectionDescription(operationContext()).isSharded());
 }
 
 TEST_F(CollectionShardingRuntimeTest,
       GetCollectionDescriptionReturnsShardedAfterSetFilteringMetadataIsCalledWithShardedMetadata) {
     CollectionShardingRuntime csr(getServiceContext(), kTestNss, executor());
-    csr.setFilteringMetadata(operationContext(), makeShardedMetadata());
-    ASSERT_TRUE(csr.getCollectionDescription().isSharded());
+    OperationContext* opCtx = operationContext();
+    csr.setFilteringMetadata(opCtx, makeShardedMetadata(opCtx));
+    ASSERT_TRUE(csr.getCollectionDescription(opCtx).isSharded());
 }
 
 TEST_F(CollectionShardingRuntimeTest,
@@ -100,8 +113,9 @@ TEST_F(
     GetCurrentMetadataIfKnownReturnsShardedAfterSetFilteringMetadataIsCalledWithShardedMetadata) {
     CollectionShardingRuntime csr(getServiceContext(), kTestNss, executor());
-    auto metadata = makeShardedMetadata();
-    csr.setFilteringMetadata(operationContext(), metadata);
+    OperationContext* opCtx = operationContext();
+    auto metadata = makeShardedMetadata(opCtx);
+    csr.setFilteringMetadata(opCtx, metadata);
     const auto optCurrMetadata = csr.getCurrentMetadataIfKnown();
     ASSERT_TRUE(optCurrMetadata);
     ASSERT_TRUE(optCurrMetadata->isSharded());
@@ -111,7 +125,8 @@ TEST_F(
 TEST_F(CollectionShardingRuntimeTest,
        GetCurrentMetadataIfKnownReturnsNoneAfterClearFilteringMetadataIsCalled) {
     CollectionShardingRuntime csr(getServiceContext(), kTestNss, executor());
-    csr.setFilteringMetadata(operationContext(), makeShardedMetadata());
+    OperationContext* opCtx = operationContext();
+    csr.setFilteringMetadata(opCtx, makeShardedMetadata(opCtx));
     csr.clearFilteringMetadata();
     ASSERT_FALSE(csr.getCurrentMetadataIfKnown());
 }
@@ -119,12 +134,13 @@ TEST_F(CollectionShardingRuntimeTest,
 TEST_F(CollectionShardingRuntimeTest, SetFilteringMetadataWithSameUUIDKeepsSameMetadataManager) {
     CollectionShardingRuntime csr(getServiceContext(), kTestNss, executor());
     ASSERT_EQ(csr.getNumMetadataManagerChanges_forTest(), 0);
-    auto metadata = makeShardedMetadata();
-    csr.setFilteringMetadata(operationContext(), metadata);
+    OperationContext* opCtx = operationContext();
+    auto metadata = makeShardedMetadata(opCtx);
+    csr.setFilteringMetadata(opCtx, metadata);
     // Should create a new MetadataManager object, bumping the count to 1.
    ASSERT_EQ(csr.getNumMetadataManagerChanges_forTest(), 1);
 
     // Set it again.
-    csr.setFilteringMetadata(operationContext(), metadata);
+    csr.setFilteringMetadata(opCtx, metadata);
 
     // Should not have reset metadata, so the counter should still be 1.
     ASSERT_EQ(csr.getNumMetadataManagerChanges_forTest(), 1);
 }
@@ -132,18 +148,19 @@ TEST_F(CollectionShardingRuntimeTest, SetFilteringMetadataWithSameUUIDKeepsSameM
 TEST_F(CollectionShardingRuntimeTest,
        SetFilteringMetadataWithDifferentUUIDReplacesPreviousMetadataManager) {
     CollectionShardingRuntime csr(getServiceContext(), kTestNss, executor());
-
-    auto metadata = makeShardedMetadata();
-    csr.setFilteringMetadata(operationContext(), metadata);
+    OperationContext* opCtx = operationContext();
+    auto metadata = makeShardedMetadata(opCtx);
+    csr.setFilteringMetadata(opCtx, metadata);
     ASSERT_EQ(csr.getNumMetadataManagerChanges_forTest(), 1);
 
     // Set it again with a different metadata object (UUID is generated randomly in
     // makeShardedMetadata()).
-    auto newMetadata = makeShardedMetadata();
-    csr.setFilteringMetadata(operationContext(), newMetadata);
+    auto newMetadata = makeShardedMetadata(opCtx);
+    csr.setFilteringMetadata(opCtx, newMetadata);
 
     ASSERT_EQ(csr.getNumMetadataManagerChanges_forTest(), 2);
-    ASSERT(csr.getCollectionDescription().uuidMatches(*newMetadata.getChunkManager()->getUUID()));
+    ASSERT(
+        csr.getCollectionDescription(opCtx).uuidMatches(*newMetadata.getChunkManager()->getUUID()));
 }
 
 /**
@@ -206,12 +223,13 @@ TEST_F(CollectionShardingRuntimeWithRangeDeleterTest,
 TEST_F(CollectionShardingRuntimeWithRangeDeleterTest,
        WaitForCleanReturnsErrorIfCollectionUUIDDoesNotMatchFilteringMetadata) {
-    auto metadata = makeShardedMetadata(uuid());
-    csr().setFilteringMetadata(operationContext(), metadata);
+    OperationContext* opCtx = operationContext();
+    auto metadata = makeShardedMetadata(opCtx, uuid());
+    csr().setFilteringMetadata(opCtx, metadata);
     auto randomUuid = UUID::gen();
 
     auto status = CollectionShardingRuntime::waitForClean(
-        operationContext(),
+        opCtx,
         kTestNss,
         randomUuid,
         ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)));
@@ -220,14 +238,12 @@ TEST_F(CollectionShardingRuntimeWithRangeDeleterTest,
        WaitForCleanReturnsOKIfNoDeletionsAreScheduled) {
-    auto metadata = makeShardedMetadata(uuid());
-    csr().setFilteringMetadata(operationContext(), metadata);
+    OperationContext* opCtx = operationContext();
+    auto metadata = makeShardedMetadata(opCtx, uuid());
+    csr().setFilteringMetadata(opCtx, metadata);
 
     auto status = CollectionShardingRuntime::waitForClean(
-        operationContext(),
-        kTestNss,
-        uuid(),
-        ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)));
+        opCtx, kTestNss, uuid(), ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)));
 
     ASSERT_OK(status);
 }
@@ -236,21 +252,19 @@ TEST_F(CollectionShardingRuntimeWithRangeDeleterTest,
        WaitForCleanBlocksBehindOneScheduledDeletion) {
     // Enable fail point to suspendRangeDeletion.
    globalFailPointRegistry().find("suspendRangeDeletion")->setMode(FailPoint::alwaysOn);
+    OperationContext* opCtx = operationContext();
 
-    auto metadata = makeShardedMetadata(uuid());
-    csr().setFilteringMetadata(operationContext(), metadata);
+    auto metadata = makeShardedMetadata(opCtx, uuid());
+    csr().setFilteringMetadata(opCtx, metadata);
 
     auto cleanupComplete =
         csr().cleanUpRange(ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)),
                            boost::none,
                            CollectionShardingRuntime::CleanWhen::kNow);
 
-    operationContext()->setDeadlineAfterNowBy(Milliseconds(100), ErrorCodes::MaxTimeMSExpired);
+    opCtx->setDeadlineAfterNowBy(Milliseconds(100), ErrorCodes::MaxTimeMSExpired);
     auto status = CollectionShardingRuntime::waitForClean(
-        operationContext(),
-        kTestNss,
-        uuid(),
-        ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)));
+        opCtx, kTestNss, uuid(), ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)));
 
     ASSERT_EQ(status.code(), ErrorCodes::MaxTimeMSExpired);
 
@@ -260,8 +274,9 @@ TEST_F(CollectionShardingRuntimeWithRangeDeleterTest,
 
 TEST_F(CollectionShardingRuntimeWithRangeDeleterTest,
        WaitForCleanBlocksBehindAllScheduledDeletions) {
-    auto metadata = makeShardedMetadata(uuid());
-    csr().setFilteringMetadata(operationContext(), metadata);
+    OperationContext* opCtx = operationContext();
+    auto metadata = makeShardedMetadata(opCtx, uuid());
+    csr().setFilteringMetadata(opCtx, metadata);
 
     const auto middleKey = 5;
 
@@ -276,10 +291,7 @@ TEST_F(CollectionShardingRuntimeWithRangeDeleterTest,
                            CollectionShardingRuntime::CleanWhen::kNow);
 
     auto status = CollectionShardingRuntime::waitForClean(
-        operationContext(),
-        kTestNss,
-        uuid(),
-        ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)));
+        opCtx, kTestNss, uuid(), ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)));
 
     // waitForClean should block until both cleanup tasks have run. This is a best-effort check,
     // since even if it did not block, it is possible that the cleanup tasks could complete before
@@ -292,8 +304,9 @@ TEST_F(CollectionShardingRuntimeWithRangeDeleterTest,
 
 TEST_F(CollectionShardingRuntimeWithRangeDeleterTest,
        WaitForCleanReturnsOKAfterSuccessfulDeletion) {
-    auto metadata = makeShardedMetadata(uuid());
-    csr().setFilteringMetadata(operationContext(), metadata);
+    OperationContext* opCtx = operationContext();
+    auto metadata = makeShardedMetadata(opCtx, uuid());
+    csr().setFilteringMetadata(opCtx, metadata);
 
     auto cleanupComplete =
         csr().cleanUpRange(ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)),
@@ -301,10 +314,7 @@ TEST_F(CollectionShardingRuntimeWithRangeDeleterTest,
                            CollectionShardingRuntime::CleanWhen::kNow);
 
     auto status = CollectionShardingRuntime::waitForClean(
-        operationContext(),
-        kTestNss,
-        uuid(),
-        ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)));
+        opCtx, kTestNss, uuid(), ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)));
 
     ASSERT_OK(status);
     ASSERT(cleanupComplete.isReady());
diff --git a/src/mongo/db/s/collection_sharding_state.h b/src/mongo/db/s/collection_sharding_state.h
index 00f10d6761e..4e79c7d172e 100644
--- a/src/mongo/db/s/collection_sharding_state.h
+++ b/src/mongo/db/s/collection_sharding_state.h
@@ -87,9 +87,11 @@ public:
      * If the shard currently doesn't know whether the collection is sharded or not, it will throw
      * StaleShardVersion.
      *
+     * If the request doesn't have a shard version all collections will be treated as UNSHARDED.
+     *
      * The returned object *is not safe* to access after the collection lock has been dropped.
      */
-    virtual ScopedCollectionDescription getCollectionDescription() = 0;
+    virtual ScopedCollectionDescription getCollectionDescription(OperationContext* opCtx) = 0;
 
     // TODO (SERVER-32198): This method must not be used in any new code because it does not provide
     // the necessary guarantees that getCollectionDescription above does. Specifically, it silently
@@ -113,6 +115,8 @@ public:
      * destroyed. The intended users of this mode are read operations, which need to yield the
      * collection lock, but still perform filtering.
      *
+     * If the request doesn't have a shard version all collections will be treated as UNSHARDED.
+     *
      * Use 'getCollectionDescription' for other cases, like obtaining information about
      * sharding-related properties of the collection are necessary that won't change under
      * collection IX/IS lock (e.g., isSharded or the shard key).
@@ -127,6 +131,8 @@ public:
      * Checks whether the shard version in the operation context is compatible with the shard
      * version of the collection and if not, throws StaleConfigException populated with the received
      * and wanted versions.
+     *
+     * If the request is not versioned all collections will be treated as UNSHARDED.
      */
     virtual void checkShardVersionOrThrow(OperationContext* opCtx) = 0;
 
diff --git a/src/mongo/db/s/collection_sharding_state_factory_embedded.cpp b/src/mongo/db/s/collection_sharding_state_factory_embedded.cpp
index e05f9a1ac94..eb1ac0e9a30 100644
--- a/src/mongo/db/s/collection_sharding_state_factory_embedded.cpp
+++ b/src/mongo/db/s/collection_sharding_state_factory_embedded.cpp
@@ -53,7 +53,7 @@ const auto kUnshardedCollection = std::make_shared<UnshardedCollection>();
 
 class CollectionShardingStateEmbedded final : public CollectionShardingState {
 public:
-    ScopedCollectionDescription getCollectionDescription() override {
+    ScopedCollectionDescription getCollectionDescription(OperationContext* opCtx) override {
         return {kUnshardedCollection};
     }
     ScopedCollectionDescription getCollectionDescription_DEPRECATED() override {
diff --git a/src/mongo/db/s/collection_sharding_state_factory_standalone.cpp b/src/mongo/db/s/collection_sharding_state_factory_standalone.cpp
index 47ff950444f..d3c81b7a5cc 100644
--- a/src/mongo/db/s/collection_sharding_state_factory_standalone.cpp
+++ b/src/mongo/db/s/collection_sharding_state_factory_standalone.cpp
@@ -52,7 +52,7 @@ const auto kUnshardedCollection = std::make_shared<UnshardedCollection>();
 
 class CollectionShardingStateStandalone final : public CollectionShardingState {
 public:
-    ScopedCollectionDescription getCollectionDescription() override {
+    ScopedCollectionDescription getCollectionDescription(OperationContext* opCtx) override {
         return {kUnshardedCollection};
     }
     ScopedCollectionDescription getCollectionDescription_DEPRECATED() override {
diff --git a/src/mongo/db/s/database_sharding_state.cpp b/src/mongo/db/s/database_sharding_state.cpp
index 99b81682df7..f7c31837fcb 100644
--- a/src/mongo/db/s/database_sharding_state.cpp
+++ b/src/mongo/db/s/database_sharding_state.cpp
@@ -147,10 +147,10 @@ void DatabaseShardingState::checkDbVersion(OperationContext* opCtx, DSSLock&) co
     }
 
     uassert(StaleDbRoutingVersion(_dbName, *clientDbVersion, boost::none),
-            "don't know dbVersion",
+            str::stream() << "don't know dbVersion for database " << _dbName,
            _dbVersion);
 
     uassert(StaleDbRoutingVersion(_dbName, *clientDbVersion, *_dbVersion),
-            "dbVersion mismatch",
+            str::stream() << "dbVersion mismatch for database " << _dbName,
            databaseVersion::equal(*clientDbVersion, *_dbVersion));
 }
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index 57bbe3f635a..ea0ed0fa467 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -280,11 +280,12 @@ ExecutorFuture<void> submitRangeDeletionTask(OperationContext* opCtx,
     return ExecutorFuture<void>(getMigrationUtilExecutor())
         .then([=] {
             auto deletionTaskUuidMatchesFilteringMetadataUuid =
-                [](CollectionShardingRuntime* csr, const RangeDeletionTask& deletionTask) {
-                    return csr->getCurrentMetadataIfKnown() &&
-                        csr->getCollectionDescription().isSharded() &&
-                        csr->getCollectionDescription().uuidMatches(
-                            deletionTask.getCollectionUuid());
+                [](OperationContext* opCtx,
+                   CollectionShardingRuntime* csr,
+                   const RangeDeletionTask& deletionTask) {
+                    auto optCollDescr = csr->getCurrentMetadataIfKnown();
+                    return optCollDescr && optCollDescr->isSharded() &&
+                        optCollDescr->uuidMatches(deletionTask.getCollectionUuid());
                 };
 
             ThreadClient tc(kRangeDeletionThreadName, serviceContext);
@@ -307,22 +308,22 @@ ExecutorFuture<void> submitRangeDeletionTask(OperationContext* opCtx,
                 boost::optional<AutoGetCollection> autoColl;
                 autoColl.emplace(opCtx, deletionTask.getNss(), MODE_IS);
                 auto csr = CollectionShardingRuntime::get(opCtx, deletionTask.getNss());
-                if (!deletionTaskUuidMatchesFilteringMetadataUuid(csr, deletionTask)) {
+                if (!deletionTaskUuidMatchesFilteringMetadataUuid(opCtx, csr, deletionTask)) {
                     // If the collection's filtering metadata is not known, is unsharded, or its
                     // UUID does not match the UUID of the deletion task, force a filtering metadata
                     // refresh once, because this node may have just stepped up and therefore may
                     // have a stale cache.
+                    auto optCollDescr = csr->getCurrentMetadataIfKnown();
                     LOGV2(22024,
                           "Filtering metadata for this range deletion task may be outdated; "
                           "forcing refresh",
                           "deletionTask"_attr = redact(deletionTask.toBSON()),
                           "error"_attr =
-                              (csr->getCurrentMetadataIfKnown()
-                                   ? (csr->getCollectionDescription().isSharded()
-                                          ? "Collection has UUID that does not match UUID "
-                                            "of the deletion task"
-                                          : "Collection is unsharded")
-                                   : "Collection's sharding state is not known"),
+                              (optCollDescr ? (optCollDescr->isSharded()
+                                                   ? "Collection has UUID that does not match "
+                                                     "UUID of the deletion task"
+                                                   : "Collection is unsharded")
+                                            : "Collection's sharding state is not known"),
                           "namespace"_attr = deletionTask.getNss(),
                           "migrationId"_attr = deletionTask.getId());
 
@@ -335,16 +336,17 @@ ExecutorFuture<void> submitRangeDeletionTask(OperationContext* opCtx,
                 auto csr = CollectionShardingRuntime::get(opCtx, deletionTask.getNss());
                 // Keep the collection metadata from changing for the rest of this scope.
                 auto csrLock = CollectionShardingRuntime::CSRLock::lockShared(opCtx, csr);
+                auto optCollDescr = csr->getCurrentMetadataIfKnown();
                 uassert(ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist,
                         str::stream()
                             << "Even after forced refresh, filtering metadata for namespace in "
                               "deletion task "
-                            << (csr->getCurrentMetadataIfKnown()
-                                    ? (csr->getCollectionDescription().isSharded()
+                            << (optCollDescr
+                                    ? (optCollDescr->isSharded()
                                            ? " has UUID that does not match UUID of the deletion task"
                                            : " is unsharded")
                                    : " is not known"),
-                        deletionTaskUuidMatchesFilteringMetadataUuid(csr, deletionTask));
+                        deletionTaskUuidMatchesFilteringMetadataUuid(opCtx, csr, deletionTask));
 
                 LOGV2(22026,
                       "Submitting range deletion task",
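
The hunks above all follow the same calling pattern: getCollectionDescription() now takes the OperationContext so that CollectionShardingRuntime can consult OperationShardingState, and a request that carries neither a shard version nor a db version is treated as UNSHARDED instead of throwing StaleConfig. As a rough sketch only (not part of this patch; the helper name and the assumption that the caller already holds a collection lock are hypothetical), a caller of the new overload would look like this:

// Illustrative sketch only -- not part of this patch. The function name
// 'extractDocumentKeyForDoc' and the locking assumption are hypothetical;
// the CollectionShardingState / ScopedCollectionDescription calls are the
// ones exercised by the hunks above.
#include "mongo/bson/bsonobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/s/collection_sharding_state.h"

namespace mongo {

// Returns the document key for 'doc' using the opCtx-aware overload. An
// unversioned request now yields an UNSHARDED description here rather than
// a StaleConfig exception.
BSONObj extractDocumentKeyForDoc(OperationContext* opCtx,
                                 const NamespaceString& nss,
                                 const BSONObj& doc) {
    auto* css = CollectionShardingState::get(opCtx, nss);
    // Was: css->getCollectionDescription() before this change.
    const auto collDesc = css->getCollectionDescription(opCtx);
    return collDesc.extractDocumentKey(doc);
}

}  // namespace mongo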