Diffstat (limited to 'src/mongo/s')
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp  132
-rw-r--r--  src/mongo/s/catalog_cache.cpp                          14
-rw-r--r--  src/mongo/s/catalog_cache_refresh_test.cpp             16
3 files changed, 20 insertions, 142 deletions
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 7b0c7cc57ad..c3b92012b0e 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -157,16 +157,12 @@ AggregateCommandRequest makeCollectionAndChunksAggregation(OperationContext* opC
     stages.emplace_back(DocumentSourceMatch::create(
         Doc{{CollectionType::kNssFieldName, nss.toString()}}.toBson(), expCtx));
 
-    // 2. Four $unionWith stages, each one of them guarded by a mutually exclusive condition on
-    // metadata format ('timestamp' exists) and whether the refresh is incremental ('lastmodEpoch'
-    // matches sinceVersion.epoch), so that only a single one of them will possibly execute their
-    // $lookup stage. This is necessary because the query optimizer is not able to use indexes when
-    // a $match inside a $lookup includes a $cond operator. Also note that depending on the metadata
-    // format (indicated by the presence of 'timestamp'), we have different guarantees about what
-    // indexes are present (ns_1_lastmod_1 or uuid_1_lastmod_1), so we must avoid possibly executing
-    // a $lookup that would need an inexistent index, even if it was to return empty results.
+    // 2. Two $unionWith stages guarded by a mutually exclusive condition on whether the refresh is
+    // incremental ('lastmodEpoch' matches sinceVersion.epoch), so that only a single one of them
+    // will possibly execute their $lookup stage. This is necessary because the query optimizer is
+    // not able to use indexes when a $match inside a $lookup includes a $cond operator.
     //
-    // The $lookup stages get the config.chunks documents according to the metadata format and the
+    // The $lookup stages get the config.chunks documents according to the
     // type of refresh (incremental or full), sorted by ascending 'lastmod'. The $lookup is
     // immediately followed by $unwind to take advantage of the $lookup + $unwind coalescence
    // optimization which avoids creating large intermediate documents.
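Aside on the guard pattern described in the comment above: the two surviving $unionWith
branches remain mutually exclusive because one filters on { lastmodEpoch: { $eq: <epoch> } }
and the other on { lastmodEpoch: { $ne: <epoch> } }, so at most one branch ever reaches its
$lookup, and each $match stays a plain (indexable) predicate rather than a $cond. Below is a
minimal sketch of that guard construction, written against the public bsoncxx builder rather
than the server's internal Doc/Arr/Value aliases; the builder choice is an assumption for
illustration only, not what this file compiles against:

    #include <bsoncxx/builder/basic/document.hpp>
    #include <bsoncxx/json.hpp>
    #include <bsoncxx/oid.hpp>
    #include <iostream>

    using bsoncxx::builder::basic::kvp;
    using bsoncxx::builder::basic::make_document;

    // incremental=true emits {$eq: epoch}; incremental=false emits {$ne: epoch},
    // mirroring the lastmodEpochMatch computed inside buildUnionWithFn.
    bsoncxx::document::value epochGuard(bool incremental, const bsoncxx::oid& sinceEpoch) {
        return make_document(
            kvp("$match",
                make_document(kvp("lastmodEpoch",
                                  make_document(kvp(incremental ? "$eq" : "$ne", sinceEpoch))))));
    }

    int main() {
        bsoncxx::oid epoch;  // stand-in for sinceVersion.epoch()
        std::cout << bsoncxx::to_json(epochGuard(true, epoch)) << '\n';   // {$eq: <epoch>} guard
        std::cout << bsoncxx::to_json(epochGuard(false, epoch)) << '\n';  // {$ne: <epoch>} guard
    }

Printing both guards shows the same epoch value under $eq in one branch and $ne in the other,
which is what makes the branches mutually exclusive.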
@@ -183,47 +179,6 @@ AggregateCommandRequest makeCollectionAndChunksAggregation(OperationContext* opC
     //             coll: "collections",
     //             pipeline: [
     //                 { $match: { _id: <nss> } },
-    //                 { $match: { timestamp: { $exists: 0 } } },
-    //                 { $match: { lastmodEpoch: <sinceVersion.epoch> } },
-    //                 {
-    //                     $lookup: {
-    //                         from: "chunks",
-    //                         as: "chunks",
-    //                         let: { local_ns: "$_id" },
-    //                         pipeline: [
-    //                             {
-    //                                 $match: {
-    //                                     $expr: {
-    //                                         $eq: ["$ns", "$$local_ns"],
-    //                                     },
-    //                                 }
-    //                             },
-    //                             { $match: { lastmod: { $gte: <sinceVersion> } } },
-    //                             {
-    //                                 $sort: {
-    //                                     lastmod: 1
-    //                                 }
-    //                             }
-    //                         ]
-    //                     }
-    //                 },
-    //                 {
-    //                     $unwind: {
-    //                         path: "$chunks"
-    //                     }
-    //                 },
-    //                 {
-    //                     $project: { _id: false, chunks: true }
-    //                 }
-    //             ]
-    //         }
-    //     },
-    //     {
-    //         $unionWith: {
-    //             coll: "collections",
-    //             pipeline: [
-    //                 { $match: { _id: <nss> } },
-    //                 { $match: { timestamp: { $exists: 1 } } },
     //                 { $match: { lastmodEpoch: <sinceVersion.epoch> } },
     //                 {
     //                     $lookup: {
@@ -263,46 +218,6 @@ AggregateCommandRequest makeCollectionAndChunksAggregation(OperationContext* opC
     //             coll: "collections",
     //             pipeline: [
     //                 { $match: { _id: <nss> } },
-    //                 { $match: { timestamp: { $exists: 0 } } },
-    //                 { $match: { lastmodEpoch: { $ne: <sinceVersion.epoch> } } },
-    //                 {
-    //                     $lookup: {
-    //                         from: "chunks",
-    //                         as: "chunks",
-    //                         let: { local_ns: "$_id" },
-    //                         pipeline: [
-    //                             {
-    //                                 $match: {
-    //                                     $expr: {
-    //                                         $eq: ["$ns", "$$local_ns"],
-    //                                     },
-    //                                 }
-    //                             },
-    //                             {
-    //                                 $sort: {
-    //                                     lastmod: 1
-    //                                 }
-    //                             }
-    //                         ]
-    //                     }
-    //                 },
-    //                 {
-    //                     $unwind: {
-    //                         path: "$chunks"
-    //                     }
-    //                 },
-    //                 {
-    //                     $project: { _id: false, chunks: true }
-    //                 },
-    //             ]
-    //         }
-    //     },
-    //     {
-    //         $unionWith: {
-    //             coll: "collections",
-    //             pipeline: [
-    //                 { $match: { _id: <nss> } },
-    //                 { $match: { timestamp: { $exists: 1 } } },
     //                 { $match: { lastmodEpoch: { $ne: <sinceVersion.epoch> } } },
     //                 {
     //                     $lookup: {
@@ -336,15 +251,13 @@ AggregateCommandRequest makeCollectionAndChunksAggregation(OperationContext* opC
     //             ]
     //         }
     //     }
 
-    const auto buildUnionWithFn = [&](bool withUUID, bool incremental) {
+    const auto buildUnionWithFn = [&](bool incremental) {
         const auto lastmodEpochMatch = Doc{{incremental ? "$eq" : "$ne", sinceVersion.epoch()}};
-        const auto letExpr = withUUID ? Doc{{"local_uuid", "$" + CollectionType::kUuidFieldName}}
-                                      : Doc{{"local_ns", "$" + CollectionType::kNssFieldName}};
+        const auto letExpr = Doc{{"local_uuid", "$" + CollectionType::kUuidFieldName}};
 
-        const auto eqNsOrUuidExpr = withUUID
-            ? Arr{Value{"$" + ChunkType::collectionUUID.name()}, Value{"$$local_uuid"_sd}}
-            : Arr{Value{"$" + ChunkType::ns.name()}, Value{"$$local_ns"_sd}};
+        const auto uuidExpr =
+            Arr{Value{"$" + ChunkType::collectionUUID.name()}, Value{"$$local_uuid"_sd}};
 
         constexpr auto chunksLookupOutputFieldName = "chunks"_sd;
 
@@ -354,7 +267,7 @@ AggregateCommandRequest makeCollectionAndChunksAggregation(OperationContext* opC
             {"as", chunksLookupOutputFieldName},
             {"let", letExpr},
             {"pipeline",
-             Arr{Value{Doc{{"$match", Doc{{"$expr", Doc{{"$eq", eqNsOrUuidExpr}}}}}}},
+             Arr{Value{Doc{{"$match", Doc{{"$expr", Doc{{"$eq", uuidExpr}}}}}}},
                 incremental
                     ? Value{Doc{{"$match",
                                  Doc{{ChunkType::lastmod.name(),
@@ -367,9 +280,6 @@ AggregateCommandRequest makeCollectionAndChunksAggregation(OperationContext* opC
             {"coll", CollectionType::ConfigNS.coll()},
             {"pipeline",
              Arr{Value{Doc{{"$match", Doc{{CollectionType::kNssFieldName, nss.toString()}}}}},
-                 Value{
-                     Doc{{"$match",
-                          Doc{{CollectionType::kTimestampFieldName, Doc{{"$exists", withUUID}}}}}}},
                  Value{Doc{{"$match", Doc{{CollectionType::kEpochFieldName, lastmodEpochMatch}}}}},
                  Value{Doc{{"$lookup", lookupPipeline}}},
                  Value{Doc{{"$unwind", Doc{{"path", "$" + chunksLookupOutputFieldName}}}}},
@@ -377,30 +287,12 @@ AggregateCommandRequest makeCollectionAndChunksAggregation(OperationContext* opC
                     {"$project", Doc{{"_id", false}, {chunksLookupOutputFieldName, true}}}}}}}}};
     };
 
-    // TODO SERVER-53283: Once 5.0 has branched out this stage is no longer necessary.
-    stages.emplace_back(DocumentSourceUnionWith::createFromBson(
-        Doc{{"$unionWith", buildUnionWithFn(false /* withUUID */, true /* incremental */)}}
-            .toBson()
-            .firstElement(),
-        expCtx));
-
-    stages.emplace_back(DocumentSourceUnionWith::createFromBson(
-        Doc{{"$unionWith", buildUnionWithFn(true /* withUUID */, true /* incremental */)}}
-            .toBson()
-            .firstElement(),
-        expCtx));
-
-    // TODO SERVER-53283: Once 5.0 has branched out this stage is no longer necessary.
     stages.emplace_back(DocumentSourceUnionWith::createFromBson(
-        Doc{{"$unionWith", buildUnionWithFn(false /* withUUID */, false /* incremental */)}}
-            .toBson()
-            .firstElement(),
+        Doc{{"$unionWith", buildUnionWithFn(true /* incremental */)}}.toBson().firstElement(),
         expCtx));
 
     stages.emplace_back(DocumentSourceUnionWith::createFromBson(
-        Doc{{"$unionWith", buildUnionWithFn(true /* withUUID */, false /* incremental */)}}
-            .toBson()
-            .firstElement(),
+        Doc{{"$unionWith", buildUnionWithFn(false /* incremental */)}}.toBson().firstElement(),
         expCtx));
 
     auto pipeline = Pipeline::create(std::move(stages), expCtx);
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index 9957a4e3a32..eed8c08e7af 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -219,20 +219,6 @@ StatusWith<ChunkManager> CatalogCache::_getCollectionRoutingInfoAt(
             if (acquireTries == kMaxInconsistentRoutingInfoRefreshAttempts) {
                 return ex.toStatus();
             }
-        } catch (ExceptionFor<ErrorCodes::QueryPlanKilled>& ex) {
-            // TODO SERVER-53283: Remove once 5.0 has branched out.
-            // This would happen when the query to config.chunks is killed because the index it
-            // relied on has been dropped while the query was ongoing.
-            LOGV2_FOR_CATALOG_REFRESH(5310503,
-                                      0,
-                                      "Collection refresh failed",
-                                      "namespace"_attr = nss,
-                                      "exception"_attr = redact(ex));
-            _stats.totalRefreshWaitTimeMicros.addAndFetch(t.micros());
-            acquireTries++;
-            if (acquireTries == kMaxInconsistentRoutingInfoRefreshAttempts) {
-                return ex.toStatus();
-            }
         } catch (ExceptionForCat<ErrorCategory::SnapshotError>& ex) {
             LOGV2_FOR_CATALOG_REFRESH(5487402,
                                       0,
diff --git a/src/mongo/s/catalog_cache_refresh_test.cpp b/src/mongo/s/catalog_cache_refresh_test.cpp
index f37436cecf9..6a527696088 100644
--- a/src/mongo/s/catalog_cache_refresh_test.cpp
+++ b/src/mongo/s/catalog_cache_refresh_test.cpp
@@ -633,10 +633,10 @@ TEST_F(CatalogCacheRefreshTest, ChunkEpochChangeDuringIncrementalLoadRecoveryAft
         const auto& pipeline = aggRequest.getPipeline();
 
         ASSERT_BSONOBJ_EQ(
-            pipeline[1]["$unionWith"]["pipeline"].Array()[2]["$match"]["lastmodEpoch"].Obj(),
+            pipeline[1]["$unionWith"]["pipeline"].Array()[1]["$match"]["lastmodEpoch"].Obj(),
             BSON("$eq" << oldVersion.epoch()));
         ASSERT_BSONOBJ_EQ(
-            pipeline[3]["$unionWith"]["pipeline"].Array()[2]["$match"]["lastmodEpoch"].Obj(),
+            pipeline[2]["$unionWith"]["pipeline"].Array()[1]["$match"]["lastmodEpoch"].Obj(),
             BSON("$ne" << oldVersion.epoch()));
 
         const auto collBSON =
@@ -671,10 +671,10 @@ TEST_F(CatalogCacheRefreshTest, ChunkEpochChangeDuringIncrementalLoadRecoveryAft
         const auto& pipeline = aggRequest.getPipeline();
 
         ASSERT_BSONOBJ_EQ(
-            pipeline[1]["$unionWith"]["pipeline"].Array()[2]["$match"]["lastmodEpoch"].Obj(),
+            pipeline[1]["$unionWith"]["pipeline"].Array()[1]["$match"]["lastmodEpoch"].Obj(),
             BSON("$eq" << oldVersion.epoch()));
         ASSERT_BSONOBJ_EQ(
-            pipeline[3]["$unionWith"]["pipeline"].Array()[2]["$match"]["lastmodEpoch"].Obj(),
+            pipeline[2]["$unionWith"]["pipeline"].Array()[1]["$match"]["lastmodEpoch"].Obj(),
             BSON("$ne" << oldVersion.epoch()));
 
         const auto collBSON = getDefaultCollectionType(newEpoch, shardKeyPattern).toBSON();
@@ -733,10 +733,10 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterCollectionEpochChange) {
         const auto& pipeline = aggRequest.getPipeline();
 
         ASSERT_BSONOBJ_EQ(
-            pipeline[1]["$unionWith"]["pipeline"].Array()[2]["$match"]["lastmodEpoch"].Obj(),
+            pipeline[1]["$unionWith"]["pipeline"].Array()[1]["$match"]["lastmodEpoch"].Obj(),
             BSON("$eq" << oldVersion.epoch()));
         ASSERT_BSONOBJ_EQ(
-            pipeline[3]["$unionWith"]["pipeline"].Array()[2]["$match"]["lastmodEpoch"].Obj(),
+            pipeline[2]["$unionWith"]["pipeline"].Array()[1]["$match"]["lastmodEpoch"].Obj(),
             BSON("$ne" << oldVersion.epoch()));
 
         const auto collBSON =
@@ -789,13 +789,13 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterSplit) {
 
         ASSERT_BSONOBJ_EQ(
             pipeline[1]["$unionWith"]["pipeline"]
-                .Array()[3]["$lookup"]["pipeline"]
+                .Array()[2]["$lookup"]["pipeline"]
                 .Array()[1]["$match"]["lastmod"]
                 .Obj(),
             BSON("$gte" << Timestamp(version.majorVersion(), version.minorVersion())));
 
         ASSERT_BSONOBJ_EQ(
-            pipeline[1]["$unionWith"]["pipeline"].Array()[2]["$match"]["lastmodEpoch"].Obj(),
+            pipeline[1]["$unionWith"]["pipeline"].Array()[1]["$match"]["lastmodEpoch"].Obj(),
             BSON("$eq" << version.epoch()));
 
         const auto collBSON = getDefaultCollectionType(version.epoch(), shardKeyPattern).toBSON();
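The test changes above only track index shifts: with two of the four $unionWith stages gone,
the full-refresh branch moves from pipeline[3] to pipeline[2], and with the 'timestamp'
$match removed from each branch, the 'lastmodEpoch' guard moves from inner stage [2] to [1]
(and the $lookup from [3] to [2]).

On the catalog_cache.cpp hunk, the deleted QueryPlanKilled handler was one arm of a bounded
retry loop that the surviving handlers still use: log the failure, account the wait time,
bump acquireTries, and surface the error status once
kMaxInconsistentRoutingInfoRefreshAttempts is reached. A minimal, self-contained sketch of
that retry shape (all names below are stand-ins for illustration, not the server's types):

    #include <iostream>
    #include <stdexcept>

    // Stand-in for ExceptionFor<ErrorCodes::QueryPlanKilled>.
    struct QueryPlanKilled : std::runtime_error {
        using std::runtime_error::runtime_error;
    };

    constexpr int kMaxAttempts = 3;  // stand-in for kMaxInconsistentRoutingInfoRefreshAttempts

    // Fails twice, then succeeds, to exercise the retry path.
    void refreshOnce(int attempt) {
        if (attempt < 2)
            throw QueryPlanKilled("query killed: index dropped mid-query");
    }

    int main() {
        int acquireTries = 0;
        while (true) {
            try {
                refreshOnce(acquireTries);
                std::cout << "refresh succeeded after " << acquireTries + 1 << " attempt(s)\n";
                return 0;
            } catch (const QueryPlanKilled& ex) {
                // The real handler also logged and accumulated refresh wait time here.
                if (++acquireTries == kMaxAttempts) {
                    std::cerr << "giving up: " << ex.what() << '\n';
                    return 1;  // the server returns ex.toStatus() instead
                }
            }
        }
    }

Per the removed TODO, this arm existed only for the pre-5.0 metadata format, whose
ns_1_lastmod_1 index could be dropped mid-query; once 5.0 has branched and only the
UUID-based pipeline remains, the commit can drop the handler.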