diff options
| field | value | date |
|---|---|---|
| author | Yoonsoo Kim <yoonsoo.kim@mongodb.com> | 2023-04-03 19:15:29 +0000 |
| committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2023-04-03 21:20:47 +0000 |
| commit | 140387a6925f88990dfea22429228bd89da04e02 (patch) | |
| tree | 99edf6db8e60457cc2d549d95a52a9cec27c3563 | |
| parent | ef5113992e87df2d0e1a9f28c74ffa4da12f28e0 (diff) | |
| download | mongo-140387a6925f88990dfea22429228bd89da04e02.tar.gz | |
SERVER-74525 Support explain for timeseries deletes
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | jstests/core/timeseries/timeseries_delete_with_meta.js | 25 |
| -rw-r--r-- | jstests/core/timeseries/timeseries_explain_delete.js | 215 |
| -rw-r--r-- | src/mongo/db/commands/write_commands.cpp | 49 |
| -rw-r--r-- | src/mongo/db/exec/plan_stats.h | 7 |
| -rw-r--r-- | src/mongo/db/exec/timeseries_modify.cpp | 22 |
| -rw-r--r-- | src/mongo/db/query/plan_executor_impl.cpp | 4 |
| -rw-r--r-- | src/mongo/db/query/plan_explainer_impl.cpp | 10 |
7 files changed, 305 insertions(+), 27 deletions(-)
diff --git a/jstests/core/timeseries/timeseries_delete_with_meta.js b/jstests/core/timeseries/timeseries_delete_with_meta.js index a2d3f4c14fe..c90f3099dfb 100644 --- a/jstests/core/timeseries/timeseries_delete_with_meta.js +++ b/jstests/core/timeseries/timeseries_delete_with_meta.js @@ -13,8 +13,8 @@ (function() { "use strict"; -load("jstests/core/timeseries/libs/timeseries.js"); load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'. +load("jstests/libs/analyze_plan.js"); // For planHasStage(). load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'. if (FixtureHelpers.isMongos(db) && @@ -50,13 +50,24 @@ TimeseriesTest.run((insert) => { docsToInsert.forEach(doc => { assert.commandWorked(insert(coll, doc)); }); + + const deleteCommand = {delete: coll.getName(), deletes: deleteQuery, ordered, let : letDoc}; + + // Explain for delete command only works for single delete when the arbitrary timeseries + // delete feature is enabled and we check whether the explain works only when it's supposed + // to work without an error because we verify it with 'executionStats' explain. + if (isArbitraryDeleteEnabled && deleteQuery.length === 1 && expectedErrorCode === null) { + const explain = assert.commandWorked( + testDB.runCommand({explain: deleteCommand, verbosity: "executionStats"})); + jsTestLog(tojson(explain)); + assert(planHasStage(testDB, explain.queryPlanner.winningPlan, "BATCHED_DELETE") || + planHasStage(testDB, explain.queryPlanner.winningPlan, "DELETE") || + planHasStage(testDB, explain.queryPlanner.winningPlan, "TS_MODIFY")); + } + const res = expectedErrorCode - ? assert.commandFailedWithCode( - testDB.runCommand( - {delete: coll.getName(), deletes: deleteQuery, ordered, let : letDoc}), - expectedErrorCode) - : assert.commandWorked(testDB.runCommand( - {delete: coll.getName(), deletes: deleteQuery, ordered, let : letDoc})); + ? 
assert.commandFailedWithCode(testDB.runCommand(deleteCommand), expectedErrorCode) + : assert.commandWorked(testDB.runCommand(deleteCommand)); const docs = coll.find({}, {_id: 0}).toArray(); assert.eq(res["n"], expectedNRemoved); assert.sameMembers(docs, expectedRemainingDocs); diff --git a/jstests/core/timeseries/timeseries_explain_delete.js b/jstests/core/timeseries/timeseries_explain_delete.js new file mode 100644 index 00000000000..17476bdeb96 --- /dev/null +++ b/jstests/core/timeseries/timeseries_explain_delete.js @@ -0,0 +1,215 @@ +/** + * Tests whether the explain works for a single delete operation on a timeseries collection. + * + * @tags: [ + * # We need a timeseries collection. + * requires_timeseries, + * # To avoid multiversion tests + * requires_fcv_70, + * # To avoid burn-in tests in in-memory build variants + * requires_persistence, + * featureFlagTimeseriesDeletesSupport, + * ] + */ + +(function() { +"use strict"; + +load("jstests/libs/analyze_plan.js"); // For getPlanStage() and getExecutionStages(). + +const timeFieldName = "time"; +const metaFieldName = "tag"; +const dateTime = ISODate("2021-07-12T16:00:00Z"); +const collNamePrefix = "timeseries_explain_delete_"; +let testCaseId = 0; + +const testDB = db.getSiblingDB(jsTestName()); +assert.commandWorked(testDB.dropDatabase()); + +const docs = [ + {_id: 1, [timeFieldName]: dateTime, [metaFieldName]: 1}, + {_id: 2, [timeFieldName]: dateTime, [metaFieldName]: 1}, + {_id: 3, [timeFieldName]: dateTime, [metaFieldName]: 2}, + {_id: 4, [timeFieldName]: dateTime, [metaFieldName]: 2}, +]; + +function testDeleteExplain({ + singleDeleteOp, + expectedOpType, + expectedBucketFilter, + expectedResidualFilter, + expectedNumDeleted, + expectedNumUnpacked, + expectedUsedIndexName = null +}) { + // Prepares a timeseries collection. 
+ const coll = testDB.getCollection(collNamePrefix + testCaseId++); + coll.drop(); + + assert.commandWorked(testDB.createCollection( + coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}})); + + // Creates an index same as the one in the hint so as to verify that the index hint is honored. + if (singleDeleteOp.hasOwnProperty("hint")) { + assert.commandWorked(coll.createIndex(singleDeleteOp.hint)); + } + + assert.commandWorked(coll.insert(docs)); + + // Verifies the TS_MODIFY stage in the plan. + const innerDeleteCommand = {delete: coll.getName(), deletes: [singleDeleteOp]}; + const deleteExplainPlanCommand = {explain: innerDeleteCommand, verbosity: "queryPlanner"}; + let explain = assert.commandWorked(testDB.runCommand(deleteExplainPlanCommand)); + jsTestLog(tojson(explain)); + const tsModifyStage = getPlanStage(explain.queryPlanner.winningPlan, "TS_MODIFY"); + assert.neq(null, tsModifyStage, `TS_MODIFY stage not found in the plan: ${tojson(explain)}`); + assert.eq(expectedOpType, + tsModifyStage.opType, + `TS_MODIFY opType is wrong: ${tojson(tsModifyStage)}`); + assert.eq(expectedBucketFilter, + tsModifyStage.bucketFilter, + `TS_MODIFY bucketFilter is wrong: ${tojson(tsModifyStage)}`); + assert.eq(expectedResidualFilter, + tsModifyStage.residualFilter, + `TS_MODIFY residualFilter is wrong: ${tojson(tsModifyStage)}`); + + if (expectedUsedIndexName) { + const ixscanStage = getPlanStage(explain.queryPlanner.winningPlan, "IXSCAN"); + jsTestLog(tojson(ixscanStage)); + assert.eq(expectedUsedIndexName, + ixscanStage.indexName, + `Wrong index used: ${tojson(ixscanStage)}`); + } + + // Verifies the TS_MODIFY stage in the execution stats. 
+ const deleteExplainStatsCommand = {explain: innerDeleteCommand, verbosity: "executionStats"}; + explain = assert.commandWorked(testDB.runCommand(deleteExplainStatsCommand)); + jsTestLog(tojson(explain)); + const execStages = getExecutionStages(explain); + assert.eq("TS_MODIFY", + execStages[0].stage, + `TS_MODIFY stage not found in executionStages: ${tojson(explain)}`); + assert.eq(expectedNumDeleted, + execStages[0].nMeasurementsDeleted, + `Got wrong nMeasurementsDeleted: ${tojson(execStages[0])}`); + assert.eq(expectedNumUnpacked, + execStages[0].nBucketsUnpacked, + `Got wrong nBucketsUnpacked: ${tojson(execStages[0])}`); + + assert.sameMembers( + docs, coll.find().toArray(), "Explain command must not touch documents in the collection"); +} + +(function testDeleteManyWithEmptyBucketFilter() { + testDeleteExplain({ + singleDeleteOp: { + // The non-meta field filter leads to a COLLSCAN below the TS_MODIFY stage and so + // 'expectedNumUnpacked' is 2. + q: {_id: 2}, + limit: 0, + }, + expectedOpType: "deleteMany", + // TODO SERVER-75424: The bucket filter should be further optimized to "control.min._id: 2" + expectedBucketFilter: {}, + expectedResidualFilter: {_id: {$eq: 2}}, + expectedNumDeleted: 1, + expectedNumUnpacked: 2 + }); +})(); + +(function testDeleteManyWithBucketFilter() { + testDeleteExplain({ + singleDeleteOp: { + // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so + // 'expectedNumUnpacked' is exactly 1. + q: {[metaFieldName]: 2, _id: {$gte: 3}}, + limit: 0, + }, + expectedOpType: "deleteMany", + // The bucket filter is the one with metaFieldName translated to 'meta'. 
+ // TODO SERVER-75424: The bucket filter should be further optimized to "control.min._id: 3" + expectedBucketFilter: {meta: {$eq: 2}}, + expectedResidualFilter: {_id: {$gte: 3}}, + expectedNumDeleted: 2, + expectedNumUnpacked: 1 + }); +})(); + +(function testDeleteManyWithBucketFilterAndIndexHint() { + testDeleteExplain({ + singleDeleteOp: { + // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so + // 'expectedNumUnpacked' is exactly 1. + q: {[metaFieldName]: 2, _id: {$gte: 3}}, + limit: 0, + hint: {[metaFieldName]: 1} + }, + expectedOpType: "deleteMany", + // The bucket filter is the one with metaFieldName translated to 'meta'. + // TODO SERVER-75424: The bucket filter should be further optimized to "control.min._id: 3" + expectedBucketFilter: {meta: {$eq: 2}}, + expectedResidualFilter: {_id: {$gte: 3}}, + expectedNumDeleted: 2, + expectedNumUnpacked: 1, + expectedUsedIndexName: metaFieldName + "_1" + }); +})(); + +// TODO SERVER-75518: Enable following three test cases. +/* +(function testDeleteOneWithEmptyBucketFilter() { + testDeleteExplain({ + singleDeleteOp: { + // The non-meta field filter leads to a COLLSCAN below the TS_MODIFY stage and so + // 'expectedNumUnpacked' is 2. + q: {_id: 3}, + limit: 1, + }, + expectedOpType: "deleteOne", + // TODO SERVER-75424: The bucket filter should be further optimized to "control.min._id: 3" + expectedBucketFilter: {}, + expectedResidualFilter: {_id: {$eq: 3}}, + expectedNumDeleted: 1, + expectedNumUnpacked: 2 + }); +})(); + +(function testDeleteOneWithBucketFilter() { + testDeleteExplain({ + singleDeleteOp: { + // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so + // 'expectedNumUnpacked' is exactly 1. + q: {[metaFieldName]: 2, _id: {$gte: 1}}, + limit: 1, + }, + expectedOpType: "deleteOne", + // The bucket filter is the one with metaFieldName translated to 'meta'. 
+ // TODO SERVER-75424: The bucket filter should be further optimized to "control.min._id: 2" + expectedBucketFilter: {meta: {$eq: 2}}, + expectedResidualFilter: {_id: {$gte: 1}}, + expectedNumDeleted: 1, + expectedNumUnpacked: 1 + }); +})(); + +(function testDeleteOneWithBucketFilterAndIndexHint() { + testDeleteExplain({ + singleDeleteOp: { + // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so + // 'expectedNumUnpacked' is exactly 1. + q: {[metaFieldName]: 2, _id: {$gte: 1}}, + limit: 1, + hint: {[metaFieldName]: 1} + }, + expectedOpType: "deleteOne", + // The bucket filter is the one with metaFieldName translated to 'meta'. + // TODO SERVER-75424: The bucket filter should be further optimized to "control.min._id: 3" + expectedBucketFilter: {meta: {$eq: 2}}, + expectedResidualFilter: {_id: {$gte: 1}}, + expectedNumDeleted: 1, + expectedNumUnpacked: 1, + expectedUsedIndexName: metaFieldName + "_1" + }); +})(); +*/ +})(); diff --git a/src/mongo/db/commands/write_commands.cpp b/src/mongo/db/commands/write_commands.cpp index 62ce1650472..b10383f968b 100644 --- a/src/mongo/db/commands/write_commands.cpp +++ b/src/mongo/db/commands/write_commands.cpp @@ -68,6 +68,7 @@ #include "mongo/db/stats/counters.h" #include "mongo/db/storage/duplicate_key_error_info.h" #include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" #include "mongo/db/transaction/retryable_writes_stats.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/db/transaction_validation.h" @@ -712,30 +713,60 @@ public: request().getDeletes().size() == 1); auto deleteRequest = DeleteRequest{}; - deleteRequest.setNsString(request().getNamespace()); + auto isRequestToTimeseries = isTimeseries(opCtx, request()); + auto nss = [&] { + auto nss = request().getNamespace(); + if (!isRequestToTimeseries) { + return nss; + } + return nss.isTimeseriesBucketsCollection() ? 
nss + : nss.makeTimeseriesBucketsNamespace(); + }(); + deleteRequest.setNsString(nss); deleteRequest.setLegacyRuntimeConstants(request().getLegacyRuntimeConstants().value_or( Variables::generateRuntimeConstants(opCtx))); deleteRequest.setLet(request().getLet()); - BSONObj query = request().getDeletes()[0].getQ(); + const auto& firstDelete = request().getDeletes()[0]; + BSONObj query = firstDelete.getQ(); if (shouldDoFLERewrite(request())) { query = processFLEWriteExplainD( - opCtx, write_ops::collationOf(request().getDeletes()[0]), request(), query); + opCtx, write_ops::collationOf(firstDelete), request(), query); } deleteRequest.setQuery(std::move(query)); deleteRequest.setCollation(write_ops::collationOf(request().getDeletes()[0])); - deleteRequest.setMulti(request().getDeletes()[0].getMulti()); + deleteRequest.setMulti(firstDelete.getMulti()); deleteRequest.setYieldPolicy(PlanYieldPolicy::YieldPolicy::YIELD_AUTO); - deleteRequest.setHint(request().getDeletes()[0].getHint()); + deleteRequest.setHint(firstDelete.getHint()); deleteRequest.setIsExplain(true); - ParsedDelete parsedDelete(opCtx, &deleteRequest); - uassertStatusOK(parsedDelete.parseRequest()); - // Explains of write commands are read-only, but we take write locks so that timing // info is more accurate. 
- AutoGetCollection collection(opCtx, request().getNamespace(), MODE_IX); + AutoGetCollection collection(opCtx, deleteRequest.getNsString(), MODE_IX); + + if (isRequestToTimeseries) { + uassert(ErrorCodes::NamespaceNotFound, + "Could not find time-series buckets collection for write explain", + *collection); + auto timeseriesOptions = collection->getTimeseriesOptions(); + uassert(ErrorCodes::InvalidOptions, + "Time-series buckets collection is missing time-series options", + timeseriesOptions); + + if (timeseries::isHintIndexKey(firstDelete.getHint())) { + deleteRequest.setHint( + uassertStatusOK(timeseries::createBucketsIndexSpecFromTimeseriesIndexSpec( + *timeseriesOptions, firstDelete.getHint()))); + } + } + + ParsedDelete parsedDelete(opCtx, + &deleteRequest, + isRequestToTimeseries && collection + ? collection->getTimeseriesOptions() + : boost::none); + uassertStatusOK(parsedDelete.parseRequest()); // Explain the plan tree. auto exec = uassertStatusOK(getExecutorDelete(&CurOp::get(opCtx)->debug(), diff --git a/src/mongo/db/exec/plan_stats.h b/src/mongo/db/exec/plan_stats.h index 90e0170b1f6..6917e684993 100644 --- a/src/mongo/db/exec/plan_stats.h +++ b/src/mongo/db/exec/plan_stats.h @@ -1181,8 +1181,11 @@ struct TimeseriesModifyStats final : public SpecificStats { visitor->visit(this); } - size_t bucketsUnpacked = 0u; - size_t measurementsDeleted = 0u; + std::string opType; + BSONObj bucketFilter; + BSONObj residualFilter; + size_t nBucketsUnpacked = 0u; + size_t nMeasurementsDeleted = 0u; }; struct SampleFromTimeseriesBucketStats final : public SpecificStats { diff --git a/src/mongo/db/exec/timeseries_modify.cpp b/src/mongo/db/exec/timeseries_modify.cpp index a17c858395f..2db371be9ec 100644 --- a/src/mongo/db/exec/timeseries_modify.cpp +++ b/src/mongo/db/exec/timeseries_modify.cpp @@ -55,10 +55,24 @@ TimeseriesModifyStage::TimeseriesModifyStage(ExpressionContext* expCtx, "multi is true and no residual predicate was specified", _isDeleteOne() || 
_residualPredicate); _children.emplace_back(std::move(child)); + + // These three properties are only used for the queryPlanner explain and will not change while + // executing this stage. + _specificStats.opType = [&] { + if (_isDeleteOne()) { + return "deleteOne"; + } else { + return "deleteMany"; + } + }(); + _specificStats.bucketFilter = _params->canonicalQuery->getQueryObj(); + if (_residualPredicate) { + _specificStats.residualFilter = _residualPredicate->serialize(); + } } bool TimeseriesModifyStage::isEOF() { - if (_isDeleteOne() && _specificStats.measurementsDeleted > 0) { + if (_isDeleteOne() && _specificStats.nMeasurementsDeleted > 0) { return true; } return child()->isEOF() && _retryBucketId == WorkingSet::INVALID_ID; @@ -80,7 +94,7 @@ PlanStage::StageState TimeseriesModifyStage::_writeToTimeseriesBuckets( const std::vector<BSONObj>& deletedMeasurements, bool bucketFromMigrate) { if (_params->isExplain) { - _specificStats.measurementsDeleted += deletedMeasurements.size(); + _specificStats.nMeasurementsDeleted += deletedMeasurements.size(); return PlanStage::NEED_TIME; } @@ -151,7 +165,7 @@ PlanStage::StageState TimeseriesModifyStage::_writeToTimeseriesBuckets( return yieldAndRetry(7309301); } } - _specificStats.measurementsDeleted += deletedMeasurements.size(); + _specificStats.nMeasurementsDeleted += deletedMeasurements.size(); // As restoreState may restore (recreate) cursors, cursors are tied to the // transaction in which they are created, and a WriteUnitOfWork is a transaction, @@ -286,7 +300,7 @@ PlanStage::StageState TimeseriesModifyStage::doWork(WorkingSetID* out) { // This bucket is closed, skip it. 
return PlanStage::NEED_TIME; } - ++_specificStats.bucketsUnpacked; + ++_specificStats.nBucketsUnpacked; std::vector<BSONObj> unchangedMeasurements; std::vector<BSONObj> deletedMeasurements; diff --git a/src/mongo/db/query/plan_executor_impl.cpp b/src/mongo/db/query/plan_executor_impl.cpp index 623a6769e55..2c25777d8f2 100644 --- a/src/mongo/db/query/plan_executor_impl.cpp +++ b/src/mongo/db/query/plan_executor_impl.cpp @@ -613,9 +613,9 @@ long long PlanExecutorImpl::executeDelete() { return static_cast<const DeleteStats*>(stats)->docsDeleted; } case StageType::STAGE_TIMESERIES_MODIFY: { - const auto* tsWriteStats = + const auto* tsModifyStats = static_cast<const TimeseriesModifyStats*>(_root->getSpecificStats()); - return tsWriteStats->measurementsDeleted; + return tsModifyStats->nMeasurementsDeleted; } default: { invariant(StageType::STAGE_DELETE == _root->stageType() || diff --git a/src/mongo/db/query/plan_explainer_impl.cpp b/src/mongo/db/query/plan_explainer_impl.cpp index ebf0170bacb..5ff5187c5fe 100644 --- a/src/mongo/db/query/plan_explainer_impl.cpp +++ b/src/mongo/db/query/plan_explainer_impl.cpp @@ -485,10 +485,14 @@ void statsToBSON(const PlanStageStats& stats, } else if (STAGE_TIMESERIES_MODIFY == stats.stageType) { TimeseriesModifyStats* spec = static_cast<TimeseriesModifyStats*>(stats.specific.get()); + bob->append("opType", spec->opType); + bob->append("bucketFilter", spec->bucketFilter); + bob->append("residualFilter", spec->residualFilter); + if (verbosity >= ExplainOptions::Verbosity::kExecStats) { - bob->appendNumber("bucketsUnpacked", static_cast<long long>(spec->bucketsUnpacked)); - bob->appendNumber("measurementsDeleted", - static_cast<long long>(spec->measurementsDeleted)); + bob->appendNumber("nBucketsUnpacked", static_cast<long long>(spec->nBucketsUnpacked)); + bob->appendNumber("nMeasurementsDeleted", + static_cast<long long>(spec->nMeasurementsDeleted)); } } else if (STAGE_UNPACK_TIMESERIES_BUCKET == stats.stageType) { 
UnpackTimeseriesBucketStats* spec = |