From 06797deec0c5806e56a70a7d156556e0b028da7f Mon Sep 17 00:00:00 2001 From: Jason Zhang Date: Wed, 17 May 2023 02:23:47 +0000 Subject: SERVER-69922 Implement explain for updateOne, deleteOne, and findAndModify --- ...writes_without_shard_key_jscore_passthrough.yml | 2 +- jstests/sharding/query/collation_targeting.js | 19 +- .../query/collation_targeting_inherited.js | 17 +- jstests/sharding/query/explain_cmd.js | 12 +- .../query/explain_find_and_modify_sharded.js | 62 ++- .../updateOne_without_shard_key/explain.js | 490 +++++++++++++++++++++ .../s/commands/cluster_find_and_modify_cmd.cpp | 128 ++++-- src/mongo/s/commands/cluster_find_and_modify_cmd.h | 18 +- .../cluster_query_without_shard_key_cmd.cpp | 93 +++- src/mongo/s/commands/cluster_write_cmd.cpp | 82 +++- src/mongo/s/commands/cluster_write_cmd.h | 9 + .../cluster_write_without_shard_key_cmd.cpp | 52 +++ .../s/write_ops/write_without_shard_key_util.cpp | 87 ++++ .../s/write_ops/write_without_shard_key_util.h | 12 + 14 files changed, 986 insertions(+), 97 deletions(-) create mode 100644 jstests/sharding/updateOne_without_shard_key/explain.js diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_single_writes_without_shard_key_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_collections_single_writes_without_shard_key_jscore_passthrough.yml index 1cafef5dd6c..7e0eb0d7568 100644 --- a/buildscripts/resmokeconfig/suites/sharded_collections_single_writes_without_shard_key_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/sharded_collections_single_writes_without_shard_key_jscore_passthrough.yml @@ -15,7 +15,7 @@ selector: # Test assertions expect a certain document is deleted whereas updateOnes, deleteOnes, and # findAndModify without shard key can pick and modify any matching document. - jstests/core/**/crud_api.js - # TODO SERVER-69922: Requires explain for single writes without shard key. + # TODO SERVER-76907: Include hint stage if the original query provides one. - jstests/core/**/update_hint.js - jstests/core/**/delete_hint.js - jstests/core/**/find_and_modify_hint.js diff --git a/jstests/sharding/query/collation_targeting.js b/jstests/sharding/query/collation_targeting.js index 602c63c990a..329e65567c5 100644 --- a/jstests/sharding/query/collation_targeting.js +++ b/jstests/sharding/query/collation_targeting.js @@ -190,11 +190,10 @@ if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(testDB)) { coll.findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}, collation: caseInsensitive}); assert(res.a === "foo" || res.a === "FOO"); - // TODO: SERVER-69925 Implement explain for findAndModify. - // assert.throws(function() { - // coll.explain().findAndModify( - // {query: {a: "foo"}, update: {$set: {b: 1}}, collation: caseInsensitive}); - // }); + explain = coll.explain().findAndModify( + {query: {a: "foo"}, update: {$set: {b: 1}}, collation: caseInsensitive}); + assert.commandWorked(explain); + assert.eq(1, explain.queryPlanner.winningPlan.shards.length); } else { // Sharded findAndModify on strings with non-simple collation should fail, because findAndModify // must target a single shard. 
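The hunk above captures the collation-targeting side of this change: with the feature flag enabled, mongos no longer rejects an explain of a findAndModify whose query omits the shard key, and the write phase of the two-phase protocol still targets exactly one shard. A minimal mongo shell sketch of that interaction, assuming a collection sharded on {x: 1}; the collection name and the caseInsensitive collation document are illustrative, not taken from the patch:

    // Query on a non-shard-key field with a non-simple collation; previously this explain
    // failed because the findAndModify could not be targeted to a single shard.
    const caseInsensitive = {locale: "en_US", strength: 2};
    const explain = db.testColl.explain("queryPlanner").findAndModify({
        query: {a: "foo"},
        update: {$set: {b: 1}},
        collation: caseInsensitive
    });
    // The write phase always targets a single shard, so only one shard appears in the plan.
    assert.commandWorked(explain);
    assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE");
    assert.eq(explain.queryPlanner.winningPlan.shards.length, 1);

The remaining hunks in this file and in collation_targeting_inherited.js replace the old "explain throws" expectations with the same single-shard assertion.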
@@ -318,9 +317,13 @@ if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(testDB)) { assert.eq(1, writeRes.nRemoved); let afterNumDocsMatch = coll.find({a: "foo"}).collation(caseInsensitive).count(); assert.eq(beforeNumDocsMatch - 1, afterNumDocsMatch); + + explain = coll.explain().remove({a: "foo"}, {justOne: true, collation: caseInsensitive}); + assert.commandWorked(explain); + assert.eq(1, explain.queryPlanner.winningPlan.shards.length); + coll.insert(a_foo); coll.insert(a_FOO); - // TODO: SERVER-69924 Implement explain for deleteOne } else { // A single remove (justOne: true) must be single-shard or an exact-ID query. A query is // exact-ID if it contains an equality on _id and either has the collection default collation or @@ -421,7 +424,9 @@ if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(testDB)) { writeRes = assert.commandWorked(coll.update({a: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive})); assert.eq(1, writeRes.nMatched); - // TODO: SERVER-69922 Implement explain for updateOne + explain = coll.explain().update({a: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive}); + assert.commandWorked(explain); + assert.eq(1, explain.queryPlanner.winningPlan.shards.length); } else { // A single (non-multi) update must be single-shard or an exact-ID query. A query is exact-ID if // it contains an equality on _id and either has the collection default collation or _id is not diff --git a/jstests/sharding/query/collation_targeting_inherited.js b/jstests/sharding/query/collation_targeting_inherited.js index 7662baeee0f..4cc9e23765d 100644 --- a/jstests/sharding/query/collation_targeting_inherited.js +++ b/jstests/sharding/query/collation_targeting_inherited.js @@ -208,11 +208,10 @@ assert.eq(1, explain.queryPlanner.winningPlan.shards.length); if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(testDB)) { let res = collCaseInsensitive.findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}}); assert(res.a === "foo" || res.a === "FOO"); - - // TODO: SERVER-69925 Implement explain for findAndModify. - // assert.throws(function() { - // collCaseInsensitive.explain().findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}}); - // }); + explain = + collCaseInsensitive.explain().findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}}); + assert.commandWorked(explain); + assert.eq(1, explain.queryPlanner.winningPlan.shards.length); } else { // Sharded findAndModify on strings with non-simple collation inherited from the collection // default should fail, because findAndModify must target a single shard. @@ -340,7 +339,9 @@ if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(testDB)) { assert.eq(1, writeRes.nRemoved); let afterNumDocsMatch = collCaseInsensitive.find({a: "foo"}).collation(caseInsensitive).count(); assert.eq(beforeNumDocsMatch - 1, afterNumDocsMatch); - // TODO: SERVER-69924 Implement explain for deleteOne + explain = collCaseInsensitive.explain().remove({a: "foo"}, {justOne: true}); + assert.commandWorked(explain); + assert.eq(1, explain.queryPlanner.winningPlan.shards.length); // Re-insert documents for later test cases. 
collCaseInsensitive.insert(a_foo); @@ -451,7 +452,9 @@ assert.eq(1, explain.queryPlanner.winningPlan.shards.length); if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(testDB)) { writeRes = assert.commandWorked(collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}})); assert.eq(1, writeRes.nMatched); - // TODO: SERVER-69922 Implement explain for updateOne + explain = collCaseInsensitive.explain().update({a: "foo"}, {$set: {b: 1}}); + assert.commandWorked(explain); + assert.eq(1, explain.queryPlanner.winningPlan.shards.length); } else { // A single (non-multi) update must be single-shard or an exact-ID query. A query is exact-ID if // it diff --git a/jstests/sharding/query/explain_cmd.js b/jstests/sharding/query/explain_cmd.js index 544c426f546..249c7e30f42 100644 --- a/jstests/sharding/query/explain_cmd.js +++ b/jstests/sharding/query/explain_cmd.js @@ -135,15 +135,21 @@ assert.eq(explain.queryPlanner.winningPlan.shards.length, 1); // Check that the upsert didn't actually happen. assert.eq(0, collSharded.count({a: 10})); +// Sharded updateOne that does not target a single shard can now be executed with a two phase +// write protocol that will target at most 1 matching document. if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(collSharded.getDB())) { // Explain an upsert operation which cannot be targeted and verify that it is successful. - // TODO SERVER-69922: Verify expected response. explain = db.runCommand({ explain: {update: collSharded.getName(), updates: [{q: {b: 10}, u: {b: 10}, upsert: true}]}, verbosity: "allPlansExecution" }); - assert.commandWorked(explain, tojson(explain)); - assert.eq(explain.queryPlanner.winningPlan.shards.length, 2); + assert(explain.queryPlanner); + assert(explain.executionStats); + assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE"); + assert.eq(explain.queryPlanner.winningPlan.inputStage.winningPlan.stage, "SHARD_MERGE"); + assert.eq(explain.executionStats.executionStages.stage, "SHARD_WRITE"); + assert.eq(explain.executionStats.inputStage.executionStages.stage, "SHARD_MERGE"); + // Check that the upsert didn't actually happen. assert.eq(0, collSharded.count({b: 10})); } else { diff --git a/jstests/sharding/query/explain_find_and_modify_sharded.js b/jstests/sharding/query/explain_find_and_modify_sharded.js index 65f5dc17d6c..29dab239f5e 100644 --- a/jstests/sharding/query/explain_find_and_modify_sharded.js +++ b/jstests/sharding/query/explain_find_and_modify_sharded.js @@ -5,6 +5,8 @@ (function() { 'use strict'; +load("jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js"); + var collName = 'explain_find_and_modify'; // Create a cluster with 2 shards. @@ -37,21 +39,53 @@ assert.commandWorked(testDB.adminCommand( var res; -// Queries that do not involve the shard key are invalid. -res = testDB.runCommand( - {explain: {findAndModify: collName, query: {b: 1}, remove: true}, verbosity: 'queryPlanner'}); -assert.commandFailed(res); +// Sharded updateOne that does not target a single shard can now be executed with a two phase +// write protocol that will target at most 1 matching document. +if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(testDB)) { + res = assert.commandWorked(testDB.runCommand({ + explain: {findAndModify: collName, query: {b: 1}, remove: true}, + verbosity: 'queryPlanner' + })); -// Queries that have non-equality queries on the shard key are invalid. 
-res = testDB.runCommand({ - explain: { - findAndModify: collName, - query: {a: {$gt: 5}}, - update: {$inc: {b: 7}}, - }, - verbosity: 'allPlansExecution' -}); -assert.commandFailed(res); + assert(res.queryPlanner); + assert(!res.executionStats); + assert.eq(res.queryPlanner.winningPlan.stage, "SHARD_WRITE"); + assert.eq(res.queryPlanner.winningPlan.inputStage.winningPlan.stage, "SHARD_MERGE"); + + res = assert.commandWorked(testDB.runCommand({ + explain: { + findAndModify: collName, + query: {a: {$gt: 5}}, + update: {$inc: {b: 7}}, + }, + verbosity: 'allPlansExecution' + })); + + assert(res.queryPlanner); + assert(res.executionStats); + assert.eq(res.queryPlanner.winningPlan.stage, "SHARD_WRITE"); + assert.eq(res.queryPlanner.winningPlan.inputStage.winningPlan.stage, "SHARD_MERGE"); + assert.eq(res.executionStats.executionStages.stage, "SHARD_WRITE"); + assert.eq(res.executionStats.inputStage.executionStages.stage, "SHARD_MERGE"); +} else { + // Queries that do not involve the shard key are invalid. + res = testDB.runCommand({ + explain: {findAndModify: collName, query: {b: 1}, remove: true}, + verbosity: 'queryPlanner' + }); + assert.commandFailed(res); + + // Queries that have non-equality queries on the shard key are invalid. + res = testDB.runCommand({ + explain: { + findAndModify: collName, + query: {a: {$gt: 5}}, + update: {$inc: {b: 7}}, + }, + verbosity: 'allPlansExecution' + }); + assert.commandFailed(res); +} // Asserts that the explain command ran on the specified shard and used the given stage // for performing the findAndModify command. diff --git a/jstests/sharding/updateOne_without_shard_key/explain.js b/jstests/sharding/updateOne_without_shard_key/explain.js new file mode 100644 index 00000000000..1decea44604 --- /dev/null +++ b/jstests/sharding/updateOne_without_shard_key/explain.js @@ -0,0 +1,490 @@ +/** + * Test explain output for updateOne, deleteOne, and findAndModify without shard key. + * + * @tags: [ + * requires_sharding, + * requires_fcv_71, + * featureFlagUpdateOneWithoutShardKey, + * ] + */ + +(function() { +"use strict"; + +load("jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js"); + +// 2 shards single node, 1 mongos, 1 config server 3-node. +const st = new ShardingTest({}); +const dbName = "testDb"; +const collName = "testColl"; +const nss = dbName + "." + collName; +const splitPoint = 0; +const dbConn = st.s.getDB(dbName); +const docsToInsert = [ + {_id: 0, x: -2, y: 1, a: [1, 2, 3]}, + {_id: 1, x: -1, y: 1, a: [1, 2, 3]}, + {_id: 2, x: 1, y: 1, a: [1, 2, 3]}, + {_id: 3, x: 2, y: 1, a: [1, 2, 3]} +]; + +// Sets up a 2 shard cluster using 'x' as a shard key where Shard 0 owns x < +// splitPoint and Shard 1 splitPoint >= 0. +WriteWithoutShardKeyTestUtil.setupShardedCollection( + st, nss, {x: 1}, [{x: splitPoint}], [{query: {x: splitPoint}, shard: st.shard1.shardName}]); + +assert.commandWorked(dbConn[collName].insert(docsToInsert)); + +let listCollRes = assert.commandWorked(dbConn.runCommand({listCollections: 1})); +// There should only be one collection created in this test. 
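+// If the suite created the collection as a clustered collection, the shard-local plans
+// asserted on below contain a CLUSTERED_IXSCAN stage in place of the usual FETCH + IXSCAN
+// pair, so detect that up front.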
+const usingClusteredIndex = listCollRes.cursor.firstBatch[0].options.clusteredIndex != null; + +let testCases = [ + { + logMessage: "Running explain for findAndModify update with sort.", + hasSort: true, + opType: "UPDATE", + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1}, + sort: {x: 1}, + update: {$inc: {z: 1}}, + } + }, + }, + { + logMessage: "Running explain for findAndModify update with sort and upsert: true.", + hasSort: true, + isUpsert: true, + opType: "UPDATE", + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 5}, // Query matches no documents. + sort: {x: 1}, + update: {$inc: {z: 1}}, + upsert: true + } + }, + }, + { + logMessage: "Running explain for findAndModify update.", + opType: "UPDATE", + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1}, + update: {$inc: {z: 1}}, + } + }, + }, + { + logMessage: "Running explain for findAndModify update without sort and upsert: true.", + isUpsert: true, + opType: "UPDATE", + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 5}, // Query matches no documents. + update: {$inc: {z: 1}}, + upsert: true, + } + }, + }, + { + logMessage: "Running explain for findAndModify remove with sort.", + hasSort: true, + opType: "DELETE", + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1}, + sort: {x: 1}, + remove: true, + } + }, + }, + { + logMessage: "Running explain for findAndModify remove without sort.", + opType: "DELETE", + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1}, + remove: true, + } + }, + }, + { + logMessage: + "Running explain for findAndModify remove with positional projection with sort.", + opType: "DELETE", + hasSort: true, + isPositionalProjection: true, + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1, a: 1}, + fields: {'a.$': 1}, + sort: {x: 1}, + remove: true, + } + } + }, + { + logMessage: + "Running explain for findAndModify remove with positional projection without sort.", + opType: "DELETE", + isPositionalProjection: true, + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1, a: 1}, + fields: {'a.$': 1}, + remove: true, + } + } + }, + { + logMessage: + "Running explain for findAndModify update with positional projection with sort.", + opType: "UPDATE", + hasSort: true, + isPositionalProjection: true, + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1, a: 1}, + sort: {x: 1}, + fields: {'a.$': 1}, + update: {$inc: {z: 1}}, + } + } + }, + { + logMessage: + "Running explain for findAndModify update with positional projection without sort.", + opType: "UPDATE", + isPositionalProjection: true, + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1, a: 1}, + fields: {'a.$': 1}, + update: {$inc: {z: 1}}, + } + } + }, + { + logMessage: "Running explain for findAndModify update with positional update with sort.", + opType: "UPDATE", + hasSort: true, + isPositionalUpdate: true, + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1, a: 1}, + sort: {x: 1}, + update: {$set: {"a.$": 3}}, + } + } + }, + { + logMessage: "Running explain for findAndModify update with positional update without sort.", + opType: "UPDATE", + isPositionalUpdate: true, + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1, a: 1}, + update: {$set: {"a.$": 3}}, + } + } + }, + { + logMessage: "Running explain for updateOne.", + opType: "UPDATE", + cmdObj: { + explain: { + update: collName, + updates: [{ + q: {y: 1}, + u: {$set: {z: 1}}, + multi: false, + upsert: false, + }], + }, + }, + 
}, + { + logMessage: "Running explain for updateOne and upsert: true.", + isUpsert: true, + opType: "UPDATE", + cmdObj: { + explain: { + update: collName, + updates: [{ + q: {y: 5}, + u: {$set: {z: 1}}, + multi: false, + upsert: true, + }], // Query matches no documents. + }, + }, + }, + { + logMessage: "Running explain for updateOne and with positional update.", + opType: "UPDATE", + isPositionalUpdate: true, + cmdObj: { + explain: { + update: collName, + updates: [{ + q: {y: 1, a: 1}, + u: {$set: {"a.$": 3}}, + multi: false, + upsert: false, + }], + }, + }, + }, + { + logMessage: "Running explain for deleteOne.", + opType: "DELETE", + cmdObj: { + explain: { + delete: collName, + deletes: [{q: {y: 1}, limit: 1}], + }, + }, + }, +]; + +function runTestCase(testCase) { + jsTestLog(testCase.logMessage + "\n" + tojson(testCase)); + + let verbosityLevels = ["queryPlanner", "executionStats", "allPlansExecution"]; + verbosityLevels.forEach(verbosityLevel => { + jsTestLog("Running with verbosity level: " + verbosityLevel); + let explainCmdObj = Object.assign(testCase.cmdObj, {verbosity: verbosityLevel}); + let res = assert.commandWorked(dbConn.runCommand(explainCmdObj)); + validateResponse(res, testCase, verbosityLevel); + }); +} + +function validateResponse(res, testCase, verbosity) { + assert.eq(res.queryPlanner.winningPlan.stage, "SHARD_WRITE"); + + if (testCase.hasSort) { + assert.eq(res.queryPlanner.winningPlan.inputStage.winningPlan.stage, "SHARD_MERGE_SORT"); + } else { + assert.eq(res.queryPlanner.winningPlan.inputStage.winningPlan.stage, "SHARD_MERGE"); + } + + if (testCase.isPositionalProjection) { + assert.eq(res.queryPlanner.winningPlan.shards[0].winningPlan.stage, "PROJECTION_DEFAULT"); + assert.eq(res.queryPlanner.winningPlan.shards[0].winningPlan.inputStage.stage, + testCase.opType); + if (testCase.hasSort) { + assert.eq(res.queryPlanner.winningPlan.shards[0] + .winningPlan.inputStage.inputStage.inputStage.stage, + "FETCH"); + assert.eq(res.queryPlanner.winningPlan.shards[0] + .winningPlan.inputStage.inputStage.inputStage.inputStage.stage, + "IXSCAN"); + } else { + if (usingClusteredIndex) { + assert.eq( + res.queryPlanner.winningPlan.shards[0].winningPlan.inputStage.inputStage.stage, + "CLUSTERED_IXSCAN"); + } else { + assert.eq( + res.queryPlanner.winningPlan.shards[0].winningPlan.inputStage.inputStage.stage, + "FETCH", + res); + assert.eq(res.queryPlanner.winningPlan.shards[0] + .winningPlan.inputStage.inputStage.inputStage.stage, + "IXSCAN"); + } + } + } else if (testCase.isPositionalUpdate) { + assert.eq(res.queryPlanner.winningPlan.shards[0].winningPlan.stage, testCase.opType); + if (testCase.hasSort) { + assert.eq( + res.queryPlanner.winningPlan.shards[0].winningPlan.inputStage.inputStage.stage, + "FETCH"); + assert.eq(res.queryPlanner.winningPlan.shards[0] + .winningPlan.inputStage.inputStage.inputStage.stage, + "IXSCAN"); + } else { + if (usingClusteredIndex) { + assert.eq(res.queryPlanner.winningPlan.shards[0].winningPlan.inputStage.stage, + "CLUSTERED_IXSCAN"); + } else { + assert.eq(res.queryPlanner.winningPlan.shards[0].winningPlan.inputStage.stage, + "FETCH"); + assert.eq( + res.queryPlanner.winningPlan.shards[0].winningPlan.inputStage.inputStage.stage, + "IXSCAN"); + } + } + } else { + if (usingClusteredIndex) { + assert.eq(res.queryPlanner.winningPlan.shards[0].winningPlan.inputStage.stage, + "CLUSTERED_IXSCAN"); + } else { + assert.eq(res.queryPlanner.winningPlan.shards[0].winningPlan.stage, testCase.opType); + 
assert.eq(res.queryPlanner.winningPlan.shards[0].winningPlan.inputStage.stage, + "IDHACK"); + } + } + + assert.eq(res.queryPlanner.winningPlan.shards.length, + 1); // Only 1 shard targeted by the write. + assert.eq(res.queryPlanner.winningPlan.inputStage.winningPlan.shards.length, + 2); // 2 shards had matching documents. + + if (verbosity === "queryPlanner") { + assert.eq(res.executionStats, null); + } else { + assert.eq(res.executionStats.executionStages.stage, "SHARD_WRITE"); + if (testCase.isPositionalProjection) { + assert.eq(res.executionStats.executionStages.shards[0].executionStages.stage, + "PROJECTION_DEFAULT"); + assert.eq(res.executionStats.executionStages.shards[0].executionStages.inputStage.stage, + testCase.opType); + if (testCase.hasSort) { + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.inputStage.inputStage.stage, + "FETCH"); + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.inputStage.inputStage.inputStage.stage, + "IXSCAN"); + } else { + if (usingClusteredIndex) { + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.inputStage.stage, + "CLUSTERED_IXSCAN"); + } else { + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.inputStage.stage, + "FETCH"); + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.inputStage.inputStage.stage, + "IXSCAN"); + } + } + } else if (testCase.isPositionalUpdate) { + assert.eq(res.executionStats.executionStages.shards[0].executionStages.stage, + testCase.opType); + if (testCase.hasSort) { + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.inputStage.stage, + "FETCH"); + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.inputStage.inputStage.stage, + "IXSCAN"); + } else { + if (usingClusteredIndex) { + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.stage, + "CLUSTERED_IXSCAN"); + } else { + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.stage, + "FETCH"); + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.inputStage.stage, + "IXSCAN"); + } + } + } else { + if (usingClusteredIndex) { + assert.eq( + res.executionStats.executionStages.shards[0].executionStages.inputStage.stage, + "CLUSTERED_IXSCAN"); + + } else { + assert.eq(res.executionStats.executionStages.shards[0].executionStages.stage, + testCase.opType); + assert.eq( + res.executionStats.executionStages.shards[0].executionStages.inputStage.stage, + "IDHACK"); + } + } + assert.eq(res.executionStats.executionStages.shards.length, + 1); // Only 1 shard targeted by the write. + assert.eq(res.executionStats.inputStage.executionStages.shards.length, + 2); // 2 shards had matching documents. + + // We use a dummy _id target document for the Write Phase which should not match any + // existing documents in the collection. This will at least preserve the query plan, + // but may lead to incorrect executionStats. + if (testCase.isUpsert) { + assert.eq(res.executionStats.nReturned, 0); + assert.eq(res.executionStats.executionStages.shards[0].executionStages.nWouldModify, 0); + assert.eq(res.executionStats.executionStages.shards[0].executionStages.nWouldUpsert, 1); + assert.eq(res.executionStats.inputStage.nReturned, 0); + } else { + // TODO SERVER-29449: Properly report explain results for sharded queries with a + // limit. 
assert.eq(res.executionStats.nReturned, 1); + if (testCase.opType === "DELETE") { + // We use a dummy _id target document for the Write Phase which should not match any + // existing documents in the collection. This will at least preserve the query plan, + // but may lead to incorrect executionStats. + if (testCase.isPositionalProjection) { + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.nWouldDelete, + 0); + } else { + assert.eq( + res.executionStats.executionStages.shards[0].executionStages.nWouldDelete, + 0); + } + } else { + // We use a dummy _id target document for the Write Phase which should not match any + // existing documents in the collection. This will at least preserve the query plan, + // but may lead to incorrect executionStats. + if (testCase.isPositionalProjection) { + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.nWouldModify, + 0); + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.nWouldUpsert, + 0); + } else { + assert.eq( + res.executionStats.executionStages.shards[0].executionStages.nWouldModify, + 0); + assert.eq( + res.executionStats.executionStages.shards[0].executionStages.nWouldUpsert, + 0); + } + } + assert.eq(res.executionStats.inputStage.nReturned, 2); + } + + if (testCase.hasSort) { + assert.eq(res.executionStats.inputStage.executionStages.stage, "SHARD_MERGE_SORT"); + } else { + assert.eq(res.executionStats.inputStage.executionStages.stage, "SHARD_MERGE"); + } + } + + assert(res.serverInfo); + assert(res.serverParameters); + assert(res.command); + + // Checks that 'command' field of the explain output is the same command that we originally + // wanted to explain. + for (const [key, value] of Object.entries(testCase.cmdObj.explain)) { + assert.eq(res.command[key], value); + } +} + +testCases.forEach(testCase => { + runTestCase(testCase); +}); + +st.stop(); +})(); diff --git a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp index 3d223e1e3fc..600b2ff3dd3 100644 --- a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp +++ b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp @@ -517,6 +517,7 @@ Status FindAndModifyCmd::explain(OperationContext* opCtx, rpc::ReplyBuilderInterface* result) const { const DatabaseName dbName = DatabaseNameUtil::deserialize(request.getValidatedTenantId(), request.getDatabase()); + auto bodyBuilder = result->getBodyBuilder(); const BSONObj& cmdObj = [&]() { // Check whether the query portion needs to be rewritten for FLE. 
auto findAndModifyRequest = write_ops::FindAndModifyCommandRequest::parse( @@ -538,20 +539,11 @@ Status FindAndModifyCmd::explain(OperationContext* opCtx, const auto& cm = cri.cm; std::shared_ptr shard; - if (cm.isSharded()) { - const BSONObj query = cmdObj.getObjectField("query"); - const BSONObj collation = getCollation(cmdObj); - const auto let = getLet(cmdObj); - const auto rc = getLegacyRuntimeConstants(cmdObj); - const BSONObj shardKey = - getShardKey(makeExpCtx(opCtx, nss, collation, boost::none, let, rc), cm, query); - const auto chunk = cm.findIntersectingChunk(shardKey, collation); - - shard = - uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, chunk.getShardId())); - } else { - shard = uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, cm.dbPrimary())); - } + const BSONObj query = cmdObj.getObjectField("query"); + const BSONObj collation = getCollation(cmdObj); + const auto isUpsert = cmdObj.getBoolField("upsert"); + const auto let = getLet(cmdObj); + const auto rc = getLegacyRuntimeConstants(cmdObj); const auto explainCmd = ClusterExplain::wrapAsExplain( appendLegacyRuntimeConstantsToCommandObject(opCtx, cmdObj), verbosity); @@ -561,16 +553,31 @@ Status FindAndModifyCmd::explain(OperationContext* opCtx, BSONObjBuilder bob; if (cm.isSharded()) { - _runCommand(opCtx, - shard->getId(), - cri.getShardVersion(shard->getId()), - boost::none, - nss, - applyReadWriteConcern(opCtx, false, false, explainCmd), - true /* isExplain */, - boost::none /* allowShardKeyUpdatesWithoutFullShardKeyInQuery */, - &bob); + if (write_without_shard_key::useTwoPhaseProtocol( + opCtx, nss, false /* isUpdateOrDelete */, isUpsert, query, collation, let, rc)) { + _runExplainWithoutShardKey(opCtx, nss, explainCmd, verbosity, &bob); + bodyBuilder.appendElementsUnique(bob.obj()); + return Status::OK(); + } else { + const BSONObj shardKey = + getShardKey(makeExpCtx(opCtx, nss, collation, boost::none, let, rc), cm, query); + const auto chunk = cm.findIntersectingChunk(shardKey, collation); + shard = uassertStatusOK( + Grid::get(opCtx)->shardRegistry()->getShard(opCtx, chunk.getShardId())); + + _runCommand(opCtx, + shard->getId(), + cri.getShardVersion(shard->getId()), + boost::none, + nss, + applyReadWriteConcern(opCtx, false, false, explainCmd), + true /* isExplain */, + boost::none /* allowShardKeyUpdatesWithoutFullShardKeyInQuery */, + &bob); + } } else { + shard = uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, cm.dbPrimary())); + _runCommand(opCtx, shard->getId(), boost::make_optional(!cm.dbVersion().isFixed(), ShardVersion::UNSHARDED()), @@ -591,7 +598,6 @@ Status FindAndModifyCmd::explain(OperationContext* opCtx, AsyncRequestsSender::Response arsResponse{ shard->getId(), response, shard->getConnString().getServers().front()}; - auto bodyBuilder = result->getBodyBuilder(); return ClusterExplain::buildExplainResult( opCtx, {arsResponse}, ClusterExplain::kSingleShard, millisElapsed, cmdObj, &bodyBuilder); } @@ -681,12 +687,8 @@ bool FindAndModifyCmd::run(OperationContext* opCtx, cmdObjForShard = replaceNamespaceByBucketNss(cmdObjForShard, nss); } - _runCommandWithoutShardKey(opCtx, - nss, - applyReadWriteConcern(opCtx, this, cmdObjForShard), - false /* isExplain */, - allowShardKeyUpdatesWithoutFullShardKeyInQuery, - &result); + _runCommandWithoutShardKey( + opCtx, nss, applyReadWriteConcern(opCtx, this, cmdObjForShard), &result); } } else { findAndModifyTargetedShardedCount.increment(1); @@ -801,19 +803,18 @@ void 
FindAndModifyCmd::_constructResult(OperationContext* opCtx, } // Two-phase protocol to run a findAndModify command without a shard key or _id. -void FindAndModifyCmd::_runCommandWithoutShardKey( - OperationContext* opCtx, - const NamespaceString& nss, - const BSONObj& cmdObj, - bool isExplain, - boost::optional allowShardKeyUpdatesWithoutFullShardKeyInQuery, - BSONObjBuilder* result) { +void FindAndModifyCmd::_runCommandWithoutShardKey(OperationContext* opCtx, + const NamespaceString& nss, + const BSONObj& cmdObj, + BSONObjBuilder* result) { + auto allowShardKeyUpdatesWithoutFullShardKeyInQuery = + opCtx->isRetryableWrite() || opCtx->inMultiDocumentTransaction(); auto cmdObjForPassthrough = prepareCmdObjForPassthrough(opCtx, cmdObj, nss, - isExplain, + false /* isExplain */, boost::none /* dbVersion */, boost::none /* shardVersion */, allowShardKeyUpdatesWithoutFullShardKeyInQuery); @@ -855,6 +856,50 @@ void FindAndModifyCmd::_runCommandWithoutShardKey( result); } +// Two-phase protocol to run an explain for a findAndModify command without a shard key or _id. +void FindAndModifyCmd::_runExplainWithoutShardKey(OperationContext* opCtx, + const NamespaceString& nss, + const BSONObj& cmdObj, + ExplainOptions::Verbosity verbosity, + BSONObjBuilder* result) { + auto cmdObjForPassthrough = prepareCmdObjForPassthrough( + opCtx, + cmdObj, + nss, + true /* isExplain */, + boost::none /* dbVersion */, + boost::none /* shardVersion */, + boost::none /* allowShardKeyUpdatesWithoutFullShardKeyInQuery */); + + // Explain currently cannot be run within a transaction, so each command is instead run + // separately outside of a transaction, and we compose the results at the end. + auto clusterQueryWithoutShardKeyExplainRes = [&] { + ClusterQueryWithoutShardKey clusterQueryWithoutShardKeyCommand(cmdObjForPassthrough); + const auto explainClusterQueryWithoutShardKeyCmd = + ClusterExplain::wrapAsExplain(clusterQueryWithoutShardKeyCommand.toBSON({}), verbosity); + auto opMsg = OpMsgRequest::fromDBAndBody(nss.db(), explainClusterQueryWithoutShardKeyCmd); + return CommandHelpers::runCommandDirectly(opCtx, opMsg).getOwned(); + }(); + + auto clusterWriteWithoutShardKeyExplainRes = [&] { + // Since 'explain' does not return the results of the query, we do not have an _id + // document to target by from the 'Read Phase'. We instead will use a dummy _id + // target document for the 'Write Phase'. + ClusterWriteWithoutShardKey clusterWriteWithoutShardKeyCommand( + cmdObjForPassthrough, + clusterQueryWithoutShardKeyExplainRes.getStringField("targetShardId").toString(), + write_without_shard_key::targetDocForExplain); + const auto explainClusterWriteWithoutShardKeyCmd = + ClusterExplain::wrapAsExplain(clusterWriteWithoutShardKeyCommand.toBSON({}), verbosity); + auto opMsg = OpMsgRequest::fromDBAndBody(nss.db(), explainClusterWriteWithoutShardKeyCmd); + return CommandHelpers::runCommandDirectly(opCtx, opMsg).getOwned(); + }(); + + auto output = write_without_shard_key::generateExplainResponseForTwoPhaseWriteProtocol( + clusterQueryWithoutShardKeyExplainRes, clusterWriteWithoutShardKeyExplainRes); + result->appendElementsUnique(output); +} + // Command invocation to be used if a shard key is specified or the collection is unsharded. 
void FindAndModifyCmd::_runCommand( OperationContext* opCtx, @@ -938,12 +983,7 @@ void FindAndModifyCmd::_handleWouldChangeOwningShardErrorRetryableWriteLegacy( getLet(cmdObj), getLegacyRuntimeConstants(cmdObj))) { findAndModifyNonTargetedShardedCount.increment(1); - _runCommandWithoutShardKey(opCtx, - nss, - stripWriteConcern(cmdObj), - false /* isExplain */, - true /* allowShardKeyUpdatesWithoutFullShardKeyInQuery */, - result); + _runCommandWithoutShardKey(opCtx, nss, stripWriteConcern(cmdObj), result); } else { findAndModifyTargetedShardedCount.increment(1); diff --git a/src/mongo/s/commands/cluster_find_and_modify_cmd.h b/src/mongo/s/commands/cluster_find_and_modify_cmd.h index ca0ec63fff3..8c2837645a2 100644 --- a/src/mongo/s/commands/cluster_find_and_modify_cmd.h +++ b/src/mongo/s/commands/cluster_find_and_modify_cmd.h @@ -156,13 +156,17 @@ private: BSONObjBuilder* result); // Two-phase protocol to run a findAndModify command without a shard key or _id. - static void _runCommandWithoutShardKey( - OperationContext* opCtx, - const NamespaceString& nss, - const BSONObj& cmdObj, - bool isExplain, - boost::optional allowShardKeyUpdatesWithoutFullShardKeyInQuery, - BSONObjBuilder* result); + static void _runCommandWithoutShardKey(OperationContext* opCtx, + const NamespaceString& nss, + const BSONObj& cmdObj, + BSONObjBuilder* result); + + // Two-phase protocol to run an explain for a findAndModify command without a shard key or _id. + static void _runExplainWithoutShardKey(OperationContext* opCtx, + const NamespaceString& nss, + const BSONObj& cmdObj, + ExplainOptions::Verbosity verbosity, + BSONObjBuilder* result); // Command invocation to be used if a shard key is specified or the collection is unsharded. static void _runCommand(OperationContext* opCtx, diff --git a/src/mongo/s/commands/cluster_query_without_shard_key_cmd.cpp b/src/mongo/s/commands/cluster_query_without_shard_key_cmd.cpp index cab695ddab4..e87fca927e2 100644 --- a/src/mongo/s/commands/cluster_query_without_shard_key_cmd.cpp +++ b/src/mongo/s/commands/cluster_query_without_shard_key_cmd.cpp @@ -28,6 +28,7 @@ */ #include "mongo/db/auth/authorization_session.h" #include "mongo/db/commands.h" +#include "mongo/db/explain_gen.h" #include "mongo/db/internal_transactions_feature_flag_gen.h" #include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/pipeline/document_source_limit.h" @@ -39,6 +40,7 @@ #include "mongo/db/update/update_util.h" #include "mongo/logv2/log.h" #include "mongo/s/cluster_commands_helpers.h" +#include "mongo/s/commands/cluster_explain.h" #include "mongo/s/grid.h" #include "mongo/s/is_mongos.h" #include "mongo/s/multi_statement_transaction_requests_sender.h" @@ -190,9 +192,8 @@ BSONObj createAggregateCmdObj( return aggregate.toBSON({}); } -ParsedCommandInfo parseWriteCommand(OperationContext* opCtx, - StringData commandName, - const BSONObj& writeCmdObj) { +ParsedCommandInfo parseWriteCommand(OperationContext* opCtx, const BSONObj& writeCmdObj) { + auto commandName = writeCmdObj.firstElementFieldNameStringData(); ParsedCommandInfo parsedInfo; if (commandName == write_ops::UpdateCommandRequest::kCommandName) { auto updateRequest = write_ops::UpdateCommandRequest::parse( @@ -253,7 +254,8 @@ ParsedCommandInfo parseWriteCommand(OperationContext* opCtx, parsedInfo.collation = *parsedCollation; } } else { - uasserted(ErrorCodes::InvalidOptions, "Not a supported write command"); + uasserted(ErrorCodes::InvalidOptions, + str::stream() << commandName << " is not a supported batch write command"); } 
return parsedInfo; } @@ -276,22 +278,19 @@ public: "Running read phase for a write without a shard key.", "clientWriteRequest"_attr = request().getWriteCmd()); + const auto writeCmdObj = request().getWriteCmd(); + // Get all shard ids for shards that have chunks in the desired namespace. - const NamespaceString nss( - CommandHelpers::parseNsCollectionRequired(ns().dbName(), request().getWriteCmd())); + const NamespaceString nss = + CommandHelpers::parseNsCollectionRequired(ns().dbName(), writeCmdObj); hangBeforeMetadataRefreshClusterQuery.pauseWhileSet(opCtx); const auto cri = uassertStatusOK(getCollectionRoutingInfoForTxnCmd(opCtx, nss)); // Parse into OpMsgRequest to append the $db field, which is required for command // parsing. - const auto opMsgRequest = - OpMsgRequest::fromDBAndBody(nss.db(), request().getWriteCmd()); - - auto parsedInfoFromRequest = - parseWriteCommand(opCtx, - request().getWriteCmd().firstElementFieldNameStringData(), - opMsgRequest.body); + const auto opMsgRequest = OpMsgRequest::fromDBAndBody(ns().db(), writeCmdObj); + auto parsedInfoFromRequest = parseWriteCommand(opCtx, opMsgRequest.body); auto allShardsContainingChunksForNs = getShardsToTarget(opCtx, cri.cm, nss, parsedInfoFromRequest); @@ -409,6 +408,74 @@ public: } private: + void explain(OperationContext* opCtx, + ExplainOptions::Verbosity verbosity, + rpc::ReplyBuilderInterface* result) override { + const auto writeCmdObj = [&] { + const auto explainCmdObj = request().getWriteCmd(); + const auto opMsgRequestExplainCmd = + OpMsgRequest::fromDBAndBody(ns().db(), explainCmdObj); + auto explainRequest = ExplainCommandRequest::parse( + IDLParserContext("_clusterQueryWithoutShardKeyExplain"), + opMsgRequestExplainCmd.body); + return explainRequest.getCommandParameter().getOwned(); + }(); + + // Get all shard ids for shards that have chunks in the desired namespace. + const NamespaceString nss = + CommandHelpers::parseNsCollectionRequired(ns().dbName(), writeCmdObj); + const auto cri = uassertStatusOK(getCollectionRoutingInfoForTxnCmd(opCtx, nss)); + + // Parse into OpMsgRequest to append the $db field, which is required for command + // parsing. + const auto opMsgRequestWriteCmd = OpMsgRequest::fromDBAndBody(ns().db(), writeCmdObj); + auto parsedInfoFromRequest = parseWriteCommand(opCtx, opMsgRequestWriteCmd.body); + + auto allShardsContainingChunksForNs = + getShardsToTarget(opCtx, cri.cm, nss, parsedInfoFromRequest); + auto cmdObj = createAggregateCmdObj(opCtx, parsedInfoFromRequest, nss, boost::none); + + const auto aggExplainCmdObj = ClusterExplain::wrapAsExplain(cmdObj, verbosity); + + std::vector requests; + for (const auto& shardId : allShardsContainingChunksForNs) { + requests.emplace_back( + shardId, appendShardVersion(aggExplainCmdObj, cri.getShardVersion(shardId))); + } + + Timer timer; + MultiStatementTransactionRequestsSender ars( + opCtx, + Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(), + request().getDbName(), + requests, + ReadPreferenceSetting(ReadPreference::PrimaryOnly), + Shard::RetryPolicy::kNoRetry); + + ShardId shardId; + std::vector responses; + + while (!ars.done()) { + auto response = ars.next(); + uassertStatusOK(response.swResponse); + responses.push_back(response); + shardId = response.shardId; + } + + const auto millisElapsed = timer.millis(); + + auto bodyBuilder = result->getBodyBuilder(); + uassertStatusOK(ClusterExplain::buildExplainResult( + opCtx, + responses, + parsedInfoFromRequest.sort ? 
ClusterExplain::kMergeSortFromShards + : ClusterExplain::kMergeFromShards, + millisElapsed, + writeCmdObj, + &bodyBuilder)); + bodyBuilder.append("targetShardId", shardId); + } + NamespaceString ns() const override { return NamespaceString(request().getDbName()); } diff --git a/src/mongo/s/commands/cluster_write_cmd.cpp b/src/mongo/s/commands/cluster_write_cmd.cpp index 147f8ad0924..783433e02f1 100644 --- a/src/mongo/s/commands/cluster_write_cmd.cpp +++ b/src/mongo/s/commands/cluster_write_cmd.cpp @@ -50,11 +50,13 @@ #include "mongo/s/commands/cluster_explain.h" #include "mongo/s/commands/document_shard_key_update_util.h" #include "mongo/s/grid.h" +#include "mongo/s/request_types/cluster_commands_without_shard_key_gen.h" #include "mongo/s/session_catalog_router.h" #include "mongo/s/transaction_router.h" #include "mongo/s/transaction_router_resource_yielder.h" #include "mongo/s/would_change_owning_shard_exception.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/s/write_ops/write_without_shard_key_util.h" #include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -632,6 +634,78 @@ void ClusterWriteCmd::InvocationBase::run(OperationContext* opCtx, CommandHelpers::appendSimpleCommandStatus(bob, ok); } +bool ClusterWriteCmd::InvocationBase::_runExplainWithoutShardKey( + OperationContext* opCtx, + const NamespaceString& nss, + ExplainOptions::Verbosity verbosity, + BSONObjBuilder* result) { + if (_batchedRequest.getBatchType() == BatchedCommandRequest::BatchType_Delete || + _batchedRequest.getBatchType() == BatchedCommandRequest::BatchType_Update) { + bool isMultiWrite = false; + BSONObj query; + BSONObj collation; + bool isUpsert = false; + if (_batchedRequest.getBatchType() == BatchedCommandRequest::BatchType_Update) { + auto updateOp = _batchedRequest.getUpdateRequest().getUpdates().begin(); + isMultiWrite = updateOp->getMulti(); + query = updateOp->getQ(); + collation = updateOp->getCollation().value_or(BSONObj()); + isUpsert = updateOp->getUpsert(); + } else { + auto deleteOp = _batchedRequest.getDeleteRequest().getDeletes().begin(); + isMultiWrite = deleteOp->getMulti(); + query = deleteOp->getQ(); + collation = deleteOp->getCollation().value_or(BSONObj()); + } + + if (!isMultiWrite && + write_without_shard_key::useTwoPhaseProtocol( + opCtx, + nss, + true /* isUpdateOrDelete */, + isUpsert, + query, + collation, + _batchedRequest.getLet(), + _batchedRequest.getLegacyRuntimeConstants())) { + // Explain currently cannot be run within a transaction, so each command is instead run + // separately outside of a transaction, and we compose the results at the end. + auto clusterQueryWithoutShardKeyExplainRes = [&] { + ClusterQueryWithoutShardKey clusterQueryWithoutShardKeyCommand( + ClusterExplain::wrapAsExplain(_batchedRequest.toBSON(), verbosity)); + const auto explainClusterQueryWithoutShardKeyCmd = ClusterExplain::wrapAsExplain( + clusterQueryWithoutShardKeyCommand.toBSON({}), verbosity); + auto opMsg = + OpMsgRequest::fromDBAndBody(nss.db(), explainClusterQueryWithoutShardKeyCmd); + return CommandHelpers::runCommandDirectly(opCtx, opMsg).getOwned(); + }(); + + // Since 'explain' does not return the results of the query, we do not have an _id + // document to target by from the 'Read Phase'. We instead will use a dummy _id target + // document 'Write Phase'. 
+ auto clusterWriteWithoutShardKeyExplainRes = [&] { + ClusterWriteWithoutShardKey clusterWriteWithoutShardKeyCommand( + ClusterExplain::wrapAsExplain(_batchedRequest.toBSON(), verbosity), + clusterQueryWithoutShardKeyExplainRes.getStringField("targetShardId") + .toString(), + write_without_shard_key::targetDocForExplain); + const auto explainClusterWriteWithoutShardKeyCmd = ClusterExplain::wrapAsExplain( + clusterWriteWithoutShardKeyCommand.toBSON({}), verbosity); + auto opMsg = + OpMsgRequest::fromDBAndBody(nss.db(), explainClusterWriteWithoutShardKeyCmd); + return CommandHelpers::runCommandDirectly(opCtx, opMsg).getOwned(); + }(); + + auto output = write_without_shard_key::generateExplainResponseForTwoPhaseWriteProtocol( + clusterQueryWithoutShardKeyExplainRes, clusterWriteWithoutShardKeyExplainRes); + result->appendElementsUnique(output); + return true; + } + return false; + } + return false; +} + void ClusterWriteCmd::InvocationBase::explain(OperationContext* opCtx, ExplainOptions::Verbosity verbosity, rpc::ReplyBuilderInterface* result) { @@ -652,6 +726,13 @@ void ClusterWriteCmd::InvocationBase::explain(OperationContext* opCtx, auto nss = req ? req->getNS() : _batchedRequest.getNS(); auto requestBSON = req ? req->toBSON() : _request->body; auto requestPtr = req ? req.get() : &_batchedRequest; + auto bodyBuilder = result->getBodyBuilder(); + + // If we aren't running an explain for updateOne or deleteOne without shard key, continue and + // run the original explain path. + if (_runExplainWithoutShardKey(opCtx, nss, verbosity, &bodyBuilder)) { + return; + } const auto explainCmd = ClusterExplain::wrapAsExplain(requestBSON, verbosity); @@ -662,7 +743,6 @@ void ClusterWriteCmd::InvocationBase::explain(OperationContext* opCtx, BatchItemRef targetingBatchItem(requestPtr, 0); std::vector shardResponses; _commandOpWrite(opCtx, nss, explainCmd, targetingBatchItem, &shardResponses); - auto bodyBuilder = result->getBodyBuilder(); uassertStatusOK(ClusterExplain::buildExplainResult(opCtx, shardResponses, ClusterExplain::kWriteOnShards, diff --git a/src/mongo/s/commands/cluster_write_cmd.h b/src/mongo/s/commands/cluster_write_cmd.h index f29f4182808..fa8a70d51c6 100644 --- a/src/mongo/s/commands/cluster_write_cmd.h +++ b/src/mongo/s/commands/cluster_write_cmd.h @@ -155,6 +155,15 @@ private: return static_cast(definition()); } + /** + * Runs a two-phase protocol to explain an updateOne/deleteOne without a shard key or _id. + * Returns true if we successfully ran the protocol, false otherwise. 
+ */ + bool _runExplainWithoutShardKey(OperationContext* opCtx, + const NamespaceString& nss, + ExplainOptions::Verbosity verbosity, + BSONObjBuilder* result); + const OpMsgRequest* _request; BatchedCommandRequest _batchedRequest; diff --git a/src/mongo/s/commands/cluster_write_without_shard_key_cmd.cpp b/src/mongo/s/commands/cluster_write_without_shard_key_cmd.cpp index c3668182621..40ccace8e72 100644 --- a/src/mongo/s/commands/cluster_write_without_shard_key_cmd.cpp +++ b/src/mongo/s/commands/cluster_write_without_shard_key_cmd.cpp @@ -29,6 +29,7 @@ #include "mongo/db/auth/authorization_session.h" #include "mongo/db/commands.h" +#include "mongo/db/explain_gen.h" #include "mongo/db/internal_transactions_feature_flag_gen.h" #include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/query/projection_parser.h" @@ -36,6 +37,7 @@ #include "mongo/db/update/update_util.h" #include "mongo/logv2/log.h" #include "mongo/s/cluster_commands_helpers.h" +#include "mongo/s/commands/cluster_explain.h" #include "mongo/s/commands/cluster_find_and_modify_cmd.h" #include "mongo/s/commands/cluster_write_cmd.h" #include "mongo/s/grid.h" @@ -293,6 +295,56 @@ public: } private: + void explain(OperationContext* opCtx, + ExplainOptions::Verbosity verbosity, + rpc::ReplyBuilderInterface* result) override { + const auto shardId = ShardId(request().getShardId().toString()); + const auto writeCmdObj = [&] { + const auto explainCmdObj = request().getWriteCmd(); + const auto opMsgRequestExplainCmd = + OpMsgRequest::fromDBAndBody(ns().db(), explainCmdObj); + auto explainRequest = ExplainCommandRequest::parse( + IDLParserContext("_clusterWriteWithoutShardKeyExplain"), + opMsgRequestExplainCmd.body); + return explainRequest.getCommandParameter().getOwned(); + }(); + + const NamespaceString nss = + CommandHelpers::parseNsCollectionRequired(ns().dbName(), writeCmdObj); + const auto targetDocId = request().getTargetDocId(); + const auto commandName = writeCmdObj.firstElementFieldNameStringData(); + + const BSONObj cmdObj = + _createCmdObj(opCtx, shardId, nss, commandName, writeCmdObj, targetDocId); + + const auto explainCmdObj = ClusterExplain::wrapAsExplain(cmdObj, verbosity); + + AsyncRequestsSender::Request arsRequest(shardId, explainCmdObj); + std::vector arsRequestVector({arsRequest}); + + Timer timer; + MultiStatementTransactionRequestsSender ars( + opCtx, + Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(), + request().getDbName(), + std::move(arsRequestVector), + ReadPreferenceSetting(ReadPreference::PrimaryOnly), + Shard::RetryPolicy::kNoRetry); + + auto response = ars.next(); + uassertStatusOK(response.swResponse); + + const auto millisElapsed = timer.millis(); + + auto bodyBuilder = result->getBodyBuilder(); + uassertStatusOK(ClusterExplain::buildExplainResult(opCtx, + {response}, + ClusterExplain::kWriteOnShards, + millisElapsed, + writeCmdObj, + &bodyBuilder)); + } + NamespaceString ns() const override { return NamespaceString(request().getDbName()); } diff --git a/src/mongo/s/write_ops/write_without_shard_key_util.cpp b/src/mongo/s/write_ops/write_without_shard_key_util.cpp index f3d1ed0e4ab..ae0fb38587e 100644 --- a/src/mongo/s/write_ops/write_without_shard_key_util.cpp +++ b/src/mongo/s/write_ops/write_without_shard_key_util.cpp @@ -302,5 +302,92 @@ StatusWith runTwoPhaseWriteProtocol(Operati return StatusWith(swResult.getStatus()); } } + +BSONObj generateExplainResponseForTwoPhaseWriteProtocol( + const BSONObj& clusterQueryWithoutShardKeyExplainObj, + const BSONObj& 
clusterWriteWithoutShardKeyExplainObj) { + // To express the two phase nature of the two phase write protocol, we use the output of the + // 'Read Phase' explain as the 'inputStage' of the 'Write Phase' explain for both queryPlanner + // and executionStats sections. + // + // An example output would look like: + + // "queryPlanner" : { + // "winningPlan" : { + // "stage" : "SHARD_WRITE", + // ... + // “inputStage”: { + // queryPlanner: { + // winningPlan: { + // stage: "SHARD_MERGE", + // ... + // + // } + // } + // } + // } + // } + // + // executionStats : { + // "executionStages" : { + // "stage" : "SHARD_WRITE", + // ... + // }, + // "inputStage" : { + // "stage" : "SHARD_MERGE", + // ... + // } + // + // } + // ... other explain result fields ... + + auto queryPlannerOutput = [&] { + auto clusterQueryWithoutShardKeyQueryPlannerObj = + clusterQueryWithoutShardKeyExplainObj.getObjectField("queryPlanner"); + auto clusterWriteWithoutShardKeyQueryPlannerObj = + clusterWriteWithoutShardKeyExplainObj.getObjectField("queryPlanner"); + + auto winningPlan = clusterWriteWithoutShardKeyQueryPlannerObj.getObjectField("winningPlan"); + BSONObjBuilder newWinningPlanBuilder(winningPlan); + newWinningPlanBuilder.appendObject("inputStage", + clusterQueryWithoutShardKeyQueryPlannerObj.objdata()); + auto newWinningPlan = newWinningPlanBuilder.obj(); + + auto queryPlannerObjNoWinningPlan = + clusterWriteWithoutShardKeyQueryPlannerObj.removeField("winningPlan"); + BSONObjBuilder newQueryPlannerBuilder(queryPlannerObjNoWinningPlan); + newQueryPlannerBuilder.appendObject("winningPlan", newWinningPlan.objdata()); + return newQueryPlannerBuilder.obj(); + }(); + + auto executionStatsOutput = [&] { + auto clusterQueryWithoutShardKeyExecutionStatsObj = + clusterQueryWithoutShardKeyExplainObj.getObjectField("executionStats"); + auto clusterWriteWithoutShardKeyExecutionStatsObj = + clusterWriteWithoutShardKeyExplainObj.getObjectField("executionStats"); + + if (clusterQueryWithoutShardKeyExecutionStatsObj.isEmpty() && + clusterWriteWithoutShardKeyExecutionStatsObj.isEmpty()) { + return BSONObj(); + } + + BSONObjBuilder newExecutionStatsBuilder(clusterWriteWithoutShardKeyExecutionStatsObj); + newExecutionStatsBuilder.appendObject( + "inputStage", clusterQueryWithoutShardKeyExecutionStatsObj.objdata()); + return newExecutionStatsBuilder.obj(); + }(); + + BSONObjBuilder explainOutputBuilder; + if (!queryPlannerOutput.isEmpty()) { + explainOutputBuilder.appendObject("queryPlanner", queryPlannerOutput.objdata()); + } + if (!executionStatsOutput.isEmpty()) { + explainOutputBuilder.appendObject("executionStats", executionStatsOutput.objdata()); + } + // This step is to get 'command', 'serverInfo', and 'serverParamter' fields to return in the + // final explain output. + explainOutputBuilder.appendElementsUnique(clusterWriteWithoutShardKeyExplainObj); + return explainOutputBuilder.obj(); +} } // namespace write_without_shard_key } // namespace mongo diff --git a/src/mongo/s/write_ops/write_without_shard_key_util.h b/src/mongo/s/write_ops/write_without_shard_key_util.h index bb7e4d0509b..5fb9bb35d68 100644 --- a/src/mongo/s/write_ops/write_without_shard_key_util.h +++ b/src/mongo/s/write_ops/write_without_shard_key_util.h @@ -38,6 +38,11 @@ namespace mongo { namespace write_without_shard_key { +// Used as a "dummy" target document for constructing explain responses for single writes without +// shard key. 
+const BSONObj targetDocForExplain = BSON("_id" + << "WriteWithoutShardKey"); + /** * Uses updateDriver to produce the document to insert. Only use when {upsert: true}. */ @@ -75,6 +80,13 @@ bool useTwoPhaseProtocol(OperationContext* opCtx, StatusWith runTwoPhaseWriteProtocol(OperationContext* opCtx, NamespaceString nss, BSONObj cmdObj); +/** + * Return a formatted 'explain' response that describes the work done in the two phase write + * protocol. + **/ +BSONObj generateExplainResponseForTwoPhaseWriteProtocol( + const BSONObj& clusterQueryWithoutShardKeyExplainObj, + const BSONObj& clusterWriteWithoutShardKeyExplainObj); } // namespace write_without_shard_key } // namespace mongo -- cgit v1.2.1
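Taken together, the changes above make generateExplainResponseForTwoPhaseWriteProtocol nest the read-phase ("query without shard key") explain underneath the write-phase explain, so callers receive a single composed plan. A trimmed sketch of the composed response for an updateOne, deleteOne, or findAndModify without a shard key, limited to the fields the new tests assert on (values and omissions are illustrative):

    {
        queryPlanner: {
            winningPlan: {
                stage: "SHARD_WRITE",              // write phase: the one shard chosen by the read phase
                shards: [ /* exactly one shard */ ],
                inputStage: {
                    queryPlanner: {
                        winningPlan: {
                            stage: "SHARD_MERGE",  // SHARD_MERGE_SORT when the original request has a sort
                            shards: [ /* every shard owning chunks for the namespace */ ]
                        }
                    }
                }
            }
        },
        executionStats: {                          // omitted at "queryPlanner" verbosity
            executionStages: { stage: "SHARD_WRITE" /* ... */ },
            inputStage: { executionStages: { stage: "SHARD_MERGE" /* ... */ } }
        },
        serverInfo: { /* ... */ },
        serverParameters: { /* ... */ },
        command: { /* the original write command being explained */ }
    }

Because the write phase is explained against the dummy targetDocForExplain rather than a real matching document, the plan shape is preserved but write-side counters such as nWouldModify and nWouldDelete report zero (and an upsert reports nWouldUpsert: 1, since the dummy document never matches), which is exactly what jstests/sharding/updateOne_without_shard_key/explain.js verifies.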