author     Sara Golemon <sara.golemon@mongodb.com>           2021-01-12 00:06:23 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-01-25 20:56:46 +0000
commit     8d86a33ac0fb2decd8032b9d47f45bf0b388577d (patch)
tree       2a50cfb8673d22ab38bbf2983881e276f2fa2b5d
parent     5d2efc040b405871099fbd2a8547212db3755e02 (diff)
SERVER-52539 Convert createIndexes command to TypedCommand
-rw-r--r--  jstests/core/apply_ops_index_collation.js                              32
-rw-r--r--  jstests/core/create_indexes.js                                          5
-rw-r--r--  jstests/core/txns/commands_banning_txnnumber_outside_transactions.js   2
-rw-r--r--  jstests/replsets/apply_ops_create_indexes.js                            2
-rw-r--r--  jstests/replsets/invalid_index_spec.js                                  71
-rw-r--r--  jstests/sharding/configsvr_retries_createindex_on_stale_config.js      5
-rw-r--r--  jstests/sharding/sessions_collection_auto_healing.js                    3
-rw-r--r--  src/mongo/db/SConscript                                                 3
-rw-r--r--  src/mongo/db/commands/SConscript                                        1
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp                                398
-rw-r--r--  src/mongo/db/create_indexes.idl                                         235
-rw-r--r--  src/mongo/db/sessions_collection.cpp                                    13
-rw-r--r--  src/mongo/s/cluster_commands_helpers.cpp                                11
-rw-r--r--  src/mongo/s/cluster_commands_helpers.h                                  3
-rw-r--r--  src/mongo/s/commands/cluster_create_indexes_cmd.cpp                     107
15 files changed, 444 insertions, 447 deletions
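
For orientation before the patch itself: the heart of this change replaces the hand-rolled
ErrmsgCommandDeprecated implementation of createIndexes with a TypedCommand whose request and
reply types are generated from src/mongo/db/create_indexes.idl. Condensed from the C++ changes
below (a structural sketch, not a standalone compilable unit; authorization, retry logic, and
the other overrides are omitted), the new command looks roughly like this:

    // IDL codegen in create_indexes_gen.h supplies CreateIndexesCmdVersion1Gen,
    // CreateIndexesCommand (the parsed request), and CreateIndexesReply.
    class CmdCreateIndexes : public CreateIndexesCmdVersion1Gen<CmdCreateIndexes> {
    public:
        class Invocation final : public InvocationBase {
        public:
            using InvocationBase::InvocationBase;

            NamespaceString ns() const final {
                // request() is the already-parsed CreateIndexesCommand, so field type
                // checking and unknown-field rejection happen before typedRun() is called.
                return request().getNamespace();
            }

            CreateIndexesReply typedRun(OperationContext* opCtx) {
                // Return a typed reply object instead of appending to a BSONObjBuilder.
                return runCreateIndexesWithCoordinator(opCtx, request());
            }
        };
    } cmdCreateIndex;

One consequence visible in the test changes: because the generated parser is strict, an unknown
top-level field now fails with the IDL "unknown field" error (40415, per the jstest below) rather
than BadValue, and malformed invocations such as {createIndexes: 1} with no indexes array are
rejected up front.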
diff --git a/jstests/core/apply_ops_index_collation.js b/jstests/core/apply_ops_index_collation.js
index cabb22ee232..a15baabc9ea 100644
--- a/jstests/core/apply_ops_index_collation.js
+++ b/jstests/core/apply_ops_index_collation.js
@@ -32,21 +32,23 @@ let res = db.adminCommand({
ui: uuid,
o: {
createIndexes: coll.getFullName(),
- v: 2,
- key: {a: 1},
- name: "a_1_en",
- collation: {
- locale: "en_US",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: false,
- version: "57.1"
- }
+ indexes: [{
+ v: 2,
+ key: {a: 1},
+ name: "a_1_en",
+ collation: {
+ locale: "en_US",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: false,
+ version: "57.1"
+ }
+ }],
}
}]
});
diff --git a/jstests/core/create_indexes.js b/jstests/core/create_indexes.js
index 1ecab14f65b..fbf93ed3c66 100644
--- a/jstests/core/create_indexes.js
+++ b/jstests/core/create_indexes.js
@@ -1,11 +1,14 @@
/**
* @tags: [
+ * requires_fcv_49,
* assumes_superuser_permissions,
* ]
+ * fcv49 for the change to error code in createIndexes invalid field reply.
*/
(function() {
'use strict';
+const kUnknownIDLFieldError = 40415;
var isMongos = ("isdbgrid" == db.runCommand("hello").msg);
var extractResult = function(obj) {
@@ -148,7 +151,7 @@ assert.commandWorked(res, 'v1 index creation should succeed');
// Test that index creation fails with an invalid top-level field.
res = t.runCommand('createIndexes', {indexes: [{key: {e: 1}, name: 'e_1'}], 'invalidField': 1});
-assert.commandFailedWithCode(res, ErrorCodes.BadValue);
+assert.commandFailedWithCode(res, kUnknownIDLFieldError);
// Test that index creation fails with an invalid field in the index spec for index version V2.
res = t.runCommand('createIndexes',
diff --git a/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js b/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js
index 956c0b450a2..c33cc6df475 100644
--- a/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js
+++ b/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js
@@ -28,7 +28,7 @@ const nonRetryableWriteCommands = [
{listCommands: 1},
{create: "c"},
{drop: "c"},
- {createIndexes: 1},
+ {createIndexes: "c", indexes: []},
{mapReduce: "c"}
];
diff --git a/jstests/replsets/apply_ops_create_indexes.js b/jstests/replsets/apply_ops_create_indexes.js
index e3641965ce6..e0ee562f012 100644
--- a/jstests/replsets/apply_ops_create_indexes.js
+++ b/jstests/replsets/apply_ops_create_indexes.js
@@ -30,7 +30,7 @@ cmd = {
op: "c",
ns: dbName + "." + collName,
ui: uuid,
- o: {createIndexes: collName, v: 2, key: {a: 1}, name: cmdFormatIndexNameA}
+ o: {createIndexes: collName, indexes: [{v: 2, key: {a: 1}, name: cmdFormatIndexNameA}]}
}]
};
res = primaryTestDB.runCommand(cmd);
diff --git a/jstests/replsets/invalid_index_spec.js b/jstests/replsets/invalid_index_spec.js
deleted file mode 100644
index 2c78d86c393..00000000000
--- a/jstests/replsets/invalid_index_spec.js
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Confirm that replication of an invalid index specification causes server abort (where index
- * version is >= 2).
- * @tags: [live_record_incompatible]
- */
-
-(function() {
-"use strict";
-
-load("jstests/replsets/rslib.js");
-load("jstests/libs/logv2_helpers.js");
-
-const testName = "invalid_index_spec";
-const replTest = new ReplSetTest({nodes: 2});
-replTest.startSet();
-replTest.initiate();
-
-let primaryDB = replTest.getPrimary().getDB(testName);
-let secondary = replTest.getSecondary();
-let secondaryAdminDB = secondary.getDB("admin");
-
-// Set a fail point that allows for index creation with invalid spec fields.
-primaryDB.adminCommand(
- {configureFailPoint: "skipIndexCreateFieldNameValidation", mode: "alwaysOn"});
-
-clearRawMongoProgramOutput();
-
-// Create a V1 index with invalid spec field. Expected to replicate without error or server
-// abort.
-assert.commandWorked(primaryDB.runCommand(
- {createIndexes: "test", indexes: [{v: 1, name: "w_1", key: {w: 1}, invalidOption1: 1}]}));
-
-// Create a V2 index with invalid spec field. Expected to cause server abort on replication.
-assert.commandWorked(primaryDB.runCommand(
- {createIndexes: "test", indexes: [{v: 2, name: "x_1", key: {x: 1}, invalidOption2: 1}]}));
-
-assert.soon(function() {
- try {
- secondaryAdminDB.runCommand({ping: 1});
- } catch (e) {
- return true;
- }
- return false;
-}, "Node did not terminate due to invalid index spec", 60 * 1000);
-
-// fassert() calls std::abort(), which returns a different exit code for Windows vs. other
-// platforms.
-const exitCode = MongoRunner.EXIT_ABRUPT;
-replTest.stop(secondary, undefined, {allowedExitCode: exitCode});
-
-// During the transition from the old code path in IndexBuilder to IndexBuildsCoordinator, we
-// will accept the fatal assertion code from either component.
-const msgIndexBuilder = "Fatal assertion 50769";
-const msgIndexBuildsCoordinator = "Fatal assertion 34437";
-const msgIndexErrorType = "InvalidIndexSpecificationOption";
-const msgIndexError = "The field 'invalidOption2'";
-
-if (isJsonLogNoConn()) {
- assert(
- rawMongoProgramOutput().search(
- /Fatal assertion.*(50769|34437).*InvalidIndexSpecificationOption.*The field 'invalidOption2'/),
- "Replication should have aborted on invalid index specification");
-} else {
- assert((rawMongoProgramOutput().match(msgIndexBuilder) ||
- rawMongoProgramOutput().match(msgIndexBuildsCoordinator)) &&
- (rawMongoProgramOutput().match(msgIndexErrorType) &&
- rawMongoProgramOutput().match(msgIndexError)),
- "Replication should have aborted on invalid index specification");
-}
-replTest.stopSet();
-})();
diff --git a/jstests/sharding/configsvr_retries_createindex_on_stale_config.js b/jstests/sharding/configsvr_retries_createindex_on_stale_config.js
index 895344506f6..9d48f4b6fcb 100644
--- a/jstests/sharding/configsvr_retries_createindex_on_stale_config.js
+++ b/jstests/sharding/configsvr_retries_createindex_on_stale_config.js
@@ -1,5 +1,6 @@
/*
- * Verifies creating the logical sessions collection TTL index retries on stale version errors.
+ * Verifies creating the logical sessions collection TTL index retries on stale version errors.
+ * @tags: [requires_fcv_49]
*/
(function() {
@@ -38,4 +39,4 @@ validateSessionsCollection(st.shard0, true, false);
validateSessionsCollection(st.shard1, true, true);
st.stop();
-})(); \ No newline at end of file
+})();
diff --git a/jstests/sharding/sessions_collection_auto_healing.js b/jstests/sharding/sessions_collection_auto_healing.js
index 1a11d13402a..6b3eb57b449 100644
--- a/jstests/sharding/sessions_collection_auto_healing.js
+++ b/jstests/sharding/sessions_collection_auto_healing.js
@@ -1,3 +1,6 @@
+/**
+ * @tags: [requires_fcv_49]
+ */
load('jstests/libs/sessions_collection.js');
(function() {
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index 435f1497a61..e395e915490 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -1378,7 +1378,8 @@ env.Library(
source=[
'create_indexes.idl',
],
- LIBDEPS=[
+ LIBDEPS_PRIVATE=[
+ '$BUILD_DIR/mongo/db/catalog/commit_quorum_idl',
'$BUILD_DIR/mongo/idl/idl_parser',
],
)
diff --git a/src/mongo/db/commands/SConscript b/src/mongo/db/commands/SConscript
index 4d4ab5ea62c..fd473783fb7 100644
--- a/src/mongo/db/commands/SConscript
+++ b/src/mongo/db/commands/SConscript
@@ -330,6 +330,7 @@ env.Library(
'$BUILD_DIR/mongo/db/commands',
'$BUILD_DIR/mongo/db/concurrency/lock_manager',
'$BUILD_DIR/mongo/db/concurrency/write_conflict_exception',
+ '$BUILD_DIR/mongo/db/create_indexes_idl',
'$BUILD_DIR/mongo/db/curop_failpoint_helpers',
'$BUILD_DIR/mongo/db/index_builds_coordinator_interface',
'$BUILD_DIR/mongo/db/ops/write_ops_exec',
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 30a39516d60..ee2f2fa16ae 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -46,6 +46,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/create_indexes_gen.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index_builds_coordinator.h"
@@ -84,156 +85,94 @@ MONGO_FAIL_POINT_DEFINE(hangAfterIndexBuildAbort);
// through the IndexBuildsCoordinator.
MONGO_FAIL_POINT_DEFINE(hangCreateIndexesBeforeStartingIndexBuild);
-constexpr auto kIndexesFieldName = "indexes"_sd;
constexpr auto kCommandName = "createIndexes"_sd;
-constexpr auto kCommitQuorumFieldName = "commitQuorum"_sd;
-constexpr auto kIgnoreUnknownIndexOptionsName = "ignoreUnknownIndexOptions"_sd;
-constexpr auto kCreateCollectionAutomaticallyFieldName = "createdCollectionAutomatically"_sd;
-constexpr auto kNumIndexesBeforeFieldName = "numIndexesBefore"_sd;
-constexpr auto kNumIndexesAfterFieldName = "numIndexesAfter"_sd;
-constexpr auto kNoteFieldName = "note"_sd;
+constexpr auto kAllIndexesAlreadyExist = "all indexes already exist"_sd;
+constexpr auto kIndexAlreadyExists = "index already exists"_sd;
/**
* Parses the index specifications from 'cmdObj', validates them, and returns equivalent index
* specifications that have any missing attributes filled in. If any index specification is
* malformed, then an error status is returned.
*/
-StatusWith<std::vector<BSONObj>> parseAndValidateIndexSpecs(
- OperationContext* opCtx,
- const NamespaceString& ns,
- const BSONObj& cmdObj,
- const ServerGlobalParams::FeatureCompatibility& featureCompatibility) {
- bool hasIndexesField = false;
-
- bool ignoreUnknownIndexOptions = false;
- if (cmdObj.hasField(kIgnoreUnknownIndexOptionsName)) {
- auto ignoreUnknownIndexOptionsElement = cmdObj.getField(kIgnoreUnknownIndexOptionsName);
- if (ignoreUnknownIndexOptionsElement.type() != BSONType::Bool) {
- return {ErrorCodes::TypeMismatch,
- str::stream() << "The field '" << kIgnoreUnknownIndexOptionsName
- << "' must be a boolean, but got "
- << typeName(ignoreUnknownIndexOptionsElement.type())};
- }
- ignoreUnknownIndexOptions = ignoreUnknownIndexOptionsElement.boolean();
- }
+std::vector<BSONObj> parseAndValidateIndexSpecs(OperationContext* opCtx,
+ const CreateIndexesCommand& cmd) {
+ constexpr auto k_id_ = "_id_"_sd;
+ constexpr auto kStar = "*"_sd;
- std::vector<BSONObj> indexSpecs;
- for (auto&& cmdElem : cmdObj) {
- auto cmdElemFieldName = cmdElem.fieldNameStringData();
-
- if (kIndexesFieldName == cmdElemFieldName) {
- if (cmdElem.type() != BSONType::Array) {
- return {ErrorCodes::TypeMismatch,
- str::stream()
- << "The field '" << kIndexesFieldName << "' must be an array, but got "
- << typeName(cmdElem.type())};
- }
-
- for (auto&& indexesElem : cmdElem.Obj()) {
- if (indexesElem.type() != BSONType::Object) {
- return {ErrorCodes::TypeMismatch,
- str::stream() << "The elements of the '" << kIndexesFieldName
- << "' array must be objects, but got "
- << typeName(indexesElem.type())};
- }
+ const auto& featureCompatability = serverGlobalParams.featureCompatibility;
+ const auto ns = cmd.getNamespace();
+ const bool ignoreUnknownIndexOptions = cmd.getIgnoreUnknownIndexOptions();
- BSONObj parsedIndexSpec = indexesElem.Obj();
- if (ignoreUnknownIndexOptions) {
- parsedIndexSpec = index_key_validate::removeUnknownFields(parsedIndexSpec);
- }
-
- auto indexSpecStatus = index_key_validate::validateIndexSpec(
- opCtx, parsedIndexSpec, featureCompatibility);
- if (!indexSpecStatus.isOK()) {
- return indexSpecStatus.getStatus().withContext(
- str::stream() << "Error in specification " << parsedIndexSpec.toString());
- }
- auto indexSpec = indexSpecStatus.getValue();
-
- if (IndexDescriptor::isIdIndexPattern(
- indexSpec[IndexDescriptor::kKeyPatternFieldName].Obj())) {
- auto status = index_key_validate::validateIdIndexSpec(indexSpec);
- if (!status.isOK()) {
- return status;
- }
- } else if (indexSpec[IndexDescriptor::kIndexNameFieldName].String() == "_id_"_sd) {
- return {ErrorCodes::BadValue,
- str::stream() << "The index name '_id_' is reserved for the _id index, "
- "which must have key pattern {_id: 1}, found "
- << indexSpec[IndexDescriptor::kKeyPatternFieldName]};
- } else if (indexSpec[IndexDescriptor::kIndexNameFieldName].String() == "*"_sd) {
- // An index named '*' cannot be dropped on its own, because a dropIndex oplog
- // entry with a '*' as an index name means "drop all indexes in this
- // collection". We disallow creation of such indexes to avoid this conflict.
- return {ErrorCodes::BadValue, "The index name '*' is not valid."};
- } else if (ns.isSystem() && !indexSpec[IndexDescriptor::kHiddenFieldName].eoo()) {
- return {ErrorCodes::BadValue, "Can't hide index on system collection"};
- }
+ std::vector<BSONObj> indexSpecs;
+ for (const auto& index : cmd.getIndexes()) {
+ BSONObj parsedIndexSpec = index;
+ if (ignoreUnknownIndexOptions) {
+ parsedIndexSpec = index_key_validate::removeUnknownFields(parsedIndexSpec);
+ }
- indexSpecs.push_back(std::move(indexSpec));
- }
+ auto indexSpecStatus =
+ index_key_validate::validateIndexSpec(opCtx, parsedIndexSpec, featureCompatability);
+ uassertStatusOK(indexSpecStatus.getStatus().withContext(
+ str::stream() << "Error in specification " << parsedIndexSpec.toString()));
- hasIndexesField = true;
- } else if (kCommandName == cmdElemFieldName || kCommitQuorumFieldName == cmdElemFieldName ||
- kIgnoreUnknownIndexOptionsName == cmdElemFieldName ||
- isGenericArgument(cmdElemFieldName)) {
- continue;
+ auto indexSpec = indexSpecStatus.getValue();
+ if (IndexDescriptor::isIdIndexPattern(
+ indexSpec[IndexDescriptor::kKeyPatternFieldName].Obj())) {
+ uassertStatusOK(index_key_validate::validateIdIndexSpec(indexSpec));
} else {
- return {ErrorCodes::BadValue,
- str::stream() << "Invalid field specified for " << kCommandName
- << " command: " << cmdElemFieldName};
+ uassert(ErrorCodes::BadValue,
+ str::stream() << "The index name '_id_' is reserved for the _id index, "
+ "which must have key pattern {_id: 1}, found "
+ << indexSpec[IndexDescriptor::kKeyPatternFieldName],
+ indexSpec[IndexDescriptor::kIndexNameFieldName].String() != k_id_);
+
+ // An index named '*' cannot be dropped on its own, because a dropIndex oplog
+ // entry with a '*' as an index name means "drop all indexes in this
+ // collection". We disallow creation of such indexes to avoid this conflict.
+ uassert(ErrorCodes::BadValue,
+ "The index name '*' is not valid.",
+ indexSpec[IndexDescriptor::kIndexNameFieldName].String() != kStar);
+
+ uassert(ErrorCodes::BadValue,
+ "Can't hide index on system collection",
+ !ns.isSystem() || indexSpec[IndexDescriptor::kHiddenFieldName].eoo());
}
- }
- if (!hasIndexesField) {
- return {ErrorCodes::FailedToParse,
- str::stream() << "The '" << kIndexesFieldName
- << "' field is a required argument of the " << kCommandName
- << " command"};
+ indexSpecs.push_back(std::move(indexSpec));
}
- if (indexSpecs.empty()) {
- return {ErrorCodes::BadValue, "Must specify at least one index to create"};
- }
+ uassert(ErrorCodes::BadValue, "Must specify at least one index to create", !indexSpecs.empty());
return indexSpecs;
}
-void appendFinalIndexFieldsToResult(int numIndexesBefore,
+void appendFinalIndexFieldsToResult(CreateIndexesReply* reply,
+ int numIndexesBefore,
int numIndexesAfter,
- BSONObjBuilder& result,
int numSpecs,
boost::optional<CommitQuorumOptions> commitQuorum) {
- result.append(kNumIndexesBeforeFieldName, numIndexesBefore);
- result.append(kNumIndexesAfterFieldName, numIndexesAfter);
+ reply->setNumIndexesBefore(numIndexesBefore);
+ reply->setNumIndexesAfter(numIndexesAfter);
if (numIndexesAfter == numIndexesBefore) {
- result.append(kNoteFieldName, "all indexes already exist");
+ reply->setNote(kAllIndexesAlreadyExist);
} else if (numIndexesAfter < numIndexesBefore + numSpecs) {
- result.append(kNoteFieldName, "index already exists");
+ reply->setNote(kIndexAlreadyExists);
}
// commitQuorum will be populated only when two phase index build is enabled.
- if (commitQuorum)
- commitQuorum->appendToBuilder(kCommitQuorumFieldName, &result);
+ if (commitQuorum) {
+ reply->setCommitQuorum(commitQuorum);
+ }
}
/**
* Ensures that the options passed in for TTL indexes are valid.
*/
-Status validateTTLOptions(OperationContext* opCtx, const BSONObj& cmdObj) {
- const std::string kExpireAfterSeconds = "expireAfterSeconds";
-
- const BSONElement& indexes = cmdObj[kIndexesFieldName];
- for (const auto& index : indexes.Array()) {
- BSONObj indexObj = index.Obj();
- auto status = index_key_validate::validateIndexSpecTTL(indexObj);
- if (!status.isOK()) {
- return status;
- }
+void validateTTLOptions(OperationContext* opCtx, const CreateIndexesCommand& cmd) {
+ for (const auto& index : cmd.getIndexes()) {
+ uassertStatusOK(index_key_validate::validateIndexSpecTTL(index));
}
-
- return Status::OK();
}
/**
@@ -242,11 +181,12 @@ Status validateTTLOptions(OperationContext* opCtx, const BSONObj& cmdObj) {
*/
boost::optional<CommitQuorumOptions> parseAndGetCommitQuorum(OperationContext* opCtx,
IndexBuildProtocol protocol,
- const BSONObj& cmdObj) {
+ const CreateIndexesCommand& cmd) {
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
auto commitQuorumEnabled = (enableIndexBuildCommitQuorum) ? true : false;
- if (cmdObj.hasField(kCommitQuorumFieldName)) {
+ auto commitQuorum = cmd.getCommitQuorum();
+ if (commitQuorum) {
uassert(ErrorCodes::BadValue,
str::stream() << "Standalones can't specify commitQuorum",
replCoord->isReplEnabled());
@@ -254,8 +194,6 @@ boost::optional<CommitQuorumOptions> parseAndGetCommitQuorum(OperationContext* o
str::stream() << "commitQuorum is supported only for two phase index builds with "
"commit quorum support enabled ",
(IndexBuildProtocol::kTwoPhase == protocol && commitQuorumEnabled));
- CommitQuorumOptions commitQuorum;
- uassertStatusOK(commitQuorum.parse(cmdObj.getField(kCommitQuorumFieldName)));
return commitQuorum;
}
@@ -285,28 +223,21 @@ std::vector<BSONObj> resolveDefaultsAndRemoveExistingIndexes(OperationContext* o
}
/**
- * Fills in command result with number of indexes when there are no indexes to add.
- */
-void fillCommandResultWithIndexesAlreadyExistInfo(int numIndexes, BSONObjBuilder* result) {
- result->append("numIndexesBefore", numIndexes);
- result->append("numIndexesAfter", numIndexes);
- result->append("note", "all indexes already exist");
-};
-
-/**
* Returns true, after filling in the command result, if the index creation can return early.
*/
bool indexesAlreadyExist(OperationContext* opCtx,
const CollectionPtr& collection,
const std::vector<BSONObj>& specs,
- BSONObjBuilder* result) {
+ CreateIndexesReply* reply) {
auto specsCopy = resolveDefaultsAndRemoveExistingIndexes(opCtx, collection, specs);
if (specsCopy.size() > 0) {
return false;
}
auto numIndexes = collection->getIndexCatalog()->numIndexesTotal(opCtx);
- fillCommandResultWithIndexesAlreadyExistInfo(numIndexes, result);
+ reply->setNumIndexesBefore(numIndexes);
+ reply->setNumIndexesAfter(numIndexes);
+ reply->setNote(kAllIndexesAlreadyExist);
return true;
}
@@ -352,13 +283,12 @@ void checkDatabaseShardingState(OperationContext* opCtx, const NamespaceString&
* unused.
* Expects to be run at the end of a larger writeConflictRetry loop.
*/
-BSONObj runCreateIndexesOnNewCollection(OperationContext* opCtx,
- const NamespaceString& ns,
- const std::vector<BSONObj>& specs,
- boost::optional<CommitQuorumOptions> commitQuorum,
- bool createCollImplicitly) {
- BSONObjBuilder createResult;
-
+CreateIndexesReply runCreateIndexesOnNewCollection(
+ OperationContext* opCtx,
+ const NamespaceString& ns,
+ const std::vector<BSONObj>& specs,
+ boost::optional<CommitQuorumOptions> commitQuorum,
+ bool createCollImplicitly) {
WriteUnitOfWork wunit(opCtx);
auto databaseHolder = DatabaseHolder::get(opCtx);
@@ -426,18 +356,18 @@ BSONObj runCreateIndexesOnNewCollection(OperationContext* opCtx,
}
wunit.commit();
+ CreateIndexesReply reply;
+
appendFinalIndexFieldsToResult(
- numIndexesBefore, numIndexesAfter, createResult, int(specs.size()), commitQuorum);
+ &reply, numIndexesBefore, numIndexesAfter, int(specs.size()), commitQuorum);
+ reply.setCreatedCollectionAutomatically(true);
- return createResult.obj();
+ return reply;
}
-bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
- const std::string& dbname,
- const BSONObj& cmdObj,
- BSONObjBuilder& result) {
- const NamespaceString ns(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
-
+CreateIndexesReply runCreateIndexesWithCoordinator(OperationContext* opCtx,
+ const CreateIndexesCommand& cmd) {
+ const auto ns = cmd.getNamespace();
uassertStatusOK(userAllowedWriteNS(ns));
// Disallow users from creating new indexes on config.transactions since the sessions code
@@ -451,26 +381,25 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
<< " within a transaction.",
!opCtx->inMultiDocumentTransaction() || !ns.isSystem());
- auto specs = uassertStatusOK(
- parseAndValidateIndexSpecs(opCtx, ns, cmdObj, serverGlobalParams.featureCompatibility));
+ auto specs = parseAndValidateIndexSpecs(opCtx, cmd);
auto replCoord = repl::ReplicationCoordinator::get(opCtx);
auto indexBuildsCoord = IndexBuildsCoordinator::get(opCtx);
// Two phase index builds are designed to improve the availability of indexes in a replica set.
auto protocol = !replCoord->isOplogDisabledFor(opCtx, ns) ? IndexBuildProtocol::kTwoPhase
: IndexBuildProtocol::kSinglePhase;
- auto commitQuorum = parseAndGetCommitQuorum(opCtx, protocol, cmdObj);
+ auto commitQuorum = parseAndGetCommitQuorum(opCtx, protocol, cmd);
if (commitQuorum) {
- uassertStatusOK(replCoord->checkIfCommitQuorumCanBeSatisfied(*commitQuorum));
+ uassertStatusOK(replCoord->checkIfCommitQuorumCanBeSatisfied(commitQuorum.get()));
}
- Status validateTTL = validateTTLOptions(opCtx, cmdObj);
- uassertStatusOK(validateTTL);
+ validateTTLOptions(opCtx, cmd);
// Preliminary checks before handing control over to IndexBuildsCoordinator:
// 1) We are in a replication mode that allows for index creation.
// 2) Check sharding state.
// 3) Check if we can create the index without handing control to the IndexBuildsCoordinator.
OptionalCollectionUUID collectionUUID;
+ CreateIndexesReply reply;
{
Lock::DBLock dbLock(opCtx, ns.db(), MODE_IS);
checkDatabaseShardingState(opCtx, ns);
@@ -485,7 +414,7 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
// Before potentially taking an exclusive collection lock, check if all indexes already
// exist while holding an intent lock.
if (collection &&
- indexesAlreadyExist(opCtx, collection.getCollection(), specs, &result)) {
+ indexesAlreadyExist(opCtx, collection.getCollection(), specs, &reply)) {
repl::ReplClientInfo::forClient(opCtx->getClient())
.setLastOpToSystemLastOpTime(opCtx);
return true;
@@ -496,24 +425,20 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
// The collection exists and was not created in the same multi-document transaction
// as the createIndexes.
collectionUUID = collection->uuid();
- result.appendBool(kCreateCollectionAutomaticallyFieldName, false);
+ reply.setCreatedCollectionAutomatically(false);
return false;
}
- bool createCollImplicitly = collection ? false : true;
+ const bool createCollImplicitly = collection ? false : true;
- auto createIndexesResult = runCreateIndexesOnNewCollection(
+ reply = runCreateIndexesOnNewCollection(
opCtx, ns, specs, commitQuorum, createCollImplicitly);
- // No further sources of WriteConflicts can occur at this point, so it is safe to
- // append elements to `result` inside the writeConflictRetry loop.
- result.appendBool(kCreateCollectionAutomaticallyFieldName, true);
- result.appendElements(createIndexesResult);
return true;
});
if (indexExists) {
// No need to proceed if the index either already existed or has just been built.
- return true;
+ return reply;
}
// If the index does not exist by this point, the index build must go through the index
@@ -548,8 +473,14 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
bool shouldContinueInBackground = false;
try {
- auto buildIndexFuture = uassertStatusOK(indexBuildsCoord->startIndexBuild(
- opCtx, dbname, *collectionUUID, specs, buildUUID, protocol, indexBuildOptions));
+ auto buildIndexFuture =
+ uassertStatusOK(indexBuildsCoord->startIndexBuild(opCtx,
+ cmd.getDbName().toString(),
+ *collectionUUID,
+ specs,
+ buildUUID,
+ protocol,
+ indexBuildOptions));
auto deadline = opCtx->getDeadline();
LOGV2(20440,
@@ -649,7 +580,7 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
"namespace"_attr = ns,
"collectionUUID"_attr = *collectionUUID,
"exception"_attr = ex);
- return true;
+ return reply;
}
if (shouldContinueInBackground) {
@@ -677,9 +608,9 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
appendFinalIndexFieldsToResult(
- stats.numIndexesBefore, stats.numIndexesAfter, result, int(specs.size()), commitQuorum);
+ &reply, stats.numIndexesBefore, stats.numIndexesAfter, int(specs.size()), commitQuorum);
- return true;
+ return reply;
}
/**
@@ -687,86 +618,79 @@ bool runCreateIndexesWithCoordinator(OperationContext* opCtx,
* indexes : [ { ns : "test.bar", key : { x : 1 }, name: "x_1" } ],
* commitQuorum: "majority" }
*/
-class CmdCreateIndex : public ErrmsgCommandDeprecated {
+class CmdCreateIndexes : public CreateIndexesCmdVersion1Gen<CmdCreateIndexes> {
public:
- CmdCreateIndex() : ErrmsgCommandDeprecated(kCommandName) {}
-
- const std::set<std::string>& apiVersions() const {
- return kApiVersions1;
- }
+ class Invocation final : public InvocationBase {
+ public:
+ using InvocationBase::InvocationBase;
- bool supportsWriteConcern(const BSONObj& cmd) const override {
- return true;
- }
-
- bool collectsResourceConsumptionMetrics() const override {
- return true;
- }
+ bool supportsWriteConcern() const final {
+ return true;
+ }
- AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
- return AllowedOnSecondary::kNever;
- }
+ NamespaceString ns() const final {
+ return request().getNamespace();
+ }
- Status checkAuthForCommand(Client* client,
- const std::string& dbname,
- const BSONObj& cmdObj) const override {
- ActionSet actions;
- actions.addAction(ActionType::createIndex);
- Privilege p(parseResourcePattern(dbname, cmdObj), actions);
- if (AuthorizationSession::get(client)->isAuthorizedForPrivilege(p))
- return Status::OK();
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
+ void doCheckAuthorization(OperationContext* opCtx) const {
+ Privilege p(CommandHelpers::resourcePatternForNamespace(ns().toString()),
+ {ActionType::createIndex});
+ uassert(ErrorCodes::Unauthorized,
+ "Unauthorized",
+ AuthorizationSession::get(opCtx->getClient())->isAuthorizedForPrivilege(p));
+ }
- bool errmsgRun(OperationContext* opCtx,
- const std::string& dbname,
- const BSONObj& cmdObj,
- std::string& errmsg,
- BSONObjBuilder& result) override {
- // If we encounter an IndexBuildAlreadyInProgress error for any of the requested index
- // specs, then we will wait for the build(s) to finish before trying again unless we are in
- // a multi-document transaction.
- const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
- bool shouldLogMessageOnAlreadyBuildingError = true;
- while (true) {
- try {
- return runCreateIndexesWithCoordinator(opCtx, dbname, cmdObj, result);
- } catch (const DBException& ex) {
- hangAfterIndexBuildAbort.pauseWhileSet();
- // We can only wait for an existing index build to finish if we are able to release
- // our locks, in order to allow the existing index build to proceed. We cannot
- // release locks in transactions, so we bypass the below logic in transactions.
- if (ex.toStatus() != ErrorCodes::IndexBuildAlreadyInProgress ||
- opCtx->inMultiDocumentTransaction()) {
- throw;
- }
- if (shouldLogMessageOnAlreadyBuildingError) {
- auto bsonElem = cmdObj.getField(kIndexesFieldName);
- LOGV2(20450,
- "Received a request to create indexes: '{indexesFieldName}', but found "
- "that at least one of the indexes is already being built, '{error}'. "
- "This request will wait for the pre-existing index build to finish "
- "before proceeding",
- "Received a request to create indexes, "
- "but found that at least one of the indexes is already being built."
- "This request will wait for the pre-existing index build to finish "
- "before proceeding",
- "indexesFieldName"_attr = bsonElem,
- "error"_attr = ex);
- shouldLogMessageOnAlreadyBuildingError = false;
+ CreateIndexesReply typedRun(OperationContext* opCtx) {
+ // If we encounter an IndexBuildAlreadyInProgress error for any of the requested index
+ // specs, then we will wait for the build(s) to finish before trying again unless we are
+ // in a multi-document transaction.
+ bool shouldLogMessageOnAlreadyBuildingError = true;
+ while (true) {
+ try {
+ return runCreateIndexesWithCoordinator(opCtx, request());
+ } catch (const DBException& ex) {
+ hangAfterIndexBuildAbort.pauseWhileSet();
+ // We can only wait for an existing index build to finish if we are able to
+ // release our locks, in order to allow the existing index build to proceed. We
+ // cannot release locks in transactions, so we bypass the below logic in
+ // transactions.
+ if (ex.toStatus() != ErrorCodes::IndexBuildAlreadyInProgress ||
+ opCtx->inMultiDocumentTransaction()) {
+ throw;
+ }
+ if (shouldLogMessageOnAlreadyBuildingError) {
+ LOGV2(
+ 20450,
+ "Received a request to create indexes: '{indexesFieldName}', but found "
+ "that at least one of the indexes is already being built, '{error}'. "
+ "This request will wait for the pre-existing index build to finish "
+ "before proceeding",
+ "Received a request to create indexes, "
+ "but found that at least one of the indexes is already being built."
+ "This request will wait for the pre-existing index build to finish "
+ "before proceeding",
+ "indexesFieldName"_attr = request().getIndexes(),
+ "error"_attr = ex);
+ shouldLogMessageOnAlreadyBuildingError = false;
+ }
+ // Reset the snapshot because we have released locks and need a fresh snapshot
+ // if we reacquire the locks again later.
+ opCtx->recoveryUnit()->abandonSnapshot();
+ // This is a bit racy since we are not holding a lock across discovering an
+ // in-progress build and starting to listen for completion. It is good enough,
+ // however: we can only wait longer than needed, not less.
+ IndexBuildsCoordinator::get(opCtx)->waitUntilAnIndexBuildFinishes(opCtx);
}
- // Unset the response fields so we do not write duplicate fields.
- errmsg = "";
- result.resetToEmpty();
- // Reset the snapshot because we have released locks and need a fresh snapshot
- // if we reacquire the locks again later.
- opCtx->recoveryUnit()->abandonSnapshot();
- // This is a bit racy since we are not holding a lock across discovering an
- // in-progress build and starting to listen for completion. It is good enough,
- // however: we can only wait longer than needed, not less.
- IndexBuildsCoordinator::get(opCtx)->waitUntilAnIndexBuildFinishes(opCtx);
}
}
+ };
+
+ bool collectsResourceConsumptionMetrics() const final {
+ return true;
+ }
+
+ AllowedOnSecondary secondaryAllowed(ServiceContext*) const final {
+ return AllowedOnSecondary::kNever;
}
} cmdCreateIndex;
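
The practical effect of the rewrite above is that every helper now consumes the parsed
CreateIndexesCommand and fills in a CreateIndexesReply, instead of threading a BSONObjBuilder and
string field-name constants through each function. A hedged sketch of the generated accessors in
play (the getters/setters are the ones used in the patch; the helper name summarizeBuild and its
shape are illustrative only):

    // Sketch only: mirrors appendFinalIndexFieldsToResult() using the IDL-generated setters.
    CreateIndexesReply summarizeBuild(const CreateIndexesCommand& cmd,
                                      int numIndexesBefore,
                                      int numIndexesAfter,
                                      boost::optional<CommitQuorumOptions> commitQuorum) {
        CreateIndexesReply reply;
        reply.setNumIndexesBefore(numIndexesBefore);
        reply.setNumIndexesAfter(numIndexesAfter);
        if (numIndexesAfter == numIndexesBefore) {
            reply.setNote("all indexes already exist"_sd);
        } else if (numIndexesAfter < numIndexesBefore + int(cmd.getIndexes().size())) {
            reply.setNote("index already exists"_sd);
        }
        if (commitQuorum) {
            // Only serialized into the reply when a commit quorum was actually used.
            reply.setCommitQuorum(commitQuorum);
        }
        return reply;
    }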
diff --git a/src/mongo/db/create_indexes.idl b/src/mongo/db/create_indexes.idl
index d9a58ba026e..204e505a6cc 100644
--- a/src/mongo/db/create_indexes.idl
+++ b/src/mongo/db/create_indexes.idl
@@ -1,4 +1,4 @@
-# Copyright (C) 2018-present MongoDB, Inc.
+# Copyright (C) 2019-present MongoDB, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the Server Side Public License, version 1,
@@ -26,102 +26,151 @@
# it in the license file.
#
-# This IDL file describes the BSON format for a LogicalSessionId, and
-# handles the serialization to and deserialization from its BSON representation
-# for that class.
-
global:
- cpp_namespace: "mongo"
+ cpp_namespace: "mongo"
imports:
- - "mongo/idl/basic_types.idl"
+ - "mongo/db/catalog/commit_quorum.idl"
+ - "mongo/idl/basic_types.idl"
structs:
+ CreateIndexesReply:
+ description: "Reply to 'createIndexes' command"
+ strict: true
+ fields:
+ numIndexesBefore:
+ description: 'Index count before create'
+ type: int
+ optional: true
+ numIndexesAfter:
+ description: 'Index count after create'
+ type: int
+ optional: true
+ createdCollectionAutomatically:
+ description: >-
+ Whether or not this createIndexes command resulted
+ in a newly created collection.
+ type: bool
+ optional: true
+ commitQuorum:
+ description: 'Commit Quorum options used'
+ type: CommitQuorum
+ optional: true
+ note:
+ description: 'Optional warning/error related to createIndex'
+ type: string
+ optional: true
+
+ # A built index appears with these fields, which must be the same as ListIndexReplyItem's fields
+ # in list_indexes.idl.
+ # TODO (SERVER-52539): Write a test that asserts they're the same.
+ NewIndexSpec:
+ description: "A type representing a spec for a new index"
+ strict: true
+ fields:
+ v:
+ description: 'Index spec version'
+ type: safeInt
+ optional: true
+ key:
+ description: 'Key to index on'
+ type: object_owned
+ name:
+ description: 'Descriptive name for the index'
+ type: string
+ ns:
+ # MongoDB 4.2 and older generate this field, see SERVER-41696.
+ type: string
+ ignore: true
+ background:
+ type: safeBool
+ optional: true
+ unique:
+ type: safeBool
+ optional: true
+ hidden:
+ type: safeBool
+ optional: true
+ partialFilterExpression:
+ type: object_owned
+ optional: true
+ sparse:
+ type: safeBool
+ optional: true
+ expireAfterSeconds:
+ type: safeInt
+ optional: true
+ storageEngine:
+ type: object_owned
+ optional: true
+ weights:
+ type: object_owned
+ optional: true
+ default_language:
+ type: string
+ optional: true
+ language_override:
+ type: string
+ optional: true
+ textIndexVersion:
+ type: safeInt
+ optional: true
+ 2dsphereIndexVersion:
+ type: safeInt
+ optional: true
+ bits:
+ type: safeInt
+ optional: true
+ min:
+ type: safeDouble
+ optional: true
+ max:
+ type: safeDouble
+ optional: true
+ bucketSize:
+ type: safeDouble
+ optional: true
+ collation:
+ type: object_owned
+ optional: true
+ wildcardProjection:
+ type: object_owned
+ optional: true
+ coarsestIndexedLevel:
+ type: safeInt
+ optional: true
+ finestIndexedLevel:
+ type: safeInt
+ optional: true
+ dropDups:
+ type: safeBool
+ optional: true
+ unstable: true
- # A built index appears with these fields, which must be the same as ListIndexReplyItem's fields
- # in list_indexes.idl.
- # TODO (SERVER-52539): Write a test that asserts they're the same.
- NewIndexSpec:
- description: "A type representing a spec for a new index"
- strict: true
- fields:
- v:
- type: safeInt
- optional: true
- key: object_owned
- name: string
- ns:
- # MongoDB 4.2 and older generate this field, see SERVER-41696.
- type: string
- ignore: true
- background:
- type: safeBool
- optional: true
- unique:
- type: safeBool
- optional: true
- hidden:
- type: safeBool
- optional: true
- partialFilterExpression:
- type: object_owned
- optional: true
- sparse:
- type: safeBool
- optional: true
- expireAfterSeconds:
- type: safeInt
- optional: true
- storageEngine:
- type: object_owned
- optional: true
- weights:
- type: object_owned
- optional: true
- default_language:
- type: string
- optional: true
- language_override:
- type: string
- optional: true
- textIndexVersion:
- type: safeInt
- optional: true
- 2dsphereIndexVersion:
- type: safeInt
- optional: true
- bits:
- type: safeInt
- optional: true
- min:
- type: safeDouble
- optional: true
- max:
- type: safeDouble
- optional: true
- bucketSize:
- type: safeDouble
- optional: true
- collation:
- type: object_owned
- optional: true
- wildcardProjection:
- type: object_owned
- optional: true
- coarsestIndexedLevel:
- type: safeInt
- optional: true
- finestIndexedLevel:
- type: safeInt
- optional: true
- dropDups:
- type: safeBool
- optional: true
- unstable: true
+commands:
+ createIndexes:
+ description: "Command for creating indexes on a collection"
+ command_name: createIndexes
+ namespace: concatenate_with_db
+ cpp_name: CreateIndexesCommand
+ api_version: "1"
+ reply_type: CreateIndexesReply
+ strict: true
+ fields:
+ v:
+ description: 'Index schema version'
+ type: safeInt
+ default: 2
+ indexes:
+ description: 'Indexes to be created'
+ # array<NewIndexSpec> but respect ignoreUnknownIndexOptions
+ type: array<object>
+ ignoreUnknownIndexOptions:
+ description: 'Ignore unknown options in index spec'
+ type: safeBool
+ default: false
+ commitQuorum:
+ description: 'Commit Quorum options'
+ type: CommitQuorum
+ optional: true
- CreateIndexesCmd:
- description: "A struct representing a createIndexes command"
- strict: false
- fields:
- createIndexes: string
- indexes: array<NewIndexSpec>
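
With the command now described in IDL, the wire format it accepts is exactly the field list
above. A hedged example of a request that satisfies the new definition (built with the BSON
macro purely for illustration; the collection name, key pattern, and index name are made up):

    // Illustrative only: a createIndexes request matching the IDL command definition above.
    BSONObj exampleCreateIndexes =
        BSON("createIndexes" << "weather"                    // namespace: concatenate_with_db
             << "indexes"
             << BSON_ARRAY(BSON("key" << BSON("station" << 1 << "ts" << -1)
                                      << "name" << "station_1_ts_-1"
                                      << "expireAfterSeconds" << 3600))
             << "ignoreUnknownIndexOptions" << false         // safeBool, defaults to false
             << "commitQuorum" << "majority");               // optional CommitQuorum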
diff --git a/src/mongo/db/sessions_collection.cpp b/src/mongo/db/sessions_collection.cpp
index b883bebf76c..a219515ba7a 100644
--- a/src/mongo/db/sessions_collection.cpp
+++ b/src/mongo/db/sessions_collection.cpp
@@ -276,16 +276,11 @@ BSONObj SessionsCollection::generateCreateIndexesCmd() {
index.setName(kSessionsTTLIndex);
index.setExpireAfterSeconds(localLogicalSessionTimeoutMinutes * 60);
- std::vector<NewIndexSpec> indexes;
- indexes.push_back(std::move(index));
+ CreateIndexesCommand createIndexes(NamespaceString::kLogicalSessionsNamespace);
+ createIndexes.setIndexes({index.toBSON()});
- CreateIndexesCmd createIndexes;
- createIndexes.setCreateIndexes(NamespaceString::kLogicalSessionsNamespace.coll());
- createIndexes.setIndexes(std::move(indexes));
-
- return BSONObjBuilder(createIndexes.toBSON())
- .append(WriteConcernOptions::kWriteConcernField, WriteConcernOptions::kImplicitDefault)
- .obj();
+ return createIndexes.toBSON(
+ BSON(WriteConcernOptions::kWriteConcernField << WriteConcernOptions::kImplicitDefault));
}
BSONObj SessionsCollection::generateCollModCmd() {
diff --git a/src/mongo/s/cluster_commands_helpers.cpp b/src/mongo/s/cluster_commands_helpers.cpp
index 21c65484811..a86ef54bd87 100644
--- a/src/mongo/s/cluster_commands_helpers.cpp
+++ b/src/mongo/s/cluster_commands_helpers.cpp
@@ -359,6 +359,17 @@ BSONObj applyReadWriteConcern(OperationContext* opCtx, BasicCommand* cmd, const
cmdObj);
}
+BSONObj applyReadWriteConcern(OperationContext* opCtx,
+ BasicCommandWithReplyBuilderInterface* cmd,
+ const BSONObj& cmdObj) {
+ const auto& readConcernArgs = repl::ReadConcernArgs::get(opCtx);
+ const auto readConcernSupport = cmd->supportsReadConcern(cmdObj, readConcernArgs.getLevel());
+ return applyReadWriteConcern(opCtx,
+ readConcernSupport.readConcernSupport.isOK(),
+ cmd->supportsWriteConcern(cmdObj),
+ cmdObj);
+}
+
BSONObj stripWriteConcern(const BSONObj& cmdObj) {
BSONObjBuilder output;
for (const auto& elem : cmdObj) {
diff --git a/src/mongo/s/cluster_commands_helpers.h b/src/mongo/s/cluster_commands_helpers.h
index 02f87a19422..bfc53fd0a8c 100644
--- a/src/mongo/s/cluster_commands_helpers.h
+++ b/src/mongo/s/cluster_commands_helpers.h
@@ -154,6 +154,9 @@ BSONObj applyReadWriteConcern(OperationContext* opCtx,
CommandInvocation* invocation,
const BSONObj& cmdObj);
BSONObj applyReadWriteConcern(OperationContext* opCtx, BasicCommand* cmd, const BSONObj& cmdObj);
+BSONObj applyReadWriteConcern(OperationContext* opCtx,
+ BasicCommandWithReplyBuilderInterface* cmd,
+ const BSONObj& cmdObj);
/**
* Returns a copy of 'cmdObj' with the writeConcern removed.
diff --git a/src/mongo/s/commands/cluster_create_indexes_cmd.cpp b/src/mongo/s/commands/cluster_create_indexes_cmd.cpp
index bd27780bb6c..c569774a706 100644
--- a/src/mongo/s/commands/cluster_create_indexes_cmd.cpp
+++ b/src/mongo/s/commands/cluster_create_indexes_cmd.cpp
@@ -31,7 +31,9 @@
#include "mongo/platform/basic.h"
+#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/commands.h"
+#include "mongo/db/create_indexes_gen.h"
#include "mongo/logv2/log.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/cluster_commands_helpers.h"
@@ -40,39 +42,41 @@
namespace mongo {
namespace {
-class CreateIndexesCmd : public ErrmsgCommandDeprecated {
+constexpr auto kRawFieldName = "raw"_sd;
+constexpr auto kWriteConcernErrorFieldName = "writeConcernError"_sd;
+constexpr auto kTopologyVersionFieldName = "topologyVersion"_sd;
+
+class CreateIndexesCmd : public BasicCommandWithRequestParser<CreateIndexesCmd> {
public:
- CreateIndexesCmd() : ErrmsgCommandDeprecated("createIndexes") {}
+ using Request = CreateIndexesCommand;
- const std::set<std::string>& apiVersions() const {
+ const std::set<std::string>& apiVersions() const final {
return kApiVersions1;
}
- AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
+ AllowedOnSecondary secondaryAllowed(ServiceContext*) const final {
return AllowedOnSecondary::kNever;
}
- bool adminOnly() const override {
+ bool adminOnly() const final {
return false;
}
void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
- std::vector<Privilege>* out) const override {
- ActionSet actions;
- actions.addAction(ActionType::createIndex);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ std::vector<Privilege>* out) const final {
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), {ActionType::createIndex}));
}
- bool supportsWriteConcern(const BSONObj& cmd) const override {
+ bool supportsWriteConcern(const BSONObj& cmd) const final {
return true;
}
- bool errmsgRun(OperationContext* opCtx,
- const std::string& dbName,
- const BSONObj& cmdObj,
- std::string& errmsg,
- BSONObjBuilder& output) override {
+ bool runWithRequestParser(OperationContext* opCtx,
+ const std::string& dbName,
+ const BSONObj& cmdObj,
+ const RequestParser&,
+ BSONObjBuilder& output) final {
const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj));
LOGV2_DEBUG(22750,
1,
@@ -97,9 +101,80 @@ public:
BSONObj() /* query */,
BSONObj() /* collation */);
- return appendRawResponses(opCtx, &errmsg, &output, std::move(shardResponses)).responseOK;
+ std::string errmsg;
+ const bool ok =
+ appendRawResponses(opCtx, &errmsg, &output, std::move(shardResponses)).responseOK;
+ if (!errmsg.empty()) {
+ CommandHelpers::appendSimpleCommandStatus(output, ok, errmsg);
+ }
+ return ok;
}
+ /**
+ * Response should either be "ok" and contain just 'raw' which is a dictionary of
+ * CreateIndexesReply (with optional 'ok' and 'writeConcernError' fields).
+ * or it should be "not ok" and contain an 'errmsg' and possibly a 'writeConcernError'.
+ * 'code' & 'codeName' are permitted in either scenario, but non-zero 'code' indicates "not ok".
+ */
+ void validateResult(const BSONObj& result) final {
+ BSONElement rawElem;
+ bool ok = true, hasErrMsg = false;
+
+ for (auto elem : result) {
+ const auto fieldName = elem.fieldNameStringData();
+ if (fieldName == kRawFieldName) {
+ rawElem = elem;
+ uassert(ErrorCodes::BadValue,
+ str::stream()
+ << "'raw' field must be an object, got: " << typeName(elem.type()),
+ elem.type() == Object);
+ } else if (fieldName == ErrorReply::kCodeFieldName) {
+ uassert(ErrorCodes::BadValue,
+ str::stream() << "Reply contained non-numeric status code: " << elem,
+ elem.isNumber());
+ ok = ok & (elem.numberInt() != 0);
+ } else if (fieldName == ErrorReply::kOkFieldName) {
+ ok = ok & elem.trueValue();
+ } else if (fieldName == ErrorReply::kErrmsgFieldName) {
+ hasErrMsg = true;
+ } else if ((fieldName == ErrorReply::kCodeNameFieldName) ||
+ (fieldName == kWriteConcernErrorFieldName)) {
+ // Ignorable field.
+ } else {
+ uasserted(ErrorCodes::BadValue,
+ str::stream() << "Invalid field in reply: " << fieldName);
+ }
+ }
+
+ if (ok) {
+ uassert(
+ ErrorCodes::BadValue, "Error message field present for 'ok' result", !hasErrMsg);
+ uassert(ErrorCodes::BadValue, "Missing field in reply: raw", !rawElem.eoo());
+
+ invariant(rawElem.type() == Object); // Validated in field loop above.
+ IDLParserErrorContext ctx("createIndexesReply");
+ StringDataSet ignorableFields(
+ {kWriteConcernErrorFieldName, ErrorReply::kOkFieldName, kTopologyVersionFieldName});
+ for (auto elem : rawElem.Obj()) {
+ uassert(ErrorCodes::FailedToParse,
+ str::stream() << "Response from shard must be an object, found: "
+ << typeName(elem.type()),
+ elem.type() == Object);
+ try {
+ // 'ok' is a permissible part of a reply even though it's not
+ // a formal part of the command reply.
+ CreateIndexesReply::parse(ctx, elem.Obj().removeFields(ignorableFields));
+ } catch (const DBException& ex) {
+ uasserted(ex.code(),
+ str::stream()
+ << "Failed parsing response from shard: " << ex.reason());
+ }
+ }
+ } else {
+ uassert(
+ ErrorCodes::BadValue, "Error message field missing for 'not ok' result", hasErrMsg);
+ }
+ }
} createIndexesCmd;
} // namespace
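
For the new validateResult() above, a successful mongos response is an 'ok' document whose 'raw'
field maps each shard's connection string to that shard's CreateIndexesReply; failed responses
must instead carry an errmsg. A hedged illustration of a response that would pass validation
(shard names, hosts, and counts are made up):

    // Illustrative only: the kind of sharded response validateResult() accepts as "ok".
    BSONObj exampleShardedReply = BSON(
        "raw" << BSON("shard0/node0.example.net:27018"
                          << BSON("numIndexesBefore" << 1 << "numIndexesAfter" << 2
                                  << "createdCollectionAutomatically" << false
                                  << "commitQuorum" << "votingMembers"
                                  << "ok" << 1)
                      << "shard1/node1.example.net:27018"
                          << BSON("numIndexesBefore" << 2 << "numIndexesAfter" << 2
                                  << "note" << "all indexes already exist"
                                  << "ok" << 1))
              << "ok" << 1);

Each per-shard object is parsed as a strict CreateIndexesReply after the ignorable 'ok',
'writeConcernError', and 'topologyVersion' fields are stripped, so any other stray field in a
shard's reply fails validation.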