author     Shin Yee Tan <shinyee.tan@mongodb.com>              2022-03-02 23:12:37 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2022-03-03 00:28:38 +0000
commit     c17ce34104fe2ece4465d0cb25ef705e8375dda0 (patch)
tree       8307ad7c0f104fcc708c9a3d37d52e703dd36e01 /src/mongo/db
parent     2269e70fb945cbd9a670108f3f832d92f77bcd33 (diff)
download   mongo-c17ce34104fe2ece4465d0cb25ef705e8375dda0.tar.gz
SERVER-63662 Return fewer than 8MB of violating documents in collMod conversion response
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/catalog/coll_mod.cpp         6
-rw-r--r--  src/mongo/db/catalog/coll_mod_index.cpp  36
-rw-r--r--  src/mongo/db/catalog/coll_mod_index.h    14
3 files changed, 32 insertions, 24 deletions
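
At a glance, the commit folds buildDuplicateViolations into buildConvertUniqueErrorStatus and caps the reported duplicate _id values at BSONObjMaxUserSize / 2 (8MB), while still guaranteeing that at least one violation is returned to the caller. The sketch below models that capping logic with plain standard-library types rather than MongoDB's BSON builders; the names (capViolations, kMaxUserSize) and the 16MB stand-in for BSONObjMaxUserSize are illustrative assumptions, not part of the commit.

// --- Illustrative sketch (not part of the commit) ---------------------------
// Models the new capping behaviour with standard-library types only.
// kMaxUserSize stands in for BSONObjMaxUserSize (16MB); the cap applied
// below is half of that, i.e. 8MB, matching the commit title.
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

namespace sketch {

constexpr std::size_t kMaxUserSize = 16 * 1024 * 1024;

struct CappedViolations {
    // One inner vector of _id values per duplicate index key.
    std::vector<std::vector<std::string>> groups;
    bool truncated = false;
};

// Accumulates duplicate-id groups until their combined size would exceed
// kMaxUserSize / 2, but always keeps at least one id so the caller has
// something actionable to report.
inline CappedViolations capViolations(
    const std::vector<std::vector<std::string>>& duplicateGroups) {
    CappedViolations result;
    std::size_t violationsSize = 0;

    for (const auto& group : duplicateGroups) {
        std::vector<std::string> currIds;
        for (const auto& id : group) {
            violationsSize += id.size();

            if (violationsSize > kMaxUserSize / 2) {
                // Guarantee at least one violation in the response.
                if (result.groups.empty() && currIds.empty()) {
                    currIds.push_back(id);
                }
                if (!currIds.empty()) {
                    result.groups.push_back(std::move(currIds));
                }
                result.truncated = true;
                return result;
            }
            currIds.push_back(id);
        }
        result.groups.push_back(std::move(currIds));
    }
    return result;
}

}  // namespace sketch
// -----------------------------------------------------------------------------
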
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 12a7715d33d..52bc185ff29 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -564,8 +564,7 @@ Status _processCollModDryRunMode(OperationContext* opCtx,
// Throws exception if index contains duplicates.
auto violatingRecordsList = scanIndexForDuplicates(opCtx, collection, cmr.indexRequest.idx);
if (!violatingRecordsList.empty()) {
- uassertStatusOK(buildConvertUniqueErrorStatus(
- buildDuplicateViolations(opCtx, collection, violatingRecordsList)));
+ uassertStatusOK(buildConvertUniqueErrorStatus(opCtx, collection, violatingRecordsList));
}
return Status::OK();
@@ -603,8 +602,7 @@ StatusWith<const IndexDescriptor*> _setUpCollModIndexUnique(OperationContext* op
nss);
if (!violatingRecordsList.empty()) {
- uassertStatusOK(buildConvertUniqueErrorStatus(
- buildDuplicateViolations(opCtx, collection, violatingRecordsList)));
+ uassertStatusOK(buildConvertUniqueErrorStatus(opCtx, collection, violatingRecordsList));
}
return idx;
diff --git a/src/mongo/db/catalog/coll_mod_index.cpp b/src/mongo/db/catalog/coll_mod_index.cpp
index 6bcb123e3aa..74d5e096979 100644
--- a/src/mongo/db/catalog/coll_mod_index.cpp
+++ b/src/mongo/db/catalog/coll_mod_index.cpp
@@ -145,8 +145,7 @@ void _processCollModIndexRequestUnique(OperationContext* opCtx,
if (mode && *mode == repl::OplogApplication::Mode::kApplyOpsCmd) {
auto duplicateRecordsList = scanIndexForDuplicates(opCtx, collection, idx);
if (!duplicateRecordsList.empty()) {
- uassertStatusOK(buildConvertUniqueErrorStatus(
- buildDuplicateViolations(opCtx, collection, duplicateRecordsList)));
+ uassertStatusOK(buildConvertUniqueErrorStatus(opCtx, collection, duplicateRecordsList));
}
}
@@ -340,23 +339,38 @@ std::list<std::set<RecordId>> scanIndexForDuplicates(
return duplicateRecordsList;
}
-BSONArray buildDuplicateViolations(OperationContext* opCtx,
- const CollectionPtr& collection,
- const std::list<std::set<RecordId>>& duplicateRecordsList) {
+Status buildConvertUniqueErrorStatus(OperationContext* opCtx,
+ const CollectionPtr& collection,
+ const std::list<std::set<RecordId>>& duplicateRecordsList) {
BSONArrayBuilder duplicateViolations;
+ size_t violationsSize = 0;
+
for (const auto& duplicateRecords : duplicateRecordsList) {
BSONArrayBuilder currViolatingIds;
for (const auto& recordId : duplicateRecords) {
auto doc = collection->docFor(opCtx, recordId).value();
- currViolatingIds.append(doc["_id"]);
+ auto id = doc["_id"];
+ violationsSize += id.size();
+
+ // Returns duplicate violations up to 8MB.
+ if (violationsSize > BSONObjMaxUserSize / 2) {
+ // Returns at least one violation.
+ if (duplicateViolations.arrSize() == 0 && currViolatingIds.arrSize() == 0) {
+ currViolatingIds.append(id);
+ }
+ if (currViolatingIds.arrSize() > 0) {
+ duplicateViolations.append(BSON("ids" << currViolatingIds.arr()));
+ }
+ return Status(
+ CannotConvertIndexToUniqueInfo(duplicateViolations.arr()),
+ "Cannot convert the index to unique. Too many conflicting documents were "
+ "detected. Please resolve them and rerun collMod.");
+ }
+ currViolatingIds.append(id);
}
duplicateViolations.append(BSON("ids" << currViolatingIds.arr()));
}
- return duplicateViolations.arr();
-}
-
-Status buildConvertUniqueErrorStatus(const BSONArray& violations) {
- return Status(CannotConvertIndexToUniqueInfo(violations),
+ return Status(CannotConvertIndexToUniqueInfo(duplicateViolations.arr()),
"Cannot convert the index to unique. Please resolve conflicting documents "
"before running collMod again.");
}
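
The rewritten buildConvertUniqueErrorStatus now produces one of two messages: a "too many conflicting documents" message when the 8MB cap truncates the list, and the original "please resolve conflicting documents" message when every violation fits. Below is a minimal check of the truncation guarantee, built on the standalone capViolations sketch above (assumed to be available as a hypothetical sketch.h); the oversized-id scenario is synthetic.

// --- Illustrative check (not part of the commit) ----------------------------
// Exercises the "at least one violation" guarantee of the capping pattern.
#include <cassert>
#include <iostream>
#include <string>
#include <vector>

#include "sketch.h"  // hypothetical header holding sketch::capViolations

int main() {
    using sketch::capViolations;
    using sketch::kMaxUserSize;

    // A single duplicate group whose first id alone blows past the 8MB cap.
    std::vector<std::vector<std::string>> groups{
        {std::string(kMaxUserSize / 2 + 1, 'x'), std::string("small-id")}};

    auto result = capViolations(groups);
    assert(result.truncated);              // the response was cut short
    assert(result.groups.size() == 1);     // but still reports one group
    assert(result.groups[0].size() == 1);  // with exactly one id in it
    std::cout << "truncated response still carries one violation\n";
    return 0;
}
// -----------------------------------------------------------------------------
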
diff --git a/src/mongo/db/catalog/coll_mod_index.h b/src/mongo/db/catalog/coll_mod_index.h
index e5f78be7f55..acd7849d70e 100644
--- a/src/mongo/db/catalog/coll_mod_index.h
+++ b/src/mongo/db/catalog/coll_mod_index.h
@@ -79,15 +79,11 @@ std::list<std::set<RecordId>> scanIndexForDuplicates(
boost::optional<KeyString::Value> firstKeyString = {});
/**
- * Builds the BSONArray of the violations with duplicate index keys.
+ * Builds a BSONArray of the violations with duplicate index keys and returns the formatted error
+ * status for not being able to convert the index to unique.
*/
-BSONArray buildDuplicateViolations(OperationContext* opCtx,
- const CollectionPtr& collection,
- const std::list<std::set<RecordId>>& duplicateRecordsList);
-
-/**
- * Returns the formatted error status for not being able to convert the index to unique.
- */
-Status buildConvertUniqueErrorStatus(const BSONArray& violations);
+Status buildConvertUniqueErrorStatus(OperationContext* opCtx,
+ const CollectionPtr& collection,
+ const std::list<std::set<RecordId>>& duplicateRecordsList);
} // namespace mongo
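
With the two helpers merged, the header exposes a single entry point that both collects the violating _id values and formats the error Status. The same scan-then-report pattern appears three times in this commit (twice in coll_mod.cpp, once in coll_mod_index.cpp); the sketch below shows how that repetition could be factored into one helper. The helper name is hypothetical, and the snippet assumes it is compiled inside the mongo tree next to coll_mod_index.h, so it is not standalone.

// --- Illustrative sketch (not part of the commit) ----------------------------
// Hypothetical consolidation of the caller pattern repeated above; assumes the
// usual catalog and assertion includes are available via the mongo build.
#include "mongo/db/catalog/coll_mod_index.h"

namespace mongo {

// Scans 'idx' for duplicate keys and throws CannotConvertIndexToUnique (with
// the capped list of violating _ids) if any are found.
inline void uassertNoDuplicatesForUniqueConversion(OperationContext* opCtx,
                                                   const CollectionPtr& collection,
                                                   const IndexDescriptor* idx) {
    auto violatingRecordsList = scanIndexForDuplicates(opCtx, collection, idx);
    if (!violatingRecordsList.empty()) {
        uassertStatusOK(buildConvertUniqueErrorStatus(opCtx, collection, violatingRecordsList));
    }
}

}  // namespace mongo
// ------------------------------------------------------------------------------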