summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFaustoleyva54 <fausto.leyva@mongodb.com>2022-03-03 20:01:51 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-03-03 20:52:18 +0000
commitd9f9585cdcd285e9c12ba2fd459f9be7e666082d (patch)
treeef1ee3c22e50a6484400667e5f19d8cd0a2580a0
parente16ca01d841ef74a8b9467778ae5456978570cbe (diff)
downloadmongo-d9f9585cdcd285e9c12ba2fd459f9be7e666082d.tar.gz
SERVER-1864 Support Capped Collection Size Increase
-rw-r--r--buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml1
-rw-r--r--jstests/core/capped_resize.js151
-rw-r--r--jstests/multiVersion/targetedTestsLastLtsFeatures/capped_resize_fcv_check.js62
-rw-r--r--src/mongo/db/catalog/coll_mod.cpp42
-rw-r--r--src/mongo/db/catalog/collection.h4
-rw-r--r--src/mongo/db/catalog/collection_impl.cpp30
-rw-r--r--src/mongo/db/catalog/collection_impl.h5
-rw-r--r--src/mongo/db/catalog/collection_mock.h4
-rw-r--r--src/mongo/db/catalog/collection_options.cpp46
-rw-r--r--src/mongo/db/catalog/collection_options.h3
-rw-r--r--src/mongo/db/coll_mod.idl15
-rw-r--r--src/mongo/db/commands/resize_oplog.cpp4
12 files changed, 337 insertions, 30 deletions
diff --git a/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml
index b5ed73d1558..c7607fbfb20 100644
--- a/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml
@@ -14,6 +14,7 @@ selector:
- jstests/core/max_doc_size.js
- jstests/core/mr_bigobject.js
- jstests/core/capped_large_docs.js
+ - jstests/core/capped_resize.js
- jstests/core/exhaust.js
# The following tests also create large oplog entries due to the maximum blocking sort size being
# 100 MB.
diff --git a/jstests/core/capped_resize.js b/jstests/core/capped_resize.js
new file mode 100644
index 00000000000..c40ff9423fd
--- /dev/null
+++ b/jstests/core/capped_resize.js
@@ -0,0 +1,151 @@
+/**
+ * Tests updates on the size and max document fields of capped collections.
+ *
+ * @tags: [
+ * requires_capped,
+ * requires_collstats,
+ * requires_fastcount,
+ * # Capped collections cannot be sharded
+ * assumes_unsharded_collection,
+ * ]
+ */
+(function() {
+const testDB = db.getSiblingDB(jsTestName());
+const cappedColl = testDB["capped_coll"];
+
+const maxSize = 25 * 1024; // 25 KB.
+const doubleMaxSize = 50 * 1024; // 50 KB.
+const maxDocs = 2;
+const doubleMaxDocs = 2 * maxDocs;
+const initialDocSize = 2;
+
+const maxDocumentCeiling = 0x7fffffff;
+const maxSizeCeiling = 0x4000000000000;
+
+let insertDocs = function() {
+ // Insert ~50KB of data.
+ const doc = {key: "a".repeat(10 * 1024)};
+ for (let i = 0; i < 5; i++) {
+ assert.commandWorked(cappedColl.insert(doc));
+ }
+};
+
+let resetCappedCollection = function(extra) {
+ const options = Object.assign({}, {capped: true}, extra);
+ cappedColl.drop();
+ assert.commandWorked(testDB.createCollection(cappedColl.getName(), options));
+
+ // With a capped collection capacity of 25KB, we should have 2 documents.
+ insertDocs();
+ let stats = assert.commandWorked(cappedColl.stats());
+ assert.eq(stats.count, initialDocSize);
+ assert.lte(stats.size, extra.size);
+
+ // Check the size and max document limits.
+ assert.eq(stats.maxSize, extra.size);
+ if (extra.max) {
+ assert.eq(stats.max, extra.max);
+ }
+};
+
+let verifyLimitUpdate = function(updates) {
+ const fullCmd = Object.assign({}, {collMod: cappedColl.getName()}, updates);
+ assert.commandWorked(testDB.runCommand(fullCmd));
+ const stats = assert.commandWorked(cappedColl.stats());
+
+ if (updates.cappedSize) {
+ assert.eq(stats.maxSize, updates.cappedSize);
+ }
+ if (updates.cappedMax) {
+ const expectedMax = (updates.cappedMax <= 0) ? maxDocumentCeiling : updates.cappedMax;
+ assert.eq(stats.max, expectedMax);
+ }
+ // Insert documents after updating the capped collection limits. If the actual size is above the
+ // limit, the inserts will elicit a deletion of documents.
+ insertDocs();
+};
+
+(function updateSizeLimit() {
+ jsTestLog("Updating the maximum size of the capped collection.");
+ resetCappedCollection({size: maxSize});
+
+ // Increase the size of the capped collection and we should see more documents in the
+ // collection.
+ verifyLimitUpdate({cappedSize: doubleMaxSize});
+ let stats = assert.commandWorked(cappedColl.stats());
+ assert.gt(stats.count, initialDocSize);
+ assert.lte(stats.size, doubleMaxSize);
+
+ // Decrease the size parameter of the capped collection and see that documents are removed.
+ verifyLimitUpdate({cappedSize: maxSize});
+ stats = assert.commandWorked(cappedColl.stats());
+ assert.eq(stats.count, initialDocSize);
+ assert.lte(stats.size, maxSize);
+
+ // We expect the resizing of a capped collection to fail when maxSize <= 0 or maxSize >
+ // maxSizeCeiling.
+ const negativeSize = -1 * maxSize;
+ assert.commandFailed(
+ testDB.runCommand({collMod: cappedColl.getName(), cappedSize: maxSizeCeiling + 1}));
+ assert.commandFailed(testDB.runCommand({collMod: cappedColl.getName(), cappedSize: 0}));
+ assert.commandFailed(
+ testDB.runCommand({collMod: cappedColl.getName(), cappedSize: negativeSize}));
+})();
+
+(function updateMaxLimit() {
+ jsTestLog("Updating the maximum document size of the capped collection.");
+ resetCappedCollection({size: doubleMaxSize, max: maxDocs});
+
+ // Increase the maximum document count of the capped collection and we should see more
+ // documents in the collection.
+ verifyLimitUpdate({cappedMax: doubleMaxDocs});
+ let stats = assert.commandWorked(cappedColl.stats());
+ assert.eq(stats.count, doubleMaxDocs);
+
+ // Decrease the max parameter of the capped collection and see that documents are removed.
+ verifyLimitUpdate({cappedMax: maxDocs});
+ stats = assert.commandWorked(cappedColl.stats());
+ assert.eq(stats.count, maxDocs);
+
+ // Setting maxDocs to a value <= 0, we expect cappedSize to be the only limiting factor.
+ const negativeMax = -1 * maxDocs;
+ verifyLimitUpdate({cappedMax: negativeMax});
+ stats = assert.commandWorked(cappedColl.stats());
+ assert.gt(stats.count, initialDocSize);
+ assert.lte(stats.size, doubleMaxSize);
+
+ verifyLimitUpdate({cappedMax: 0});
+ stats = assert.commandWorked(cappedColl.stats());
+ assert.gt(stats.count, initialDocSize);
+ assert.lte(stats.size, doubleMaxSize);
+})();
+
+(function updateSizeAndMaxLimits() {
+ jsTestLog("Updating the maximum size and document limits of the capped collection.");
+ resetCappedCollection({size: maxSize, max: maxDocs});
+
+ // Increasing both limits, we should see double the documents.
+ verifyLimitUpdate({cappedSize: doubleMaxSize, cappedMax: doubleMaxDocs});
+ let stats = assert.commandWorked(cappedColl.stats());
+ assert.eq(stats.count, doubleMaxDocs);
+ assert.gt(stats.size, maxSize);
+
+ // Decreasing both limits, we should see fewer documents.
+ verifyLimitUpdate({cappedSize: maxSize, cappedMax: maxDocs});
+ stats = assert.commandWorked(cappedColl.stats());
+ assert.eq(stats.count, maxDocs);
+ assert.lte(stats.size, maxSize);
+
+ // Increasing the size limit, but keeping the max low should have no effect.
+ verifyLimitUpdate({cappedSize: doubleMaxSize, cappedMax: maxDocs});
+ stats = assert.commandWorked(cappedColl.stats());
+ assert.eq(stats.count, maxDocs);
+ assert.lte(stats.size, doubleMaxSize);
+
+ // Increasing the max limit, but keeping the size limit lower should have no effect.
+ verifyLimitUpdate({cappedSize: maxSize, cappedMax: doubleMaxDocs});
+ stats = assert.commandWorked(cappedColl.stats());
+ assert.eq(stats.count, initialDocSize);
+ assert.lte(stats.size, maxSize);
+})();
+}());
diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/capped_resize_fcv_check.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/capped_resize_fcv_check.js
new file mode 100644
index 00000000000..61ed177b6fb
--- /dev/null
+++ b/jstests/multiVersion/targetedTestsLastLtsFeatures/capped_resize_fcv_check.js
@@ -0,0 +1,62 @@
+/**
+ * Tests that resizing capped collections fails when downgrading from an FCV of 6.0 or higher to
+ * a version below 6.0.
+ *
+ * @tags: [
+ * requires_replication,
+ * ]
+ */
+(function() {
+"use strict";
+
+const rst = new ReplSetTest({nodes: 2});
+const nodes = rst.startSet();
+rst.initiate();
+
+const maxSize = 25 * 1024 * 1024; // 25 MB.
+const maxDocs = 2;
+const doubleMaxSize = 50 * 1024 * 1024; // 50 MB.
+const doubleMaxDocs = 4;
+
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const cappedColl = testDB["capped_coll"];
+cappedColl.drop();
+assert.commandWorked(
+ testDB.createCollection(cappedColl.getName(), {capped: true, size: maxSize, max: maxDocs}));
+
+// On version 6.0, we expect the collMod command to succeed.
+assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+assert.commandWorked(testDB.runCommand(
+ {collMod: cappedColl.getName(), cappedSize: doubleMaxSize, cappedMax: doubleMaxDocs}));
+let stats = assert.commandWorked(cappedColl.stats());
+assert.eq(stats.maxSize, doubleMaxSize);
+assert.eq(stats.max, doubleMaxDocs);
+
+// On versions <6.0, we expect the command to fail and the capped collection size limits to remain
+// the same.
+assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastContinuousFCV}));
+assert.commandFailed(
+ testDB.runCommand({collMod: cappedColl.getName(), cappedSize: maxSize, cappedMax: maxDocs}));
+stats = assert.commandWorked(cappedColl.stats());
+assert.eq(stats.maxSize, doubleMaxSize);
+assert.eq(stats.max, doubleMaxDocs);
+
+// Upgrade and resize the capped collection.
+assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: "6.0"}));
+assert.commandWorked(
+ testDB.runCommand({collMod: cappedColl.getName(), cappedSize: maxSize, cappedMax: maxDocs}));
+stats = assert.commandWorked(cappedColl.stats());
+assert.eq(stats.maxSize, maxSize);
+assert.eq(stats.max, maxDocs);
+
+// We expect the resizing command to fail and the size limits to remain the same.
+assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}));
+assert.commandFailed(testDB.runCommand(
+ {collMod: cappedColl.getName(), cappedSize: doubleMaxSize, cappedMax: doubleMaxDocs}));
+stats = assert.commandWorked(cappedColl.stats());
+assert.eq(stats.maxSize, maxSize);
+assert.eq(stats.max, maxDocs);
+
+rst.stopSet();
+})();
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 52bc185ff29..0ffc15e0e60 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -113,6 +113,8 @@ struct ParsedCollModRequest {
boost::optional<ChangeStreamPreAndPostImagesOptions> changeStreamPreAndPostImagesOptions;
int numModifications = 0;
bool dryRun = false;
+ boost::optional<long long> cappedSize;
+ boost::optional<long long> cappedMax;
};
StatusWith<ParsedCollModRequest> parseCollModRequest(OperationContext* opCtx,
@@ -132,6 +134,25 @@ StatusWith<ParsedCollModRequest> parseCollModRequest(OperationContext* opCtx,
return ex.toStatus();
}
+ if (cmd.getCollModRequest().getCappedSize() || cmd.getCollModRequest().getCappedMax()) {
+ // TODO (SERVER-64042): Remove FCV check once 6.1 is released.
+ if (serverGlobalParams.featureCompatibility.isVersionInitialized() &&
+ serverGlobalParams.featureCompatibility.isLessThan(
+ multiversion::FeatureCompatibilityVersion::kVersion_6_0)) {
+ return Status(ErrorCodes::InvalidOptions,
+ "Cannot change the size limits of a capped collection.");
+ } else if (!coll->isCapped()) {
+ return Status(ErrorCodes::InvalidOptions, "Collection must be capped.");
+ } else if (coll->ns().isOplog()) {
+ return Status(ErrorCodes::InvalidOptions,
+ "Cannot resize the oplog using this command. Use the "
+ "'replSetResizeOplog' command instead.");
+ } else {
+ cmr.cappedSize = cmd.getCollModRequest().getCappedSize();
+ cmr.cappedMax = cmd.getCollModRequest().getCappedMax();
+ }
+ }
+
// TODO (SERVER-62732): Check options directly rather than using a loop.
auto cmdObj = cmd.toBSON(BSONObj());
for (const auto& e : cmdObj) {
@@ -451,6 +472,20 @@ StatusWith<ParsedCollModRequest> parseCollModRequest(OperationContext* opCtx,
// The dry run option should never be included in a collMod oplog entry.
continue;
} else if (fieldName == CollMod::kCollectionUUIDFieldName) {
+ } else if (fieldName == CollMod::kCappedSizeFieldName) {
+ const long long minSize = 4096;
+ auto swCappedSize = CollectionOptions::checkAndAdjustCappedSize(*cmr.cappedSize);
+ if (!swCappedSize.isOK()) {
+ return swCappedSize.getStatus();
+ }
+ cmr.cappedSize =
+ (swCappedSize.getValue() < minSize) ? minSize : swCappedSize.getValue();
+ } else if (fieldName == CollMod::kCappedMaxFieldName) {
+ auto swCappedMaxDocs = CollectionOptions::checkAndAdjustCappedMaxDocs(*cmr.cappedMax);
+ if (!swCappedMaxDocs.isOK()) {
+ return swCappedMaxDocs.getStatus();
+ }
+ cmr.cappedMax = swCappedMaxDocs.getValue();
} else {
if (isTimeseries) {
return Status(ErrorCodes::InvalidOptions,
@@ -776,6 +811,13 @@ Status _collModInternal(OperationContext* opCtx,
}
}
+ if (cmrNew.cappedSize || cmrNew.cappedMax) {
+ // If the current capped collection size exceeds the newly set limits, future document
+ // inserts will prompt document deletion.
+ uassertStatusOK(coll.getWritableCollection(opCtx)->updateCappedSize(
+ opCtx, cmrNew.cappedSize, cmrNew.cappedMax));
+ }
+
boost::optional<IndexCollModInfo> indexCollModInfo;
// Handle collMod operation type appropriately.
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 2ec74f97a8f..aa5bc579c4e 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -573,7 +573,9 @@ public:
virtual void updateClusteredIndexTTLSetting(OperationContext* opCtx,
boost::optional<int64_t> expireAfterSeconds) = 0;
- virtual Status updateCappedSize(OperationContext* opCtx, long long newCappedSize) = 0;
+ virtual Status updateCappedSize(OperationContext* opCtx,
+ boost::optional<long long> newCappedSize,
+ boost::optional<long long> newCappedMax) = 0;
//
// Index
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index 4fd48515b5f..fa9a7d0dff4 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -375,8 +375,7 @@ CollectionImpl::SharedState::SharedState(CollectionImpl* collection,
// clustered capped collections because they only guarantee insertion order when cluster keys
// are inserted in monotonically-increasing order.
_needCappedLock(options.capped && collection->ns().isReplicated() && !options.clusteredIndex),
- _isCapped(options.capped),
- _cappedMaxDocs(options.cappedMaxDocs) {
+ _isCapped(options.capped) {
if (_cappedNotifier) {
_recordStore->setCappedCallback(this);
}
@@ -993,7 +992,8 @@ bool CollectionImpl::_cappedAndNeedDelete(OperationContext* opCtx) const {
return true;
}
- if ((_shared->_cappedMaxDocs != 0) && (numRecords(opCtx) > _shared->_cappedMaxDocs)) {
+ const auto cappedMaxDocs = _shared->_collectionLatest->getCollectionOptions().cappedMaxDocs;
+ if ((cappedMaxDocs != 0) && (numRecords(opCtx) > cappedMaxDocs)) {
return true;
}
@@ -1039,9 +1039,10 @@ void CollectionImpl::_cappedDeleteAsNeeded(OperationContext* opCtx,
const auto cappedMaxSize = _shared->_collectionLatest->getCollectionOptions().cappedSize;
const long long sizeOverCap =
(currentDataSize > cappedMaxSize) ? currentDataSize - cappedMaxSize : 0;
- const long long docsOverCap =
- (_shared->_cappedMaxDocs != 0 && currentNumRecords > _shared->_cappedMaxDocs)
- ? currentNumRecords - _shared->_cappedMaxDocs
+
+ const auto cappedMaxDocs = _shared->_collectionLatest->getCollectionOptions().cappedMaxDocs;
+ const long long docsOverCap = (cappedMaxDocs != 0 && currentNumRecords > cappedMaxDocs)
+ ? currentNumRecords - cappedMaxDocs
: 0;
long long sizeSaved = 0;
@@ -1509,7 +1510,9 @@ void CollectionImpl::updateClusteredIndexTTLSetting(OperationContext* opCtx,
});
}
-Status CollectionImpl::updateCappedSize(OperationContext* opCtx, long long newCappedSize) {
+Status CollectionImpl::updateCappedSize(OperationContext* opCtx,
+ boost::optional<long long> newCappedSize,
+ boost::optional<long long> newCappedMax) {
invariant(opCtx->lockState()->isCollectionLockedForMode(ns(), MODE_X));
if (!_shared->_isCapped) {
@@ -1517,15 +1520,20 @@ Status CollectionImpl::updateCappedSize(OperationContext* opCtx, long long newCa
str::stream() << "Cannot update size on a non-capped collection " << ns());
}
- if (ns().isOplog()) {
- Status status = _shared->_recordStore->updateOplogSize(newCappedSize);
+ if (ns().isOplog() && newCappedSize) {
+ Status status = _shared->_recordStore->updateOplogSize(*newCappedSize);
if (!status.isOK()) {
return status;
}
}
_writeMetadata(opCtx, [&](BSONCollectionCatalogEntry::MetaData& md) {
- md.options.cappedSize = newCappedSize;
+ if (newCappedSize) {
+ md.options.cappedSize = *newCappedSize;
+ }
+ if (newCappedMax) {
+ md.options.cappedMaxDocs = *newCappedMax;
+ }
});
return Status::OK();
}
@@ -1563,7 +1571,7 @@ bool CollectionImpl::isCapped() const {
}
long long CollectionImpl::getCappedMaxDocs() const {
- return _shared->_cappedMaxDocs;
+ return _metadata->options.cappedMaxDocs;
}
long long CollectionImpl::getCappedMaxSize() const {
diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h
index be5153833d7..5c21e188dc1 100644
--- a/src/mongo/db/catalog/collection_impl.h
+++ b/src/mongo/db/catalog/collection_impl.h
@@ -343,7 +343,9 @@ public:
void updateClusteredIndexTTLSetting(OperationContext* opCtx,
boost::optional<int64_t> expireAfterSeconds) final;
- Status updateCappedSize(OperationContext* opCtx, long long newCappedSize) final;
+ Status updateCappedSize(OperationContext* opCtx,
+ boost::optional<long long> newCappedSize,
+ boost::optional<long long> newCappedMax) final;
//
// Stats
@@ -580,7 +582,6 @@ private:
// Capped information.
const bool _isCapped;
- const long long _cappedMaxDocs;
// For capped deletes performed on collections where '_needCappedLock' is false, the mutex
// below protects '_cappedFirstRecord'. Otherwise, when '_needCappedLock' is true, the
diff --git a/src/mongo/db/catalog/collection_mock.h b/src/mongo/db/catalog/collection_mock.h
index f656b232a88..65529dfd820 100644
--- a/src/mongo/db/catalog/collection_mock.h
+++ b/src/mongo/db/catalog/collection_mock.h
@@ -269,7 +269,9 @@ public:
std::abort();
}
- Status updateCappedSize(OperationContext* opCtx, long long newCappedSize) {
+ Status updateCappedSize(OperationContext* opCtx,
+ boost::optional<long long> newCappedSize,
+ boost::optional<long long> newCappedMax) {
std::abort();
}
diff --git a/src/mongo/db/catalog/collection_options.cpp b/src/mongo/db/catalog/collection_options.cpp
index 2db236f1ecb..799d20b6590 100644
--- a/src/mongo/db/catalog/collection_options.cpp
+++ b/src/mongo/db/catalog/collection_options.cpp
@@ -79,6 +79,29 @@ void setEncryptedDefaultEncryptedCollectionNames(const NamespaceString& ns,
} // namespace
+StatusWith<long long> CollectionOptions::checkAndAdjustCappedSize(long long cappedSize) {
+ const long long kGB = 1024 * 1024 * 1024;
+ const long long kPB = 1024 * 1024 * kGB;
+
+ if (cappedSize < 0) {
+ return Status(ErrorCodes::BadValue, "size has to be >= 0");
+ }
+ if (cappedSize > kPB) {
+ return Status(ErrorCodes::BadValue, "size cannot exceed 1 PB");
+ }
+
+ return adjustCappedSize(cappedSize);
+}
+
+StatusWith<long long> CollectionOptions::checkAndAdjustCappedMaxDocs(long long cappedMaxDocs) {
+ if (cappedMaxDocs >= 0x1LL << 31) {
+ return Status(ErrorCodes::BadValue,
+ "max in a capped collection has to be < 2^31 or not set");
+ }
+
+ return adjustCappedMaxDocs(cappedMaxDocs);
+}
+
bool CollectionOptions::isView() const {
return !viewOn.empty();
}
@@ -121,24 +144,21 @@ StatusWith<CollectionOptions> CollectionOptions::parse(const BSONObj& options, P
// Ignoring for backwards compatibility.
continue;
}
- auto cappedSize = e.safeNumberLong();
- if (cappedSize < 0)
- return Status(ErrorCodes::BadValue, "size has to be >= 0");
- const long long kGB = 1024 * 1024 * 1024;
- const long long kPB = 1024 * 1024 * kGB;
- if (cappedSize > kPB)
- return Status(ErrorCodes::BadValue, "size cannot exceed 1 PB");
- collectionOptions.cappedSize = adjustCappedSize(cappedSize);
+ auto swCappedSize = checkAndAdjustCappedSize(e.safeNumberLong());
+ if (!swCappedSize.isOK()) {
+ return swCappedSize.getStatus();
+ }
+ collectionOptions.cappedSize = swCappedSize.getValue();
} else if (fieldName == "max") {
if (!options["capped"].trueValue() || !e.isNumber()) {
// Ignoring for backwards compatibility.
continue;
}
- auto cappedMaxDocs = e.safeNumberLong();
- if (cappedMaxDocs >= 0x1LL << 31)
- return Status(ErrorCodes::BadValue,
- "max in a capped collection has to be < 2^31 or not set");
- collectionOptions.cappedMaxDocs = adjustCappedMaxDocs(cappedMaxDocs);
+ auto swCappedMaxDocs = checkAndAdjustCappedMaxDocs(e.safeNumberLong());
+ if (!swCappedMaxDocs.isOK()) {
+ return swCappedMaxDocs.getStatus();
+ }
+ collectionOptions.cappedMaxDocs = swCappedMaxDocs.getValue();
} else if (fieldName == "$nExtents") {
// Ignoring for backwards compatibility.
continue;
diff --git a/src/mongo/db/catalog/collection_options.h b/src/mongo/db/catalog/collection_options.h
index 641a0510fa5..8930f40ec66 100644
--- a/src/mongo/db/catalog/collection_options.h
+++ b/src/mongo/db/catalog/collection_options.h
@@ -83,6 +83,9 @@ struct CollectionOptions {
static CollectionOptions fromCreateCommand(const NamespaceString& nss,
const CreateCommand& cmd);
+ static StatusWith<long long> checkAndAdjustCappedSize(long long cappedSize);
+ static StatusWith<long long> checkAndAdjustCappedMaxDocs(long long cappedMaxDocs);
+
/**
* Serialize to BSON. The 'includeUUID' parameter is used for the listCollections command to do
* special formatting for the uuid. Aside from the UUID, if 'includeFields' is non-empty, only
diff --git a/src/mongo/db/coll_mod.idl b/src/mongo/db/coll_mod.idl
index 5d9422e111f..a79677d9589 100644
--- a/src/mongo/db/coll_mod.idl
+++ b/src/mongo/db/coll_mod.idl
@@ -188,6 +188,21 @@ structs:
type: uuid
optional: true
unstable: true
+ cappedSize:
+ description: "Update the maximum size in bytes for the capped collection."
+ type: safeInt64
+ optional: true
+ validator:
+ gte: 1
+ lte: 1125899906842624 # 1 PB
+ unstable: false
+ cappedMax:
+ description: "Update the maximum number of documents allowed in the capped collection."
+ type: safeInt64
+ optional: true
+ validator:
+ lt: 2147483648 # 2^31
+ unstable: false
commands:
collMod:
diff --git a/src/mongo/db/commands/resize_oplog.cpp b/src/mongo/db/commands/resize_oplog.cpp
index 840a7ed81c0..48dc95ade4e 100644
--- a/src/mongo/db/commands/resize_oplog.cpp
+++ b/src/mongo/db/commands/resize_oplog.cpp
@@ -97,8 +97,8 @@ public:
if (auto sizeMB = params.getSize()) {
const long long sizeBytes = *sizeMB * 1024 * 1024;
- uassertStatusOK(
- coll.getWritableCollection(opCtx)->updateCappedSize(opCtx, sizeBytes));
+ uassertStatusOK(coll.getWritableCollection(opCtx)->updateCappedSize(
+ opCtx, sizeBytes, /*newCappedMax=*/boost::none));
}
if (auto minRetentionHoursOpt = params.getMinRetentionHours()) {