author     Benety Goh <benety@mongodb.com>  2018-12-18 20:58:30 -0500
committer  Benety Goh <benety@mongodb.com>  2018-12-18 20:58:30 -0500
commit     4f1e7fb94ce21e21b05dd49c4ed46e6c5231bd2a (patch)
tree       6860ec74943f823c3dc5c8936aa959e9b93a784f
parent     5d51a32d3736f768432e924111e8179e72b42944 (diff)
download   mongo-4f1e7fb94ce21e21b05dd49c4ed46e6c5231bd2a.tar.gz
SERVER-38684 unshim Collection
-rw-r--r--  src/mongo/db/catalog/coll_mod.cpp                             |  12
-rw-r--r--  src/mongo/db/catalog/collection.cpp                           |  75
-rw-r--r--  src/mongo/db/catalog/collection.h                             | 574
-rw-r--r--  src/mongo/db/catalog/collection_impl.cpp                      | 153
-rw-r--r--  src/mongo/db/catalog/collection_impl.h                        |  39
-rw-r--r--  src/mongo/db/catalog/collection_mock.h                        |  47
-rw-r--r--  src/mongo/db/catalog/collection_test.cpp                      |  12
-rw-r--r--  src/mongo/db/catalog/database_holder_impl.cpp                 |   4
-rw-r--r--  src/mongo/db/catalog/database_impl.cpp                        |   4
-rw-r--r--  src/mongo/db/catalog/index_build_block.cpp                    |   1
-rw-r--r--  src/mongo/db/catalog/multi_index_block_test.cpp               |   2
-rw-r--r--  src/mongo/db/catalog/uuid_catalog_test.cpp                    |  28
-rw-r--r--  src/mongo/db/exec/count_scan.h                                |   1
-rw-r--r--  src/mongo/db/exec/requires_all_indices_stage.h                |   1
-rw-r--r--  src/mongo/db/exec/requires_index_stage.h                      |   1
-rw-r--r--  src/mongo/db/index/duplicate_key_tracker.h                    |   2
-rw-r--r--  src/mongo/db/index/index_access_method.cpp                    |   1
-rw-r--r--  src/mongo/db/pipeline/document_source_change_stream_test.cpp |  28
-rw-r--r--  src/mongo/db/pipeline/process_interface_standalone.cpp       |   2
-rw-r--r--  src/mongo/db/query/query_request_test.cpp                    |   2
-rw-r--r--  src/mongo/db/repl/oplog.cpp                                   |   2
-rw-r--r--  src/mongo/db/repl/rollback_impl_test.cpp                      |   6
-rw-r--r--  src/mongo/db/s/shardsvr_shard_collection.cpp                  |   1
-rw-r--r--  src/mongo/dbtests/plan_executor_invalidation_test.cpp         |   1
24 files changed, 307 insertions(+), 692 deletions(-)
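
The diff below removes the pimpl/shim layer from the Collection catalog type: the old concrete Collection forwarded every call to a Collection::Impl created through MONGO_DECLARE_SHIM / MONGO_REGISTER_SHIM, while the new Collection is a pure-virtual interface that CollectionImpl and CollectionMock derive from directly. The following is only a minimal sketch of that shape, with simplified, hypothetical members (just ns() and isCapped()), not the real interface:

#include <memory>
#include <string>
#include <utility>

namespace sketch {

// After the unshim: Collection is a pure-virtual interface (only two members
// kept here for illustration; the real interface is far larger).
class Collection {
public:
    virtual ~Collection() = default;
    virtual const std::string& ns() const = 0;
    virtual bool isCapped() const = 0;
};

// The production implementation derives from the interface directly, instead
// of being reached through Collection::Impl and a shim-registered factory.
class CollectionImpl final : public Collection {
public:
    explicit CollectionImpl(std::string ns) : _ns(std::move(ns)) {}
    const std::string& ns() const override { return _ns; }
    bool isCapped() const override { return false; }

private:
    std::string _ns;
};

// Tests derive a mock from the same interface; previously a CollectionMock
// had to be wrapped in a concrete Collection object.
class CollectionMock final : public Collection {
public:
    explicit CollectionMock(std::string ns) : _ns(std::move(ns)) {}
    const std::string& ns() const override { return _ns; }
    bool isCapped() const override { return false; }

private:
    std::string _ns;
};

// Factories hand out the concrete type through the interface pointer.
inline std::unique_ptr<Collection> makeCollection(std::string ns) {
    return std::make_unique<CollectionImpl>(std::move(ns));
}

}  // namespace sketch
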
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 499e442de4b..a874c594874 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -203,15 +203,15 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* opCtx,
cmr.collValidator = e;
} else if (fieldName == "validationLevel" && !isView) {
- auto statusW = coll->parseValidationLevel(e.String());
- if (!statusW.isOK())
- return statusW.getStatus();
+ auto status = coll->parseValidationLevel(e.String());
+ if (!status.isOK())
+ return status;
cmr.collValidationLevel = e.String();
} else if (fieldName == "validationAction" && !isView) {
- auto statusW = coll->parseValidationAction(e.String());
- if (!statusW.isOK())
- return statusW.getStatus();
+ auto status = coll->parseValidationAction(e.String());
+ if (!status.isOK())
+ return status;
cmr.collValidationAction = e.String();
} else if (fieldName == "pipeline") {
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 85624892492..c9458035631 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -34,48 +34,9 @@
#include "mongo/db/catalog/collection.h"
-#include "mongo/base/counter.h"
-#include "mongo/base/owned_pointer_map.h"
-#include "mongo/bson/ordering.h"
-#include "mongo/bson/simple_bsonelement_comparator.h"
-#include "mongo/bson/simple_bsonobj_comparator.h"
-#include "mongo/db/background.h"
-#include "mongo/db/catalog/collection_catalog_entry.h"
-#include "mongo/db/catalog/database_catalog_entry.h"
-#include "mongo/db/catalog/document_validation.h"
-#include "mongo/db/clientcursor.h"
-#include "mongo/db/commands/server_status_metric.h"
-#include "mongo/db/concurrency/d_concurrency.h"
-#include "mongo/db/curop.h"
-#include "mongo/db/index/index_access_method.h"
-#include "mongo/db/keypattern.h"
-#include "mongo/db/matcher/expression_parser.h"
-#include "mongo/db/op_observer.h"
-#include "mongo/db/operation_context.h"
-#include "mongo/db/ops/update_request.h"
-#include "mongo/db/query/collation/collator_factory_interface.h"
-#include "mongo/db/server_parameters.h"
-#include "mongo/db/service_context.h"
-#include "mongo/db/storage/key_string.h"
-#include "mongo/db/storage/record_store.h"
-#include "mongo/db/update/update_driver.h"
-
-#include "mongo/db/auth/user_document_parser.h" // XXX-ANDY
-#include "mongo/rpc/object_check.h"
-#include "mongo/util/fail_point.h"
-#include "mongo/util/log.h"
+#include <sstream>
namespace mongo {
-// Emit the vtable in this TU
-Collection::Impl::~Impl() = default;
-
-MONGO_DEFINE_SHIM(Collection::makeImpl);
-
-MONGO_DEFINE_SHIM(Collection::parseValidationLevel);
-
-MONGO_DEFINE_SHIM(Collection::parseValidationAction);
-
-void Collection::TUHook::hook() noexcept {}
std::string CompactOptions::toString() const {
std::stringstream ss;
@@ -100,8 +61,6 @@ std::string CompactOptions::toString() const {
// CappedInsertNotifier
//
-CappedInsertNotifier::CappedInsertNotifier() : _version(0), _dead(false) {}
-
void CappedInsertNotifier::notifyAll() {
stdx::lock_guard<stdx::mutex> lk(_mutex);
++_version;
@@ -130,4 +89,36 @@ bool CappedInsertNotifier::isDead() {
// ----
+// static
+Status Collection::parseValidationLevel(StringData newLevel) {
+ if (newLevel == "") {
+ // default
+ return Status::OK();
+ } else if (newLevel == "off") {
+ return Status::OK();
+ } else if (newLevel == "moderate") {
+ return Status::OK();
+ } else if (newLevel == "strict") {
+ return Status::OK();
+ } else {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "invalid validation level: " << newLevel);
+ }
+}
+
+// static
+Status Collection::parseValidationAction(StringData newAction) {
+ if (newAction == "") {
+ // default
+ return Status::OK();
+ } else if (newAction == "warn") {
+ return Status::OK();
+ } else if (newAction == "error") {
+ return Status::OK();
+ } else {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "invalid validation action: " << newAction);
+ }
+}
+
} // namespace mongo
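
With the shim gone, the static parsers above only validate the input string and return a plain Status; the mapping onto the ValidationLevel/ValidationAction enums moves into CollectionImpl (see _parseValidationLevel/_parseValidationAction later in this diff). A hedged usage fragment, assuming mongo's Status/StringData/Collection headers and a hypothetical helper name:

namespace mongo {
// Hypothetical helper; mirrors how parseCollModRequest consumes the parsers now.
Status validateCollModValidationFields(StringData level, StringData action) {
    auto levelStatus = Collection::parseValidationLevel(level);
    if (!levelStatus.isOK())
        return levelStatus;  // a plain Status now, no .getStatus() unwrapping
    return Collection::parseValidationAction(action);
}
}  // namespace mongo
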
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 13f887d8b35..29c23c55c41 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -34,19 +34,15 @@
#include <memory>
#include <string>
-#include "mongo/base/shim.h"
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
#include "mongo/base/string_data.h"
#include "mongo/bson/mutable/damage_vector.h"
#include "mongo/bson/timestamp.h"
-#include "mongo/db/catalog/coll_mod.h"
#include "mongo/db/catalog/collection_info_cache.h"
#include "mongo/db/catalog/collection_options.h"
-#include "mongo/db/catalog/index_consistency.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/cursor_manager.h"
-#include "mongo/db/exec/collection_scan_common.h"
#include "mongo/db/logical_session_id.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/collation/collator_interface.h"
@@ -61,10 +57,12 @@
#include "mongo/stdx/mutex.h"
namespace mongo {
+class CappedCallback;
class CollectionCatalogEntry;
class DatabaseCatalogEntry;
class ExtentManager;
class IndexCatalog;
+class IndexCatalogEntry;
class IndexDescriptor;
class DatabaseImpl;
class MatchExpression;
@@ -106,8 +104,6 @@ struct CollectionUpdateArgs {
*/
class CappedInsertNotifier {
public:
- CappedInsertNotifier();
-
/**
* Wakes up all threads waiting.
*/
@@ -149,20 +145,14 @@ private:
//
// The condition which '_cappedNewDataNotifier' is being notified of is an increment of this
// counter. Access to this counter is synchronized with '_mutex'.
- uint64_t _version;
+ uint64_t _version = 0;
// True once the notifier is dead.
- bool _dead;
+ bool _dead = false;
};
-/**
- * this is NOT safe through a yield right now.
- * not sure if it will be, or what yet.
- */
-class Collection final : CappedCallback {
+class Collection {
public:
- enum ValidationAction { WARN, ERROR_V };
- enum ValidationLevel { OFF, MODERATE, STRICT_V };
enum class StoreDeletedDoc { Off, On };
/**
@@ -178,222 +168,18 @@ public:
*/
using OnRecordInsertedFn = stdx::function<Status(const RecordId& loc)>;
- class Impl : virtual CappedCallback {
- public:
- using ScanDirection = Collection::ScanDirection;
- using OnRecordInsertedFn = Collection::OnRecordInsertedFn;
-
- virtual ~Impl() = 0;
-
- virtual void init(OperationContext* opCtx) = 0;
-
- private:
- friend Collection;
- virtual DatabaseCatalogEntry* dbce() const = 0;
-
- virtual CollectionCatalogEntry* details() const = 0;
-
- virtual Status aboutToDeleteCapped(OperationContext* opCtx,
- const RecordId& loc,
- RecordData data) = 0;
-
- public:
- virtual bool ok() const = 0;
-
- virtual CollectionCatalogEntry* getCatalogEntry() = 0;
- virtual const CollectionCatalogEntry* getCatalogEntry() const = 0;
-
- virtual CollectionInfoCache* infoCache() = 0;
- virtual const CollectionInfoCache* infoCache() const = 0;
-
- virtual const NamespaceString& ns() const = 0;
- virtual void setNs(NamespaceString) = 0;
-
- virtual OptionalCollectionUUID uuid() const = 0;
-
- virtual const IndexCatalog* getIndexCatalog() const = 0;
- virtual IndexCatalog* getIndexCatalog() = 0;
-
- virtual const RecordStore* getRecordStore() const = 0;
- virtual RecordStore* getRecordStore() = 0;
-
- virtual CursorManager* getCursorManager() const = 0;
-
- virtual bool requiresIdIndex() const = 0;
-
- virtual Snapshotted<BSONObj> docFor(OperationContext* opCtx, const RecordId& loc) const = 0;
-
- virtual bool findDoc(OperationContext* opCtx,
- const RecordId& loc,
- Snapshotted<BSONObj>* out) const = 0;
-
- virtual std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
- bool forward) const = 0;
-
- virtual void deleteDocument(OperationContext* opCtx,
- StmtId stmtId,
- const RecordId& loc,
- OpDebug* opDebug,
- bool fromMigrate,
- bool noWarn,
- StoreDeletedDoc storeDeletedDoc) = 0;
-
- virtual Status insertDocuments(OperationContext* opCtx,
- std::vector<InsertStatement>::const_iterator begin,
- std::vector<InsertStatement>::const_iterator end,
- OpDebug* opDebug,
- bool fromMigrate) = 0;
-
- virtual Status insertDocument(OperationContext* opCtx,
- const InsertStatement& doc,
- OpDebug* opDebug,
- bool fromMigrate) = 0;
-
- virtual Status insertDocumentsForOplog(OperationContext* opCtx,
- const DocWriter* const* docs,
- Timestamp* timestamps,
- size_t nDocs) = 0;
-
- virtual Status insertDocumentForBulkLoader(OperationContext* opCtx,
- const BSONObj& doc,
- const OnRecordInsertedFn& onRecordInserted) = 0;
-
- virtual RecordId updateDocument(OperationContext* opCtx,
- const RecordId& oldLocation,
- const Snapshotted<BSONObj>& oldDoc,
- const BSONObj& newDoc,
- bool indexesAffected,
- OpDebug* opDebug,
- CollectionUpdateArgs* args) = 0;
-
- virtual bool updateWithDamagesSupported() const = 0;
-
- virtual StatusWith<RecordData> updateDocumentWithDamages(
- OperationContext* opCtx,
- const RecordId& loc,
- const Snapshotted<RecordData>& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages,
- CollectionUpdateArgs* args) = 0;
-
- virtual Status truncate(OperationContext* opCtx) = 0;
-
- virtual Status validate(OperationContext* opCtx,
- ValidateCmdLevel level,
- bool background,
- std::unique_ptr<Lock::CollectionLock> collLk,
- ValidateResults* results,
- BSONObjBuilder* output) = 0;
-
- virtual Status touch(OperationContext* opCtx,
- bool touchData,
- bool touchIndexes,
- BSONObjBuilder* output) const = 0;
-
- virtual void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) = 0;
-
- virtual StatusWithMatchExpression parseValidator(
- OperationContext* opCtx,
- const BSONObj& validator,
- MatchExpressionParser::AllowedFeatureSet allowedFeatures,
- boost::optional<ServerGlobalParams::FeatureCompatibility::Version>
- maxFeatureCompatibilityVersion = boost::none) const = 0;
-
- virtual Status setValidator(OperationContext* opCtx, BSONObj validator) = 0;
+ Collection() = default;
+ virtual ~Collection() = default;
- virtual Status setValidationLevel(OperationContext* opCtx, StringData newLevel) = 0;
- virtual Status setValidationAction(OperationContext* opCtx, StringData newAction) = 0;
+ virtual bool ok() const = 0;
- virtual StringData getValidationLevel() const = 0;
- virtual StringData getValidationAction() const = 0;
+ virtual CollectionCatalogEntry* getCatalogEntry() = 0;
+ virtual const CollectionCatalogEntry* getCatalogEntry() const = 0;
- virtual Status updateValidator(OperationContext* opCtx,
- BSONObj newValidator,
- StringData newLevel,
- StringData newAction) = 0;
+ virtual CollectionInfoCache* infoCache() = 0;
+ virtual const CollectionInfoCache* infoCache() const = 0;
- virtual bool isCapped() const = 0;
-
- virtual std::shared_ptr<CappedInsertNotifier> getCappedInsertNotifier() const = 0;
-
- virtual uint64_t numRecords(OperationContext* opCtx) const = 0;
-
- virtual uint64_t dataSize(OperationContext* opCtx) const = 0;
-
- virtual uint64_t getIndexSize(OperationContext* opCtx,
- BSONObjBuilder* details,
- int scale) = 0;
-
- virtual boost::optional<Timestamp> getMinimumVisibleSnapshot() = 0;
-
- virtual void setMinimumVisibleSnapshot(Timestamp name) = 0;
-
- virtual bool haveCappedWaiters() = 0;
-
- virtual void notifyCappedWaitersIfNeeded() = 0;
-
- virtual const CollatorInterface* getDefaultCollator() const = 0;
-
- virtual std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> makePlanExecutor(
- OperationContext* opCtx,
- PlanExecutor::YieldPolicy yieldPolicy,
- ScanDirection scanDirection) = 0;
-
- virtual void indexBuildSuccess(OperationContext* opCtx, IndexCatalogEntry* index) = 0;
-
- virtual void establishOplogCollectionForLogging(OperationContext* opCtx) = 0;
- };
-
-public:
- static MONGO_DECLARE_SHIM((Collection * _this,
- OperationContext* opCtx,
- StringData fullNS,
- OptionalCollectionUUID uuid,
- CollectionCatalogEntry* details,
- RecordStore* recordStore,
- DatabaseCatalogEntry* dbce,
- PrivateTo<Collection>)
- ->std::unique_ptr<Impl>) makeImpl;
-
- explicit inline Collection(OperationContext* const opCtx,
- const StringData fullNS,
- OptionalCollectionUUID uuid,
- CollectionCatalogEntry* const details, // does not own
- RecordStore* const recordStore, // does not own
- DatabaseCatalogEntry* const dbce) // does not own
- : _pimpl(makeImpl(
- this, opCtx, fullNS, uuid, details, recordStore, dbce, PrivateCall<Collection>{})) {
- this->_impl().init(opCtx);
- }
-
- // Use this constructor only for testing/mocks
- explicit inline Collection(std::unique_ptr<Impl> mock) : _pimpl(std::move(mock)) {}
-
- inline ~Collection() = default;
-
- inline bool ok() const {
- return this->_impl().ok();
- }
-
- inline CollectionCatalogEntry* getCatalogEntry() {
- return this->_impl().getCatalogEntry();
- }
-
- inline const CollectionCatalogEntry* getCatalogEntry() const {
- return this->_impl().getCatalogEntry();
- }
-
- inline CollectionInfoCache* infoCache() {
- return this->_impl().infoCache();
- }
-
- inline const CollectionInfoCache* infoCache() const {
- return this->_impl().infoCache();
- }
-
- inline const NamespaceString& ns() const {
- return this->_impl().ns();
- }
+ virtual const NamespaceString& ns() const = 0;
/**
* Sets a new namespace on this Collection, in the case that the Collection is being renamed.
@@ -402,55 +188,32 @@ public:
* UUIDCatalog can perform UUID to namespace lookup without holding a Collection lock. See
* UUIDCatalog::setCollectionNamespace().
*/
- inline void setNs(NamespaceString nss) {
- this->_impl().setNs(std::move(nss));
- }
+ virtual void setNs(NamespaceString nss) = 0;
- inline OptionalCollectionUUID uuid() const {
- return this->_impl().uuid();
- }
+ virtual OptionalCollectionUUID uuid() const = 0;
- inline const IndexCatalog* getIndexCatalog() const {
- return this->_impl().getIndexCatalog();
- }
- inline IndexCatalog* getIndexCatalog() {
- return this->_impl().getIndexCatalog();
- }
+ virtual const IndexCatalog* getIndexCatalog() const = 0;
+ virtual IndexCatalog* getIndexCatalog() = 0;
- inline const RecordStore* getRecordStore() const {
- return this->_impl().getRecordStore();
- }
- inline RecordStore* getRecordStore() {
- return this->_impl().getRecordStore();
- }
+ virtual const RecordStore* getRecordStore() const = 0;
+ virtual RecordStore* getRecordStore() = 0;
- inline CursorManager* getCursorManager() const {
- return this->_impl().getCursorManager();
- }
+ virtual CursorManager* getCursorManager() const = 0;
- inline bool requiresIdIndex() const {
- return this->_impl().requiresIdIndex();
- }
+ virtual bool requiresIdIndex() const = 0;
- inline Snapshotted<BSONObj> docFor(OperationContext* const opCtx, const RecordId& loc) const {
- return Snapshotted<BSONObj>(opCtx->recoveryUnit()->getSnapshotId(),
- this->getRecordStore()->dataFor(opCtx, loc).releaseToBson());
- }
+ virtual Snapshotted<BSONObj> docFor(OperationContext* const opCtx, RecordId loc) const = 0;
/**
* @param out - contents set to the right docs if exists, or nothing.
* @return true iff loc exists
*/
- inline bool findDoc(OperationContext* const opCtx,
- const RecordId& loc,
- Snapshotted<BSONObj>* const out) const {
- return this->_impl().findDoc(opCtx, loc, out);
- }
+ virtual bool findDoc(OperationContext* const opCtx,
+ RecordId loc,
+ Snapshotted<BSONObj>* const out) const = 0;
- inline std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* const opCtx,
- const bool forward = true) const {
- return this->_impl().getCursor(opCtx, forward);
- }
+ virtual std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* const opCtx,
+ const bool forward = true) const = 0;
/**
* Deletes the document with the given RecordId from the collection.
@@ -463,16 +226,13 @@ public:
* 'noWarn' if unindexing the record causes an error, if noWarn is true the error
* will not be logged.
*/
- inline void deleteDocument(OperationContext* const opCtx,
- StmtId stmtId,
- const RecordId& loc,
- OpDebug* const opDebug,
- const bool fromMigrate = false,
- const bool noWarn = false,
- StoreDeletedDoc storeDeletedDoc = StoreDeletedDoc::Off) {
- return this->_impl().deleteDocument(
- opCtx, stmtId, loc, opDebug, fromMigrate, noWarn, storeDeletedDoc);
- }
+ virtual void deleteDocument(OperationContext* const opCtx,
+ StmtId stmtId,
+ RecordId loc,
+ OpDebug* const opDebug,
+ const bool fromMigrate = false,
+ const bool noWarn = false,
+ StoreDeletedDoc storeDeletedDoc = StoreDeletedDoc::Off) = 0;
/*
* Inserts all documents inside one WUOW.
@@ -481,13 +241,11 @@ public:
*
* 'opDebug' Optional argument. When not null, will be used to record operation statistics.
*/
- inline Status insertDocuments(OperationContext* const opCtx,
- const std::vector<InsertStatement>::const_iterator begin,
- const std::vector<InsertStatement>::const_iterator end,
- OpDebug* const opDebug,
- const bool fromMigrate = false) {
- return this->_impl().insertDocuments(opCtx, begin, end, opDebug, fromMigrate);
- }
+ virtual Status insertDocuments(OperationContext* const opCtx,
+ const std::vector<InsertStatement>::const_iterator begin,
+ const std::vector<InsertStatement>::const_iterator end,
+ OpDebug* const opDebug,
+ const bool fromMigrate = false) = 0;
/**
* this does NOT modify the doc before inserting
@@ -495,23 +253,19 @@ public:
*
* 'opDebug' Optional argument. When not null, will be used to record operation statistics.
*/
- inline Status insertDocument(OperationContext* const opCtx,
- const InsertStatement& doc,
- OpDebug* const opDebug,
- const bool fromMigrate = false) {
- return this->_impl().insertDocument(opCtx, doc, opDebug, fromMigrate);
- }
+ virtual Status insertDocument(OperationContext* const opCtx,
+ const InsertStatement& doc,
+ OpDebug* const opDebug,
+ const bool fromMigrate = false) = 0;
/**
* Callers must ensure no document validation is performed for this collection when calling
* this method.
*/
- inline Status insertDocumentsForOplog(OperationContext* const opCtx,
- const DocWriter* const* const docs,
- Timestamp* timestamps,
- const size_t nDocs) {
- return this->_impl().insertDocumentsForOplog(opCtx, docs, timestamps, nDocs);
- }
+ virtual Status insertDocumentsForOplog(OperationContext* const opCtx,
+ const DocWriter* const* const docs,
+ Timestamp* timestamps,
+ const size_t nDocs) = 0;
/**
* Inserts a document into the record store for a bulk loader that manages the index building
@@ -520,11 +274,9 @@ public:
*
* NOTE: It is up to caller to commit the indexes.
*/
- inline Status insertDocumentForBulkLoader(OperationContext* const opCtx,
- const BSONObj& doc,
- const OnRecordInsertedFn& onRecordInserted) {
- return this->_impl().insertDocumentForBulkLoader(opCtx, doc, onRecordInserted);
- }
+ virtual Status insertDocumentForBulkLoader(OperationContext* const opCtx,
+ const BSONObj& doc,
+ const OnRecordInsertedFn& onRecordInserted) = 0;
/**
* Updates the document @ oldLocation with newDoc.
@@ -535,20 +287,15 @@ public:
* 'opDebug' Optional argument. When not null, will be used to record operation statistics.
* @return the post update location of the doc (may or may not be the same as oldLocation)
*/
- inline RecordId updateDocument(OperationContext* const opCtx,
- const RecordId& oldLocation,
- const Snapshotted<BSONObj>& oldDoc,
- const BSONObj& newDoc,
- const bool indexesAffected,
- OpDebug* const opDebug,
- CollectionUpdateArgs* const args) {
- return this->_impl().updateDocument(
- opCtx, oldLocation, oldDoc, newDoc, indexesAffected, opDebug, args);
- }
+ virtual RecordId updateDocument(OperationContext* const opCtx,
+ RecordId oldLocation,
+ const Snapshotted<BSONObj>& oldDoc,
+ const BSONObj& newDoc,
+ const bool indexesAffected,
+ OpDebug* const opDebug,
+ CollectionUpdateArgs* const args) = 0;
- inline bool updateWithDamagesSupported() const {
- return this->_impl().updateWithDamagesSupported();
- }
+ virtual bool updateWithDamagesSupported() const = 0;
/**
* Not allowed to modify indexes.
@@ -557,16 +304,13 @@ public:
* success.
* @return the contents of the updated record.
*/
- inline StatusWith<RecordData> updateDocumentWithDamages(
+ virtual StatusWith<RecordData> updateDocumentWithDamages(
OperationContext* const opCtx,
- const RecordId& loc,
+ RecordId loc,
const Snapshotted<RecordData>& oldRec,
const char* const damageSource,
const mutablebson::DamageVector& damages,
- CollectionUpdateArgs* const args) {
- return this->_impl().updateDocumentWithDamages(
- opCtx, loc, oldRec, damageSource, damages, args);
- }
+ CollectionUpdateArgs* const args) = 0;
// -----------
@@ -575,33 +319,27 @@ public:
* indexes before and after will be the same
* as will other characteristics.
*/
- inline Status truncate(OperationContext* const opCtx) {
- return this->_impl().truncate(opCtx);
- }
+ virtual Status truncate(OperationContext* const opCtx) = 0;
/**
* @return OK if the validate ran successfully
* OK will be returned even if corruption is found
* details will be in result.
*/
- inline Status validate(OperationContext* const opCtx,
- const ValidateCmdLevel level,
- bool background,
- std::unique_ptr<Lock::CollectionLock> collLk,
- ValidateResults* const results,
- BSONObjBuilder* const output) {
- return this->_impl().validate(opCtx, level, background, std::move(collLk), results, output);
- }
+ virtual Status validate(OperationContext* const opCtx,
+ const ValidateCmdLevel level,
+ bool background,
+ std::unique_ptr<Lock::CollectionLock> collLk,
+ ValidateResults* const results,
+ BSONObjBuilder* const output) = 0;
/**
* forces data into cache.
*/
- inline Status touch(OperationContext* const opCtx,
- const bool touchData,
- const bool touchIndexes,
- BSONObjBuilder* const output) const {
- return this->_impl().touch(opCtx, touchData, touchIndexes, output);
- }
+ virtual Status touch(OperationContext* const opCtx,
+ const bool touchData,
+ const bool touchIndexes,
+ BSONObjBuilder* const output) const = 0;
/**
* Truncate documents newer than the document at 'end' from the capped
@@ -609,27 +347,22 @@ public:
* function. An assertion will be thrown if that is attempted.
* @param inclusive - Truncate 'end' as well iff true
*/
- inline void cappedTruncateAfter(OperationContext* const opCtx,
- const RecordId end,
- const bool inclusive) {
- return this->_impl().cappedTruncateAfter(opCtx, end, inclusive);
- }
+ virtual void cappedTruncateAfter(OperationContext* const opCtx,
+ RecordId end,
+ const bool inclusive) = 0;
/**
* Returns a non-ok Status if validator is not legal for this collection.
*/
- inline StatusWithMatchExpression parseValidator(
+ virtual StatusWithMatchExpression parseValidator(
OperationContext* opCtx,
const BSONObj& validator,
MatchExpressionParser::AllowedFeatureSet allowedFeatures,
boost::optional<ServerGlobalParams::FeatureCompatibility::Version>
- maxFeatureCompatibilityVersion) const {
- return this->_impl().parseValidator(
- opCtx, validator, allowedFeatures, maxFeatureCompatibilityVersion);
- }
+ maxFeatureCompatibilityVersion) const = 0;
- static MONGO_DECLARE_SHIM((StringData)->StatusWith<ValidationLevel>) parseValidationLevel;
- static MONGO_DECLARE_SHIM((StringData)->StatusWith<ValidationAction>) parseValidationAction;
+ static Status parseValidationLevel(StringData level);
+ static Status parseValidationAction(StringData action);
/**
* Sets the validator for this collection.
@@ -637,30 +370,19 @@ public:
* An empty validator removes all validation.
* Requires an exclusive lock on the collection.
*/
- inline Status setValidator(OperationContext* const opCtx, const BSONObj validator) {
- return this->_impl().setValidator(opCtx, validator);
- }
+ virtual Status setValidator(OperationContext* const opCtx, const BSONObj validator) = 0;
- inline Status setValidationLevel(OperationContext* const opCtx, const StringData newLevel) {
- return this->_impl().setValidationLevel(opCtx, newLevel);
- }
- inline Status setValidationAction(OperationContext* const opCtx, const StringData newAction) {
- return this->_impl().setValidationAction(opCtx, newAction);
- }
+ virtual Status setValidationLevel(OperationContext* const opCtx, const StringData newLevel) = 0;
+ virtual Status setValidationAction(OperationContext* const opCtx,
+ const StringData newAction) = 0;
- inline StringData getValidationLevel() const {
- return this->_impl().getValidationLevel();
- }
- inline StringData getValidationAction() const {
- return this->_impl().getValidationAction();
- }
+ virtual StringData getValidationLevel() const = 0;
+ virtual StringData getValidationAction() const = 0;
- inline Status updateValidator(OperationContext* opCtx,
- BSONObj newValidator,
- StringData newLevel,
- StringData newAction) {
- return this->_impl().updateValidator(opCtx, newValidator, newLevel, newAction);
- }
+ virtual Status updateValidator(OperationContext* opCtx,
+ BSONObj newValidator,
+ StringData newLevel,
+ StringData newAction) = 0;
// -----------
@@ -668,9 +390,13 @@ public:
// Stats
//
- inline bool isCapped() const {
- return this->_impl().isCapped();
- }
+ virtual bool isCapped() const = 0;
+
+ /**
+ * Returns a pointer to a capped callback object.
+ * The storage engine interacts with capped collections through a CappedCallback interface.
+ */
+ virtual CappedCallback* getCappedCallback() = 0;
/**
* Get a pointer to a capped insert notifier object. The caller can wait on this object
@@ -678,76 +404,41 @@ public:
*
* It is invalid to call this method unless the collection is capped.
*/
- inline std::shared_ptr<CappedInsertNotifier> getCappedInsertNotifier() const {
- return this->_impl().getCappedInsertNotifier();
- }
+ virtual std::shared_ptr<CappedInsertNotifier> getCappedInsertNotifier() const = 0;
- inline uint64_t numRecords(OperationContext* const opCtx) const {
- return this->_impl().numRecords(opCtx);
- }
+ virtual uint64_t numRecords(OperationContext* const opCtx) const = 0;
- inline uint64_t dataSize(OperationContext* const opCtx) const {
- return this->_impl().dataSize(opCtx);
- }
+ virtual uint64_t dataSize(OperationContext* const opCtx) const = 0;
- inline int averageObjectSize(OperationContext* const opCtx) const {
- uint64_t n = this->numRecords(opCtx);
+ virtual int averageObjectSize(OperationContext* const opCtx) const = 0;
- if (n == 0)
- return 5;
- return static_cast<int>(this->dataSize(opCtx) / n);
- }
-
- inline uint64_t getIndexSize(OperationContext* const opCtx,
- BSONObjBuilder* const details = nullptr,
- const int scale = 1) {
- return this->_impl().getIndexSize(opCtx, details, scale);
- }
+ virtual uint64_t getIndexSize(OperationContext* const opCtx,
+ BSONObjBuilder* const details = nullptr,
+ const int scale = 1) = 0;
/**
* If return value is not boost::none, reads with majority read concern using an older snapshot
* must error.
*/
- inline boost::optional<Timestamp> getMinimumVisibleSnapshot() {
- return this->_impl().getMinimumVisibleSnapshot();
- }
+ virtual boost::optional<Timestamp> getMinimumVisibleSnapshot() = 0;
- inline void setMinimumVisibleSnapshot(const Timestamp name) {
- return this->_impl().setMinimumVisibleSnapshot(name);
- }
-
- inline bool haveCappedWaiters() {
- return this->_impl().haveCappedWaiters();
- }
-
- /**
- * Notify (capped collection) waiters of data changes, like an insert.
- */
- inline void notifyCappedWaitersIfNeeded() {
- return this->_impl().notifyCappedWaitersIfNeeded();
- }
+ virtual void setMinimumVisibleSnapshot(const Timestamp name) = 0;
/**
* Get a pointer to the collection's default collator. The pointer must not be used after this
* Collection is destroyed.
*/
- inline const CollatorInterface* getDefaultCollator() const {
- return this->_impl().getDefaultCollator();
- }
+ virtual const CollatorInterface* getDefaultCollator() const = 0;
/**
* Returns a plan executor for a collection scan over this collection.
*/
- inline std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> makePlanExecutor(
+ virtual std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> makePlanExecutor(
OperationContext* opCtx,
PlanExecutor::YieldPolicy yieldPolicy,
- ScanDirection scanDirection) {
- return this->_impl().makePlanExecutor(opCtx, yieldPolicy, scanDirection);
- }
+ ScanDirection scanDirection) = 0;
- inline void indexBuildSuccess(OperationContext* opCtx, IndexCatalogEntry* index) {
- return this->_impl().indexBuildSuccess(opCtx, index);
- }
+ virtual void indexBuildSuccess(OperationContext* opCtx, IndexCatalogEntry* index) = 0;
/**
* Use this Collection as the new cached pointer to the local oplog.
@@ -755,50 +446,9 @@ public:
* Called by catalog::openCatalog() to re-establish the oplog collection pointer while holding
* onto the global lock in exclusive mode.
*/
- inline void establishOplogCollectionForLogging(OperationContext* opCtx) {
- this->_impl().establishOplogCollectionForLogging(opCtx);
- }
-
-private:
- inline DatabaseCatalogEntry* dbce() const {
- return this->_impl().dbce();
- }
-
- inline CollectionCatalogEntry* details() const {
- return this->_impl().details();
- }
-
- inline Status aboutToDeleteCapped(OperationContext* const opCtx,
- const RecordId& loc,
- const RecordData data) final {
- return this->_impl().aboutToDeleteCapped(opCtx, loc, data);
- }
-
- // This structure exists to give us a customization point to decide how to force users of this
- // class to depend upon the corresponding `collection.cpp` Translation Unit (TU). All public
- // forwarding functions call `_impl(), and `_impl` creates an instance of this structure.
- struct TUHook {
- static void hook() noexcept;
+ virtual void establishOplogCollectionForLogging(OperationContext* opCtx) = 0;
- explicit inline TUHook() noexcept {
- if (kDebugBuild)
- this->hook();
- }
- };
-
- inline const Impl& _impl() const {
- TUHook{};
- return *this->_pimpl;
- }
-
- inline Impl& _impl() {
- TUHook{};
- return *this->_pimpl;
- }
-
- std::unique_ptr<Impl> _pimpl;
-
- friend class DatabaseImpl;
- friend class IndexCatalogImpl;
+ virtual DatabaseCatalogEntry* dbce() const = 0;
};
+
} // namespace mongo
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index ffe7805e24a..377fb669bd0 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -79,29 +79,6 @@
namespace mongo {
-MONGO_REGISTER_SHIM(Collection::makeImpl)
-(Collection* const _this,
- OperationContext* const opCtx,
- const StringData fullNS,
- OptionalCollectionUUID uuid,
- CollectionCatalogEntry* const details,
- RecordStore* const recordStore,
- DatabaseCatalogEntry* const dbce,
- PrivateTo<Collection>)
- ->std::unique_ptr<Collection::Impl> {
- return std::make_unique<CollectionImpl>(_this, opCtx, fullNS, uuid, details, recordStore, dbce);
-}
-
-MONGO_REGISTER_SHIM(Collection::parseValidationLevel)
-(const StringData data)->StatusWith<Collection::ValidationLevel> {
- return CollectionImpl::parseValidationLevel(data);
-}
-
-MONGO_REGISTER_SHIM(Collection::parseValidationAction)
-(const StringData data)->StatusWith<Collection::ValidationAction> {
- return CollectionImpl::parseValidationAction(data);
-}
-
namespace {
// This fail point injects insertion failures for all collections unless a collection name is
// provided in the optional data object during configuration:
@@ -167,6 +144,45 @@ std::unique_ptr<CollatorInterface> parseCollation(OperationContext* opCtx,
return std::move(collator.getValue());
}
+
+StatusWith<CollectionImpl::ValidationLevel> _parseValidationLevel(StringData newLevel) {
+ auto status = Collection::parseValidationLevel(newLevel);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ if (newLevel == "") {
+ // default
+ return CollectionImpl::ValidationLevel::STRICT_V;
+ } else if (newLevel == "off") {
+ return CollectionImpl::ValidationLevel::OFF;
+ } else if (newLevel == "moderate") {
+ return CollectionImpl::ValidationLevel::MODERATE;
+ } else if (newLevel == "strict") {
+ return CollectionImpl::ValidationLevel::STRICT_V;
+ }
+
+ MONGO_UNREACHABLE;
+}
+
+StatusWith<CollectionImpl::ValidationAction> _parseValidationAction(StringData newAction) {
+ auto status = Collection::parseValidationAction(newAction);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ if (newAction == "") {
+ // default
+ return CollectionImpl::ValidationAction::ERROR_V;
+ } else if (newAction == "warn") {
+ return CollectionImpl::ValidationAction::WARN;
+ } else if (newAction == "error") {
+ return CollectionImpl::ValidationAction::ERROR_V;
+ }
+
+ MONGO_UNREACHABLE;
+}
+
} // namespace
using std::endl;
@@ -176,37 +192,34 @@ using std::vector;
using logger::LogComponent;
-CollectionImpl::CollectionImpl(Collection* _this_init,
- OperationContext* opCtx,
+CollectionImpl::CollectionImpl(OperationContext* opCtx,
StringData fullNS,
OptionalCollectionUUID uuid,
CollectionCatalogEntry* details,
RecordStore* recordStore,
DatabaseCatalogEntry* dbce)
- : _ns(fullNS),
+ : _magic(kMagicNumber),
+ _ns(fullNS),
_uuid(uuid),
_details(details),
_recordStore(recordStore),
_dbce(dbce),
_needCappedLock(supportsDocLocking() && _recordStore->isCapped() && _ns.db() != "local"),
- _infoCache(std::make_unique<CollectionInfoCacheImpl>(_this_init, _ns)),
- _indexCatalog(std::make_unique<IndexCatalogImpl>(_this_init,
- getCatalogEntry()->getMaxAllowedIndexes())),
+ _infoCache(std::make_unique<CollectionInfoCacheImpl>(this, _ns)),
+ _indexCatalog(
+ std::make_unique<IndexCatalogImpl>(this, getCatalogEntry()->getMaxAllowedIndexes())),
_collator(parseCollation(opCtx, _ns, _details->getCollectionOptions(opCtx).collation)),
_validatorDoc(_details->getCollectionOptions(opCtx).validator.getOwned()),
_validator(uassertStatusOK(
parseValidator(opCtx, _validatorDoc, MatchExpressionParser::kAllowAllSpecialFeatures))),
_validationAction(uassertStatusOK(
- parseValidationAction(_details->getCollectionOptions(opCtx).validationAction))),
+ _parseValidationAction(_details->getCollectionOptions(opCtx).validationAction))),
_validationLevel(uassertStatusOK(
- parseValidationLevel(_details->getCollectionOptions(opCtx).validationLevel))),
+ _parseValidationLevel(_details->getCollectionOptions(opCtx).validationLevel))),
_cursorManager(std::make_unique<CursorManager>(_ns)),
_cappedNotifier(_recordStore->isCapped() ? stdx::make_unique<CappedInsertNotifier>()
- : nullptr),
- _this(_this_init) {}
+ : nullptr) {
-void CollectionImpl::init(OperationContext* opCtx) {
- _magic = kMagicNumber;
_indexCatalog->init(opCtx).transitional_ignore();
if (isCapped())
_recordStore->setCappedCallback(this);
@@ -224,7 +237,7 @@ CollectionImpl::~CollectionImpl() {
if (_uuid) {
if (auto opCtx = cc().getOperationContext()) {
auto& uuidCatalog = UUIDCatalog::get(opCtx);
- invariant(uuidCatalog.lookupCollectionByUUID(_uuid.get()) != _this);
+ invariant(uuidCatalog.lookupCollectionByUUID(_uuid.get()) != this);
auto& cache = NamespaceUUIDCache::get(opCtx);
// TODO(geert): cache.verifyNotCached(ns(), uuid().get());
cache.evictNamespace(ns());
@@ -260,7 +273,7 @@ std::unique_ptr<SeekableRecordCursor> CollectionImpl::getCursor(OperationContext
bool CollectionImpl::findDoc(OperationContext* opCtx,
- const RecordId& loc,
+ RecordId loc,
Snapshotted<BSONObj>* out) const {
dassert(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
@@ -567,7 +580,7 @@ Status CollectionImpl::aboutToDeleteCapped(OperationContext* opCtx,
void CollectionImpl::deleteDocument(OperationContext* opCtx,
StmtId stmtId,
- const RecordId& loc,
+ RecordId loc,
OpDebug* opDebug,
bool fromMigrate,
bool noWarn,
@@ -602,7 +615,7 @@ Counter64 moveCounter;
ServerStatusMetricField<Counter64> moveCounterDisplay("record.moves", &moveCounter);
RecordId CollectionImpl::updateDocument(OperationContext* opCtx,
- const RecordId& oldLocation,
+ RecordId oldLocation,
const Snapshotted<BSONObj>& oldDoc,
const BSONObj& newDoc,
bool indexesAffected,
@@ -693,7 +706,7 @@ bool CollectionImpl::updateWithDamagesSupported() const {
StatusWith<RecordData> CollectionImpl::updateDocumentWithDamages(
OperationContext* opCtx,
- const RecordId& loc,
+ RecordId loc,
const Snapshotted<RecordData>& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages,
@@ -719,6 +732,10 @@ bool CollectionImpl::isCapped() const {
return _cappedNotifier.get();
}
+CappedCallback* CollectionImpl::getCappedCallback() {
+ return this;
+}
+
std::shared_ptr<CappedInsertNotifier> CollectionImpl::getCappedInsertNotifier() const {
invariant(isCapped());
return _cappedNotifier;
@@ -836,36 +853,6 @@ Status CollectionImpl::setValidator(OperationContext* opCtx, BSONObj validatorDo
return Status::OK();
}
-auto CollectionImpl::parseValidationLevel(StringData newLevel) -> StatusWith<ValidationLevel> {
- if (newLevel == "") {
- // default
- return ValidationLevel::STRICT_V;
- } else if (newLevel == "off") {
- return ValidationLevel::OFF;
- } else if (newLevel == "moderate") {
- return ValidationLevel::MODERATE;
- } else if (newLevel == "strict") {
- return ValidationLevel::STRICT_V;
- } else {
- return Status(ErrorCodes::BadValue,
- str::stream() << "invalid validation level: " << newLevel);
- }
-}
-
-auto CollectionImpl::parseValidationAction(StringData newAction) -> StatusWith<ValidationAction> {
- if (newAction == "") {
- // default
- return ValidationAction::ERROR_V;
- } else if (newAction == "warn") {
- return ValidationAction::WARN;
- } else if (newAction == "error") {
- return ValidationAction::ERROR_V;
- } else {
- return Status(ErrorCodes::BadValue,
- str::stream() << "invalid validation action: " << newAction);
- }
-}
-
StringData CollectionImpl::getValidationLevel() const {
switch (_validationLevel) {
case ValidationLevel::STRICT_V:
@@ -891,13 +878,13 @@ StringData CollectionImpl::getValidationAction() const {
Status CollectionImpl::setValidationLevel(OperationContext* opCtx, StringData newLevel) {
invariant(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
- StatusWith<ValidationLevel> status = parseValidationLevel(newLevel);
- if (!status.isOK()) {
- return status.getStatus();
+ auto levelSW = _parseValidationLevel(newLevel);
+ if (!levelSW.isOK()) {
+ return levelSW.getStatus();
}
auto oldValidationLevel = _validationLevel;
- _validationLevel = status.getValue();
+ _validationLevel = levelSW.getValue();
_details->updateValidator(opCtx, _validatorDoc, getValidationLevel(), getValidationAction());
opCtx->recoveryUnit()->onRollback(
@@ -909,13 +896,13 @@ Status CollectionImpl::setValidationLevel(OperationContext* opCtx, StringData ne
Status CollectionImpl::setValidationAction(OperationContext* opCtx, StringData newAction) {
invariant(opCtx->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
- StatusWith<ValidationAction> status = parseValidationAction(newAction);
- if (!status.isOK()) {
- return status.getStatus();
+ auto actionSW = _parseValidationAction(newAction);
+ if (!actionSW.isOK()) {
+ return actionSW.getStatus();
}
auto oldValidationAction = _validationAction;
- _validationAction = status.getValue();
+ _validationAction = actionSW.getValue();
_details->updateValidator(opCtx, _validatorDoc, getValidationLevel(), getValidationAction());
opCtx->recoveryUnit()->onRollback(
@@ -953,13 +940,13 @@ Status CollectionImpl::updateValidator(OperationContext* opCtx,
}
_validator = std::move(validatorSW.getValue());
- auto levelSW = parseValidationLevel(newLevel);
+ auto levelSW = _parseValidationLevel(newLevel);
if (!levelSW.isOK()) {
return levelSW.getStatus();
}
_validationLevel = levelSW.getValue();
- auto actionSW = parseValidationAction(newAction);
+ auto actionSW = _parseValidationAction(newAction);
if (!actionSW.isOK()) {
return actionSW.getStatus();
}
@@ -1190,7 +1177,7 @@ Status CollectionImpl::validate(OperationContext* opCtx,
ValidateResultsMap indexNsResultsMap;
BSONObjBuilder keysPerIndex; // not using subObjStart to be exception safe
IndexConsistency indexConsistency(
- opCtx, _this, ns(), _recordStore, std::move(collLk), background);
+ opCtx, this, ns(), _recordStore, std::move(collLk), background);
RecordStoreValidateAdaptor indexValidator = RecordStoreValidateAdaptor(
opCtx, &indexConsistency, level, _indexCatalog.get(), &indexNsResultsMap);
@@ -1285,7 +1272,7 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> CollectionImpl::makePlanExe
OperationContext* opCtx, PlanExecutor::YieldPolicy yieldPolicy, ScanDirection scanDirection) {
auto isForward = scanDirection == ScanDirection::kForward;
auto direction = isForward ? InternalPlanner::FORWARD : InternalPlanner::BACKWARD;
- return InternalPlanner::collectionScan(opCtx, _ns.ns(), _this, yieldPolicy, direction);
+ return InternalPlanner::collectionScan(opCtx, _ns.ns(), this, yieldPolicy, direction);
}
void CollectionImpl::setNs(NamespaceString nss) {
@@ -1309,7 +1296,7 @@ void CollectionImpl::indexBuildSuccess(OperationContext* opCtx, IndexCatalogEntr
}
void CollectionImpl::establishOplogCollectionForLogging(OperationContext* opCtx) {
- repl::establishOplogCollectionForLogging(opCtx, _this);
+ repl::establishOplogCollectionForLogging(opCtx, this);
}
} // namespace mongo
diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h
index 1b4569240c0..19ec048b572 100644
--- a/src/mongo/db/catalog/collection_impl.h
+++ b/src/mongo/db/catalog/collection_impl.h
@@ -39,13 +39,15 @@
namespace mongo {
class IndexConsistency;
class UUIDCatalog;
-class CollectionImpl final : virtual public Collection::Impl, virtual CappedCallback {
+class CollectionImpl final : public Collection, public CappedCallback {
private:
static const int kMagicNumber = 1357924;
public:
- explicit CollectionImpl(Collection* _this,
- OperationContext* opCtx,
+ enum ValidationAction { WARN, ERROR_V };
+ enum ValidationLevel { OFF, MODERATE, STRICT_V };
+
+ explicit CollectionImpl(OperationContext* opCtx,
StringData fullNS,
OptionalCollectionUUID uuid,
CollectionCatalogEntry* details, // does not own
@@ -54,8 +56,6 @@ public:
~CollectionImpl();
- void init(OperationContext* opCtx) final;
-
bool ok() const final {
return _magic == kMagicNumber;
}
@@ -108,7 +108,7 @@ public:
bool requiresIdIndex() const final;
- Snapshotted<BSONObj> docFor(OperationContext* opCtx, const RecordId& loc) const final {
+ Snapshotted<BSONObj> docFor(OperationContext* opCtx, RecordId loc) const final {
return Snapshotted<BSONObj>(opCtx->recoveryUnit()->getSnapshotId(),
_recordStore->dataFor(opCtx, loc).releaseToBson());
}
@@ -117,9 +117,7 @@ public:
* @param out - contents set to the right docs if exists, or nothing.
* @return true iff loc exists
*/
- bool findDoc(OperationContext* opCtx,
- const RecordId& loc,
- Snapshotted<BSONObj>* out) const final;
+ bool findDoc(OperationContext* opCtx, RecordId loc, Snapshotted<BSONObj>* out) const final;
std::unique_ptr<SeekableRecordCursor> getCursor(OperationContext* opCtx,
bool forward = true) const final;
@@ -142,7 +140,7 @@ public:
void deleteDocument(
OperationContext* opCtx,
StmtId stmtId,
- const RecordId& loc,
+ RecordId loc,
OpDebug* opDebug,
bool fromMigrate = false,
bool noWarn = false,
@@ -202,7 +200,7 @@ public:
* @return the post update location of the doc (may or may not be the same as oldLocation)
*/
RecordId updateDocument(OperationContext* opCtx,
- const RecordId& oldLocation,
+ RecordId oldLocation,
const Snapshotted<BSONObj>& oldDoc,
const BSONObj& newDoc,
bool indexesAffected,
@@ -219,7 +217,7 @@ public:
* @return the contents of the updated record.
*/
StatusWith<RecordData> updateDocumentWithDamages(OperationContext* opCtx,
- const RecordId& loc,
+ RecordId loc,
const Snapshotted<RecordData>& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages,
@@ -262,10 +260,6 @@ public:
*/
void cappedTruncateAfter(OperationContext* opCtx, RecordId end, bool inclusive) final;
- using ValidationAction = Collection::ValidationAction;
-
- using ValidationLevel = Collection::ValidationLevel;
-
/**
* Returns a non-ok Status if validator is not legal for this collection.
*/
@@ -276,9 +270,6 @@ public:
boost::optional<ServerGlobalParams::FeatureCompatibility::Version>
maxFeatureCompatibilityVersion = boost::none) const final;
- static StatusWith<ValidationLevel> parseValidationLevel(StringData);
- static StatusWith<ValidationAction> parseValidationAction(StringData);
-
/**
* Sets the validator for this collection.
*
@@ -311,6 +302,8 @@ public:
bool isCapped() const final;
+ CappedCallback* getCappedCallback() final;
+
/**
* Get a pointer to a capped insert notifier object. The caller can wait on this object
* until it is notified of a new insert into the capped collection.
@@ -369,15 +362,11 @@ public:
void establishOplogCollectionForLogging(OperationContext* opCtx) final;
-private:
inline DatabaseCatalogEntry* dbce() const final {
return this->_dbce;
}
- inline CollectionCatalogEntry* details() const final {
- return this->_details;
- }
-
+private:
/**
* Returns a non-ok Status if document does not pass this collection's validator.
*/
@@ -434,7 +423,5 @@ private:
// The earliest snapshot that is allowed to use this collection.
boost::optional<Timestamp> _minVisibleSnapshot;
-
- Collection* _this;
};
} // namespace mongo
diff --git a/src/mongo/db/catalog/collection_mock.h b/src/mongo/db/catalog/collection_mock.h
index cd6a8d6e020..4b82f16f378 100644
--- a/src/mongo/db/catalog/collection_mock.h
+++ b/src/mongo/db/catalog/collection_mock.h
@@ -38,7 +38,7 @@ namespace mongo {
/**
* This class comprises a mock Collection for use by UUIDCatalog unit tests.
*/
-class CollectionMock : virtual public Collection::Impl, virtual CappedCallback {
+class CollectionMock : public Collection {
public:
CollectionMock(const NamespaceString& ns) : CollectionMock(ns, {}) {}
CollectionMock(const NamespaceString& ns, std::unique_ptr<IndexCatalog> indexCatalog)
@@ -49,20 +49,6 @@ public:
std::abort();
}
-private:
- DatabaseCatalogEntry* dbce() const {
- std::abort();
- }
-
- CollectionCatalogEntry* details() const {
- std::abort();
- }
-
- Status aboutToDeleteCapped(OperationContext* opCtx, const RecordId& loc, RecordData data) {
- std::abort();
- }
-
-public:
const NamespaceString& ns() const {
return _ns;
}
@@ -110,11 +96,11 @@ public:
std::abort();
}
- Snapshotted<BSONObj> docFor(OperationContext* opCtx, const RecordId& loc) const {
+ Snapshotted<BSONObj> docFor(OperationContext* opCtx, RecordId loc) const {
std::abort();
}
- bool findDoc(OperationContext* opCtx, const RecordId& loc, Snapshotted<BSONObj>* out) const {
+ bool findDoc(OperationContext* opCtx, RecordId loc, Snapshotted<BSONObj>* out) const {
std::abort();
}
@@ -124,7 +110,7 @@ public:
void deleteDocument(OperationContext* opCtx,
StmtId stmtId,
- const RecordId& loc,
+ RecordId loc,
OpDebug* opDebug,
bool fromMigrate,
bool noWarn,
@@ -161,7 +147,7 @@ public:
}
RecordId updateDocument(OperationContext* opCtx,
- const RecordId& oldLocation,
+ RecordId oldLocation,
const Snapshotted<BSONObj>& oldDoc,
const BSONObj& newDoc,
bool indexesAffected,
@@ -175,7 +161,7 @@ public:
}
StatusWith<RecordData> updateDocumentWithDamages(OperationContext* opCtx,
- const RecordId& loc,
+ RecordId loc,
const Snapshotted<RecordData>& oldRec,
const char* damageSource,
const mutablebson::DamageVector& damages,
@@ -245,6 +231,10 @@ public:
std::abort();
}
+ CappedCallback* getCappedCallback() {
+ std::abort();
+ }
+
std::shared_ptr<CappedInsertNotifier> getCappedInsertNotifier() const {
std::abort();
}
@@ -257,23 +247,19 @@ public:
std::abort();
}
- uint64_t getIndexSize(OperationContext* opCtx, BSONObjBuilder* details, int scale) {
+ int averageObjectSize(OperationContext* const opCtx) const {
std::abort();
}
- boost::optional<Timestamp> getMinimumVisibleSnapshot() {
+ uint64_t getIndexSize(OperationContext* opCtx, BSONObjBuilder* details, int scale) {
std::abort();
}
- void setMinimumVisibleSnapshot(Timestamp name) {
+ boost::optional<Timestamp> getMinimumVisibleSnapshot() {
std::abort();
}
- bool haveCappedWaiters() {
- return false;
- }
-
- void notifyCappedWaitersIfNeeded() {
+ void setMinimumVisibleSnapshot(Timestamp name) {
std::abort();
}
@@ -292,6 +278,10 @@ public:
std::abort();
}
+ DatabaseCatalogEntry* dbce() const {
+ std::abort();
+ }
+
OptionalCollectionUUID uuid() const {
return UUID::gen();
}
@@ -304,4 +294,5 @@ private:
NamespaceString _ns;
std::unique_ptr<IndexCatalog> _indexCatalog;
};
+
} // namespace mongo
diff --git a/src/mongo/db/catalog/collection_test.cpp b/src/mongo/db/catalog/collection_test.cpp
index 35171e6ee24..14503494001 100644
--- a/src/mongo/db/catalog/collection_test.cpp
+++ b/src/mongo/db/catalog/collection_test.cpp
@@ -157,12 +157,12 @@ TEST_F(CollectionTest, HaveCappedWaiters) {
AutoGetCollectionForRead acfr(operationContext(), nss);
Collection* col = acfr.getCollection();
- ASSERT_FALSE(col->haveCappedWaiters());
+ ASSERT_FALSE(col->getCappedCallback()->haveCappedWaiters());
{
auto notifier = col->getCappedInsertNotifier();
- ASSERT(col->haveCappedWaiters());
+ ASSERT(col->getCappedCallback()->haveCappedWaiters());
}
- ASSERT_FALSE(col->haveCappedWaiters());
+ ASSERT_FALSE(col->getCappedCallback()->haveCappedWaiters());
}
TEST_F(CollectionTest, NotifyCappedWaitersIfNeeded) {
@@ -171,11 +171,11 @@ TEST_F(CollectionTest, NotifyCappedWaitersIfNeeded) {
AutoGetCollectionForRead acfr(operationContext(), nss);
Collection* col = acfr.getCollection();
- col->notifyCappedWaitersIfNeeded();
+ col->getCappedCallback()->notifyCappedWaitersIfNeeded();
{
auto notifier = col->getCappedInsertNotifier();
ASSERT_EQ(notifier->getVersion(), 0u);
- col->notifyCappedWaitersIfNeeded();
+ col->getCappedCallback()->notifyCappedWaitersIfNeeded();
ASSERT_EQ(notifier->getVersion(), 1u);
}
}
@@ -195,7 +195,7 @@ TEST_F(CollectionTest, AsynchronouslyNotifyCappedWaitersIfNeeded) {
stdx::thread thread([before, prevVersion, col] {
auto after = Date_t::now();
ASSERT_GTE(after - before, Milliseconds(25));
- col->notifyCappedWaitersIfNeeded();
+ col->getCappedCallback()->notifyCappedWaitersIfNeeded();
});
notifier->waitUntil(prevVersion, before + Seconds(25));
auto after = Date_t::now();
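
The capped-waiter helpers dropped from the Collection interface are now reached through the CappedCallback that the collection exposes, which is what these test changes exercise. A hedged fragment of the new call shape (assumes coll is any Collection* for a capped collection):

// Notify waiters only when someone is actually waiting on the capped insert notifier.
if (coll->getCappedCallback()->haveCappedWaiters()) {
    coll->getCappedCallback()->notifyCappedWaitersIfNeeded();
}
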
diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp
index 7573ca753ab..7def7ea2b68 100644
--- a/src/mongo/db/catalog/database_holder_impl.cpp
+++ b/src/mongo/db/catalog/database_holder_impl.cpp
@@ -37,7 +37,7 @@
#include "mongo/base/init.h"
#include "mongo/db/audit.h"
#include "mongo/db/background.h"
-#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_impl.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/database_catalog_entry.h"
#include "mongo/db/catalog/namespace_uuid_cache.h"
@@ -258,7 +258,7 @@ std::unique_ptr<Collection> DatabaseHolderImpl::makeCollection(
CollectionCatalogEntry* const details,
RecordStore* const recordStore,
DatabaseCatalogEntry* const dbce) {
- return std::make_unique<Collection>(opCtx, fullNS, uuid, details, recordStore, dbce);
+ return std::make_unique<CollectionImpl>(opCtx, fullNS, uuid, details, recordStore, dbce);
}
} // namespace mongo
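
Because Collection is now abstract, call sites that used to construct the wrapper (which ran the makeImpl shim and then init()) build the concrete type directly and pass it around through the interface. A hedged fragment restating the construction shapes visible in this diff (the argument names are placeholders for variables in scope at the call site):

// Production path (DatabaseHolderImpl::makeCollection above):
std::unique_ptr<Collection> coll =
    std::make_unique<CollectionImpl>(opCtx, fullNS, uuid, details, recordStore, dbce);

// Test path (e.g. multi_index_block_test.cpp below): the mock is used as-is,
// with no wrapper Collection object around it.
std::unique_ptr<Collection> mock = std::make_unique<CollectionMock>(nss);
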
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index e68ab9f6ef9..bcbbadbb194 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -41,8 +41,8 @@
#include "mongo/base/init.h"
#include "mongo/db/audit.h"
#include "mongo/db/background.h"
-#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/collection_catalog_entry.h"
+#include "mongo/db/catalog/collection_impl.h"
#include "mongo/db/catalog/collection_options.h"
#include "mongo/db/catalog/database_catalog_entry.h"
#include "mongo/db/catalog/database_holder.h"
@@ -258,7 +258,7 @@ Collection* DatabaseImpl::_getOrCreateCollectionInstance(OperationContext* opCtx
}
// Not registering AddCollectionChange since this is for collections that already exist.
- Collection* coll = new Collection(opCtx, nss.ns(), uuid, cce.release(), rs.release(), _dbEntry);
+ auto coll = new CollectionImpl(opCtx, nss.ns(), uuid, cce.release(), rs.release(), _dbEntry);
if (uuid) {
// We are not in a WUOW only when we are called from Database::init(). There is no need
// to rollback UUIDCatalog changes because we are initializing existing collections.
diff --git a/src/mongo/db/catalog/index_build_block.cpp b/src/mongo/db/catalog/index_build_block.cpp
index 749245cb1d6..6511bf18003 100644
--- a/src/mongo/db/catalog/index_build_block.cpp
+++ b/src/mongo/db/catalog/index_build_block.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/db_raii.h"
+#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/logical_clock.h"
#include "mongo/db/operation_context.h"
#include "mongo/util/assert_util.h"
diff --git a/src/mongo/db/catalog/multi_index_block_test.cpp b/src/mongo/db/catalog/multi_index_block_test.cpp
index 1582fcf2b59..0a7c46a5bb3 100644
--- a/src/mongo/db/catalog/multi_index_block_test.cpp
+++ b/src/mongo/db/catalog/multi_index_block_test.cpp
@@ -77,7 +77,7 @@ void MultiIndexBlockTest::setUp() {
NamespaceString nss("mydb.mycoll");
auto collectionMock =
std::make_unique<CollectionMock>(nss, std::make_unique<IndexCatalogNoop>());
- _collection = std::make_unique<Collection>(std::move(collectionMock));
+ _collection = std::move(collectionMock);
_indexer = std::make_unique<MultiIndexBlock>(_opCtx.get(), _collection.get());
}
diff --git a/src/mongo/db/catalog/uuid_catalog_test.cpp b/src/mongo/db/catalog/uuid_catalog_test.cpp
index 6e3bd56ebc1..6c1237731d9 100644
--- a/src/mongo/db/catalog/uuid_catalog_test.cpp
+++ b/src/mongo/db/catalog/uuid_catalog_test.cpp
@@ -45,7 +45,7 @@ class UUIDCatalogTest : public unittest::Test {
public:
UUIDCatalogTest()
: nss("testdb", "testcol"),
- col(stdx::make_unique<CollectionMock>(nss)),
+ col(nss),
colUUID(CollectionUUID::gen()),
nextUUID(CollectionUUID::gen()),
prevUUID(CollectionUUID::gen()) {
@@ -66,7 +66,7 @@ protected:
UUIDCatalog catalog;
OperationContextNoop opCtx;
NamespaceString nss;
- Collection col;
+ CollectionMock col;
CollectionUUID colUUID;
CollectionUUID nextUUID;
CollectionUUID prevUUID;
@@ -96,7 +96,7 @@ TEST_F(UUIDCatalogTest, LookupNSSByUUID) {
TEST_F(UUIDCatalogTest, InsertAfterLookup) {
auto newUUID = CollectionUUID::gen();
NamespaceString newNss(nss.db(), "newcol");
- Collection newCol(stdx::make_unique<CollectionMock>(newNss));
+ CollectionMock newCol(newNss);
// Ensure that looking up non-existing UUIDs doesn't affect later registration of those UUIDs.
ASSERT(catalog.lookupCollectionByUUID(newUUID) == nullptr);
@@ -115,7 +115,7 @@ TEST_F(UUIDCatalogTest, OnDropCollection) {
TEST_F(UUIDCatalogTest, RenameCollection) {
auto uuid = CollectionUUID::gen();
NamespaceString oldNss(nss.db(), "oldcol");
- Collection collection(stdx::make_unique<CollectionMock>(oldNss));
+ CollectionMock collection(oldNss);
catalog.onCreateCollection(&opCtx, &collection, uuid);
ASSERT_EQUALS(catalog.lookupCollectionByUUID(uuid), &collection);
@@ -130,19 +130,19 @@ TEST_F(UUIDCatalogTest, NonExistingNextCol) {
ASSERT_FALSE(catalog.next(nss.db(), nextUUID));
NamespaceString newNss("anotherdb", "newcol");
- Collection newCol(stdx::make_unique<CollectionMock>(newNss));
+ CollectionMock newCol(newNss);
catalog.onCreateCollection(&opCtx, &newCol, nextUUID);
ASSERT_FALSE(catalog.next(nss.db(), colUUID));
NamespaceString prevNss(nss.db(), "prevcol");
- Collection prevCol(stdx::make_unique<CollectionMock>(prevNss));
+ CollectionMock prevCol(prevNss);
catalog.onCreateCollection(&opCtx, &prevCol, prevUUID);
ASSERT_FALSE(catalog.next(nss.db(), colUUID));
}
TEST_F(UUIDCatalogTest, ExistingNextCol) {
NamespaceString nextNss(nss.db(), "next");
- Collection nextCol(stdx::make_unique<CollectionMock>(nextNss));
+ CollectionMock nextCol(nextNss);
catalog.onCreateCollection(&opCtx, &nextCol, nextUUID);
auto next = catalog.next(nss.db(), colUUID);
ASSERT_TRUE(next);
@@ -154,19 +154,19 @@ TEST_F(UUIDCatalogTest, NonExistingPrevCol) {
ASSERT_FALSE(catalog.prev(nss.db(), prevUUID));
NamespaceString newNss("anotherdb", "newcol");
- Collection newCol(stdx::make_unique<CollectionMock>(newNss));
+ CollectionMock newCol(newNss);
catalog.onCreateCollection(&opCtx, &newCol, nextUUID);
ASSERT_FALSE(catalog.prev(nss.db(), colUUID));
NamespaceString nextNss(nss.db(), "nextcol");
- Collection nextCol(stdx::make_unique<CollectionMock>(nextNss));
+ CollectionMock nextCol(nextNss);
catalog.onCreateCollection(&opCtx, &nextCol, nextUUID);
ASSERT_FALSE(catalog.prev(nss.db(), colUUID));
}
TEST_F(UUIDCatalogTest, ExistingPrevCol) {
NamespaceString prevNss(nss.db(), "prevcol");
- Collection prevCol(stdx::make_unique<CollectionMock>(prevNss));
+ CollectionMock prevCol(prevNss);
catalog.onCreateCollection(&opCtx, &prevCol, prevUUID);
auto prev = catalog.prev(nss.db(), colUUID);
ASSERT_TRUE(prev);
@@ -183,11 +183,11 @@ TEST_F(UUIDCatalogTest, NextPrevColOnEmptyCatalog) {
TEST_F(UUIDCatalogTest, InvalidateOrdering) {
NamespaceString prevNss(nss.db(), "prevcol");
- Collection prevCol(stdx::make_unique<CollectionMock>(prevNss));
+ CollectionMock prevCol(prevNss);
catalog.onCreateCollection(&opCtx, &prevCol, prevUUID);
NamespaceString nextNss(nss.db(), "nextcol");
- Collection nextCol(stdx::make_unique<CollectionMock>(nextNss));
+ CollectionMock nextCol(nextNss);
catalog.onCreateCollection(&opCtx, &nextCol, nextUUID);
catalog.onDropCollection(&opCtx, colUUID);
@@ -213,7 +213,7 @@ TEST_F(UUIDCatalogTest, LookupNSSByUUIDForClosedCatalogReturnsOldNSSIfDropped) {
TEST_F(UUIDCatalogTest, LookupNSSByUUIDForClosedCatalogReturnsNewlyCreatedNSS) {
auto newUUID = CollectionUUID::gen();
NamespaceString newNss(nss.db(), "newcol");
- Collection newCol(stdx::make_unique<CollectionMock>(newNss));
+ CollectionMock newCol(newNss);
// Ensure that looking up non-existing UUIDs doesn't affect later registration of those UUIDs.
catalog.onCloseCatalog(&opCtx);
@@ -231,7 +231,7 @@ TEST_F(UUIDCatalogTest, LookupNSSByUUIDForClosedCatalogReturnsNewlyCreatedNSS) {
TEST_F(UUIDCatalogTest, LookupNSSByUUIDForClosedCatalogReturnsFreshestNSS) {
NamespaceString newNss(nss.db(), "newcol");
- Collection newCol(stdx::make_unique<CollectionMock>(newNss));
+ CollectionMock newCol(newNss);
catalog.onCloseCatalog(&opCtx);
catalog.onDropCollection(&opCtx, colUUID);
ASSERT(catalog.lookupCollectionByUUID(colUUID) == nullptr);
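The same substitution repeats through the tests above: because CollectionMock now derives from Collection directly, a test can construct the mock on the stack and pass its address wherever a Collection* is expected, instead of wrapping it in the old shim. A self-contained sketch of the idiom, with hypothetical simplified types:

    #include <string>
    #include <utility>

    // Hypothetical simplified types, only to illustrate the test-side idiom.
    struct Collection {
        virtual ~Collection() = default;
    };

    struct CollectionMock : Collection {
        explicit CollectionMock(std::string nss) : ns(std::move(nss)) {}
        std::string ns;
    };

    // Catalog code that only ever needed the interface is unchanged.
    void onCreateCollection(Collection* coll) { (void)coll; }

    int main() {
        // was: Collection col(stdx::make_unique<CollectionMock>(nss));
        CollectionMock col("testdb.testcol");  // construct the mock directly
        onCreateCollection(&col);              // usable anywhere a Collection* is expected
    }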
diff --git a/src/mongo/db/exec/count_scan.h b/src/mongo/db/exec/count_scan.h
index c4b4ea23123..bbf133573ee 100644
--- a/src/mongo/db/exec/count_scan.h
+++ b/src/mongo/db/exec/count_scan.h
@@ -34,6 +34,7 @@
#include "mongo/db/matcher/expression.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/record_id.h"
+#include "mongo/db/storage/sorted_data_interface.h"
#include "mongo/stdx/unordered_set.h"
namespace mongo {
diff --git a/src/mongo/db/exec/requires_all_indices_stage.h b/src/mongo/db/exec/requires_all_indices_stage.h
index f10492d160b..af70269e38f 100644
--- a/src/mongo/db/exec/requires_all_indices_stage.h
+++ b/src/mongo/db/exec/requires_all_indices_stage.h
@@ -30,6 +30,7 @@
#pragma once
#include "mongo/db/exec/requires_collection_stage.h"
+#include "mongo/db/index/index_descriptor.h"
namespace mongo {
diff --git a/src/mongo/db/exec/requires_index_stage.h b/src/mongo/db/exec/requires_index_stage.h
index 320a43a6444..e941e20ae4d 100644
--- a/src/mongo/db/exec/requires_index_stage.h
+++ b/src/mongo/db/exec/requires_index_stage.h
@@ -30,6 +30,7 @@
#pragma once
#include "mongo/db/exec/requires_collection_stage.h"
+#include "mongo/db/index/index_descriptor.h"
namespace mongo {
diff --git a/src/mongo/db/index/duplicate_key_tracker.h b/src/mongo/db/index/duplicate_key_tracker.h
index 8cbf6515145..4694635bf96 100644
--- a/src/mongo/db/index/duplicate_key_tracker.h
+++ b/src/mongo/db/index/duplicate_key_tracker.h
@@ -40,6 +40,8 @@
namespace mongo {
+class IndexCatalogEntry;
+
/**
* Records keys that have violated duplicate key constraints on unique indexes. The keys are backed
* by a temporary table that is created and destroyed by this tracker.
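The forward declaration added here is the usual companion to losing a large transitive include: a header that only names IndexCatalogEntry through a pointer or reference can forward-declare it, while the .cpp that dereferences it includes the full definition. A rough sketch of the idiom (hypothetical member names, not the tracker's real interface):

    // duplicate_key_tracker.h (sketch)
    class IndexCatalogEntry;  // forward declaration suffices for pointer/reference use

    class DuplicateKeyTracker {
    public:
        explicit DuplicateKeyTracker(const IndexCatalogEntry* entry) : _entry(entry) {}
    private:
        const IndexCatalogEntry* _entry;  // an incomplete type is fine for a pointer member
    };

    // duplicate_key_tracker.cpp (sketch) would #include the full
    // index_catalog_entry.h before calling anything through _entry.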
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index 250223530a3..47d9846d58b 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -39,7 +39,6 @@
#include "mongo/base/error_codes.h"
#include "mongo/base/status.h"
-#include "mongo/db/catalog/collection_impl.h"
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/catalog/index_consistency.h"
#include "mongo/db/client.h"
diff --git a/src/mongo/db/pipeline/document_source_change_stream_test.cpp b/src/mongo/db/pipeline/document_source_change_stream_test.cpp
index 2e303b58f27..00b41e38e71 100644
--- a/src/mongo/db/pipeline/document_source_change_stream_test.cpp
+++ b/src/mongo/db/pipeline/document_source_change_stream_test.cpp
@@ -356,7 +356,7 @@ TEST_F(ChangeStreamStageTest, ShouldRejectBothStartAtOperationTimeAndResumeAfter
auto expCtx = getExpCtx();
// Need to put the collection in the UUID catalog so the resume token is valid.
- Collection collection(stdx::make_unique<CollectionMock>(nss));
+ CollectionMock collection(nss);
UUIDCatalog::get(expCtx->opCtx).onCreateCollection(expCtx->opCtx, &collection, testUuid());
ASSERT_THROWS_CODE(
@@ -376,7 +376,7 @@ TEST_F(ChangeStreamStageTest, ShouldRejectBothStartAfterAndResumeAfterOptions) {
auto expCtx = getExpCtx();
// Need to put the collection in the UUID catalog so the resume token is valid.
- Collection collection(stdx::make_unique<CollectionMock>(nss));
+ CollectionMock collection(nss);
UUIDCatalog::get(expCtx->opCtx).onCreateCollection(expCtx->opCtx, &collection, testUuid());
ASSERT_THROWS_CODE(
@@ -396,7 +396,7 @@ TEST_F(ChangeStreamStageTest, ShouldRejectBothStartAtOperationTimeAndStartAfterO
auto expCtx = getExpCtx();
// Need to put the collection in the UUID catalog so the resume token is valid.
- Collection collection(stdx::make_unique<CollectionMock>(nss));
+ CollectionMock collection(nss);
UUIDCatalog::get(expCtx->opCtx).onCreateCollection(expCtx->opCtx, &collection, testUuid());
ASSERT_THROWS_CODE(
@@ -416,7 +416,7 @@ TEST_F(ChangeStreamStageTest, ShouldRejectResumeAfterWithResumeTokenMissingUUID)
auto expCtx = getExpCtx();
// Need to put the collection in the UUID catalog so the resume token is valid.
- Collection collection(stdx::make_unique<CollectionMock>(nss));
+ CollectionMock collection(nss);
UUIDCatalog::get(expCtx->opCtx).onCreateCollection(expCtx->opCtx, &collection, testUuid());
ASSERT_THROWS_CODE(
@@ -1066,7 +1066,7 @@ TEST_F(ChangeStreamStageTest, DocumentKeyShouldIncludeShardKeyFromResumeToken) {
const auto opTime = repl::OpTime(ts, term);
const auto uuid = testUuid();
- Collection collection(stdx::make_unique<CollectionMock>(nss));
+ CollectionMock collection(nss);
UUIDCatalog::get(getExpCtx()->opCtx).onCreateCollection(getExpCtx()->opCtx, &collection, uuid);
BSONObj o2 = BSON("_id" << 1 << "shardKey" << 2);
@@ -1111,7 +1111,7 @@ TEST_F(ChangeStreamStageTest, DocumentKeyShouldNotIncludeShardKeyFieldsIfNotPres
const auto opTime = repl::OpTime(ts, term);
const auto uuid = testUuid();
- Collection collection(stdx::make_unique<CollectionMock>(nss));
+ CollectionMock collection(nss);
UUIDCatalog::get(getExpCtx()->opCtx).onCreateCollection(getExpCtx()->opCtx, &collection, uuid);
BSONObj o2 = BSON("_id" << 1 << "shardKey" << 2);
@@ -1153,7 +1153,7 @@ TEST_F(ChangeStreamStageTest, ResumeAfterFailsIfResumeTokenDoesNotContainUUID) {
const Timestamp ts(3, 45);
const auto uuid = testUuid();
- Collection collection(stdx::make_unique<CollectionMock>(nss));
+ CollectionMock collection(nss);
UUIDCatalog::get(getExpCtx()->opCtx).onCreateCollection(getExpCtx()->opCtx, &collection, uuid);
// Create a resume token from only the timestamp.
@@ -1206,7 +1206,7 @@ TEST_F(ChangeStreamStageTest, ResumeAfterWithTokenFromInvalidateShouldFail) {
auto expCtx = getExpCtx();
// Need to put the collection in the UUID catalog so the resume token is valid.
- Collection collection(stdx::make_unique<CollectionMock>(nss));
+ CollectionMock collection(nss);
UUIDCatalog::get(expCtx->opCtx).onCreateCollection(expCtx->opCtx, &collection, testUuid());
const auto resumeTokenInvalidate =
@@ -1543,7 +1543,7 @@ TEST_F(ChangeStreamStageDBTest, DocumentKeyShouldIncludeShardKeyFromResumeToken)
const auto opTime = repl::OpTime(ts, term);
const auto uuid = testUuid();
- Collection collection(stdx::make_unique<CollectionMock>(nss));
+ CollectionMock collection(nss);
UUIDCatalog::get(getExpCtx()->opCtx).onCreateCollection(getExpCtx()->opCtx, &collection, uuid);
BSONObj o2 = BSON("_id" << 1 << "shardKey" << 2);
@@ -1579,7 +1579,7 @@ TEST_F(ChangeStreamStageDBTest, DocumentKeyShouldNotIncludeShardKeyFieldsIfNotPr
const auto opTime = repl::OpTime(ts, term);
const auto uuid = testUuid();
- Collection collection(stdx::make_unique<CollectionMock>(nss));
+ CollectionMock collection(nss);
UUIDCatalog::get(getExpCtx()->opCtx).onCreateCollection(getExpCtx()->opCtx, &collection, uuid);
BSONObj o2 = BSON("_id" << 1 << "shardKey" << 2);
@@ -1616,7 +1616,7 @@ TEST_F(ChangeStreamStageDBTest, DocumentKeyShouldNotIncludeShardKeyIfResumeToken
const auto opTime = repl::OpTime(ts, term);
const auto uuid = testUuid();
- Collection collection(stdx::make_unique<CollectionMock>(nss));
+ CollectionMock collection(nss);
UUIDCatalog::get(getExpCtx()->opCtx).onCreateCollection(getExpCtx()->opCtx, &collection, uuid);
// Create a resume token from only the timestamp.
@@ -1652,7 +1652,7 @@ TEST_F(ChangeStreamStageDBTest, ResumeAfterWithTokenFromInvalidateShouldFail) {
auto expCtx = getExpCtx();
// Need to put the collection in the UUID catalog so the resume token is valid.
- Collection collection(stdx::make_unique<CollectionMock>(nss));
+ CollectionMock collection(nss);
UUIDCatalog::get(expCtx->opCtx).onCreateCollection(expCtx->opCtx, &collection, testUuid());
const auto resumeTokenInvalidate =
@@ -1673,7 +1673,7 @@ TEST_F(ChangeStreamStageDBTest, ResumeAfterWithTokenFromInvalidateShouldFail) {
TEST_F(ChangeStreamStageDBTest, ResumeAfterWithTokenFromDropDatabase) {
const auto uuid = testUuid();
- Collection collection(stdx::make_unique<CollectionMock>(nss));
+ CollectionMock collection(nss);
UUIDCatalog::get(getExpCtx()->opCtx).onCreateCollection(getExpCtx()->opCtx, &collection, uuid);
// Create a resume token from only the timestamp, similar to a 'dropDatabase' entry.
@@ -1702,7 +1702,7 @@ TEST_F(ChangeStreamStageDBTest, ResumeAfterWithTokenFromDropDatabase) {
TEST_F(ChangeStreamStageDBTest, StartAfterSucceedsEvenIfResumeTokenDoesNotContainUUID) {
const auto uuid = testUuid();
- Collection collection(stdx::make_unique<CollectionMock>(nss));
+ CollectionMock collection(nss);
UUIDCatalog::get(getExpCtx()->opCtx).onCreateCollection(getExpCtx()->opCtx, &collection, uuid);
// Create a resume token from only the timestamp, similar to a 'dropDatabase' entry.
diff --git a/src/mongo/db/pipeline/process_interface_standalone.cpp b/src/mongo/db/pipeline/process_interface_standalone.cpp
index 7f40bca4140..bff692e7641 100644
--- a/src/mongo/db/pipeline/process_interface_standalone.cpp
+++ b/src/mongo/db/pipeline/process_interface_standalone.cpp
@@ -37,10 +37,12 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/document_validation.h"
+#include "mongo/db/catalog/index_catalog_entry.h"
#include "mongo/db/catalog/uuid_catalog.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/curop.h"
#include "mongo/db/db_raii.h"
+#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/pipeline/document_source_cursor.h"
#include "mongo/db/pipeline/pipeline_d.h"
#include "mongo/db/s/collection_sharding_state.h"
diff --git a/src/mongo/db/query/query_request_test.cpp b/src/mongo/db/query/query_request_test.cpp
index b16f3ddabe2..c62d4f431b3 100644
--- a/src/mongo/db/query/query_request_test.cpp
+++ b/src/mongo/db/query/query_request_test.cpp
@@ -1413,7 +1413,7 @@ TEST_F(QueryRequestTest, ParseFromUUID) {
// Register a UUID/Collection pair in the UUIDCatalog.
const CollectionUUID uuid = UUID::gen();
const NamespaceString nss("test.testns");
- Collection coll(stdx::make_unique<CollectionMock>(nss));
+ CollectionMock coll(nss);
UUIDCatalog& catalog = UUIDCatalog::get(opCtx.get());
catalog.onCreateCollection(opCtx.get(), &coll, uuid);
QueryRequest qr(NamespaceStringOrUUID("test", uuid));
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index a083f55d1ea..a7e71360afc 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -1737,7 +1737,7 @@ void establishOplogCollectionForLogging(OperationContext* opCtx, Collection* opl
void signalOplogWaiters() {
auto oplog = localOplogInfo(getGlobalServiceContext()).oplog;
if (oplog) {
- oplog->notifyCappedWaitersIfNeeded();
+ oplog->getCappedCallback()->notifyCappedWaitersIfNeeded();
}
}
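With the convenience forwarder gone from the interface, signalOplogWaiters reaches the capped-insert notification through the collection's capped-callback accessor instead of a method on Collection itself. A rough sketch of the delegation, with simplified, hypothetical stand-ins for the real storage-layer types:

    struct CappedCallback {
        void notifyCappedWaitersIfNeeded() { /* wake readers tailing the capped collection */ }
    };

    struct Collection {
        CappedCallback* getCappedCallback() { return &_cb; }
        CappedCallback _cb;
    };

    void signalOplogWaiters(Collection* oplog) {
        if (oplog) {
            // was: oplog->notifyCappedWaitersIfNeeded();
            oplog->getCappedCallback()->notifyCappedWaitersIfNeeded();
        }
    }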
diff --git a/src/mongo/db/repl/rollback_impl_test.cpp b/src/mongo/db/repl/rollback_impl_test.cpp
index 1a82387ac62..545ed7fc974 100644
--- a/src/mongo/db/repl/rollback_impl_test.cpp
+++ b/src/mongo/db/repl/rollback_impl_test.cpp
@@ -36,6 +36,7 @@
#include "mongo/db/catalog/collection_mock.h"
#include "mongo/db/catalog/drop_collection.h"
#include "mongo/db/catalog/uuid_catalog.h"
+#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/repl/drop_pending_collection_reaper.h"
#include "mongo/db/repl/oplog_entry.h"
#include "mongo/db/repl/oplog_interface_local.h"
@@ -173,12 +174,11 @@ protected:
ASSERT_OK(_storageInterface->createCollection(opCtx, nss, options));
// Initialize a mock collection.
- std::unique_ptr<Collection> coll =
- std::make_unique<Collection>(std::make_unique<CollectionMock>(nss));
+ auto coll = std::make_unique<CollectionMock>(nss);
// Register the UUID to that collection in the UUIDCatalog.
UUIDCatalog::get(opCtx).registerUUIDCatalogEntry(uuid, coll.get());
- return coll;
+ return std::move(coll);
}
/**
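The explicit std::move on the return above matters because the local is now a std::unique_ptr<CollectionMock> while the helper presumably still returns std::unique_ptr<Collection>: unique_ptr is move-only, and compilers of that era did not reliably apply the implicit move on return when the local's type differs from the return type. A standalone illustration with placeholder types:

    #include <memory>
    #include <utility>

    struct Collection { virtual ~Collection() = default; };
    struct CollectionMock : Collection {};

    // Mirrors the shape of the test helper: the local is a unique_ptr to the
    // derived mock, the declared return type is unique_ptr to the base.
    std::unique_ptr<Collection> makeMockCollection() {
        auto coll = std::make_unique<CollectionMock>();
        // Spelling out the move guarantees the derived-to-base unique_ptr
        // conversion binds to unique_ptr's move constructor rather than
        // attempting the deleted copy on older toolchains.
        return std::move(coll);
    }

    int main() {
        auto coll = makeMockCollection();
    }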
diff --git a/src/mongo/db/s/shardsvr_shard_collection.cpp b/src/mongo/db/s/shardsvr_shard_collection.cpp
index edf17d568b6..25ec21d334e 100644
--- a/src/mongo/db/s/shardsvr_shard_collection.cpp
+++ b/src/mongo/db/s/shardsvr_shard_collection.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/commands.h"
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/hasher.h"
+#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/logical_clock.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/collation/collator_factory_interface.h"
diff --git a/src/mongo/dbtests/plan_executor_invalidation_test.cpp b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
index d93b6e0721b..b79ab4f5e8f 100644
--- a/src/mongo/dbtests/plan_executor_invalidation_test.cpp
+++ b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
@@ -32,6 +32,7 @@
#include "mongo/client/dbclient_cursor.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
+#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/dbdirectclient.h"